From 157c2a8b3ec4f77a862e3d0de4be46ef691b349c Mon Sep 17 00:00:00 2001 From: VMware GitHub Bot Date: Thu, 6 Sep 2018 14:28:56 -0500 Subject: [PATCH 01/90] Add CONTRIBUTING template --- CONTRIBUTING.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f1bf47d..abec9ed 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,6 +5,8 @@ read our [Developer Certificate of Origin](https://cla.vmware.com/dco). All cont signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on as an open-source patch. +## Community + ## Contribution Flow This is a rough outline of what a contributor's workflow looks like: @@ -31,7 +33,7 @@ When your branch gets out of sync with the vmware/main branch, use the following ``` shell git checkout my-new-feature git fetch -a -git pull --rebase upstream main +git pull --rebase upstream master git push --force-with-lease origin my-new-feature ``` From f7763e6d8bb92bcd17efd33c04f6bde04040e614 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Fri, 6 Apr 2018 07:46:07 -0700 Subject: [PATCH 02/90] Initial empty repository From 7b35571d9e6bd9b6feff56adb7080a93e6235f98 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Sun, 8 Apr 2018 13:55:48 -0700 Subject: [PATCH 03/90] CKL: Initial version for starting repository Change-Id: Ic8dc81a437d9c72e841048359745e245c6c4895b --- .gitignore | 18 +++ .gitreview | 5 + HyperMake | 99 +++++++++++++++++ README.md | 6 + src/vendor/manifest | 167 ++++++++++++++++++++++++++++ support/scripts/check.sh | 76 +++++++++++++ support/scripts/ci.sh | 2 + support/scripts/functions.sh | 65 +++++++++++ support/scripts/test.sh | 5 + support/toolchain/HyperMake | 28 +++++ support/toolchain/docker/Dockerfile | 10 ++ 11 files changed, 481 insertions(+) create mode 100644 .gitreview create mode 100644 HyperMake create mode 100644 src/vendor/manifest create mode 100755 support/scripts/check.sh create mode 100755 
support/scripts/ci.sh create mode 100644 support/scripts/functions.sh create mode 100755 support/scripts/test.sh create mode 100644 support/toolchain/HyperMake create mode 100644 support/toolchain/docker/Dockerfile diff --git a/.gitignore b/.gitignore index 5e7d273..79c027d 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,21 @@ * # Except this file !.gitignore +======= +/src/gen +/src/vendor +!/src/vendor/manifest +/bin +/pkg +/tmp +/log +/vms +/run +/go +.hmake +.hmakerc +.project +.idea +.vscode +*_mock_test.go +filenames diff --git a/.gitreview b/.gitreview new file mode 100644 index 0000000..ef3128e --- /dev/null +++ b/.gitreview @@ -0,0 +1,5 @@ +[gerrit] +host=review.ec.eng.vmware.com +port=29418 +project=cascade-kinesis-client +defaultbranch=develop diff --git a/HyperMake b/HyperMake new file mode 100644 index 0000000..b966419 --- /dev/null +++ b/HyperMake @@ -0,0 +1,99 @@ +--- +format: hypermake.v0 + +name: cascade-kinesis-client +description: Kinesis Client in Go + +targets: + rebuild-toolchain: + description: build toolchain image + watches: + - support/docker/toolchain + build: support/docker/toolchain + + toolchain: + description: placeholder for additional toolchain dependencies + + deps: + description: setup dependencies + after: + - 'deps-*' + + gen: + description: generate source code + after: + - 'gen-*' + + build: + description: build source code + after: + - 'build-*' + + test: + description: run unit tests + after: + - deps + - gen + always: true + cmds: + - ./support/scripts/test.sh + + ci: + description: run CI tests + after: + - check + cmds: + - ./support/scripts/ci.sh + + checkfmt: + description: check code format + after: + - toolchain + watches: + - support/scripts/check.sh + always: true + cmds: + - ./support/scripts/check.sh fmt + + lint: + description: run lint to check code + after: + - toolchain + watches: + - support/scripts/check.sh + always: true + cmds: + - ./support/scripts/check.sh lint + + scanast: + description: run Go AST 
security scan + after: + - toolchain + watches: + - '**/**/*.go' + - './support/scripts/check.sh' + cmds: + - ./support/scripts/check.sh scanast + + check: + description: run all code checks + after: + - checkfmt + - lint + + deps-kcl: + description: populate vendor packages + after: + - toolchain + watches: + - src/vendor/manifest + workdir: src + cmds: + - gvt restore + +settings: + default-targets: + - ci + docker: + image: 'vmware/cascade-kcl-toolchain:latest' + src-volume: /home/cascade-kinesis-client diff --git a/README.md b/README.md index 9db664b..57cd94a 100644 --- a/README.md +++ b/README.md @@ -27,3 +27,9 @@ as an open-source patch. For more detailed information, refer to [CONTRIBUTING.m ## License +======= +# Cascade Kinesis Client Library for GO + +The **Cascade Kinesis Client Library for GO** (Cascade KCL) enables Go developers to easily consume and process data from [Amazon Kinesis][kinesis]. + +It is a re-implementation on Amazon's Kinesis Client Library in pure Go without using KCL's multi-language support. 
diff --git a/src/vendor/manifest b/src/vendor/manifest new file mode 100644 index 0000000..9d112c1 --- /dev/null +++ b/src/vendor/manifest @@ -0,0 +1,167 @@ +{ + "version": 0, + "dependencies": [ + { + "importpath": "github.com/aws/aws-sdk-go", + "repository": "https://github.com/aws/aws-sdk-go", + "vcs": "git", + "revision": "365b4d34369496e650e3056b33fce4e1a25cfc72", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/asaskevich/govalidator", + "repository": "https://github.com/asaskevich/govalidator", + "vcs": "git", + "revision": "38ddb4612a5dfc2878731749ee825853d9f0aaa1", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/sirupsen/logrus", + "repository": "https://github.com/sirupsen/logrus", + "vcs": "git", + "revision": "51dc0fc64317a2861273909081f9c315786533eb", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/beorn7/perks/quantile", + "repository": "https://github.com/beorn7/perks", + "vcs": "git", + "revision": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9", + "branch": "master", + "path": "/quantile", + "notests": true + }, + { + "importpath": "gopkg.in/gemnasium/logrus-airbrake-hook.v2", + "repository": "https://gopkg.in/gemnasium/logrus-airbrake-hook.v2", + "vcs": "git", + "revision": "e928b033a891c0175fb643d5aa0779e86325eb12", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/matttproud/golang_protobuf_extensions/pbutil", + "repository": "https://github.com/matttproud/golang_protobuf_extensions", + "vcs": "git", + "revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c", + "branch": "master", + "path": "/pbutil", + "notests": true + }, + { + "importpath": "github.com/prometheus/client_golang/prometheus", + "repository": "https://github.com/prometheus/client_golang", + "vcs": "git", + "revision": "26b897001974f2b4ee6688377873e4d6f61d533c", + "branch": "master", + "path": "prometheus", + "notests": true + }, + { + "importpath": 
"github.com/prometheus/client_model/go", + "repository": "https://github.com/prometheus/client_model", + "vcs": "git", + "revision": "6f3806018612930941127f2a7c6c453ba2c527d2", + "branch": "master", + "path": "/go", + "notests": true + }, + { + "importpath": "github.com/prometheus/common/expfmt", + "repository": "https://github.com/prometheus/common", + "vcs": "git", + "revision": "3e6a7635bac6573d43f49f97b47eb9bda195dba8", + "branch": "master", + "path": "/expfmt", + "notests": true + }, + { + "importpath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", + "repository": "https://github.com/prometheus/common", + "vcs": "git", + "revision": "3e6a7635bac6573d43f49f97b47eb9bda195dba8", + "branch": "master", + "path": "internal/bitbucket.org/ww/goautoneg", + "notests": true + }, + { + "importpath": "github.com/prometheus/common/model", + "repository": "https://github.com/prometheus/common", + "vcs": "git", + "revision": "3e6a7635bac6573d43f49f97b47eb9bda195dba8", + "branch": "master", + "path": "model", + "notests": true + }, + { + "importpath": "github.com/astaxie/beego/cache", + "repository": "https://github.com/astaxie/beego", + "vcs": "git", + "revision": "a7354d2d084003e4122d6e69f7e5ab594fd117b2", + "branch": "master", + "path": "cache", + "notests": true + }, + { + "importpath": "github.com/prometheus/procfs", + "repository": "https://github.com/prometheus/procfs", + "vcs": "git", + "revision": "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/golang/protobuf/proto", + "repository": "https://github.com/golang/protobuf", + "vcs": "git", + "revision": "6a1fa9404c0aebf36c879bc50152edcc953910d2", + "branch": "master", + "path": "/proto", + "notests": true + }, + { + "importpath": "github.com/golang/protobuf/ptypes/any", + "repository": "https://github.com/golang/protobuf", + "vcs": "git", + "revision": "6a1fa9404c0aebf36c879bc50152edcc953910d2", + "branch": "master", + "path": 
"ptypes/any", + "notests": true + }, + { + "importpath": "github.com/google/uuid", + "repository": "https://github.com/google/uuid", + "vcs": "git", + "revision": "6a5e28554805e78ea6141142aba763936c4761c0", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/matryer/try", + "repository": "https://github.com/matryer/try", + "vcs": "git", + "revision": "312d2599e12e89ca89b52a09597394f449235d80", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/stretchr/testify", + "repository": "https://github.com/stretchr/testify", + "vcs": "git", + "revision": "12b6f73e6084dad08a7c6e575284b177ecafbc71", + "branch": "master", + "notests": true + }, + { + "importpath": "gopkg.in/yaml.v2", + "repository": "https://gopkg.in/yaml.v2", + "vcs": "git", + "revision": "cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b", + "branch": "v2", + "notests": true + } + ] +} diff --git a/support/scripts/check.sh b/support/scripts/check.sh new file mode 100755 index 0000000..fbab703 --- /dev/null +++ b/support/scripts/check.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +. support/scripts/functions.sh + +checkfmt() { + local files="$(gofmt -l $(local_go_pkgs))" + if [ -n "$files" ]; then + echo "You need to run \"gofmt -w ./\" to fix your formating." + echo "$files" >&2 + return 1 + fi +} + +lint() { + gometalinter \ + --exclude=_mock.go \ + --disable=gotype \ + --vendor \ + --skip=test \ + --fast \ + --deadline=600s \ + --severity=golint:error \ + --errors \ + $(local_go_pkgs) +} + +scanast() { + set +e + gas ./... > security.log 2>&1 + set -e + + local issues=$(grep -E "Severity: MEDIUM" security.log | wc -l) + if [ -n $issues ] && [ $issues -gt 0 ]; then + echo "" + echo "Medium Severity Issues:" + grep -E "Severity: MEDIUM" -A 1 security.log + echo $issues "medium severity issues found." 
+ fi + + local issues=$(grep -E "Severity: HIGH" security.log | grep -v "vendor") + local issues_count=$(grep -E "Severity: HIGH" security.log | grep -v "vendor" | wc -l) + if [ -n $issues_count ] && [ $issues_count -gt 0 ]; then + echo "" + echo "High Severity Issues:" + grep -E "Severity: HIGH" -A 1 security.log + echo $issues_count "high severity issues found." + echo $issues + echo "You need to resolve the high severity issues at the least." + exit 1 + fi + + local issues=$(grep -E "Errors unhandled" security.log | grep -v "vendor" | grep -v "/src/go/src") + local issues_count=$(grep -E "Errors unhandled" security.log | grep -v "vendor" | grep -v "/src/go/src" | wc -l) + if [ -n $issues_count ] && [ $issues_count -gt 0 ]; then + echo "" + echo "Unhandled errors:" + grep -E "Errors unhandled" security.log + echo $issues_count "unhandled errors, please indicate with the right comment that this case is ok, or handle the error." + echo $issues + echo "You need to resolve the all unhandled errors." + exit 1 + fi + rm security.log +} + +usage() { + echo "check.sh fmt|lint" >&2 + exit 2 +} + +case "$1" in + fmt) checkfmt ;; + lint) lint ;; + scanast) scanast;; + *) usage ;; +esac diff --git a/support/scripts/ci.sh b/support/scripts/ci.sh new file mode 100755 index 0000000..012ea40 --- /dev/null +++ b/support/scripts/ci.sh @@ -0,0 +1,2 @@ +#!/bin/bash +./support/scripts/test.sh diff --git a/support/scripts/functions.sh b/support/scripts/functions.sh new file mode 100644 index 0000000..c76d266 --- /dev/null +++ b/support/scripts/functions.sh @@ -0,0 +1,65 @@ +set -ex + +# PROJ_ROOT specifies the project root +export PROJ_ROOT="$HMAKE_PROJECT_DIR" + +# Add /go in GOPATH because that's the original GOPATH in toolchain +export GOPATH=/go:$PROJ_ROOT + +local_go_pkgs() { + find . 
-name '*.go' | \ + grep -Fv '/vendor/' | \ + grep -Fv '/go/' | \ + grep -Fv '/gen/' | \ + grep -Fv '/tmp/' | \ + grep -Fv '/run/' | \ + grep -Fv '/tests/' | \ + sort -u +} + +local_test_pkgs() { + find ./src/test -name '*.go' | \ + grep -Fv '_test.go' | \ + sed -r 's|(.+)/[^/]+\.go$|\1|g' | \ + sort -u +} + +version_suffix() { + local suffix=$(git log -1 --format=%h 2>/dev/null || true) + if [ -n "$suffix" ]; then + test -z "$(git status --porcelain 2>/dev/null || true)" || suffix="${suffix}+" + echo -n "-g${suffix}" + else + echo -n -dev + fi +} + +git_commit_hash() { + echo $(git rev-parse --short HEAD) +} + +# Due to Go plugin genhash algorithm simply takes full source path +# from archive, it generates different plugin hash if source path of +# shared pkg is different, and causes load failure. +# as a workaround, lookup shared pkg and place it to fixed path +FIX_GOPATH=/tmp/go + +fix_go_pkg() { + local pkg="$1" base + for p in ${GOPATH//:/ }; do + if [ -d "$p/src/$pkg" ]; then + base="$p" + break + fi + done + + if [ -z "$base" ]; then + echo "Package $pkg not found in GOPATH: $GOPATH" >&2 + return 1 + fi + + local fix_pkg_path="$FIX_GOPATH/src/$pkg" + rm -f "$fix_pkg_path" + mkdir -p "$(dirname $fix_pkg_path)" + ln -s "$base/src/$pkg" "$fix_pkg_path" +} diff --git a/support/scripts/test.sh b/support/scripts/test.sh new file mode 100755 index 0000000..78c0986 --- /dev/null +++ b/support/scripts/test.sh @@ -0,0 +1,5 @@ +#!/bin/bash +. 
support/scripts/functions.sh + +# Run only the unit tests and not integration tests +go test -race $(local_go_pkgs) diff --git a/support/toolchain/HyperMake b/support/toolchain/HyperMake new file mode 100644 index 0000000..708ccf3 --- /dev/null +++ b/support/toolchain/HyperMake @@ -0,0 +1,28 @@ +--- +format: hypermake.v0 + +name: cascade-kcl +description: Amazon Kinesis Client Library in Go + +targets: + rebuild-toolchain: + description: build toolchain image + watches: + - docker + build: docker + cache: false + tags: + - vmware/cascade-kcl-toolchain:latest + + push-toolchain: + description: push toolchain image + after: + - rebuild-toolchain + push: + - vmware/cascade-toolchain:latest + +settings: + default-targets: + - rebuild-toolchain + docker: + image: 'vmware/cascade-kcl-toolchain:0.0.0' diff --git a/support/toolchain/docker/Dockerfile b/support/toolchain/docker/Dockerfile new file mode 100644 index 0000000..764f92f --- /dev/null +++ b/support/toolchain/docker/Dockerfile @@ -0,0 +1,10 @@ +FROM golang:1.10 +ENV PATH /go/bin:/src/bin:/root/go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go:/src +RUN go get -v github.com/alecthomas/gometalinter && \ + go get -v golang.org/x/tools/cmd/... && \ + go get -v github.com/FiloSottile/gvt && \ + go get -v github.com/GoASTScanner/gas/cmd/gas/... && \ + go get github.com/derekparker/delve/cmd/dlv && \ + gometalinter --install && \ + chmod -R a+rw /go \ No newline at end of file From 702335374c1a52d248babcfaeab4cd57f7a591a5 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Tue, 10 Apr 2018 20:50:18 -0700 Subject: [PATCH 04/90] [WIP] KCL: create configuration and interface for kinesis client library This is to create configuration and client interface in order to give user an overview on how the Kinesis client library works. In order not to reinvent wheel, the api is designed closely aligned with Amazon Kinesis Client Library in Java. add errors. remove @throws and use @error instead. 
https://jira.eng.vmware.com/browse/CNA-614 Change-Id: I78a269b328c14df37f878eccef192ff022a669cc --- src/clientlibrary/config/config.go | 233 ++++++++++++++++++ src/clientlibrary/config/config_test.go | 23 ++ .../config/initial-stream-pos.go | 13 + src/clientlibrary/config/kcl-config.go | 147 +++++++++++ .../record-processor-checkpointer.go | 227 +++++++++++++++++ .../interfaces/record-processor.go | 44 ++++ .../lib/checkpoint/checkpoint.go | 26 ++ src/clientlibrary/lib/worker/worker.go | 1 + src/clientlibrary/types/inputs.go | 39 +++ src/clientlibrary/types/sequence-number.go | 11 + src/clientlibrary/utils/uuid.go | 14 ++ src/common/errors.go | 146 +++++++++++ src/vendor/manifest | 2 +- 13 files changed, 925 insertions(+), 1 deletion(-) create mode 100644 src/clientlibrary/config/config.go create mode 100644 src/clientlibrary/config/config_test.go create mode 100644 src/clientlibrary/config/initial-stream-pos.go create mode 100644 src/clientlibrary/config/kcl-config.go create mode 100644 src/clientlibrary/interfaces/record-processor-checkpointer.go create mode 100644 src/clientlibrary/interfaces/record-processor.go create mode 100644 src/clientlibrary/lib/checkpoint/checkpoint.go create mode 100644 src/clientlibrary/lib/worker/worker.go create mode 100644 src/clientlibrary/types/inputs.go create mode 100644 src/clientlibrary/types/sequence-number.go create mode 100644 src/clientlibrary/utils/uuid.go create mode 100644 src/common/errors.go diff --git a/src/clientlibrary/config/config.go b/src/clientlibrary/config/config.go new file mode 100644 index 0000000..add0c4b --- /dev/null +++ b/src/clientlibrary/config/config.go @@ -0,0 +1,233 @@ +package config + +import ( + "log" + "math" + "strings" + "time" +) + +const ( + EPSILON_MS = 25 + + // LATEST start after the most recent data record (fetch new data). 
+ LATEST = InitialPositionInStream(1) + // TRIM_HORIZON start from the oldest available data record + TRIM_HORIZON = LATEST + 1 + // AT_TIMESTAMP start from the record at or after the specified server-side timestamp. + AT_TIMESTAMP = TRIM_HORIZON + 1 + + // The location in the shard from which the KinesisClientLibrary will start fetching records from + // when the application starts for the first time and there is no checkpoint for the shard. + DEFAULT_INITIAL_POSITION_IN_STREAM = LATEST + + // Fail over time in milliseconds. A worker which does not renew it's lease within this time interval + // will be regarded as having problems and it's shards will be assigned to other workers. + // For applications that have a large number of shards, this may be set to a higher number to reduce + // the number of DynamoDB IOPS required for tracking leases. + DEFAULT_FAILOVER_TIME_MILLIS = 10000 + + // Max records to fetch from Kinesis in a single GetRecords call. + DEFAULT_MAX_RECORDS = 10000 + + // The default value for how long the {@link ShardConsumer} should sleep if no records are returned from the call to + DEFAULT_IDLETIME_BETWEEN_READS_MILLIS = 1000 + + // Don't call processRecords() on the record processor for empty record lists. + DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST = false + + // Interval in milliseconds between polling to check for parent shard completion. + // Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on + // completion of parent shards). + DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS = 10000 + + // Shard sync interval in milliseconds - e.g. wait for this long between shard sync tasks. + DEFAULT_SHARD_SYNC_INTERVAL_MILLIS = 60000 + + // Cleanup leases upon shards completion (don't wait until they expire in Kinesis). + // Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by default we try + // to delete the ones we don't need any longer. 
+ DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION = true + + // Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures). + DEFAULT_TASK_BACKOFF_TIME_MILLIS = 500 + + // Buffer metrics for at most this long before publishing to CloudWatch. + DEFAULT_METRICS_BUFFER_TIME_MILLIS = 10000 + + // Buffer at most this many metrics before publishing to CloudWatch. + DEFAULT_METRICS_MAX_QUEUE_SIZE = 10000 + + // KCL will validate client provided sequence numbers with a call to Amazon Kinesis before checkpointing for calls + // to {@link RecordProcessorCheckpointer#checkpoint(String)} by default. + DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING = true + + // The max number of leases (shards) this worker should process. + // This can be useful to avoid overloading (and thrashing) a worker when a host has resource constraints + // or during deployment. + // NOTE: Setting this to a low value can cause data loss if workers are not able to pick up all shards in the + // stream due to the max limit. + DEFAULT_MAX_LEASES_FOR_WORKER = math.MaxInt16 + + // Max leases to steal from another worker at one time (for load balancing). + // Setting this to a higher number can allow for faster load convergence (e.g. during deployments, cold starts), + // but can cause higher churn in the system. + DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME = 1 + + // The Amazon DynamoDB table used for tracking leases will be provisioned with this read capacity. + DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY = 10 + + // The Amazon DynamoDB table used for tracking leases will be provisioned with this write capacity. + DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY = 10 + + // The Worker will skip shard sync during initialization if there are one or more leases in the lease table. This + // assumes that the shards and leases are in-sync. This enables customers to choose faster startup times (e.g. + // during incremental deployments of an application). 
+ DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST = false + + // The amount of milliseconds to wait before graceful shutdown forcefully terminates. + DEFAULT_SHUTDOWN_GRACE_MILLIS = 5000 + + // The size of the thread pool to create for the lease renewer to use. + DEFAULT_MAX_LEASE_RENEWAL_THREADS = 20 + + // The sleep time between two listShards calls from the proxy when throttled. + DEFAULT_LIST_SHARDS_BACKOFF_TIME_IN_MILLIS = 1500 + + // The number of times the Proxy will retry listShards call when throttled. + DEFAULT_MAX_LIST_SHARDS_RETRY_ATTEMPTS = 50 +) + +type ( + // InitialPositionInStream Used to specify the position in the stream where a new application should start from + // This is used during initial application bootstrap (when a checkpoint doesn't exist for a shard or its parents) + InitialPositionInStream int + + // Class that houses the entities needed to specify the position in the stream from where a new application should + // start. + InitialPositionInStreamExtended struct { + position InitialPositionInStream + + // The time stamp of the data record from which to start reading. Used with + // shard iterator type AT_TIMESTAMP. A time stamp is the Unix epoch date with + // precision in milliseconds. For example, 2016-04-04T19:58:46.480-00:00 or + // 1459799926.480. If a record with this exact time stamp does not exist, the + // iterator returned is for the next (later) record. If the time stamp is older + // than the current trim horizon, the iterator returned is for the oldest untrimmed + // data record (TRIM_HORIZON). + timestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + } + + // Configuration for the Kinesis Client Library. + KinesisClientLibConfiguration struct { + // applicationName is name of application. Kinesis allows multiple applications to consume the same stream. 
+ applicationName string + + // tableName is name of the dynamo db table for managing kinesis stream default to applicationName + tableName string + + // streamName is the name of Kinesis stream + streamName string + + // workerID used to distinguish different workers/processes of a Kinesis application + workerID string + + // kinesisEndpoint endpoint + kinesisEndpoint string + + // dynamoDB endpoint + dynamoDBEndpoint string + + // initialPositionInStream specifies the position in the stream where a new application should start from + initialPositionInStream InitialPositionInStream + + // initialPositionInStreamExtended provides actual AT_TMESTAMP value + initialPositionInStreamExtended InitialPositionInStreamExtended + + // credentials to access Kinesis/Dynamo/CloudWatch: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/ + // Note: No need to configure here. Use NewEnvCredentials for testing and EC2RoleProvider for production + + // failoverTimeMillis Lease duration (leases not renewed within this period will be claimed by others) + failoverTimeMillis int + + /// maxRecords Max records to read per Kinesis getRecords() call + maxRecords int + + // idleTimeBetweenReadsInMillis Idle time between calls to fetch data from Kinesis + idleTimeBetweenReadsInMillis int + + // callProcessRecordsEvenForEmptyRecordList Call the IRecordProcessor::processRecords() API even if + // GetRecords returned an empty record list. 
+ callProcessRecordsEvenForEmptyRecordList bool + + // parentShardPollIntervalMillis Wait for this long between polls to check if parent shards are done + parentShardPollIntervalMillis int + + // shardSyncIntervalMillis Time between tasks to sync leases and Kinesis shards + shardSyncIntervalMillis int + + // cleanupTerminatedShardsBeforeExpiry Clean up shards we've finished processing (don't wait for expiration) + cleanupTerminatedShardsBeforeExpiry bool + + // kinesisClientConfig Client Configuration used by Kinesis client + // dynamoDBClientConfig Client Configuration used by DynamoDB client + // cloudWatchClientConfig Client Configuration used by CloudWatch client + // Note: we will use default client provided by AWS SDK + + // taskBackoffTimeMillis Backoff period when tasks encounter an exception + taskBackoffTimeMillis int + + // metricsBufferTimeMillis Metrics are buffered for at most this long before publishing to CloudWatch + metricsBufferTimeMillis int + + // metricsMaxQueueSize Max number of metrics to buffer before publishing to CloudWatch + metricsMaxQueueSize int + + // validateSequenceNumberBeforeCheckpointing whether KCL should validate client provided sequence numbers + validateSequenceNumberBeforeCheckpointing bool + + // regionName The region name for the service + regionName string + + // shutdownGraceMillis The number of milliseconds before graceful shutdown terminates forcefully + shutdownGraceMillis int + + // Operation parameters + + // Max leases this Worker can handle at a time + maxLeasesForWorker int + + // Max leases to steal at one time (for load balancing) + maxLeasesToStealAtOneTime int + + // Read capacity to provision when creating the lease table (dynamoDB). + initialLeaseTableReadCapacity int + + // Write capacity to provision when creating the lease table. 
+ initialLeaseTableWriteCapacity int + + // Worker should skip syncing shards and leases at startup if leases are present + // This is useful for optimizing deployments to large fleets working on a stable stream. + skipShardSyncAtWorkerInitializationIfLeasesExist bool + } +) + +func empty(s string) bool { + return len(strings.TrimSpace(s)) == 0 +} + +// checkIsValuePositive make sure the value is possitive. +func checkIsValueNotEmpty(key string, value string) { + if empty(value) { + // There is no point to continue for incorrect configuration. Fail fast! + log.Panicf("Non-empty value exepected for %v, actual: %v", key, value) + } +} + +// checkIsValuePositive make sure the value is possitive. +func checkIsValuePositive(key string, value int) { + if value <= 0 { + // There is no point to continue for incorrect configuration. Fail fast! + log.Panicf("Positive value exepected for %v, actual: %v", key, value) + } +} diff --git a/src/clientlibrary/config/config_test.go b/src/clientlibrary/config/config_test.go new file mode 100644 index 0000000..30318e4 --- /dev/null +++ b/src/clientlibrary/config/config_test.go @@ -0,0 +1,23 @@ +package config + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestConfig(t *testing.T) { + kclConfig := NewKinesisClientLibConfig("appName", "streamName", "workerId"). + WithFailoverTimeMillis(500). + WithMaxRecords(100). + WithInitialPositionInStream(TRIM_HORIZON). + WithIdleTimeBetweenReadsInMillis(20). + WithCallProcessRecordsEvenForEmptyRecordList(true). + WithTaskBackoffTimeMillis(10). + WithMetricsBufferTimeMillis(500). + WithMetricsMaxQueueSize(200). 
+ WithRegionName("us-west-2") + + assert.Equal(t, "appName", kclConfig.applicationName) + assert.Equal(t, "500", kclConfig.failoverTimeMillis) +} diff --git a/src/clientlibrary/config/initial-stream-pos.go b/src/clientlibrary/config/initial-stream-pos.go new file mode 100644 index 0000000..54e9d39 --- /dev/null +++ b/src/clientlibrary/config/initial-stream-pos.go @@ -0,0 +1,13 @@ +package config + +import ( + "time" +) + +func newInitialPositionAtTimestamp(timestamp *time.Time) *InitialPositionInStreamExtended { + return &InitialPositionInStreamExtended{position: AT_TIMESTAMP, timestamp: timestamp} +} + +func newInitialPosition(position InitialPositionInStream) *InitialPositionInStreamExtended { + return &InitialPositionInStreamExtended{position: position, timestamp: nil} +} diff --git a/src/clientlibrary/config/kcl-config.go b/src/clientlibrary/config/kcl-config.go new file mode 100644 index 0000000..bbe8e6a --- /dev/null +++ b/src/clientlibrary/config/kcl-config.go @@ -0,0 +1,147 @@ +package config + +import ( + "clientlibrary/utils" + "time" +) + +// NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. 
+func NewKinesisClientLibConfig(applicationName, streamName, workerID string) *KinesisClientLibConfiguration { + checkIsValueNotEmpty("applicationName", applicationName) + checkIsValueNotEmpty("streamName", streamName) + checkIsValueNotEmpty("applicationName", applicationName) + + if empty(workerID) { + workerID = utils.MustNewUUID() + } + + // populate the KCL configuration with default values + return &KinesisClientLibConfiguration{ + applicationName: applicationName, + tableName: applicationName, + streamName: streamName, + workerID: workerID, + kinesisEndpoint: "", + initialPositionInStream: DEFAULT_INITIAL_POSITION_IN_STREAM, + initialPositionInStreamExtended: *newInitialPosition(DEFAULT_INITIAL_POSITION_IN_STREAM), + failoverTimeMillis: DEFAULT_FAILOVER_TIME_MILLIS, + maxRecords: DEFAULT_MAX_RECORDS, + idleTimeBetweenReadsInMillis: DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, + callProcessRecordsEvenForEmptyRecordList: DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, + parentShardPollIntervalMillis: DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS, + shardSyncIntervalMillis: DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, + cleanupTerminatedShardsBeforeExpiry: DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, + taskBackoffTimeMillis: DEFAULT_TASK_BACKOFF_TIME_MILLIS, + metricsBufferTimeMillis: DEFAULT_METRICS_BUFFER_TIME_MILLIS, + metricsMaxQueueSize: DEFAULT_METRICS_MAX_QUEUE_SIZE, + validateSequenceNumberBeforeCheckpointing: DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, + regionName: "", + shutdownGraceMillis: DEFAULT_SHUTDOWN_GRACE_MILLIS, + maxLeasesForWorker: DEFAULT_MAX_LEASES_FOR_WORKER, + maxLeasesToStealAtOneTime: DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME, + initialLeaseTableReadCapacity: DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, + initialLeaseTableWriteCapacity: DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, + skipShardSyncAtWorkerInitializationIfLeasesExist: DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST, + } +} + +// WithTableName to provide alternative 
lease table in DynamoDB +func (c *KinesisClientLibConfiguration) WithTableName(tableName string) *KinesisClientLibConfiguration { + c.tableName = tableName + return c +} + +func (c *KinesisClientLibConfiguration) WithKinesisEndpoint(kinesisEndpoint string) *KinesisClientLibConfiguration { + c.kinesisEndpoint = kinesisEndpoint + return c +} + +func (c *KinesisClientLibConfiguration) WithInitialPositionInStream(initialPositionInStream InitialPositionInStream) *KinesisClientLibConfiguration { + c.initialPositionInStream = initialPositionInStream + c.initialPositionInStreamExtended = *newInitialPosition(initialPositionInStream) + return c +} + +func (c *KinesisClientLibConfiguration) WithTimestampAtInitialPositionInStream(timestamp *time.Time) *KinesisClientLibConfiguration { + c.initialPositionInStream = AT_TIMESTAMP + c.initialPositionInStreamExtended = *newInitialPositionAtTimestamp(timestamp) + return c +} + +func (c *KinesisClientLibConfiguration) WithFailoverTimeMillis(failoverTimeMillis int) *KinesisClientLibConfiguration { + checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis) + c.failoverTimeMillis = failoverTimeMillis + return c +} + +func (c *KinesisClientLibConfiguration) WithShardSyncIntervalMillis(shardSyncIntervalMillis int) *KinesisClientLibConfiguration { + checkIsValuePositive("ShardSyncIntervalMillis", shardSyncIntervalMillis) + c.shardSyncIntervalMillis = shardSyncIntervalMillis + return c +} + +func (c *KinesisClientLibConfiguration) WithMaxRecords(maxRecords int) *KinesisClientLibConfiguration { + checkIsValuePositive("MaxRecords", maxRecords) + c.maxRecords = maxRecords + return c +} + +/** + * Controls how long the KCL will sleep if no records are returned from Kinesis + * + *

+ * This value is only used when no records are returned; if records are returned, the {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ProcessTask} will + * immediately retrieve the next set of records after the call to + * {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#processRecords(ProcessRecordsInput)} + * has returned. Setting this value to high may result in the KCL being unable to catch up. If you are changing this + * value it's recommended that you enable {@link #withCallProcessRecordsEvenForEmptyRecordList(boolean)}, and + * monitor how far behind the records retrieved are by inspecting + * {@link com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput#getMillisBehindLatest()}, and the + * CloudWatch + * Metric: GetRecords.MillisBehindLatest + *

+ * + * @param idleTimeBetweenReadsInMillis + * how long to sleep between GetRecords calls when no records are returned. + * @return KinesisClientLibConfiguration + */ +func (c *KinesisClientLibConfiguration) WithIdleTimeBetweenReadsInMillis(idleTimeBetweenReadsInMillis int) *KinesisClientLibConfiguration { + checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis) + c.idleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis + return c +} + +func (c *KinesisClientLibConfiguration) WithCallProcessRecordsEvenForEmptyRecordList(callProcessRecordsEvenForEmptyRecordList bool) *KinesisClientLibConfiguration { + c.callProcessRecordsEvenForEmptyRecordList = callProcessRecordsEvenForEmptyRecordList + return c +} + +func (c *KinesisClientLibConfiguration) WithTaskBackoffTimeMillis(taskBackoffTimeMillis int) *KinesisClientLibConfiguration { + checkIsValuePositive("taskBackoffTimeMillis", taskBackoffTimeMillis) + c.taskBackoffTimeMillis = taskBackoffTimeMillis + return c +} + +// WithMetricsBufferTimeMillis configures Metrics are buffered for at most this long before publishing to CloudWatch +func (c *KinesisClientLibConfiguration) WithMetricsBufferTimeMillis(metricsBufferTimeMillis int) *KinesisClientLibConfiguration { + checkIsValuePositive("metricsBufferTimeMillis", metricsBufferTimeMillis) + c.metricsBufferTimeMillis = metricsBufferTimeMillis + return c +} + +// WithMetricsMaxQueueSize configures Max number of metrics to buffer before publishing to CloudWatch +func (c *KinesisClientLibConfiguration) WithMetricsMaxQueueSize(metricsMaxQueueSize int) *KinesisClientLibConfiguration { + checkIsValuePositive("metricsMaxQueueSize", metricsMaxQueueSize) + c.metricsMaxQueueSize = metricsMaxQueueSize + return c +} + +// WithRegionName configures region for the stream +func (c *KinesisClientLibConfiguration) WithRegionName(regionName string) *KinesisClientLibConfiguration { + checkIsValueNotEmpty("regionName", regionName) + c.regionName = regionName + 
return c +} + +// Getters diff --git a/src/clientlibrary/interfaces/record-processor-checkpointer.go b/src/clientlibrary/interfaces/record-processor-checkpointer.go new file mode 100644 index 0000000..c752f04 --- /dev/null +++ b/src/clientlibrary/interfaces/record-processor-checkpointer.go @@ -0,0 +1,227 @@ +package interfaces + +import ( + ks "github.com/aws/aws-sdk-go/service/kinesis" + + . "clientlibrary/types" +) + +type ( + IPreparedCheckpointer interface { + getPendingCheckpoint() ExtendedSequenceNumber + + /** + * This method will record a pending checkpoint. + * + * @error ThrottlingError Can't store checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @error ShutdownError The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @error InvalidStateError Can't store checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @error KinesisClientLibDependencyError Encountered an issue when storing the checkpoint. The application can + * backoff and retry. + * @error IllegalArgumentError The sequence number being checkpointed is invalid because it is out of range, + * i.e. it is smaller than the last check point value (prepared or committed), or larger than the greatest + * sequence number seen by the associated record processor. + */ + checkpoint() error + } + + /** + * Used by RecordProcessors when they want to checkpoint their progress. + * The Kinesis Client Library will pass an object implementing this interface to RecordProcessors, so they can + * checkpoint their progress. + */ + IRecordProcessorCheckpointer interface { + + /** + * This method will checkpoint the progress at the last data record that was delivered to the record processor. 
+ * Upon fail over (after a successful checkpoint() call), the new/replacement RecordProcessor instance + * will receive data records whose sequenceNumber > checkpoint position (for each partition key). + * In steady state, applications should checkpoint periodically (e.g. once every 5 minutes). + * Calling this API too frequently can slow down the application (because it puts pressure on the underlying + * checkpoint storage layer). + * + * @error ThrottlingError Can't store checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @error ShutdownError The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @error InvalidStateError Can't store checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @error KinesisClientLibDependencyError Encountered an issue when storing the checkpoint. The application can + * backoff and retry. + */ + checkpoint() error + + /** + * This method will checkpoint the progress at the provided record. This method is analogous to + * {@link #checkpoint()} but provides the ability to specify the record at which to + * checkpoint. + * + * @param record A record at which to checkpoint in this shard. Upon failover, + * the Kinesis Client Library will start fetching records after this record's sequence number. + * @error ThrottlingError Can't store checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @error ShutdownError The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. 
+ * The application should abort processing via this RecordProcessor instance. + * @error InvalidStateError Can't store checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @error KinesisClientLibDependencyError Encountered an issue when storing the checkpoint. The application can + * backoff and retry. + */ + checkpointByRecord(record *ks.Record) error + + /** + * This method will checkpoint the progress at the provided sequenceNumber. This method is analogous to + * {@link #checkpoint()} but provides the ability to specify the sequence number at which to + * checkpoint. + * + * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover, + * the Kinesis Client Library will start fetching records after this sequence number. + * @error ThrottlingError Can't store checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @error ShutdownError The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @error InvalidStateError Can't store checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @error KinesisClientLibDependencyError Encountered an issue when storing the checkpoint. The application can + * backoff and retry. + * @error IllegalArgumentError The sequence number is invalid for one of the following reasons: + * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the + * greatest sequence number seen by the associated record processor. + * 2.) It is not a valid sequence number for a record in this shard. 
+ */ + checkpointBySequenceNumber(sequenceNumber string) error + + /** + * This method will checkpoint the progress at the provided sequenceNumber and subSequenceNumber, the latter for + * aggregated records produced with the Producer Library. This method is analogous to {@link #checkpoint()} + * but provides the ability to specify the sequence and subsequence numbers at which to checkpoint. + * + * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover, the Kinesis + * Client Library will start fetching records after the given sequence and subsequence numbers. + * @param subSequenceNumber A subsequence number at which to checkpoint within this shard. Upon failover, the + * Kinesis Client Library will start fetching records after the given sequence and subsequence numbers. + * @error ThrottlingError Can't store checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @error ShutdownError The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @error InvalidStateError Can't store checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @error KinesisClientLibDependencyError Encountered an issue when storing the checkpoint. The application can + * backoff and retry. + * @error IllegalArgumentError The sequence number is invalid for one of the following reasons: + * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the + * greatest sequence number seen by the associated record processor. + * 2.) It is not a valid sequence number for a record in this shard. 
+ */ + checkpointBySequenceNumberEx(sequenceNumber string, subSequenceNumber int64) error + + /** + * This method will record a pending checkpoint at the last data record that was delivered to the record processor. + * If the application fails over between calling prepareCheckpoint() and checkpoint(), the init() method of the next + * IRecordProcessor for this shard will be informed of the prepared sequence number + * + * Application should use this to assist with idempotency across failover by calling prepareCheckpoint before having + * side effects, then by calling checkpoint on the returned PreparedCheckpointer after side effects are complete. + * Use the sequence number passed in to init() to behave idempotently. + * + * @return an IPreparedCheckpointer object that can be called later to persist the checkpoint. + * + * @error ThrottlingError Can't store pending checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @error ShutdownError The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @error InvalidStateError Can't store pending checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @error KinesisClientLibDependencyError Encountered an issue when storing the pending checkpoint. The + * application can backoff and retry. + */ + prepareCheckpoint() (*IPreparedCheckpointer, error) + + /** + * This method will record a pending checkpoint at the at the provided record. This method is analogous to + * {@link #prepareCheckpoint()} but provides the ability to specify the record at which to prepare the checkpoint. + * + * @param record A record at which to prepare checkpoint in this shard. 
+ * + * Application should use this to assist with idempotency across failover by calling prepareCheckpoint before having + * side effects, then by calling checkpoint on the returned PreparedCheckpointer after side effects are complete. + * Use the sequence number and application state passed in to init() to behave idempotently. + * + * @return an IPreparedCheckpointer object that can be called later to persist the checkpoint. + * + * @error ThrottlingError Can't store pending checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @error ShutdownError The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @error InvalidStateError Can't store pending checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @error KinesisClientLibDependencyError Encountered an issue when storing the pending checkpoint. The + * application can backoff and retry. + * @error IllegalArgumentError The sequence number is invalid for one of the following reasons: + * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the + * greatest sequence number seen by the associated record processor. + * 2.) It is not a valid sequence number for a record in this shard. + */ + prepareCheckpointByRecord(record *ks.Record) (*IPreparedCheckpointer, error) + + /** + * This method will record a pending checkpoint at the provided sequenceNumber. This method is analogous to + * {@link #prepareCheckpoint()} but provides the ability to specify the sequence number at which to checkpoint. + * + * @param sequenceNumber A sequence number at which to prepare checkpoint in this shard. 
+ + * @return an IPreparedCheckpointer object that can be called later to persist the checkpoint. + * + * @error ThrottlingError Can't store pending checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @error ShutdownError The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @error InvalidStateError Can't store pending checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @error KinesisClientLibDependencyError Encountered an issue when storing the pending checkpoint. The + * application can backoff and retry. + * @error IllegalArgumentError The sequence number is invalid for one of the following reasons: + * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the + * greatest sequence number seen by the associated record processor. + * 2.) It is not a valid sequence number for a record in this shard. + */ + prepareCheckpointBySequenceNumber(sequenceNumber string) (*IPreparedCheckpointer, error) + + /** + * This method will record a pending checkpoint at the provided sequenceNumber and subSequenceNumber, the latter for + * aggregated records produced with the Producer Library. This method is analogous to {@link #prepareCheckpoint()} + * but provides the ability to specify the sequence number at which to checkpoint + * + * @param sequenceNumber A sequence number at which to prepare checkpoint in this shard. + * @param subSequenceNumber A subsequence number at which to prepare checkpoint within this shard. + * + * @return an IPreparedCheckpointer object that can be called later to persist the checkpoint. + * + * @error ThrottlingError Can't store pending checkpoint. 
Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @error ShutdownError The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @error InvalidStateError Can't store pending checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @error KinesisClientLibDependencyError Encountered an issue when storing the pending checkpoint. The + * application can backoff and retry. + * @error IllegalArgumentError The sequence number is invalid for one of the following reasons: + * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the + * greatest sequence number seen by the associated record processor. + * 2.) It is not a valid sequence number for a record in this shard. + */ + prepareCheckpointBySequenceNumberEx(sequenceNumber string, subSequenceNumber int64) (*IPreparedCheckpointer, error) + } +) diff --git a/src/clientlibrary/interfaces/record-processor.go b/src/clientlibrary/interfaces/record-processor.go new file mode 100644 index 0000000..ab704a2 --- /dev/null +++ b/src/clientlibrary/interfaces/record-processor.go @@ -0,0 +1,44 @@ +package interfaces + +import ( + . "clientlibrary/types" +) + +// IRecordProcessor is the interface for some callback functions invoked by KCL will +// The main task of using KCL is to provide implementation on IRecordProcessor interface. +// Note: This is exactly the same interface as Amazon KCL IRecordProcessor v2 +type IRecordProcessor interface { + /** + * Invoked by the Amazon Kinesis Client Library before data records are delivered to the RecordProcessor instance + * (via processRecords). 
+ * + * @param initializationInput Provides information related to initialization + */ + initialize(initializationInput InitializationInput) + + /** + * Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the + * application. + * Upon fail over, the new instance will get records with sequence number > checkpoint position + * for each partition key. + * + * @param processRecordsInput Provides the records to be processed as well as information and capabilities related + * to them (eg checkpointing). + */ + processRecords(processRecordsInput ProcessRecordsInput) + + /** + * Invoked by the Amazon Kinesis Client Library to indicate it will no longer send data records to this + * RecordProcessor instance. + * + *

<h2>Warning</h2>

+ * + * When the value of {@link ShutdownInput#getShutdownReason()} is + * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you + * checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress. + * + * @param shutdownInput + * Provides information and capabilities (eg checkpointing) related to shutdown of this record processor. + */ + shutdown(shutdownInput ShutdownInput) +} diff --git a/src/clientlibrary/lib/checkpoint/checkpoint.go b/src/clientlibrary/lib/checkpoint/checkpoint.go new file mode 100644 index 0000000..9f0facc --- /dev/null +++ b/src/clientlibrary/lib/checkpoint/checkpoint.go @@ -0,0 +1,26 @@ +package checkpoint + +import ( + . "clientlibrary/types" +) + +const ( + // TRIM_HORIZON starts from the first available record in the shard. + TRIM_HORIZON = SentinelCheckpoint(iota + 1) + // LATEST starts from the latest record in the shard. + LATEST + // SHARD_END We've completely processed all records in this shard. + SHARD_END + // AT_TIMESTAMP starts from the record at or after the specified server-side timestamp. + AT_TIMESTAMP +) + +type ( + SentinelCheckpoint int + + // Checkpoint: a class encapsulating the 2 pieces of state stored in a checkpoint. + Checkpoint struct { + checkpoint *ExtendedSequenceNumber + pendingCheckpoint *ExtendedSequenceNumber + } +) diff --git a/src/clientlibrary/lib/worker/worker.go b/src/clientlibrary/lib/worker/worker.go new file mode 100644 index 0000000..4df0094 --- /dev/null +++ b/src/clientlibrary/lib/worker/worker.go @@ -0,0 +1 @@ +package worker diff --git a/src/clientlibrary/types/inputs.go b/src/clientlibrary/types/inputs.go new file mode 100644 index 0000000..9668e64 --- /dev/null +++ b/src/clientlibrary/types/inputs.go @@ -0,0 +1,39 @@ +package types + +import ( + "time" + + ks "github.com/aws/aws-sdk-go/service/kinesis" + + . 
"clientlibrary/interfaces" +) + +const ( + REQUESTED = ShutdownReason(1) + TERMINATE = REQUESTED + 1 + ZOMBIE = TERMINATE + 1 +) + +// Containers for the parameters to the IRecordProcessor +type ( + ShutdownReason int + + InitializationInput struct { + shardId string + extendedSequenceNumber *ExtendedSequenceNumber + pendingCheckpointSequenceNumber *ExtendedSequenceNumber + } + + ProcessRecordsInput struct { + cacheEntryTime *time.Time + cacheExitTime *time.Time + records []*ks.Record + checkpointer *IRecordProcessorCheckpointer + millisBehindLatest int64 + } + + ShutdownInput struct { + shutdownReason ShutdownReason + checkpointer *IRecordProcessorCheckpointer + } +) diff --git a/src/clientlibrary/types/sequence-number.go b/src/clientlibrary/types/sequence-number.go new file mode 100644 index 0000000..0dddb57 --- /dev/null +++ b/src/clientlibrary/types/sequence-number.go @@ -0,0 +1,11 @@ +package types + +// ExtendedSequenceNumber represents a two-part sequence number for records aggregated by the Kinesis Producer Library. +// +// The KPL combines multiple user records into a single Kinesis record. Each user record therefore has an integer +// sub-sequence number, in addition to the regular sequence number of the Kinesis record. The sub-sequence number +// is used to checkpoint within an aggregated record. 
+type ExtendedSequenceNumber struct { + sequenceNumber string + subSequenceNumber int64 +} diff --git a/src/clientlibrary/utils/uuid.go b/src/clientlibrary/utils/uuid.go new file mode 100644 index 0000000..64883b8 --- /dev/null +++ b/src/clientlibrary/utils/uuid.go @@ -0,0 +1,14 @@ +package utils + +import ( + guuid "github.com/google/uuid" +) + +// MustNewUUID generates a new UUID and panics if failed +func MustNewUUID() string { + id, err := guuid.NewUUID() + if err != nil { + panic(err) + } + return id.String() +} diff --git a/src/common/errors.go b/src/common/errors.go new file mode 100644 index 0000000..f9ab2af --- /dev/null +++ b/src/common/errors.go @@ -0,0 +1,146 @@ +package common + +import ( + "fmt" + "net/http" +) + +// ErrorCode is unified definition of numerical error codes +type ErrorCode int32 + +// pre-defined error codes +const ( + // System Wide 20000 - 20199 + KinesisClientLibError ErrorCode = 20000 + + // KinesisClientLibrary Retryable Errors 20001 - 20099 + KinesisClientLibRetryableError ErrorCode = 20001 + + KinesisClientLibIOError ErrorCode = 20002 + BlockedOnParentShardError ErrorCode = 20003 + KinesisClientLibDependencyError ErrorCode = 20004 + ThrottlingError ErrorCode = 20005 + + // KinesisClientLibrary NonRetryable Errors 20100 - 20149 + KinesisClientLibNonRetryableException ErrorCode = 20000 + + InvalidStateError ErrorCode = 20101 + ShutdownError ErrorCode = 20102 + + // Kinesis Lease Errors 20150 - 20199 + LeasingError ErrorCode = 20150 + + LeasingInvalidStateError ErrorCode = 20151 + LeasingDependencyError ErrorCode = 20152 + LeasingProvisionedThroughputError ErrorCode = 20153 + + // Error indicates passing illegal or inappropriate argument + IllegalArgumentError ErrorCode = 20198 + + // NotImplemented + KinesisClientLibNotImplemented ErrorCode = 20199 +) + +var errorMap = map[ErrorCode]ClientLibraryError{ + KinesisClientLibError: {ErrorCode: KinesisClientLibError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Top 
level error of Kinesis Client Library"}, + + // Retryable + KinesisClientLibRetryableError: {ErrorCode: KinesisClientLibRetryableError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Retryable exceptions (e.g. transient errors). The request/operation is expected to succeed upon (back off and) retry."}, + KinesisClientLibIOError: {ErrorCode: KinesisClientLibIOError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Error in reading/writing information (e.g. shard information from Kinesis may not be current/complete)."}, + BlockedOnParentShardError: {ErrorCode: BlockedOnParentShardError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Cannot start processing data for a shard because the data from the parent shard has not been completely processed (yet)."}, + KinesisClientLibDependencyError: {ErrorCode: KinesisClientLibDependencyError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Cannot talk to its dependencies (e.g. fetching data from Kinesis, DynamoDB table reads/writes, emitting metrics to CloudWatch)."}, + ThrottlingError: {ErrorCode: ThrottlingError, Retryable: true, Status: http.StatusTooManyRequests, Msg: "Requests are throttled by a service (e.g. DynamoDB when storing a checkpoint)."}, + + // Non-Retryable + KinesisClientLibNonRetryableException: {ErrorCode: KinesisClientLibNonRetryableException, Retryable: false, Status: http.StatusServiceUnavailable, Msg: "Non-retryable exceptions. Simply retrying the same request/operation is not expected to succeed."}, + InvalidStateError: {ErrorCode: InvalidStateError, Retryable: false, Status: http.StatusServiceUnavailable, Msg: "Kinesis Library has issues with internal state (e.g. DynamoDB table is not found)."}, + ShutdownError: {ErrorCode: ShutdownError, Retryable: false, Status: http.StatusServiceUnavailable, Msg: "The RecordProcessor instance has been shutdown (e.g. 
and attempts a checkpoint)."}, + + // Leasing + LeasingError: {ErrorCode: LeasingError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Top-level error type for the leasing code."}, + LeasingInvalidStateError: {ErrorCode: LeasingInvalidStateError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Error in a lease operation has failed because DynamoDB is an invalid state"}, + LeasingDependencyError: {ErrorCode: LeasingDependencyError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Error in a lease operation has failed because a dependency of the leasing system has failed."}, + LeasingProvisionedThroughputError: {ErrorCode: LeasingProvisionedThroughputError, Retryable: false, Status: http.StatusServiceUnavailable, Msg: "Error in a lease operation has failed due to lack of provisioned throughput for a DynamoDB table."}, + + // IllegalArgumentError + IllegalArgumentError: {ErrorCode: IllegalArgumentError, Retryable: false, Status: http.StatusBadRequest, Msg: "Error indicates that a method has been passed an illegal or inappropriate argument."}, + + // Not Implemented + KinesisClientLibNotImplemented: {ErrorCode: KinesisClientLibNotImplemented, Retryable: false, Status: http.StatusNotImplemented, Msg: "Not Implemented"}, +} + +// Message returns the message of the error code +func (c ErrorCode) Message() string { + return errorMap[c].Msg +} + +// MakeErr makes an error with default message +func (c ErrorCode) MakeErr() *ClientLibraryError { + e := errorMap[c] + return &e +} + +// MakeError makes an error with message and data +func (c ErrorCode) MakeError(detail string) error { + e := errorMap[c] + return e.WithDetail(detail) +} + +// ClientLibraryError is unified error +type ClientLibraryError struct { + // ErrorCode is the numerical error code. + ErrorCode `json:"code"` + // Retryable is a bool flag to indicate whether the error is retryable or not. + Retryable bool `json:"tryable"` + // Status is the HTTP status code. 
+ Status int `json:"status"` + // Msg provides a terse description of the error. Its value is defined in errorMap. + Msg string `json:"msg"` + // Detail provides a detailed description of the error. Its value is set using WithDetail. + Detail string `json:"detail"` +} + +// Error implements error +func (e *ClientLibraryError) Error() string { + var prefix string + if e.Retryable { + prefix = "Retryable" + } else { + prefix = "NonRetryable" + } + msg := fmt.Sprintf("%v Error [%d]: %s", prefix, int32(e.ErrorCode), e.Msg) + if e.Detail != "" { + msg = fmt.Sprintf("%s, detail: %s", msg, e.Detail) + } + return msg +} + +// WithMsg overwrites the default error message +func (e *ClientLibraryError) WithMsg(format string, v ...interface{}) *ClientLibraryError { + e.Msg = fmt.Sprintf(format, v...) + return e +} + +// WithDetail adds a detailed message to error +func (e *ClientLibraryError) WithDetail(format string, v ...interface{}) *ClientLibraryError { + if len(e.Detail) == 0 { + e.Detail = fmt.Sprintf(format, v...) + } else { + e.Detail += ", " + fmt.Sprintf(format, v...) + } + return e +} + +// WithCause adds CauseBy to error +func (e *ClientLibraryError) WithCause(err error) *ClientLibraryError { + if err != nil { + // Store error message in Detail, so the info can be preserved + // when CascadeError is marshaled to json. 
+ if len(e.Detail) == 0 { + e.Detail = err.Error() + } else { + e.Detail += ", cause: " + err.Error() + } + } + return e +} diff --git a/src/vendor/manifest b/src/vendor/manifest index 9d112c1..1b8ad4e 100644 --- a/src/vendor/manifest +++ b/src/vendor/manifest @@ -159,7 +159,7 @@ "importpath": "gopkg.in/yaml.v2", "repository": "https://gopkg.in/yaml.v2", "vcs": "git", - "revision": "cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b", + "revision": "5420a8b6744d3b0345ab293f6fcba19c978f1183", "branch": "v2", "notests": true } From 425daf70ce08e0e99922fdfd02d43e5de1971185 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Thu, 12 Apr 2018 21:02:30 -0700 Subject: [PATCH 05/90] KCL: Implement Shard Lease (part 1/2) This is the first part of implementing shard lease for Kinesis Client library. It creates dynamoDB table for managing Kinesis stream shard lease. https://jira.eng.vmware.com/browse/CNA-636 Adjust error code value range. Change-Id: I16565fa15332843101235fb14545ee69c2599f2f --- src/{ => clientlibrary}/common/errors.go | 43 +- src/clientlibrary/config/config.go | 107 ++--- src/clientlibrary/config/config_test.go | 6 +- .../config/initial-stream-pos.go | 4 +- src/clientlibrary/config/kcl-config.go | 105 +++-- src/leases/dynamoutils/dynamoutils.go | 78 ++++ src/leases/impl/kinesis-client-lease.go | 116 +++++ src/leases/impl/lease-manager.go | 440 ++++++++++++++++++ src/leases/impl/lease-serializer.go | 184 ++++++++ src/leases/impl/lease.go | 113 +++++ src/leases/interfaces/lease-manager.go | 162 +++++++ src/leases/interfaces/lease-renewer.go | 78 ++++ src/leases/interfaces/lease-serializer.go | 86 ++++ src/leases/interfaces/lease-taker.go | 28 ++ src/leases/interfaces/lease.go | 21 + 15 files changed, 1444 insertions(+), 127 deletions(-) rename src/{ => clientlibrary}/common/errors.go (84%) create mode 100644 src/leases/dynamoutils/dynamoutils.go create mode 100644 src/leases/impl/kinesis-client-lease.go create mode 100644 src/leases/impl/lease-manager.go create mode 100644 
src/leases/impl/lease-serializer.go create mode 100644 src/leases/impl/lease.go create mode 100644 src/leases/interfaces/lease-manager.go create mode 100644 src/leases/interfaces/lease-renewer.go create mode 100644 src/leases/interfaces/lease-serializer.go create mode 100644 src/leases/interfaces/lease-taker.go create mode 100644 src/leases/interfaces/lease.go diff --git a/src/common/errors.go b/src/clientlibrary/common/errors.go similarity index 84% rename from src/common/errors.go rename to src/clientlibrary/common/errors.go index f9ab2af..adddc26 100644 --- a/src/common/errors.go +++ b/src/clientlibrary/common/errors.go @@ -10,35 +10,36 @@ type ErrorCode int32 // pre-defined error codes const ( - // System Wide 20000 - 20199 - KinesisClientLibError ErrorCode = 20000 + // System Wide 41000 - 42000 + KinesisClientLibError ErrorCode = 41000 - // KinesisClientLibrary Retryable Errors 20001 - 20099 - KinesisClientLibRetryableError ErrorCode = 20001 + // KinesisClientLibrary Retryable Errors 41001 - 41100 + KinesisClientLibRetryableError ErrorCode = 41001 - KinesisClientLibIOError ErrorCode = 20002 - BlockedOnParentShardError ErrorCode = 20003 - KinesisClientLibDependencyError ErrorCode = 20004 - ThrottlingError ErrorCode = 20005 + KinesisClientLibIOError ErrorCode = 41002 + BlockedOnParentShardError ErrorCode = 41003 + KinesisClientLibDependencyError ErrorCode = 41004 + ThrottlingError ErrorCode = 41005 - // KinesisClientLibrary NonRetryable Errors 20100 - 20149 - KinesisClientLibNonRetryableException ErrorCode = 20000 + // KinesisClientLibrary NonRetryable Errors 41100 - 41200 + KinesisClientLibNonRetryableException ErrorCode = 41100 - InvalidStateError ErrorCode = 20101 - ShutdownError ErrorCode = 20102 + InvalidStateError ErrorCode = 41101 + ShutdownError ErrorCode = 41102 - // Kinesis Lease Errors 20150 - 20199 - LeasingError ErrorCode = 20150 + // Kinesis Lease Errors 41200 - 41300 + LeasingError ErrorCode = 41200 - LeasingInvalidStateError ErrorCode = 20151 - 
LeasingDependencyError ErrorCode = 20152 - LeasingProvisionedThroughputError ErrorCode = 20153 + LeasingInvalidStateError ErrorCode = 41201 + LeasingDependencyError ErrorCode = 41202 + LeasingProvisionedThroughputError ErrorCode = 41203 - // Error indicates passing illegal or inappropriate argument - IllegalArgumentError ErrorCode = 20198 + // Misc Errors 41300 - 41400 + // NotImplemented + KinesisClientLibNotImplemented ErrorCode = 41301 - // NotImplemented - KinesisClientLibNotImplemented ErrorCode = 20199 + // Error indicates passing illegal or inappropriate argument + IllegalArgumentError ErrorCode = 41302 ) var errorMap = map[ErrorCode]ClientLibraryError{ diff --git a/src/clientlibrary/config/config.go b/src/clientlibrary/config/config.go index add0c4b..e2c82c5 100644 --- a/src/clientlibrary/config/config.go +++ b/src/clientlibrary/config/config.go @@ -14,7 +14,7 @@ const ( LATEST = InitialPositionInStream(1) // TRIM_HORIZON start from the oldest available data record TRIM_HORIZON = LATEST + 1 - // AT_TIMESTAMP start from the record at or after the specified server-side timestamp. + // AT_TIMESTAMP start from the record at or after the specified server-side Timestamp. AT_TIMESTAMP = TRIM_HORIZON + 1 // The location in the shard from which the KinesisClientLibrary will start fetching records from @@ -99,14 +99,14 @@ const ( ) type ( - // InitialPositionInStream Used to specify the position in the stream where a new application should start from + // InitialPositionInStream Used to specify the Position in the stream where a new application should start from // This is used during initial application bootstrap (when a checkpoint doesn't exist for a shard or its parents) InitialPositionInStream int - // Class that houses the entities needed to specify the position in the stream from where a new application should + // Class that houses the entities needed to specify the Position in the stream from where a new application should // start. 
InitialPositionInStreamExtended struct { - position InitialPositionInStream + Position InitialPositionInStream // The time stamp of the data record from which to start reading. Used with // shard iterator type AT_TIMESTAMP. A time stamp is the Unix epoch date with @@ -115,100 +115,103 @@ type ( // iterator returned is for the next (later) record. If the time stamp is older // than the current trim horizon, the iterator returned is for the oldest untrimmed // data record (TRIM_HORIZON). - timestamp *time.Time `type:"timestamp" timestampFormat:"unix"` + Timestamp *time.Time `type:"Timestamp" timestampFormat:"unix"` } // Configuration for the Kinesis Client Library. KinesisClientLibConfiguration struct { - // applicationName is name of application. Kinesis allows multiple applications to consume the same stream. - applicationName string + // ApplicationName is name of application. Kinesis allows multiple applications to consume the same stream. + ApplicationName string - // tableName is name of the dynamo db table for managing kinesis stream default to applicationName - tableName string + // TableName is name of the dynamo db table for managing kinesis stream default to ApplicationName + TableName string - // streamName is the name of Kinesis stream - streamName string + // StreamName is the name of Kinesis stream + StreamName string - // workerID used to distinguish different workers/processes of a Kinesis application - workerID string + // WorkerID used to distinguish different workers/processes of a Kinesis application + WorkerID string - // kinesisEndpoint endpoint - kinesisEndpoint string + // KinesisEndpoint endpoint + KinesisEndpoint string - // dynamoDB endpoint - dynamoDBEndpoint string + // DynamoDB endpoint + DynamoDBEndpoint string - // initialPositionInStream specifies the position in the stream where a new application should start from - initialPositionInStream InitialPositionInStream + // InitialPositionInStream specifies the Position in the stream 
where a new application should start from + InitialPositionInStream InitialPositionInStream - // initialPositionInStreamExtended provides actual AT_TMESTAMP value - initialPositionInStreamExtended InitialPositionInStreamExtended + // InitialPositionInStreamExtended provides actual AT_TMESTAMP value + InitialPositionInStreamExtended InitialPositionInStreamExtended // credentials to access Kinesis/Dynamo/CloudWatch: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/ // Note: No need to configure here. Use NewEnvCredentials for testing and EC2RoleProvider for production - // failoverTimeMillis Lease duration (leases not renewed within this period will be claimed by others) - failoverTimeMillis int + // FailoverTimeMillis Lease duration (leases not renewed within this period will be claimed by others) + FailoverTimeMillis int - /// maxRecords Max records to read per Kinesis getRecords() call - maxRecords int + /// MaxRecords Max records to read per Kinesis getRecords() call + MaxRecords int - // idleTimeBetweenReadsInMillis Idle time between calls to fetch data from Kinesis - idleTimeBetweenReadsInMillis int + // IdleTimeBetweenReadsInMillis Idle time between calls to fetch data from Kinesis + IdleTimeBetweenReadsInMillis int - // callProcessRecordsEvenForEmptyRecordList Call the IRecordProcessor::processRecords() API even if + // CallProcessRecordsEvenForEmptyRecordList Call the IRecordProcessor::processRecords() API even if // GetRecords returned an empty record list. 
- callProcessRecordsEvenForEmptyRecordList bool + CallProcessRecordsEvenForEmptyRecordList bool - // parentShardPollIntervalMillis Wait for this long between polls to check if parent shards are done - parentShardPollIntervalMillis int + // ParentShardPollIntervalMillis Wait for this long between polls to check if parent shards are done + ParentShardPollIntervalMillis int - // shardSyncIntervalMillis Time between tasks to sync leases and Kinesis shards - shardSyncIntervalMillis int + // ShardSyncIntervalMillis Time between tasks to sync leases and Kinesis shards + ShardSyncIntervalMillis int - // cleanupTerminatedShardsBeforeExpiry Clean up shards we've finished processing (don't wait for expiration) - cleanupTerminatedShardsBeforeExpiry bool + // CleanupTerminatedShardsBeforeExpiry Clean up shards we've finished processing (don't wait for expiration) + CleanupTerminatedShardsBeforeExpiry bool // kinesisClientConfig Client Configuration used by Kinesis client // dynamoDBClientConfig Client Configuration used by DynamoDB client // cloudWatchClientConfig Client Configuration used by CloudWatch client // Note: we will use default client provided by AWS SDK - // taskBackoffTimeMillis Backoff period when tasks encounter an exception - taskBackoffTimeMillis int + // TaskBackoffTimeMillis Backoff period when tasks encounter an exception + TaskBackoffTimeMillis int - // metricsBufferTimeMillis Metrics are buffered for at most this long before publishing to CloudWatch - metricsBufferTimeMillis int + // MetricsBufferTimeMillis Metrics are buffered for at most this long before publishing to CloudWatch + MetricsBufferTimeMillis int - // metricsMaxQueueSize Max number of metrics to buffer before publishing to CloudWatch - metricsMaxQueueSize int + // MetricsMaxQueueSize Max number of metrics to buffer before publishing to CloudWatch + MetricsMaxQueueSize int - // validateSequenceNumberBeforeCheckpointing whether KCL should validate client provided sequence numbers - 
validateSequenceNumberBeforeCheckpointing bool + // ValidateSequenceNumberBeforeCheckpointing whether KCL should validate client provided sequence numbers + ValidateSequenceNumberBeforeCheckpointing bool - // regionName The region name for the service - regionName string + // RegionName The region name for the service + RegionName string - // shutdownGraceMillis The number of milliseconds before graceful shutdown terminates forcefully - shutdownGraceMillis int + // ShutdownGraceMillis The number of milliseconds before graceful shutdown terminates forcefully + ShutdownGraceMillis int // Operation parameters // Max leases this Worker can handle at a time - maxLeasesForWorker int + MaxLeasesForWorker int // Max leases to steal at one time (for load balancing) - maxLeasesToStealAtOneTime int + MaxLeasesToStealAtOneTime int // Read capacity to provision when creating the lease table (dynamoDB). - initialLeaseTableReadCapacity int + InitialLeaseTableReadCapacity int // Write capacity to provision when creating the lease table. - initialLeaseTableWriteCapacity int + InitialLeaseTableWriteCapacity int // Worker should skip syncing shards and leases at startup if leases are present // This is useful for optimizing deployments to large fleets working on a stable stream. - skipShardSyncAtWorkerInitializationIfLeasesExist bool + SkipShardSyncAtWorkerInitializationIfLeasesExist bool + + // The max number of threads in the worker thread pool to getRecords. + WorkerThreadPoolSize int } ) diff --git a/src/clientlibrary/config/config_test.go b/src/clientlibrary/config/config_test.go index 30318e4..19f1481 100644 --- a/src/clientlibrary/config/config_test.go +++ b/src/clientlibrary/config/config_test.go @@ -7,7 +7,7 @@ import ( ) func TestConfig(t *testing.T) { - kclConfig := NewKinesisClientLibConfig("appName", "streamName", "workerId"). + kclConfig := NewKinesisClientLibConfig("appName", "StreamName", "workerId"). WithFailoverTimeMillis(500). WithMaxRecords(100). 
WithInitialPositionInStream(TRIM_HORIZON). @@ -18,6 +18,6 @@ func TestConfig(t *testing.T) { WithMetricsMaxQueueSize(200). WithRegionName("us-west-2") - assert.Equal(t, "appName", kclConfig.applicationName) - assert.Equal(t, "500", kclConfig.failoverTimeMillis) + assert.Equal(t, "appName", kclConfig.ApplicationName) + assert.Equal(t, "500", kclConfig.FailoverTimeMillis) } diff --git a/src/clientlibrary/config/initial-stream-pos.go b/src/clientlibrary/config/initial-stream-pos.go index 54e9d39..20ecbfc 100644 --- a/src/clientlibrary/config/initial-stream-pos.go +++ b/src/clientlibrary/config/initial-stream-pos.go @@ -5,9 +5,9 @@ import ( ) func newInitialPositionAtTimestamp(timestamp *time.Time) *InitialPositionInStreamExtended { - return &InitialPositionInStreamExtended{position: AT_TIMESTAMP, timestamp: timestamp} + return &InitialPositionInStreamExtended{Position: AT_TIMESTAMP, Timestamp: timestamp} } func newInitialPosition(position InitialPositionInStream) *InitialPositionInStreamExtended { - return &InitialPositionInStreamExtended{position: position, timestamp: nil} + return &InitialPositionInStreamExtended{Position: position, Timestamp: nil} } diff --git a/src/clientlibrary/config/kcl-config.go b/src/clientlibrary/config/kcl-config.go index bbe8e6a..2f8bc0b 100644 --- a/src/clientlibrary/config/kcl-config.go +++ b/src/clientlibrary/config/kcl-config.go @@ -7,9 +7,9 @@ import ( // NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. 
func NewKinesisClientLibConfig(applicationName, streamName, workerID string) *KinesisClientLibConfiguration { - checkIsValueNotEmpty("applicationName", applicationName) - checkIsValueNotEmpty("streamName", streamName) - checkIsValueNotEmpty("applicationName", applicationName) + checkIsValueNotEmpty("ApplicationName", applicationName) + checkIsValueNotEmpty("StreamName", streamName) + checkIsValueNotEmpty("ApplicationName", applicationName) if empty(workerID) { workerID = utils.MustNewUUID() @@ -17,72 +17,73 @@ func NewKinesisClientLibConfig(applicationName, streamName, workerID string) *Ki // populate the KCL configuration with default values return &KinesisClientLibConfiguration{ - applicationName: applicationName, - tableName: applicationName, - streamName: streamName, - workerID: workerID, - kinesisEndpoint: "", - initialPositionInStream: DEFAULT_INITIAL_POSITION_IN_STREAM, - initialPositionInStreamExtended: *newInitialPosition(DEFAULT_INITIAL_POSITION_IN_STREAM), - failoverTimeMillis: DEFAULT_FAILOVER_TIME_MILLIS, - maxRecords: DEFAULT_MAX_RECORDS, - idleTimeBetweenReadsInMillis: DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, - callProcessRecordsEvenForEmptyRecordList: DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, - parentShardPollIntervalMillis: DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS, - shardSyncIntervalMillis: DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, - cleanupTerminatedShardsBeforeExpiry: DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, - taskBackoffTimeMillis: DEFAULT_TASK_BACKOFF_TIME_MILLIS, - metricsBufferTimeMillis: DEFAULT_METRICS_BUFFER_TIME_MILLIS, - metricsMaxQueueSize: DEFAULT_METRICS_MAX_QUEUE_SIZE, - validateSequenceNumberBeforeCheckpointing: DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, - regionName: "", - shutdownGraceMillis: DEFAULT_SHUTDOWN_GRACE_MILLIS, - maxLeasesForWorker: DEFAULT_MAX_LEASES_FOR_WORKER, - maxLeasesToStealAtOneTime: DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME, - initialLeaseTableReadCapacity: 
DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, - initialLeaseTableWriteCapacity: DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, - skipShardSyncAtWorkerInitializationIfLeasesExist: DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST, + ApplicationName: applicationName, + TableName: applicationName, + StreamName: streamName, + WorkerID: workerID, + KinesisEndpoint: "", + InitialPositionInStream: DEFAULT_INITIAL_POSITION_IN_STREAM, + InitialPositionInStreamExtended: *newInitialPosition(DEFAULT_INITIAL_POSITION_IN_STREAM), + FailoverTimeMillis: DEFAULT_FAILOVER_TIME_MILLIS, + MaxRecords: DEFAULT_MAX_RECORDS, + IdleTimeBetweenReadsInMillis: DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, + CallProcessRecordsEvenForEmptyRecordList: DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, + ParentShardPollIntervalMillis: DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS, + ShardSyncIntervalMillis: DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, + CleanupTerminatedShardsBeforeExpiry: DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, + TaskBackoffTimeMillis: DEFAULT_TASK_BACKOFF_TIME_MILLIS, + MetricsBufferTimeMillis: DEFAULT_METRICS_BUFFER_TIME_MILLIS, + MetricsMaxQueueSize: DEFAULT_METRICS_MAX_QUEUE_SIZE, + ValidateSequenceNumberBeforeCheckpointing: DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, + RegionName: "", + ShutdownGraceMillis: DEFAULT_SHUTDOWN_GRACE_MILLIS, + MaxLeasesForWorker: DEFAULT_MAX_LEASES_FOR_WORKER, + MaxLeasesToStealAtOneTime: DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME, + InitialLeaseTableReadCapacity: DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, + InitialLeaseTableWriteCapacity: DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, + SkipShardSyncAtWorkerInitializationIfLeasesExist: DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST, + WorkerThreadPoolSize: 1, } } // WithTableName to provide alternative lease table in DynamoDB func (c *KinesisClientLibConfiguration) WithTableName(tableName string) *KinesisClientLibConfiguration { - c.tableName = tableName + c.TableName = tableName return c } func 
(c *KinesisClientLibConfiguration) WithKinesisEndpoint(kinesisEndpoint string) *KinesisClientLibConfiguration { - c.kinesisEndpoint = kinesisEndpoint + c.KinesisEndpoint = kinesisEndpoint return c } func (c *KinesisClientLibConfiguration) WithInitialPositionInStream(initialPositionInStream InitialPositionInStream) *KinesisClientLibConfiguration { - c.initialPositionInStream = initialPositionInStream - c.initialPositionInStreamExtended = *newInitialPosition(initialPositionInStream) + c.InitialPositionInStream = initialPositionInStream + c.InitialPositionInStreamExtended = *newInitialPosition(initialPositionInStream) return c } func (c *KinesisClientLibConfiguration) WithTimestampAtInitialPositionInStream(timestamp *time.Time) *KinesisClientLibConfiguration { - c.initialPositionInStream = AT_TIMESTAMP - c.initialPositionInStreamExtended = *newInitialPositionAtTimestamp(timestamp) + c.InitialPositionInStream = AT_TIMESTAMP + c.InitialPositionInStreamExtended = *newInitialPositionAtTimestamp(timestamp) return c } func (c *KinesisClientLibConfiguration) WithFailoverTimeMillis(failoverTimeMillis int) *KinesisClientLibConfiguration { checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis) - c.failoverTimeMillis = failoverTimeMillis + c.FailoverTimeMillis = failoverTimeMillis return c } func (c *KinesisClientLibConfiguration) WithShardSyncIntervalMillis(shardSyncIntervalMillis int) *KinesisClientLibConfiguration { checkIsValuePositive("ShardSyncIntervalMillis", shardSyncIntervalMillis) - c.shardSyncIntervalMillis = shardSyncIntervalMillis + c.ShardSyncIntervalMillis = shardSyncIntervalMillis return c } func (c *KinesisClientLibConfiguration) WithMaxRecords(maxRecords int) *KinesisClientLibConfiguration { checkIsValuePositive("MaxRecords", maxRecords) - c.maxRecords = maxRecords + c.MaxRecords = maxRecords return c } @@ -102,46 +103,52 @@ func (c *KinesisClientLibConfiguration) WithMaxRecords(maxRecords int) *KinesisC * Metric: GetRecords.MillisBehindLatest *

* - * @param idleTimeBetweenReadsInMillis + * @param IdleTimeBetweenReadsInMillis * how long to sleep between GetRecords calls when no records are returned. * @return KinesisClientLibConfiguration */ func (c *KinesisClientLibConfiguration) WithIdleTimeBetweenReadsInMillis(idleTimeBetweenReadsInMillis int) *KinesisClientLibConfiguration { checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis) - c.idleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis + c.IdleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis return c } func (c *KinesisClientLibConfiguration) WithCallProcessRecordsEvenForEmptyRecordList(callProcessRecordsEvenForEmptyRecordList bool) *KinesisClientLibConfiguration { - c.callProcessRecordsEvenForEmptyRecordList = callProcessRecordsEvenForEmptyRecordList + c.CallProcessRecordsEvenForEmptyRecordList = callProcessRecordsEvenForEmptyRecordList return c } func (c *KinesisClientLibConfiguration) WithTaskBackoffTimeMillis(taskBackoffTimeMillis int) *KinesisClientLibConfiguration { - checkIsValuePositive("taskBackoffTimeMillis", taskBackoffTimeMillis) - c.taskBackoffTimeMillis = taskBackoffTimeMillis + checkIsValuePositive("TaskBackoffTimeMillis", taskBackoffTimeMillis) + c.TaskBackoffTimeMillis = taskBackoffTimeMillis return c } // WithMetricsBufferTimeMillis configures Metrics are buffered for at most this long before publishing to CloudWatch func (c *KinesisClientLibConfiguration) WithMetricsBufferTimeMillis(metricsBufferTimeMillis int) *KinesisClientLibConfiguration { - checkIsValuePositive("metricsBufferTimeMillis", metricsBufferTimeMillis) - c.metricsBufferTimeMillis = metricsBufferTimeMillis + checkIsValuePositive("MetricsBufferTimeMillis", metricsBufferTimeMillis) + c.MetricsBufferTimeMillis = metricsBufferTimeMillis return c } // WithMetricsMaxQueueSize configures Max number of metrics to buffer before publishing to CloudWatch func (c *KinesisClientLibConfiguration) WithMetricsMaxQueueSize(metricsMaxQueueSize int) 
*KinesisClientLibConfiguration { - checkIsValuePositive("metricsMaxQueueSize", metricsMaxQueueSize) - c.metricsMaxQueueSize = metricsMaxQueueSize + checkIsValuePositive("MetricsMaxQueueSize", metricsMaxQueueSize) + c.MetricsMaxQueueSize = metricsMaxQueueSize return c } // WithRegionName configures region for the stream func (c *KinesisClientLibConfiguration) WithRegionName(regionName string) *KinesisClientLibConfiguration { - checkIsValueNotEmpty("regionName", regionName) - c.regionName = regionName + checkIsValueNotEmpty("RegionName", regionName) + c.RegionName = regionName return c } -// Getters +// WithWorkerThreadPoolSize configures worker thread pool size +func (c *KinesisClientLibConfiguration) WithWorkerThreadPoolSize(n int) *KinesisClientLibConfiguration { + checkIsValuePositive("WorkerThreadPoolSize", n) + c.WorkerThreadPoolSize = n + return c +} + diff --git a/src/leases/dynamoutils/dynamoutils.go b/src/leases/dynamoutils/dynamoutils.go new file mode 100644 index 0000000..8d286be --- /dev/null +++ b/src/leases/dynamoutils/dynamoutils.go @@ -0,0 +1,78 @@ +package util + +import ( + "strconv" + + "clientlibrary/common" + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +/** + * Some static utility functions used by our LeaseSerializers. 
+ */ + +func CreateAttributeValueFromSS(collectionValue []*string) (*dynamodb.AttributeValue, error) { + if len(collectionValue) == 0 { + return nil, common.IllegalArgumentError.MakeErr().WithDetail("Collection attributeValues cannot be null or empty.") + } + + attrib := &dynamodb.AttributeValue{} + attrib.SetSS(collectionValue) + + return attrib, nil +} + +func CreateAttributeValueFromString(stringValue string) (*dynamodb.AttributeValue, error) { + if len(stringValue) == 0 { + return nil, common.IllegalArgumentError.MakeErr().WithDetail("String attributeValues cannot be null or empty.") + } + + attrib := &dynamodb.AttributeValue{} + attrib.SetS(stringValue) + + return attrib, nil +} + +func CreateAttributeValueFromLong(longValue int64) (*dynamodb.AttributeValue, error) { + attrib := &dynamodb.AttributeValue{} + attrib.SetN(strconv.FormatInt(longValue, 10)) + + return attrib, nil +} + +func SafeGetLong(dynamoRecord map[string]*dynamodb.AttributeValue, key string) int64 { + av := dynamoRecord[key] + + if av == nil || av.N == nil { + return 0 + } + + var val int64 + val, err := strconv.ParseInt(*av.N, 10, 64) + + if err != nil { + return 0 + } + + return val +} + +func SafeGetString(dynamoRecord map[string]*dynamodb.AttributeValue, key string) *string { + av := dynamoRecord[key] + if av == nil { + return nil + } + + return av.S +} + +func SafeGetSS(dynamoRecord map[string]*dynamodb.AttributeValue, key string) []*string { + av := dynamoRecord[key] + + if av == nil { + var emptyslice []*string + return emptyslice + } + + return av.SS +} diff --git a/src/leases/impl/kinesis-client-lease.go b/src/leases/impl/kinesis-client-lease.go new file mode 100644 index 0000000..6132a40 --- /dev/null +++ b/src/leases/impl/kinesis-client-lease.go @@ -0,0 +1,116 @@ +package impl + +import ( + . "clientlibrary/types" +) + +// KinesisClientLease is a Lease subclass containing KinesisClientLibrary related fields for checkpoints. 
+type KinesisClientLease struct { + checkpoint *ExtendedSequenceNumber + pendingCheckpoint *ExtendedSequenceNumber + ownerSwitchesSinceCheckpoint int64 + parentShardIds *[]string + + // coreLease to hold lease information + // Note: golang doesn't support inheritance, use composition instead. + coreLease Lease +} + +// GetCheckpoint returns most recently application-supplied checkpoint value. During fail over, the new worker +// will pick up after the old worker's last checkpoint. +func (l *KinesisClientLease) GetCheckpoint() *ExtendedSequenceNumber { + return l.checkpoint +} + +// GetPendingCheckpoint returns pending checkpoint, possibly null. +func (l *KinesisClientLease) GetPendingCheckpoint() *ExtendedSequenceNumber { + return l.pendingCheckpoint +} + +// GetOwnerSwitchesSinceCheckpoint counts of distinct lease holders between checkpoints. +func (l *KinesisClientLease) GetOwnerSwitchesSinceCheckpoint() int64 { + return l.ownerSwitchesSinceCheckpoint +} + +// GetParentShardIds returns shardIds that parent this lease. Used for resharding. +func (l *KinesisClientLease) GetParentShardIds() *[]string { + return l.parentShardIds +} + +// SetCheckpoint +func (l *KinesisClientLease) SetCheckpoint(checkpoint *ExtendedSequenceNumber) { + l.checkpoint = checkpoint +} + +// SetPendingCheckpoint +func (l *KinesisClientLease) SetPendingCheckpoint(pendingCheckpoint *ExtendedSequenceNumber) { + l.pendingCheckpoint = pendingCheckpoint +} + +// SetOwnerSwitchesSinceCheckpoint +func (l *KinesisClientLease) SetOwnerSwitchesSinceCheckpoint(ownerSwitchesSinceCheckpoint int64) { + l.ownerSwitchesSinceCheckpoint = ownerSwitchesSinceCheckpoint +} + +// SetParentShardIds +func (l *KinesisClientLease) SetParentShardIds(parentShardIds *[]string) { + l.parentShardIds = parentShardIds +} + +// GetLeaseKey retrieves leaseKey - identifies the unit of work associated with this lease. 
+func (l *KinesisClientLease) GetLeaseKey() string { + return l.coreLease.GetLeaseKey() +} + +// GetLeaseOwner gets current owner of the lease, may be "". +func (l *KinesisClientLease) GetLeaseOwner() string { + return l.coreLease.GetLeaseOwner() +} + +// GetLeaseCounter retrieves leaseCounter which is incremented periodically by the holder of the lease. Used for optimistic locking. +func (l *KinesisClientLease) GetLeaseCounter() int64 { + return l.coreLease.GetLeaseCounter() +} + +// GetConcurrencyToken returns concurrency token +func (l *KinesisClientLease) GetConcurrencyToken() string { + return l.coreLease.GetConcurrencyToken() +} + +// GetLastCounterIncrementNanos returns concurrency token +func (l *KinesisClientLease) GetLastCounterIncrementNanos() int64 { + return l.coreLease.GetLastCounterIncrementNanos() +} + +// SetLeaseKey sets leaseKey - LeaseKey is immutable once set. +func (l *KinesisClientLease) SetLeaseKey(leaseKey string) error { + return l.coreLease.SetLeaseKey(leaseKey) +} + +// SetLeaseOwner set current owner of the lease, may be "". +func (l *KinesisClientLease) SetLeaseOwner(leaseOwner string) { + l.coreLease.SetLeaseOwner(leaseOwner) +} + +// SetLeaseCounter sets leaseCounter which is incremented periodically by the holder of the lease. Used for optimistic locking. 
+func (l *KinesisClientLease) SetLeaseCounter(leaseCounter int64) { + l.coreLease.SetLeaseCounter(leaseCounter) +} + +// SetConcurrencyToken +func (l *KinesisClientLease) SetConcurrencyToken(concurrencyToken string) { + l.coreLease.SetConcurrencyToken(concurrencyToken) +} + +// SetLastCounterIncrementNanos returns concurrency token +func (l *KinesisClientLease) SetLastCounterIncrementNanos(lastCounterIncrementNanos int64) { + l.coreLease.SetLastCounterIncrementNanos(lastCounterIncrementNanos) +} + +// IsExpired to check whether lease expired using +// @param leaseDurationNanos duration of lease in nanoseconds +// @param asOfNanos time in nanoseconds to check expiration as-of +// @return true if lease is expired as-of given time, false otherwise +func (l *KinesisClientLease) IsExpired(leaseDurationNanos, asOfNanos int64) bool { + return l.coreLease.IsExpired(leaseDurationNanos, asOfNanos) +} diff --git a/src/leases/impl/lease-manager.go b/src/leases/impl/lease-manager.go new file mode 100644 index 0000000..a447e11 --- /dev/null +++ b/src/leases/impl/lease-manager.go @@ -0,0 +1,440 @@ +package impl + +import ( + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + + . "leases/interfaces" +) + +const ( + // CREATING - The table is being created. + TABLE_CREATING = "CREATING" + + // UPDATING - The table is being updated. + TABLE_UPDATING = "UPDATING" + + // DELETING - The table is being deleted. + TABLE_DELETING = "DELETING" + + // ACTIVE - The table is ready for use. + TABLE_ACTIVE = "ACTIVE" +) + +// LeaseManager is an implementation of ILeaseManager that uses DynamoDB. 
+type LeaseManager struct { + tableName string + dynamoDBClient dynamodbiface.DynamoDBAPI + serializer ILeaseSerializer + consistentReads bool +} + +func NewLeaseManager(tableName string, dynamoDBClient dynamodbiface.DynamoDBAPI, serializer ILeaseSerializer) *LeaseManager { + return &LeaseManager{ + tableName: tableName, + dynamoDBClient: dynamoDBClient, + serializer: serializer, + consistentReads: false, + } +} + +/** + * Creates the table that will store leases. Succeeds if table already exists. + * + * @param readCapacity + * @param writeCapacity + * + * @return true if we created a new table (table didn't exist before) + * + * @error ProvisionedThroughputError if we cannot create the lease table due to per-AWS-account capacity + * restrictions. + * @error LeasingDependencyError if DynamoDB createTable fails in an unexpected way + */ +func (l *LeaseManager) CreateLeaseTableIfNotExists(readCapacity, writeCapacity int64) (bool, error) { + status, _ := l.tableStatus() + + if status != nil { + return false, nil + } + + input := &dynamodb.CreateTableInput{ + AttributeDefinitions: l.serializer.GetAttributeDefinitions(), + KeySchema: l.serializer.GetKeySchema(), + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(readCapacity), + WriteCapacityUnits: aws.Int64(writeCapacity), + }, + TableName: aws.String(l.tableName), + } + _, err := l.dynamoDBClient.CreateTable(input) + + if err != nil { + return false, err + } + return true, nil +} + +/** + * @return true if the lease table already exists. + * + * @error LeasingDependencyError if DynamoDB describeTable fails in an unexpected way + */ +func (l *LeaseManager) LeaseTableExists() (bool, error) { + status, _ := l.tableStatus() + + if status != nil || aws.StringValue(status) == TABLE_ACTIVE { + return true, nil + } + return false, nil +} + +/** + * Blocks until the lease table exists by polling leaseTableExists. 
+ * + * @param secondsBetweenPolls time to wait between polls in seconds + * @param timeoutSeconds total time to wait in seconds + * + * @return true if table exists, false if timeout was reached + * + * @error LeasingDependencyError if DynamoDB describeTable fails in an unexpected way + */ +func (l *LeaseManager) WaitUntilLeaseTableExists(secondsBetweenPolls, timeoutSeconds int64) (bool, error) { + delay := time.Duration(secondsBetweenPolls) * time.Second + deadline := time.Now().Add(time.Duration(timeoutSeconds) * time.Second) + + var err error + for time.Now().Before(deadline) { + flag := false + flag, err = l.LeaseTableExists() + + if flag { + return true, nil + } + + time.Sleep(delay) + } + + return false, err +} + +/** + * List all objects in table synchronously. + * + * @error LeasingDependencyError if DynamoDB scan fails in an unexpected way + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB scan fails due to lack of capacity + * + * @return list of leases + */ +func (l *LeaseManager) ListLeases() ([]ILease, error) { + return l.list(0) +} + +/** + * Create a new lease. Conditional on a lease not already existing with this shardId. 
+ * + * @param lease the lease to create + * + * @return true if lease was created, false if lease already exists + * + * @error LeasingDependencyError if DynamoDB put fails in an unexpected way + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB put fails due to lack of capacity + */ +func (l *LeaseManager) CreateLeaseIfNotExists(lease ILease) (bool, error) { + input := &dynamodb.PutItemInput{ + TableName: aws.String(l.tableName), + Item: l.serializer.ToDynamoRecord(lease), + Expected: l.serializer.GetDynamoNonexistantExpectation(), + } + _, err := l.dynamoDBClient.PutItem(input) + return err != nil, err +} + +/** + * @param shardId Get the lease for this shardId and it is the leaseKey + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB get fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB get fails in an unexpected way + * + * @return lease for the specified shardId, or null if one doesn't exist + */ +func (l *LeaseManager) GetLease(shardId string) (ILease, error) { + input := &dynamodb.GetItemInput{ + TableName: aws.String(l.tableName), + Key: l.serializer.GetDynamoHashKey(shardId), + ConsistentRead: aws.Bool(l.consistentReads), + } + result, err := l.dynamoDBClient.GetItem(input) + if err != nil { + return nil, err + } + dynamoRecord := result.Item + if dynamoRecord == nil { + return nil, nil + } + lease := l.serializer.FromDynamoRecord(dynamoRecord) + return lease, nil +} + +/** + * Renew a lease by incrementing the lease counter. Conditional on the leaseCounter in DynamoDB matching the leaseCounter + * of the input. Mutates the leaseCounter of the passed-in lease object after updating the record in DynamoDB. 
+ * + * @param lease the lease to renew + * + * @return true if renewal succeeded, false otherwise + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB update fails in an unexpected way + */ +func (l *LeaseManager) RenewLease(lease ILease) (bool, error) { + input := &dynamodb.UpdateItemInput{ + TableName: aws.String(l.tableName), + Key: l.serializer.GetDynamoHashKey(lease.GetLeaseKey()), + Expected: l.serializer.GetDynamoLeaseCounterExpectation(lease), + } + _, err := l.dynamoDBClient.UpdateItem(input) + + if err != nil { + // If we had a spurious retry during the Dynamo update, then this conditional PUT failure + // might be incorrect. So, we get the item straight away and check if the lease owner + lease counter + // are what we expected. + expectedOwner := lease.GetLeaseOwner() + expectedCounter := lease.GetLeaseCounter() + 1 + updatedLease, _ := l.GetLease(lease.GetLeaseKey()) + if updatedLease == nil || expectedOwner != updatedLease.GetLeaseOwner() || + expectedCounter != updatedLease.GetLeaseCounter() { + return false, nil + } + + log.Println("Detected spurious renewal failure for lease with key " + lease.GetLeaseKey() + ", but recovered") + } + + lease.SetLeaseCounter(lease.GetLeaseCounter() + 1) + return err != nil, err + +} + +/** + * Take a lease for the given owner by incrementing its leaseCounter and setting its owner field. Conditional on + * the leaseCounter in DynamoDB matching the leaseCounter of the input. Mutates the leaseCounter and owner of the + * passed-in lease object after updating DynamoDB. 
+ * + * @param lease the lease to take + * @param owner the new owner + * + * @return true if lease was successfully taken, false otherwise + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB update fails in an unexpected way + */ +func (l *LeaseManager) TakeLease(lease ILease, owner string) (bool, error) { + input := &dynamodb.UpdateItemInput{ + TableName: aws.String(l.tableName), + Key: l.serializer.GetDynamoHashKey(lease.GetLeaseKey()), + Expected: l.serializer.GetDynamoLeaseCounterExpectation(lease), + } + + updates := l.serializer.GetDynamoLeaseCounterUpdate(lease) + + // putAll to updates + for k, v := range l.serializer.GetDynamoTakeLeaseUpdate(lease, owner) { + updates[k] = v + } + input.SetAttributeUpdates(updates) + _, err := l.dynamoDBClient.UpdateItem(input) + + if err != nil { + return false, err + } + + lease.SetLeaseCounter(lease.GetLeaseCounter() + 1) + lease.SetLeaseOwner(owner) + return true, nil +} + +/** + * Evict the current owner of lease by setting owner to null. Conditional on the owner in DynamoDB matching the owner of + * the input. Mutates the lease counter and owner of the passed-in lease object after updating the record in DynamoDB. 
+ * + * @param lease the lease to void + * + * @return true if eviction succeeded, false otherwise + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB update fails in an unexpected way + */ +func (l *LeaseManager) EvictLease(lease ILease) (bool, error) { + input := &dynamodb.UpdateItemInput{ + TableName: aws.String(l.tableName), + Key: l.serializer.GetDynamoHashKey(lease.GetLeaseKey()), + Expected: l.serializer.GetDynamoLeaseCounterExpectation(lease), + } + + updates := l.serializer.GetDynamoLeaseCounterUpdate(lease) + + // putAll to updates + for k, v := range l.serializer.GetDynamoEvictLeaseUpdate(lease) { + updates[k] = v + } + input.SetAttributeUpdates(updates) + _, err := l.dynamoDBClient.UpdateItem(input) + + if err != nil { + return false, err + } + + lease.SetLeaseCounter(lease.GetLeaseCounter() + 1) + lease.SetLeaseOwner("") + return true, nil +} + +/** + * Delete the given lease from DynamoDB. Does nothing when passed a lease that does not exist in DynamoDB. + * + * @param lease the lease to delete + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB delete fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB delete fails in an unexpected way + */ +func (l *LeaseManager) DeleteLease(lease ILease) error { + input := &dynamodb.DeleteItemInput{ + TableName: aws.String(l.tableName), + Key: l.serializer.GetDynamoHashKey(lease.GetLeaseKey()), + } + _, err := l.dynamoDBClient.DeleteItem(input) + return err +} + +/** + * Delete all leases from DynamoDB. Useful for tools/utils and testing. 
+ * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB scan or delete fail due to lack of capacity + * @error LeasingDependencyError if DynamoDB scan or delete fail in an unexpected way + */ +func (l *LeaseManager) DeleteAll() error { + allLeases, err := l.ListLeases() + if err != nil { + return err + } + + for _, v := range allLeases { + err := l.DeleteLease(v) + if err != nil { + return err + } + } + return nil +} + +/** + * Update application-specific fields of the given lease in DynamoDB. Does not update fields managed by the leasing + * library such as leaseCounter, leaseOwner, or leaseKey. Conditional on the leaseCounter in DynamoDB matching the + * leaseCounter of the input. Increments the lease counter in DynamoDB so that updates can be contingent on other + * updates. Mutates the lease counter of the passed-in lease object. + * + * @return true if update succeeded, false otherwise + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB update fails in an unexpected way + */ +func (l *LeaseManager) UpdateLease(lease ILease) (bool, error) { + input := &dynamodb.UpdateItemInput{ + TableName: aws.String(l.tableName), + Key: l.serializer.GetDynamoHashKey(lease.GetLeaseKey()), + Expected: l.serializer.GetDynamoLeaseCounterExpectation(lease), + } + + updates := l.serializer.GetDynamoLeaseCounterUpdate(lease) + + // putAll to updates + for k, v := range l.serializer.GetDynamoUpdateLeaseUpdate(lease) { + updates[k] = v + } + input.SetAttributeUpdates(updates) + _, err := l.dynamoDBClient.UpdateItem(input) + + if err != nil { + return false, err + } + + lease.SetLeaseCounter(lease.GetLeaseCounter() + 1) + return true, nil +} + +/** + * Check (synchronously) if there are any leases in the lease table. 
+ * + * @return true if there are no leases in the lease table + * + * @error LeasingDependencyError if DynamoDB scan fails in an unexpected way + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB scan fails due to lack of capacity + */ +func (l *LeaseManager) IsLeaseTableEmpty() (bool, error) { + result, err := l.list(1) + if err != nil { + return true, err + } + return len(result) > 0, nil +} + +// tableStatus check the current lease table status +func (l *LeaseManager) tableStatus() (*string, error) { + input := &dynamodb.DescribeTableInput{ + TableName: aws.String(l.tableName), + } + + result, err := l.dynamoDBClient.DescribeTable(input) + if err != nil { + return nil, err + } + + return result.Table.TableStatus, nil +} + +// List with the given page size (number of items to consider at a time). Package access for integration testing. +func (l *LeaseManager) list(limit int64) ([]ILease, error) { + input := &dynamodb.ScanInput{ + TableName: aws.String(l.tableName), + } + + if limit > 0 { + input.SetLimit(limit) + } + + result := []ILease{} + + for { + scanResult, err := l.dynamoDBClient.Scan(input) + if err != nil || scanResult == nil { + break + } + + for _, v := range scanResult.Items { + result = append(result, l.serializer.FromDynamoRecord(v)) + } + + lastEvaluatedKey := scanResult.LastEvaluatedKey + if lastEvaluatedKey == nil { + scanResult = nil + break + } else { + input.SetExclusiveStartKey(lastEvaluatedKey) + } + } + + return result, nil +} diff --git a/src/leases/impl/lease-serializer.go b/src/leases/impl/lease-serializer.go new file mode 100644 index 0000000..3e3f3b3 --- /dev/null +++ b/src/leases/impl/lease-serializer.go @@ -0,0 +1,184 @@ +package impl + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + + dynamoutils "leases/dynamoutils" + . 
"leases/interfaces" +) + +const ( + LEASE_KEY_KEY = "leaseKey" + LEASE_OWNER_KEY = "leaseOwner" + LEASE_COUNTER_KEY = "leaseCounter" +) + +/** + * An implementation of ILeaseSerializer for basic Lease objects. Can also instantiate subclasses of Lease so that + * LeaseSerializer can be decorated by other classes if you need to add fields to leases. + */ +type LeaseSerializer struct { +} + +/** + * Construct a DynamoDB record out of a Lease object + * + * @param lease lease object to serialize + * @return an attribute value map representing the lease object + */ +func (lc *LeaseSerializer) ToDynamoRecord(lease ILease) map[string]*dynamodb.AttributeValue { + result := map[string]*dynamodb.AttributeValue{} + + result[LEASE_KEY_KEY], _ = dynamoutils.CreateAttributeValueFromString(lease.GetLeaseKey()) + result[LEASE_COUNTER_KEY], _ = dynamoutils.CreateAttributeValueFromLong(lease.GetLeaseCounter()) + + if len(lease.GetLeaseOwner()) > 0 { + result[LEASE_OWNER_KEY], _ = dynamoutils.CreateAttributeValueFromString(lease.GetLeaseOwner()) + } + + return result +} + +/** + * Construct a Lease object out of a DynamoDB record. + * + * @param dynamoRecord attribute value map from DynamoDB + * @return a deserialized lease object representing the attribute value map + */ +func (lc *LeaseSerializer) FromDynamoRecord(dynamoRecord map[string]*dynamodb.AttributeValue) ILease { + result := &Lease{} + + result.SetLeaseKey(aws.StringValue(dynamoutils.SafeGetString(dynamoRecord, LEASE_KEY_KEY))) + result.SetLeaseOwner(aws.StringValue(dynamoutils.SafeGetString(dynamoRecord, LEASE_OWNER_KEY))) + result.SetLeaseCounter(dynamoutils.SafeGetLong(dynamoRecord, LEASE_COUNTER_KEY)) + return result +} + +/** + * Special getDynamoHashKey implementation used by ILeaseManager.getLease(). + * + * @param leaseKey + * @return the attribute value map representing a Lease's hash key given a string. 
+ */ +func (lc *LeaseSerializer) GetDynamoHashKey(leaseKey string) map[string]*dynamodb.AttributeValue { + result := map[string]*dynamodb.AttributeValue{} + result[LEASE_KEY_KEY], _ = dynamoutils.CreateAttributeValueFromString(leaseKey) + return result +} + +/** + * @param lease + * @return the attribute value map asserting that a lease counter is what we expect. + */ +func (lc *LeaseSerializer) GetDynamoLeaseCounterExpectation(lease ILease) map[string]*dynamodb.ExpectedAttributeValue { + result := map[string]*dynamodb.ExpectedAttributeValue{} + expectedAV := &dynamodb.ExpectedAttributeValue{} + val, _ := dynamoutils.CreateAttributeValueFromLong(lease.GetLeaseCounter()) + expectedAV.SetValue(val) + result[LEASE_COUNTER_KEY] = expectedAV + return result +} + +/** + * @param lease + * @return the attribute value map asserting that the lease owner is what we expect. + */ +func (lc *LeaseSerializer) GetDynamoLeaseOwnerExpectation(lease ILease) map[string]*dynamodb.ExpectedAttributeValue { + result := map[string]*dynamodb.ExpectedAttributeValue{} + expectedAV := &dynamodb.ExpectedAttributeValue{} + val, _ := dynamoutils.CreateAttributeValueFromString(lease.GetLeaseOwner()) + expectedAV.SetValue(val) + result[LEASE_OWNER_KEY] = expectedAV + return result + +} + +/** + * @return the attribute value map asserting that a lease does not exist. 
+ */ +func (lc *LeaseSerializer) GetDynamoNonexistantExpectation() map[string]*dynamodb.ExpectedAttributeValue { + result := map[string]*dynamodb.ExpectedAttributeValue{} + expectedAV := &dynamodb.ExpectedAttributeValue{} + expectedAV.SetExists(false) + result[LEASE_KEY_KEY] = expectedAV + + return result +} + +/** + * @param lease + * @return the attribute value map that increments a lease counter + */ +func (lc *LeaseSerializer) GetDynamoLeaseCounterUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate { + result := map[string]*dynamodb.AttributeValueUpdate{} + updatedAV := &dynamodb.AttributeValueUpdate{} + // Increase the lease counter by 1 + val, _ := dynamoutils.CreateAttributeValueFromLong(lease.GetLeaseCounter() + 1) + updatedAV.SetValue(val) + updatedAV.SetAction(dynamodb.AttributeActionPut) + result[LEASE_COUNTER_KEY] = updatedAV + return result +} + +/** + * @param lease + * @param newOwner + * @return the attribute value map that takes a lease for a new owner + */ +func (lc *LeaseSerializer) GetDynamoTakeLeaseUpdate(lease ILease, newOwner string) map[string]*dynamodb.AttributeValueUpdate { + result := map[string]*dynamodb.AttributeValueUpdate{} + updatedAV := &dynamodb.AttributeValueUpdate{} + val, _ := dynamoutils.CreateAttributeValueFromString(lease.GetLeaseOwner()) + updatedAV.SetValue(val) + updatedAV.SetAction(dynamodb.AttributeActionPut) + result[LEASE_OWNER_KEY] = updatedAV + return result +} + +/** + * @param lease + * @return the attribute value map that voids a lease + */ +func (lc *LeaseSerializer) GetDynamoEvictLeaseUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate { + result := map[string]*dynamodb.AttributeValueUpdate{} + updatedAV := &dynamodb.AttributeValueUpdate{} + updatedAV.SetValue(nil) + updatedAV.SetAction(dynamodb.AttributeActionDelete) + result[LEASE_OWNER_KEY] = updatedAV + return result +} + +/** + * @param lease + * @return the attribute value map that updates application-specific data for a lease and 
increments the lease + * counter + */ +func (lc *LeaseSerializer) GetDynamoUpdateLeaseUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate { + result := map[string]*dynamodb.AttributeValueUpdate{} + return result +} + +/** + * @return the key schema for creating a DynamoDB table to store leases + */ +func (lc *LeaseSerializer) GetKeySchema() []*dynamodb.KeySchemaElement { + keySchema := []*dynamodb.KeySchemaElement{} + schemaElement := &dynamodb.KeySchemaElement{} + schemaElement.SetAttributeName(LEASE_KEY_KEY) + schemaElement.SetKeyType(dynamodb.KeyTypeHash) + keySchema = append(keySchema, schemaElement) + return keySchema +} + +/** + * @return attribute definitions for creating a DynamoDB table to store leases + */ +func (lc *LeaseSerializer) GetAttributeDefinitions() []*dynamodb.AttributeDefinition { + definitions := []*dynamodb.AttributeDefinition{} + definition := &dynamodb.AttributeDefinition{} + definition.SetAttributeName(LEASE_KEY_KEY) + definition.SetAttributeType(dynamodb.ScalarAttributeTypeS) + definitions = append(definitions, definition) + return definitions +} diff --git a/src/leases/impl/lease.go b/src/leases/impl/lease.go new file mode 100644 index 0000000..b87ecf3 --- /dev/null +++ b/src/leases/impl/lease.go @@ -0,0 +1,113 @@ +package impl + +import ( + cc "clientlibrary/common" + "time" +) + +const ( + // We will consider leases to be expired if they are more than 90 days. + MAX_ABS_AGE_NANOS = int64(90 * 24 * time.Hour) +) + +// Lease structure contains data pertaining to a Lease. Distributed systems may use leases to partition work across a +// fleet of workers. Each unit of work (identified by a leaseKey) has a corresponding Lease. Every worker will contend +// for all leases - only one worker will successfully take each one. The worker should hold the lease until it is ready to stop +// processing the corresponding unit of work, or until it fails. 
When the worker stops holding the lease, another worker will +// take and hold the lease. +type Lease struct { + leaseKey string + leaseOwner string + leaseCounter int64 + + // This field is used to prevent updates to leases that we have lost and re-acquired. It is deliberately not + // persisted in DynamoDB and excluded from hashCode and equals. + concurrencyToken string + + // This field is used by LeaseRenewer and LeaseTaker to track the last time a lease counter was incremented. It is + // deliberately not persisted in DynamoDB and excluded from hashCode and equals. + lastCounterIncrementNanos int64 +} + +// CloneLease to clone a lease object +func CopyLease(lease *Lease) *Lease { + return &Lease{ + leaseKey: lease.leaseKey, + leaseOwner: lease.leaseOwner, + leaseCounter: lease.leaseCounter, + concurrencyToken: lease.concurrencyToken, + lastCounterIncrementNanos: lease.lastCounterIncrementNanos, + } +} + +// GetLeaseKey retrieves leaseKey - identifies the unit of work associated with this lease. +func (l *Lease) GetLeaseKey() string { + return l.leaseKey +} + +// GetLeaseOwner gets current owner of the lease, may be "". +func (l *Lease) GetLeaseOwner() string { + return l.leaseOwner +} + +// GetLeaseCounter retrieves leaseCounter which is incremented periodically by the holder of the lease. Used for optimistic locking. +func (l *Lease) GetLeaseCounter() int64 { + return l.leaseCounter +} + +// GetConcurrencyToken returns concurrency token +func (l *Lease) GetConcurrencyToken() string { + return l.concurrencyToken +} + +// GetLastCounterIncrementNanos returns concurrency token +func (l *Lease) GetLastCounterIncrementNanos() int64 { + return l.lastCounterIncrementNanos +} + +// SetLeaseKey sets leaseKey - LeaseKey is immutable once set. 
+func (l *Lease) SetLeaseKey(leaseKey string) error { + if len(l.leaseKey) > 0 { + return cc.IllegalArgumentError.MakeErr().WithDetail("LeaseKey is immutable once set") + } + + l.leaseKey = leaseKey + return nil +} + +// SetLeaseOwner set current owner of the lease, may be "". +func (l *Lease) SetLeaseOwner(leaseOwner string) { + l.leaseOwner = leaseOwner +} + +// SetLeaseCounter sets leaseCounter which is incremented periodically by the holder of the lease. Used for optimistic locking. +func (l *Lease) SetLeaseCounter(leaseCounter int64) { + l.leaseCounter = leaseCounter +} + +// SetConcurrencyToken +func (l *Lease) SetConcurrencyToken(concurrencyToken string) { + l.concurrencyToken = concurrencyToken +} + +// SetLastCounterIncrementNanos returns concurrency token +func (l *Lease) SetLastCounterIncrementNanos(lastCounterIncrementNanos int64) { + l.lastCounterIncrementNanos = lastCounterIncrementNanos +} + +// IsExpired to check whether lease expired using +// @param leaseDurationNanos duration of lease in nanoseconds +// @param asOfNanos time in nanoseconds to check expiration as-of +// @return true if lease is expired as-of given time, false otherwise +func (l *Lease) IsExpired(leaseDurationNanos, asOfNanos int64) bool { + if l.lastCounterIncrementNanos == 0 { + return true + } + + age := asOfNanos - l.lastCounterIncrementNanos + if age > MAX_ABS_AGE_NANOS { + return true + } else { + return age > leaseDurationNanos + } +} diff --git a/src/leases/interfaces/lease-manager.go b/src/leases/interfaces/lease-manager.go new file mode 100644 index 0000000..8f27aa2 --- /dev/null +++ b/src/leases/interfaces/lease-manager.go @@ -0,0 +1,162 @@ +package interfaces + +// ILeaseManager supports basic CRUD operations for Leases. +type ILeaseManager interface { + + /** + * Creates the table that will store leases. Succeeds if table already exists. 
+ * + * @param readCapacity + * @param writeCapacity + * + * @return true if we created a new table (table didn't exist before) + * + * @error ProvisionedThroughputError if we cannot create the lease table due to per-AWS-account capacity + * restrictions. + * @error LeasingDependencyError if DynamoDB createTable fails in an unexpected way + */ + CreateLeaseTableIfNotExists(readCapacity, writeCapacity int64) (bool, error) + + /** + * @return true if the lease table already exists. + * + * @error LeasingDependencyError if DynamoDB describeTable fails in an unexpected way + */ + LeaseTableExists() (bool, error) + + /** + * Blocks until the lease table exists by polling leaseTableExists. + * + * @param secondsBetweenPolls time to wait between polls in seconds + * @param timeoutSeconds total time to wait in seconds + * + * @return true if table exists, false if timeout was reached + * + * @error LeasingDependencyError if DynamoDB describeTable fails in an unexpected way + */ + WaitUntilLeaseTableExists(secondsBetweenPolls, timeoutSeconds int64) (bool, error) + + /** + * List all objects in table synchronously. + * + * @error LeasingDependencyError if DynamoDB scan fails in an unexpected way + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB scan fails due to lack of capacity + * + * @return list of leases + */ + ListLeases() ([]ILease, error) + + /** + * Create a new lease. Conditional on a lease not already existing with this shardId. 
+ * + * @param lease the lease to create + * + * @return true if lease was created, false if lease already exists + * + * @error LeasingDependencyError if DynamoDB put fails in an unexpected way + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB put fails due to lack of capacity + */ + CreateLeaseIfNotExists(lease ILease) (bool, error) + + /** + * @param shardId Get the lease for this shardId + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB get fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB get fails in an unexpected way + * + * @return lease for the specified shardId, or null if one doesn't exist + */ + GetLease(shardId string) (ILease, error) + + /** + * Renew a lease by incrementing the lease counter. Conditional on the leaseCounter in DynamoDB matching the leaseCounter + * of the input. Mutates the leaseCounter of the passed-in lease object after updating the record in DynamoDB. + * + * @param lease the lease to renew + * + * @return true if renewal succeeded, false otherwise + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB update fails in an unexpected way + */ + RenewLease(lease ILease) (bool, error) + + /** + * Take a lease for the given owner by incrementing its leaseCounter and setting its owner field. Conditional on + * the leaseCounter in DynamoDB matching the leaseCounter of the input. Mutates the leaseCounter and owner of the + * passed-in lease object after updating DynamoDB. 
+ * + * @param lease the lease to take + * @param owner the new owner + * + * @return true if lease was successfully taken, false otherwise + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB update fails in an unexpected way + */ + TakeLease(lease ILease, owner string) (bool, error) + + /** + * Evict the current owner of lease by setting owner to null. Conditional on the owner in DynamoDB matching the owner of + * the input. Mutates the lease counter and owner of the passed-in lease object after updating the record in DynamoDB. + * + * @param lease the lease to void + * + * @return true if eviction succeeded, false otherwise + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB update fails in an unexpected way + */ + EvictLease(lease ILease) (bool, error) + + /** + * Delete the given lease from DynamoDB. Does nothing when passed a lease that does not exist in DynamoDB. + * + * @param lease the lease to delete + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB delete fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB delete fails in an unexpected way + */ + DeleteLease(lease ILease) error + + /** + * Delete all leases from DynamoDB. Useful for tools/utils and testing. + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB scan or delete fail due to lack of capacity + * @error LeasingDependencyError if DynamoDB scan or delete fail in an unexpected way + */ + DeleteAll() error + + /** + * Update application-specific fields of the given lease in DynamoDB. 
Does not update fields managed by the leasing + * library such as leaseCounter, leaseOwner, or leaseKey. Conditional on the leaseCounter in DynamoDB matching the + * leaseCounter of the input. Increments the lease counter in DynamoDB so that updates can be contingent on other + * updates. Mutates the lease counter of the passed-in lease object. + * + * @return true if update succeeded, false otherwise + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB update fails in an unexpected way + */ + UpdateLease(lease ILease) (bool, error) + + /** + * Check (synchronously) if there are any leases in the lease table. + * + * @return true if there are no leases in the lease table + * + * @error LeasingDependencyError if DynamoDB scan fails in an unexpected way + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB scan fails due to lack of capacity + */ + IsLeaseTableEmpty() (bool, error) +} diff --git a/src/leases/interfaces/lease-renewer.go b/src/leases/interfaces/lease-renewer.go new file mode 100644 index 0000000..6e52049 --- /dev/null +++ b/src/leases/interfaces/lease-renewer.go @@ -0,0 +1,78 @@ +package interfaces + +// LeaseTable hold current lease mapping shardId --> Lease +type LeaseTable map[string]*ILease + +/** + * ILeaseRenewer objects are used by LeaseCoordinator to renew leases held by the LeaseCoordinator. Each + * LeaseCoordinator instance corresponds to one worker, and uses exactly one ILeaseRenewer to manage lease renewal for + * that worker. + */ +type ILeaseRenewer interface { + + /** + * Bootstrap initial set of leases from the LeaseManager (e.g. 
upon process restart, pick up leases we own) + * @error LeasingDependencyError on unexpected DynamoDB failures + * @error LeasingInvalidStateError if lease table doesn't exist + * @error ProvisionedThroughputError if DynamoDB reads fail due to insufficient capacity + */ + Initialize() error + + /** + * Attempt to renew all currently held leases. + * + * @error LeasingDependencyError on unexpected DynamoDB failures + * @error LeasingInvalidStateError if lease table does not exist + */ + RenewLeases() error + + /** + * @return currently held leases. Key is shardId, value is corresponding Lease object. A lease is currently held if + * we successfully renewed it on the last run of renewLeases(). Lease objects returned are deep copies - + * their lease counters will not tick. + */ + GetCurrentlyHeldLeases() *LeaseTable + + /** + * @param leaseKey key of the lease to retrieve + * + * @return a deep copy of a currently held lease, or null if we don't hold the lease + */ + GetCurrentlyHeldLease(leaseKey string) *ILease + + /** + * Adds leases to this LeaseRenewer's set of currently held leases. Leases must have lastRenewalNanos set to the + * last time the lease counter was incremented before being passed to this method. + * + * @param newLeases new leases. + */ + AddLeasesToRenew(newLeases []ILease) + + /** + * Clears this LeaseRenewer's set of currently held leases. + */ + ClearCurrentlyHeldLeases() + + /** + * Stops the lease renewer from continunig to maintain the given lease. + * + * @param lease the lease to drop. + */ + DropLease(lease ILease) + + /** + * Update application-specific fields in a currently held lease. Cannot be used to update internal fields such as + * leaseCounter, leaseOwner, etc. Fails if we do not hold the lease, or if the concurrency token does not match + * the concurrency token on the internal authoritative copy of the lease (ie, if we lost and re-acquired the lease). 
+ * + * @param lease lease object containing updated data + * @param concurrencyToken obtained by calling Lease.getConcurrencyToken for a currently held lease + * + * @return true if update succeeds, false otherwise + * + * @error LeasingInvalidStateError if lease table does not exist + * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity + * @error LeasingDependencyError if DynamoDB update fails in an unexpected way + */ + UpdateLease(lease ILease, concurrencyToken string) (bool, error) +} diff --git a/src/leases/interfaces/lease-serializer.go b/src/leases/interfaces/lease-serializer.go new file mode 100644 index 0000000..a8601d0 --- /dev/null +++ b/src/leases/interfaces/lease-serializer.go @@ -0,0 +1,86 @@ +package interfaces + +import ( + "github.com/aws/aws-sdk-go/service/dynamodb" +) + +// ILeaseSerializer an utility class that manages the mapping of Lease objects/operations to records in DynamoDB. +type ILeaseSerializer interface { + + /** + * Construct a DynamoDB record out of a Lease object + * + * @param lease lease object to serialize + * @return an attribute value map representing the lease object + */ + ToDynamoRecord(lease ILease) map[string]*dynamodb.AttributeValue + + /** + * Construct a Lease object out of a DynamoDB record. + * + * @param dynamoRecord attribute value map from DynamoDB + * @return a deserialized lease object representing the attribute value map + */ + FromDynamoRecord(dynamoRecord map[string]*dynamodb.AttributeValue) ILease + + /** + * Special getDynamoHashKey implementation used by ILeaseManager.getLease(). + * + * @param leaseKey + * @return the attribute value map representing a Lease's hash key given a string. + */ + GetDynamoHashKey(leaseKey string) map[string]*dynamodb.AttributeValue + + /** + * @param lease + * @return the attribute value map asserting that a lease counter is what we expect. 
+ */ + GetDynamoLeaseCounterExpectation(lease ILease) map[string]*dynamodb.ExpectedAttributeValue + + /** + * @param lease + * @return the attribute value map asserting that the lease owner is what we expect. + */ + GetDynamoLeaseOwnerExpectation(lease ILease) map[string]*dynamodb.ExpectedAttributeValue + + /** + * @return the attribute value map asserting that a lease does not exist. + */ + GetDynamoNonexistantExpectation() map[string]*dynamodb.ExpectedAttributeValue + + /** + * @param lease + * @return the attribute value map that increments a lease counter + */ + GetDynamoLeaseCounterUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate + + /** + * @param lease + * @param newOwner + * @return the attribute value map that takes a lease for a new owner + */ + GetDynamoTakeLeaseUpdate(lease ILease, newOwner string) map[string]*dynamodb.AttributeValueUpdate + + /** + * @param lease + * @return the attribute value map that voids a lease + */ + GetDynamoEvictLeaseUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate + + /** + * @param lease + * @return the attribute value map that updates application-specific data for a lease and increments the lease + * counter + */ + GetDynamoUpdateLeaseUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate + + /** + * @return the key schema for creating a DynamoDB table to store leases + */ + GetKeySchema() []*dynamodb.KeySchemaElement + + /** + * @return attribute definitions for creating a DynamoDB table to store leases + */ + GetAttributeDefinitions() []*dynamodb.AttributeDefinition +} diff --git a/src/leases/interfaces/lease-taker.go b/src/leases/interfaces/lease-taker.go new file mode 100644 index 0000000..0dbaf1b --- /dev/null +++ b/src/leases/interfaces/lease-taker.go @@ -0,0 +1,28 @@ +package interfaces + +/** + * ILeaseTaker is used by LeaseCoordinator to take new leases, or leases that other workers fail to renew. 
Each + * LeaseCoordinator instance corresponds to one worker and uses exactly one ILeaseTaker to take leases for that worker. + */ +type ILeaseTaker interface { + + /** + * Compute the set of leases available to be taken and attempt to take them. Lease taking rules are: + * + * 1) If a lease's counter hasn't changed in long enough, try to take it. + * 2) If we see a lease we've never seen before, take it only if owner == null. If it's owned, odds are the owner is + * holding it. We can't tell until we see it more than once. + * 3) For load balancing purposes, you may violate rules 1 and 2 for EXACTLY ONE lease per call of takeLeases(). + * + * @return map of shardId to Lease object for leases we just successfully took. + * + * @error LeasingDependencyError on unexpected DynamoDB failures + * @error LeasingInvalidStateError if lease table does not exist + */ + TakeLeases() map[string]ILease + + /** + * @return workerIdentifier for this LeaseTaker + */ + GetWorkerIdentifier() string +} diff --git a/src/leases/interfaces/lease.go b/src/leases/interfaces/lease.go new file mode 100644 index 0000000..f3da35a --- /dev/null +++ b/src/leases/interfaces/lease.go @@ -0,0 +1,21 @@ +package interfaces + +// ILease is the interface for all Leases +type ILease interface { + GetLeaseKey() string + SetLeaseKey(leaseKey string) error + + GetLeaseOwner() string + SetLeaseOwner(leaseOwner string) + + GetLeaseCounter() int64 + SetLeaseCounter(leaseCounter int64) + + GetConcurrencyToken() string + SetConcurrencyToken(concurrencyToken string) + + GetLastCounterIncrementNanos() int64 + SetLastCounterIncrementNanos(lastCounterIncrementNanos int64) + + IsExpired(leaseDurationNanos, asOfNanos int64) bool +} From 1969713863271ecf9ea5d23546b858d77d0e755f Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Fri, 13 Apr 2018 14:26:56 -0700 Subject: [PATCH 06/90] KCL: Fix unit test Fix code bug for removing cyclic dependency and fix unit test. 
Test: hmake test Change-Id: Ib4d4ba416d0133542e6459459ddf43079ff53ab8 --- src/clientlibrary/common/errors.go | 8 ++--- src/clientlibrary/config/config.go | 2 +- src/clientlibrary/config/config_test.go | 2 +- src/clientlibrary/config/kcl-config.go | 31 +++++++++---------- .../{types => interfaces}/inputs.go | 4 +-- .../record-processor-checkpointer.go | 2 -- .../interfaces/record-processor.go | 4 --- .../{types => interfaces}/sequence-number.go | 2 +- .../lib/checkpoint/checkpoint.go | 2 +- src/leases/impl/kinesis-client-lease.go | 2 +- support/scripts/functions.sh | 1 + 11 files changed, 26 insertions(+), 34 deletions(-) rename src/clientlibrary/{types => interfaces}/inputs.go (94%) rename src/clientlibrary/{types => interfaces}/sequence-number.go (96%) diff --git a/src/clientlibrary/common/errors.go b/src/clientlibrary/common/errors.go index adddc26..1a366c1 100644 --- a/src/clientlibrary/common/errors.go +++ b/src/clientlibrary/common/errors.go @@ -35,11 +35,11 @@ const ( LeasingProvisionedThroughputError ErrorCode = 41203 // Misc Errors 41300 - 41400 - // NotImplemented - KinesisClientLibNotImplemented ErrorCode = 41301 + // NotImplemented + KinesisClientLibNotImplemented ErrorCode = 41301 - // Error indicates passing illegal or inappropriate argument - IllegalArgumentError ErrorCode = 41302 + // Error indicates passing illegal or inappropriate argument + IllegalArgumentError ErrorCode = 41302 ) var errorMap = map[ErrorCode]ClientLibraryError{ diff --git a/src/clientlibrary/config/config.go b/src/clientlibrary/config/config.go index e2c82c5..8f3322c 100644 --- a/src/clientlibrary/config/config.go +++ b/src/clientlibrary/config/config.go @@ -211,7 +211,7 @@ type ( SkipShardSyncAtWorkerInitializationIfLeasesExist bool // The max number of threads in the worker thread pool to getRecords. 
- WorkerThreadPoolSize int + WorkerThreadPoolSize int } ) diff --git a/src/clientlibrary/config/config_test.go b/src/clientlibrary/config/config_test.go index 19f1481..7d72137 100644 --- a/src/clientlibrary/config/config_test.go +++ b/src/clientlibrary/config/config_test.go @@ -19,5 +19,5 @@ func TestConfig(t *testing.T) { WithRegionName("us-west-2") assert.Equal(t, "appName", kclConfig.ApplicationName) - assert.Equal(t, "500", kclConfig.FailoverTimeMillis) + assert.Equal(t, 500, kclConfig.FailoverTimeMillis) } diff --git a/src/clientlibrary/config/kcl-config.go b/src/clientlibrary/config/kcl-config.go index 2f8bc0b..edc63db 100644 --- a/src/clientlibrary/config/kcl-config.go +++ b/src/clientlibrary/config/kcl-config.go @@ -17,11 +17,11 @@ func NewKinesisClientLibConfig(applicationName, streamName, workerID string) *Ki // populate the KCL configuration with default values return &KinesisClientLibConfiguration{ - ApplicationName: applicationName, - TableName: applicationName, - StreamName: streamName, - WorkerID: workerID, - KinesisEndpoint: "", + ApplicationName: applicationName, + TableName: applicationName, + StreamName: streamName, + WorkerID: workerID, + KinesisEndpoint: "", InitialPositionInStream: DEFAULT_INITIAL_POSITION_IN_STREAM, InitialPositionInStreamExtended: *newInitialPosition(DEFAULT_INITIAL_POSITION_IN_STREAM), FailoverTimeMillis: DEFAULT_FAILOVER_TIME_MILLIS, @@ -29,12 +29,12 @@ func NewKinesisClientLibConfig(applicationName, streamName, workerID string) *Ki IdleTimeBetweenReadsInMillis: DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, CallProcessRecordsEvenForEmptyRecordList: DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, ParentShardPollIntervalMillis: DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS, - ShardSyncIntervalMillis: DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, - CleanupTerminatedShardsBeforeExpiry: DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, - TaskBackoffTimeMillis: DEFAULT_TASK_BACKOFF_TIME_MILLIS, - MetricsBufferTimeMillis: 
DEFAULT_METRICS_BUFFER_TIME_MILLIS, - MetricsMaxQueueSize: DEFAULT_METRICS_MAX_QUEUE_SIZE, - ValidateSequenceNumberBeforeCheckpointing: DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, + ShardSyncIntervalMillis: DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, + CleanupTerminatedShardsBeforeExpiry: DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, + TaskBackoffTimeMillis: DEFAULT_TASK_BACKOFF_TIME_MILLIS, + MetricsBufferTimeMillis: DEFAULT_METRICS_BUFFER_TIME_MILLIS, + MetricsMaxQueueSize: DEFAULT_METRICS_MAX_QUEUE_SIZE, + ValidateSequenceNumberBeforeCheckpointing: DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, RegionName: "", ShutdownGraceMillis: DEFAULT_SHUTDOWN_GRACE_MILLIS, MaxLeasesForWorker: DEFAULT_MAX_LEASES_FOR_WORKER, @@ -42,7 +42,7 @@ func NewKinesisClientLibConfig(applicationName, streamName, workerID string) *Ki InitialLeaseTableReadCapacity: DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, InitialLeaseTableWriteCapacity: DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, SkipShardSyncAtWorkerInitializationIfLeasesExist: DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST, - WorkerThreadPoolSize: 1, + WorkerThreadPoolSize: 1, } } @@ -147,8 +147,7 @@ func (c *KinesisClientLibConfiguration) WithRegionName(regionName string) *Kines // WithWorkerThreadPoolSize configures worker thread pool size func (c *KinesisClientLibConfiguration) WithWorkerThreadPoolSize(n int) *KinesisClientLibConfiguration { - checkIsValuePositive("WorkerThreadPoolSize", n) - c.WorkerThreadPoolSize = n - return c + checkIsValuePositive("WorkerThreadPoolSize", n) + c.WorkerThreadPoolSize = n + return c } - diff --git a/src/clientlibrary/types/inputs.go b/src/clientlibrary/interfaces/inputs.go similarity index 94% rename from src/clientlibrary/types/inputs.go rename to src/clientlibrary/interfaces/inputs.go index 9668e64..1b21999 100644 --- a/src/clientlibrary/types/inputs.go +++ b/src/clientlibrary/interfaces/inputs.go @@ -1,11 +1,9 @@ -package types +package interfaces import ( "time" ks 
"github.com/aws/aws-sdk-go/service/kinesis" - - . "clientlibrary/interfaces" ) const ( diff --git a/src/clientlibrary/interfaces/record-processor-checkpointer.go b/src/clientlibrary/interfaces/record-processor-checkpointer.go index c752f04..296fd6a 100644 --- a/src/clientlibrary/interfaces/record-processor-checkpointer.go +++ b/src/clientlibrary/interfaces/record-processor-checkpointer.go @@ -2,8 +2,6 @@ package interfaces import ( ks "github.com/aws/aws-sdk-go/service/kinesis" - - . "clientlibrary/types" ) type ( diff --git a/src/clientlibrary/interfaces/record-processor.go b/src/clientlibrary/interfaces/record-processor.go index ab704a2..f704d0e 100644 --- a/src/clientlibrary/interfaces/record-processor.go +++ b/src/clientlibrary/interfaces/record-processor.go @@ -1,9 +1,5 @@ package interfaces -import ( - . "clientlibrary/types" -) - // IRecordProcessor is the interface for some callback functions invoked by KCL will // The main task of using KCL is to provide implementation on IRecordProcessor interface. // Note: This is exactly the same interface as Amazon KCL IRecordProcessor v2 diff --git a/src/clientlibrary/types/sequence-number.go b/src/clientlibrary/interfaces/sequence-number.go similarity index 96% rename from src/clientlibrary/types/sequence-number.go rename to src/clientlibrary/interfaces/sequence-number.go index 0dddb57..80ac68f 100644 --- a/src/clientlibrary/types/sequence-number.go +++ b/src/clientlibrary/interfaces/sequence-number.go @@ -1,4 +1,4 @@ -package types +package interfaces // ExtendedSequenceNumber represents a two-part sequence number for records aggregated by the Kinesis Producer Library. // diff --git a/src/clientlibrary/lib/checkpoint/checkpoint.go b/src/clientlibrary/lib/checkpoint/checkpoint.go index 9f0facc..1f480d8 100644 --- a/src/clientlibrary/lib/checkpoint/checkpoint.go +++ b/src/clientlibrary/lib/checkpoint/checkpoint.go @@ -1,7 +1,7 @@ package checkpoint import ( - . "clientlibrary/types" + . 
"clientlibrary/interfaces" ) const ( diff --git a/src/leases/impl/kinesis-client-lease.go b/src/leases/impl/kinesis-client-lease.go index 6132a40..abe049a 100644 --- a/src/leases/impl/kinesis-client-lease.go +++ b/src/leases/impl/kinesis-client-lease.go @@ -1,7 +1,7 @@ package impl import ( - . "clientlibrary/types" + . "clientlibrary/interfaces" ) // KinesisClientLease is a Lease subclass containing KinesisClientLibrary related fields for checkpoints. diff --git a/support/scripts/functions.sh b/support/scripts/functions.sh index c76d266..b7265ea 100644 --- a/support/scripts/functions.sh +++ b/support/scripts/functions.sh @@ -14,6 +14,7 @@ local_go_pkgs() { grep -Fv '/tmp/' | \ grep -Fv '/run/' | \ grep -Fv '/tests/' | \ + sed -r 's|(.+)/[^/]+\.go$|\1|g' | \ sort -u } From a323d2fd51b47f1ecbeb720e94321a7d6ff1c099 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Tue, 17 Apr 2018 09:25:41 -0700 Subject: [PATCH 07/90] KCL: Implement Worker This is the core part of KCL by implementing worker. It has exactly the same interface as Amazon's KCL. Internally, it uses code from GoKini in order to get the library functionaly quickly. This is a working version. The test code worker_test.go shows how to use this library. Dynamic resharding feature is out of the scope of M4. Test: 1. A Kinesis stream named "kcl-test" has been created under photon-infra account. 2. Download your AWS Credential from IAM user page. 3. Modify the worker_test.go to fill in your aws credential. 4. 
hmake test Jira CNA-637 Change-Id: I886d255bab9adaf7a13bca11bfda51bedaacaaed --- src/clientlibrary/config/config.go | 30 +- src/clientlibrary/config/config_test.go | 5 +- src/clientlibrary/config/kcl-config.go | 69 ++--- src/clientlibrary/interfaces/inputs.go | 51 +++- .../record-processor-checkpointer.go | 157 +--------- .../interfaces/record-processor.go | 86 +++--- .../interfaces/sequence-number.go | 4 +- .../lib/checkpoint/checkpoint.go | 26 -- src/clientlibrary/lib/worker/worker.go | 1 - src/clientlibrary/metrics/cloudwatch.go | 274 +++++++++++++++++ src/clientlibrary/metrics/interfaces.go | 66 ++++ src/clientlibrary/metrics/prometheus.go | 113 +++++++ src/clientlibrary/utils/random.go | 30 ++ src/clientlibrary/worker/checkpointer.go | 276 +++++++++++++++++ .../worker/record-processor-checkpointer.go | 56 ++++ src/clientlibrary/worker/shard-consumer.go | 195 ++++++++++++ src/clientlibrary/worker/worker.go | 289 ++++++++++++++++++ src/clientlibrary/worker/worker_test.go | 108 +++++++ src/leases/impl/lease.go | 7 +- support/scripts/functions.sh | 1 + 20 files changed, 1554 insertions(+), 290 deletions(-) delete mode 100644 src/clientlibrary/lib/checkpoint/checkpoint.go delete mode 100644 src/clientlibrary/lib/worker/worker.go create mode 100644 src/clientlibrary/metrics/cloudwatch.go create mode 100644 src/clientlibrary/metrics/interfaces.go create mode 100644 src/clientlibrary/metrics/prometheus.go create mode 100644 src/clientlibrary/utils/random.go create mode 100644 src/clientlibrary/worker/checkpointer.go create mode 100644 src/clientlibrary/worker/record-processor-checkpointer.go create mode 100644 src/clientlibrary/worker/shard-consumer.go create mode 100644 src/clientlibrary/worker/worker.go create mode 100644 src/clientlibrary/worker/worker_test.go diff --git a/src/clientlibrary/config/config.go b/src/clientlibrary/config/config.go index 8f3322c..eef22b6 100644 --- a/src/clientlibrary/config/config.go +++ b/src/clientlibrary/config/config.go @@ -5,17 
+5,17 @@ import ( "math" "strings" "time" + + "github.com/aws/aws-sdk-go/aws" ) const ( - EPSILON_MS = 25 - // LATEST start after the most recent data record (fetch new data). - LATEST = InitialPositionInStream(1) + LATEST InitialPositionInStream = iota + 1 // TRIM_HORIZON start from the oldest available data record - TRIM_HORIZON = LATEST + 1 + TRIM_HORIZON // AT_TIMESTAMP start from the record at or after the specified server-side Timestamp. - AT_TIMESTAMP = TRIM_HORIZON + 1 + AT_TIMESTAMP // The location in the shard from which the KinesisClientLibrary will start fetching records from // when the application starts for the first time and there is no checkpoint for the shard. @@ -119,6 +119,7 @@ type ( } // Configuration for the Kinesis Client Library. + // Note: There is no need to configure credential provider. Credential can be get from InstanceProfile. KinesisClientLibConfiguration struct { // ApplicationName is name of application. Kinesis allows multiple applications to consume the same stream. ApplicationName string @@ -132,12 +133,6 @@ type ( // WorkerID used to distinguish different workers/processes of a Kinesis application WorkerID string - // KinesisEndpoint endpoint - KinesisEndpoint string - - // DynamoDB endpoint - DynamoDBEndpoint string - // InitialPositionInStream specifies the Position in the stream where a new application should start from InitialPositionInStream InitialPositionInStream @@ -209,12 +204,19 @@ type ( // Worker should skip syncing shards and leases at startup if leases are present // This is useful for optimizing deployments to large fleets working on a stable stream. SkipShardSyncAtWorkerInitializationIfLeasesExist bool - - // The max number of threads in the worker thread pool to getRecords. 
- WorkerThreadPoolSize int } ) +var positionMap = map[InitialPositionInStream]*string{ + LATEST: aws.String("LATEST"), + TRIM_HORIZON: aws.String("TRIM_HORIZON"), + AT_TIMESTAMP: aws.String("AT_TIMESTAMP"), +} + +func InitalPositionInStreamToShardIteratorType(pos InitialPositionInStream) *string { + return positionMap[pos] +} + func empty(s string) bool { return len(strings.TrimSpace(s)) == 0 } diff --git a/src/clientlibrary/config/config_test.go b/src/clientlibrary/config/config_test.go index 7d72137..88bc75d 100644 --- a/src/clientlibrary/config/config_test.go +++ b/src/clientlibrary/config/config_test.go @@ -7,7 +7,7 @@ import ( ) func TestConfig(t *testing.T) { - kclConfig := NewKinesisClientLibConfig("appName", "StreamName", "workerId"). + kclConfig := NewKinesisClientLibConfig("appName", "StreamName", "us-west-2", "workerId"). WithFailoverTimeMillis(500). WithMaxRecords(100). WithInitialPositionInStream(TRIM_HORIZON). @@ -15,8 +15,7 @@ func TestConfig(t *testing.T) { WithCallProcessRecordsEvenForEmptyRecordList(true). WithTaskBackoffTimeMillis(10). WithMetricsBufferTimeMillis(500). - WithMetricsMaxQueueSize(200). - WithRegionName("us-west-2") + WithMetricsMaxQueueSize(200) assert.Equal(t, "appName", kclConfig.ApplicationName) assert.Equal(t, 500, kclConfig.FailoverTimeMillis) diff --git a/src/clientlibrary/config/kcl-config.go b/src/clientlibrary/config/kcl-config.go index edc63db..bfba4aa 100644 --- a/src/clientlibrary/config/kcl-config.go +++ b/src/clientlibrary/config/kcl-config.go @@ -6,10 +6,10 @@ import ( ) // NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. 
-func NewKinesisClientLibConfig(applicationName, streamName, workerID string) *KinesisClientLibConfiguration { +func NewKinesisClientLibConfig(applicationName, streamName, regionName, workerID string) *KinesisClientLibConfiguration { checkIsValueNotEmpty("ApplicationName", applicationName) checkIsValueNotEmpty("StreamName", streamName) - checkIsValueNotEmpty("ApplicationName", applicationName) + checkIsValueNotEmpty("RegionName", regionName) if empty(workerID) { workerID = utils.MustNewUUID() @@ -17,32 +17,30 @@ func NewKinesisClientLibConfig(applicationName, streamName, workerID string) *Ki // populate the KCL configuration with default values return &KinesisClientLibConfiguration{ - ApplicationName: applicationName, - TableName: applicationName, - StreamName: streamName, - WorkerID: workerID, - KinesisEndpoint: "", - InitialPositionInStream: DEFAULT_INITIAL_POSITION_IN_STREAM, - InitialPositionInStreamExtended: *newInitialPosition(DEFAULT_INITIAL_POSITION_IN_STREAM), - FailoverTimeMillis: DEFAULT_FAILOVER_TIME_MILLIS, - MaxRecords: DEFAULT_MAX_RECORDS, - IdleTimeBetweenReadsInMillis: DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, - CallProcessRecordsEvenForEmptyRecordList: DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, - ParentShardPollIntervalMillis: DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS, - ShardSyncIntervalMillis: DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, - CleanupTerminatedShardsBeforeExpiry: DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, - TaskBackoffTimeMillis: DEFAULT_TASK_BACKOFF_TIME_MILLIS, - MetricsBufferTimeMillis: DEFAULT_METRICS_BUFFER_TIME_MILLIS, - MetricsMaxQueueSize: DEFAULT_METRICS_MAX_QUEUE_SIZE, - ValidateSequenceNumberBeforeCheckpointing: DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, - RegionName: "", + ApplicationName: applicationName, + TableName: applicationName, + StreamName: streamName, + RegionName: regionName, + WorkerID: workerID, + InitialPositionInStream: DEFAULT_INITIAL_POSITION_IN_STREAM, + 
InitialPositionInStreamExtended: *newInitialPosition(DEFAULT_INITIAL_POSITION_IN_STREAM), + FailoverTimeMillis: DEFAULT_FAILOVER_TIME_MILLIS, + MaxRecords: DEFAULT_MAX_RECORDS, + IdleTimeBetweenReadsInMillis: DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, + CallProcessRecordsEvenForEmptyRecordList: DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, + ParentShardPollIntervalMillis: DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS, + ShardSyncIntervalMillis: DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, + CleanupTerminatedShardsBeforeExpiry: DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, + TaskBackoffTimeMillis: DEFAULT_TASK_BACKOFF_TIME_MILLIS, + MetricsBufferTimeMillis: DEFAULT_METRICS_BUFFER_TIME_MILLIS, + MetricsMaxQueueSize: DEFAULT_METRICS_MAX_QUEUE_SIZE, + ValidateSequenceNumberBeforeCheckpointing: DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, ShutdownGraceMillis: DEFAULT_SHUTDOWN_GRACE_MILLIS, MaxLeasesForWorker: DEFAULT_MAX_LEASES_FOR_WORKER, MaxLeasesToStealAtOneTime: DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME, InitialLeaseTableReadCapacity: DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, InitialLeaseTableWriteCapacity: DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, SkipShardSyncAtWorkerInitializationIfLeasesExist: DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST, - WorkerThreadPoolSize: 1, } } @@ -52,11 +50,6 @@ func (c *KinesisClientLibConfiguration) WithTableName(tableName string) *Kinesis return c } -func (c *KinesisClientLibConfiguration) WithKinesisEndpoint(kinesisEndpoint string) *KinesisClientLibConfiguration { - c.KinesisEndpoint = kinesisEndpoint - return c -} - func (c *KinesisClientLibConfiguration) WithInitialPositionInStream(initialPositionInStream InitialPositionInStream) *KinesisClientLibConfiguration { c.InitialPositionInStream = initialPositionInStream c.InitialPositionInStreamExtended = *newInitialPosition(initialPositionInStream) @@ -87,6 +80,14 @@ func (c *KinesisClientLibConfiguration) WithMaxRecords(maxRecords int) *KinesisC return c } +// 
WithMaxLeasesForWorker configures maximum lease this worker can handles. It determines how maximun number of shards +// this worker can handle. +func (c *KinesisClientLibConfiguration) WithMaxLeasesForWorker(n int) *KinesisClientLibConfiguration { + checkIsValuePositive("MaxLeasesForWorker", n) + c.MaxLeasesForWorker = n + return c +} + /** * Controls how long the KCL will sleep if no records are returned from Kinesis * @@ -137,17 +138,3 @@ func (c *KinesisClientLibConfiguration) WithMetricsMaxQueueSize(metricsMaxQueueS c.MetricsMaxQueueSize = metricsMaxQueueSize return c } - -// WithRegionName configures region for the stream -func (c *KinesisClientLibConfiguration) WithRegionName(regionName string) *KinesisClientLibConfiguration { - checkIsValueNotEmpty("RegionName", regionName) - c.RegionName = regionName - return c -} - -// WithWorkerThreadPoolSize configures worker thread pool size -func (c *KinesisClientLibConfiguration) WithWorkerThreadPoolSize(n int) *KinesisClientLibConfiguration { - checkIsValuePositive("WorkerThreadPoolSize", n) - c.WorkerThreadPoolSize = n - return c -} diff --git a/src/clientlibrary/interfaces/inputs.go b/src/clientlibrary/interfaces/inputs.go index 1b21999..27590c3 100644 --- a/src/clientlibrary/interfaces/inputs.go +++ b/src/clientlibrary/interfaces/inputs.go @@ -7,31 +7,56 @@ import ( ) const ( - REQUESTED = ShutdownReason(1) - TERMINATE = REQUESTED + 1 - ZOMBIE = TERMINATE + 1 + /** + * Indicates that the entire application is being shutdown, and if desired the record processor will be given a + * final chance to checkpoint. This state will not trigger a direct call to + * {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#shutdown(ShutdownInput)}, but + * instead depend on a different interface for backward compatibility. + */ + REQUESTED ShutdownReason = iota + 1 + /** + * Terminate processing for this RecordProcessor (resharding use case). 
+ * Indicates that the shard is closed and all records from the shard have been delivered to the application. + * Applications SHOULD checkpoint their progress to indicate that they have successfully processed all records + * from this shard and processing of child shards can be started. + */ + TERMINATE + /** + * Processing will be moved to a different record processor (fail over, load balancing use cases). + * Applications SHOULD NOT checkpoint their progress (as another record processor may have already started + * processing data). + */ + ZOMBIE ) // Containers for the parameters to the IRecordProcessor type ( + /** + * Reason the RecordProcessor is being shutdown. + * Used to distinguish between a fail-over vs. a termination (shard is closed and all records have been delivered). + * In case of a fail over, applications should NOT checkpoint as part of shutdown, + * since another record processor may have already started processing records for that shard. + * In case of termination (resharding use case), applications SHOULD checkpoint their progress to indicate + * that they have successfully processed all the records (processing of child shards can then begin). 
+ */ ShutdownReason int InitializationInput struct { - shardId string - extendedSequenceNumber *ExtendedSequenceNumber - pendingCheckpointSequenceNumber *ExtendedSequenceNumber + ShardId string + ExtendedSequenceNumber *ExtendedSequenceNumber + PendingCheckpointSequenceNumber *ExtendedSequenceNumber } ProcessRecordsInput struct { - cacheEntryTime *time.Time - cacheExitTime *time.Time - records []*ks.Record - checkpointer *IRecordProcessorCheckpointer - millisBehindLatest int64 + CacheEntryTime *time.Time + CacheExitTime *time.Time + Records []*ks.Record + Checkpointer IRecordProcessorCheckpointer + MillisBehindLatest int64 } ShutdownInput struct { - shutdownReason ShutdownReason - checkpointer *IRecordProcessorCheckpointer + ShutdownReason ShutdownReason + Checkpointer IRecordProcessorCheckpointer } ) diff --git a/src/clientlibrary/interfaces/record-processor-checkpointer.go b/src/clientlibrary/interfaces/record-processor-checkpointer.go index 296fd6a..ffea0e8 100644 --- a/src/clientlibrary/interfaces/record-processor-checkpointer.go +++ b/src/clientlibrary/interfaces/record-processor-checkpointer.go @@ -1,12 +1,8 @@ package interfaces -import ( - ks "github.com/aws/aws-sdk-go/service/kinesis" -) - type ( IPreparedCheckpointer interface { - getPendingCheckpoint() ExtendedSequenceNumber + GetPendingCheckpoint() *ExtendedSequenceNumber /** * This method will record a pending checkpoint. @@ -24,7 +20,7 @@ type ( * i.e. it is smaller than the last check point value (prepared or committed), or larger than the greatest * sequence number seen by the associated record processor. */ - checkpoint() error + Checkpoint() error } /** @@ -33,46 +29,6 @@ type ( * checkpoint their progress. */ IRecordProcessorCheckpointer interface { - - /** - * This method will checkpoint the progress at the last data record that was delivered to the record processor. 
- * Upon fail over (after a successful checkpoint() call), the new/replacement RecordProcessor instance - * will receive data records whose sequenceNumber > checkpoint position (for each partition key). - * In steady state, applications should checkpoint periodically (e.g. once every 5 minutes). - * Calling this API too frequently can slow down the application (because it puts pressure on the underlying - * checkpoint storage layer). - * - * @error ThrottlingError Can't store checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @error ShutdownError The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. - * The application should abort processing via this RecordProcessor instance. - * @error InvalidStateError Can't store checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @error KinesisClientLibDependencyError Encountered an issue when storing the checkpoint. The application can - * backoff and retry. - */ - checkpoint() error - - /** - * This method will checkpoint the progress at the provided record. This method is analogous to - * {@link #checkpoint()} but provides the ability to specify the record at which to - * checkpoint. - * - * @param record A record at which to checkpoint in this shard. Upon failover, - * the Kinesis Client Library will start fetching records after this record's sequence number. - * @error ThrottlingError Can't store checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @error ShutdownError The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. 
- * The application should abort processing via this RecordProcessor instance. - * @error InvalidStateError Can't store checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @error KinesisClientLibDependencyError Encountered an issue when storing the checkpoint. The application can - * backoff and retry. - */ - checkpointByRecord(record *ks.Record) error - /** * This method will checkpoint the progress at the provided sequenceNumber. This method is analogous to * {@link #checkpoint()} but provides the ability to specify the sequence number at which to @@ -94,87 +50,10 @@ type ( * greatest sequence number seen by the associated record processor. * 2.) It is not a valid sequence number for a record in this shard. */ - checkpointBySequenceNumber(sequenceNumber string) error + Checkpoint(sequenceNumber *string) error /** - * This method will checkpoint the progress at the provided sequenceNumber and subSequenceNumber, the latter for - * aggregated records produced with the Producer Library. This method is analogous to {@link #checkpoint()} - * but provides the ability to specify the sequence and subsequence numbers at which to checkpoint. - * - * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover, the Kinesis - * Client Library will start fetching records after the given sequence and subsequence numbers. - * @param subSequenceNumber A subsequence number at which to checkpoint within this shard. Upon failover, the - * Kinesis Client Library will start fetching records after the given sequence and subsequence numbers. - * @error ThrottlingError Can't store checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @error ShutdownError The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. 
- * The application should abort processing via this RecordProcessor instance. - * @error InvalidStateError Can't store checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @error KinesisClientLibDependencyError Encountered an issue when storing the checkpoint. The application can - * backoff and retry. - * @error IllegalArgumentError The sequence number is invalid for one of the following reasons: - * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the - * greatest sequence number seen by the associated record processor. - * 2.) It is not a valid sequence number for a record in this shard. - */ - checkpointBySequenceNumberEx(sequenceNumber string, subSequenceNumber int64) error - - /** - * This method will record a pending checkpoint at the last data record that was delivered to the record processor. - * If the application fails over between calling prepareCheckpoint() and checkpoint(), the init() method of the next - * IRecordProcessor for this shard will be informed of the prepared sequence number - * - * Application should use this to assist with idempotency across failover by calling prepareCheckpoint before having - * side effects, then by calling checkpoint on the returned PreparedCheckpointer after side effects are complete. - * Use the sequence number passed in to init() to behave idempotently. - * - * @return an IPreparedCheckpointer object that can be called later to persist the checkpoint. - * - * @error ThrottlingError Can't store pending checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @error ShutdownError The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. - * The application should abort processing via this RecordProcessor instance. 
- * @error InvalidStateError Can't store pending checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @error KinesisClientLibDependencyError Encountered an issue when storing the pending checkpoint. The - * application can backoff and retry. - */ - prepareCheckpoint() (*IPreparedCheckpointer, error) - - /** - * This method will record a pending checkpoint at the at the provided record. This method is analogous to - * {@link #prepareCheckpoint()} but provides the ability to specify the record at which to prepare the checkpoint. - * - * @param record A record at which to prepare checkpoint in this shard. - * - * Application should use this to assist with idempotency across failover by calling prepareCheckpoint before having - * side effects, then by calling checkpoint on the returned PreparedCheckpointer after side effects are complete. - * Use the sequence number and application state passed in to init() to behave idempotently. - * - * @return an IPreparedCheckpointer object that can be called later to persist the checkpoint. - * - * @error ThrottlingError Can't store pending checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @error ShutdownError The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. - * The application should abort processing via this RecordProcessor instance. - * @error InvalidStateError Can't store pending checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @error KinesisClientLibDependencyError Encountered an issue when storing the pending checkpoint. The - * application can backoff and retry. - * @error IllegalArgumentError The sequence number is invalid for one of the following reasons: - * 1.) It appears to be out of range, i.e. 
it is smaller than the last check point value, or larger than the - * greatest sequence number seen by the associated record processor. - * 2.) It is not a valid sequence number for a record in this shard. - */ - prepareCheckpointByRecord(record *ks.Record) (*IPreparedCheckpointer, error) - - /** - * This method will record a pending checkpoint at the provided sequenceNumber. This method is analogous to - * {@link #prepareCheckpoint()} but provides the ability to specify the sequence number at which to checkpoint. + * This method will record a pending checkpoint at the provided sequenceNumber. * * @param sequenceNumber A sequence number at which to prepare checkpoint in this shard. @@ -194,32 +73,6 @@ type ( * greatest sequence number seen by the associated record processor. * 2.) It is not a valid sequence number for a record in this shard. */ - prepareCheckpointBySequenceNumber(sequenceNumber string) (*IPreparedCheckpointer, error) - - /** - * This method will record a pending checkpoint at the provided sequenceNumber and subSequenceNumber, the latter for - * aggregated records produced with the Producer Library. This method is analogous to {@link #prepareCheckpoint()} - * but provides the ability to specify the sequence number at which to checkpoint - * - * @param sequenceNumber A sequence number at which to prepare checkpoint in this shard. - * @param subSequenceNumber A subsequence number at which to prepare checkpoint within this shard. - * - * @return an IPreparedCheckpointer object that can be called later to persist the checkpoint. - * - * @error ThrottlingError Can't store pending checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @error ShutdownError The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. 
- * The application should abort processing via this RecordProcessor instance. - * @error InvalidStateError Can't store pending checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @error KinesisClientLibDependencyError Encountered an issue when storing the pending checkpoint. The - * application can backoff and retry. - * @error IllegalArgumentError The sequence number is invalid for one of the following reasons: - * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the - * greatest sequence number seen by the associated record processor. - * 2.) It is not a valid sequence number for a record in this shard. - */ - prepareCheckpointBySequenceNumberEx(sequenceNumber string, subSequenceNumber int64) (*IPreparedCheckpointer, error) + PrepareCheckpoint(sequenceNumber *string) (IPreparedCheckpointer, error) } ) diff --git a/src/clientlibrary/interfaces/record-processor.go b/src/clientlibrary/interfaces/record-processor.go index f704d0e..d64414c 100644 --- a/src/clientlibrary/interfaces/record-processor.go +++ b/src/clientlibrary/interfaces/record-processor.go @@ -1,40 +1,54 @@ package interfaces -// IRecordProcessor is the interface for some callback functions invoked by KCL will -// The main task of using KCL is to provide implementation on IRecordProcessor interface. -// Note: This is exactly the same interface as Amazon KCL IRecordProcessor v2 -type IRecordProcessor interface { - /** - * Invoked by the Amazon Kinesis Client Library before data records are delivered to the RecordProcessor instance - * (via processRecords). - * - * @param initializationInput Provides information related to initialization - */ - initialize(initializationInput InitializationInput) +type ( + // IRecordProcessor is the interface for some callback functions invoked by KCL will + // The main task of using KCL is to provide implementation on IRecordProcessor interface. 
+ // Note: This is exactly the same interface as Amazon KCL IRecordProcessor v2 + IRecordProcessor interface { + /** + * Invoked by the Amazon Kinesis Client Library before data records are delivered to the RecordProcessor instance + * (via processRecords). + * + * @param initializationInput Provides information related to initialization + */ + Initialize(initializationInput *InitializationInput) - /** - * Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the - * application. - * Upon fail over, the new instance will get records with sequence number > checkpoint position - * for each partition key. - * - * @param processRecordsInput Provides the records to be processed as well as information and capabilities related - * to them (eg checkpointing). - */ - processRecords(processRecordsInput ProcessRecordsInput) + /** + * Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the + * application. + * Upon fail over, the new instance will get records with sequence number > checkpoint position + * for each partition key. + * + * @param processRecordsInput Provides the records to be processed as well as information and capabilities related + * to them (eg checkpointing). + */ + ProcessRecords(processRecordsInput *ProcessRecordsInput) - /** - * Invoked by the Amazon Kinesis Client Library to indicate it will no longer send data records to this - * RecordProcessor instance. - * - *

Warning

- * - * When the value of {@link ShutdownInput#getShutdownReason()} is - * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you - * checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress. - * - * @param shutdownInput - * Provides information and capabilities (eg checkpointing) related to shutdown of this record processor. - */ - shutdown(shutdownInput ShutdownInput) -} + /** + * Invoked by the Amazon Kinesis Client Library to indicate it will no longer send data records to this + * RecordProcessor instance. + * + *

Warning

+ * + * When the value of {@link ShutdownInput#getShutdownReason()} is + * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you + * checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress. + * + * @param shutdownInput + * Provides information and capabilities (eg checkpointing) related to shutdown of this record processor. + */ + Shutdown(shutdownInput *ShutdownInput) + } + + // IRecordProcessorFactory is interface for creating IRecordProcessor. Each Worker can have multiple threads + // for processing shard. Client can choose either creating one processor per shard or sharing them. + IRecordProcessorFactory interface { + + /** + * Returns a record processor to be used for processing data records for a (assigned) shard. + * + * @return Returns a processor object. + */ + CreateProcessor() IRecordProcessor + } +) diff --git a/src/clientlibrary/interfaces/sequence-number.go b/src/clientlibrary/interfaces/sequence-number.go index 80ac68f..f9c01ad 100644 --- a/src/clientlibrary/interfaces/sequence-number.go +++ b/src/clientlibrary/interfaces/sequence-number.go @@ -6,6 +6,6 @@ package interfaces // sub-sequence number, in addition to the regular sequence number of the Kinesis record. The sub-sequence number // is used to checkpoint within an aggregated record. type ExtendedSequenceNumber struct { - sequenceNumber string - subSequenceNumber int64 + SequenceNumber *string + SubSequenceNumber int64 } diff --git a/src/clientlibrary/lib/checkpoint/checkpoint.go b/src/clientlibrary/lib/checkpoint/checkpoint.go deleted file mode 100644 index 1f480d8..0000000 --- a/src/clientlibrary/lib/checkpoint/checkpoint.go +++ /dev/null @@ -1,26 +0,0 @@ -package checkpoint - -import ( - . "clientlibrary/interfaces" -) - -const ( - // TRIM_HORIZON starts from the first available record in the shard. 
- TRIM_HORIZON = SentinelCheckpoint(iota + 1) - // LATEST starts from the latest record in the shard. - LATEST - // SHARD_END We've completely processed all records in this shard. - SHARD_END - // AT_TIMESTAMP starts from the record at or after the specified server-side timestamp. - AT_TIMESTAMP -) - -type ( - SentinelCheckpoint int - - // Checkpoint: a class encapsulating the 2 pieces of state stored in a checkpoint. - Checkpoint struct { - checkpoint *ExtendedSequenceNumber - pendingCheckpoint *ExtendedSequenceNumber - } -) diff --git a/src/clientlibrary/lib/worker/worker.go b/src/clientlibrary/lib/worker/worker.go deleted file mode 100644 index 4df0094..0000000 --- a/src/clientlibrary/lib/worker/worker.go +++ /dev/null @@ -1 +0,0 @@ -package worker diff --git a/src/clientlibrary/metrics/cloudwatch.go b/src/clientlibrary/metrics/cloudwatch.go new file mode 100644 index 0000000..f5a76d6 --- /dev/null +++ b/src/clientlibrary/metrics/cloudwatch.go @@ -0,0 +1,274 @@ +package metrics + +import ( + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface" + log "github.com/sirupsen/logrus" +) + +type CloudWatchMonitoringService struct { + Namespace string + KinesisStream string + WorkerID string + // What granularity we should send metrics to CW at. 
Note setting this to 1 will cost quite a bit of money + // At the time of writing (March 2018) about US$200 per month + ResolutionSec int + svc cloudwatchiface.CloudWatchAPI + shardMetrics map[string]*cloudWatchMetrics +} + +type cloudWatchMetrics struct { + processedRecords int64 + processedBytes int64 + behindLatestMillis []float64 + leasesHeld int64 + leaseRenewals int64 + getRecordsTime []float64 + processRecordsTime []float64 + sync.Mutex +} + +func (cw *CloudWatchMonitoringService) Init() error { + if cw.ResolutionSec == 0 { + cw.ResolutionSec = 60 + } + + session, err := session.NewSessionWithOptions( + session.Options{ + SharedConfigState: session.SharedConfigEnable, + }, + ) + if err != nil { + return err + } + + cw.svc = cloudwatch.New(session) + cw.shardMetrics = make(map[string]*cloudWatchMetrics) + return nil +} + +func (cw *CloudWatchMonitoringService) flushDaemon() { + previousFlushTime := time.Now() + resolutionDuration := time.Duration(cw.ResolutionSec) * time.Second + for { + time.Sleep(resolutionDuration - time.Now().Sub(previousFlushTime)) + err := cw.flush() + if err != nil { + log.Errorln("Error sending metrics to CloudWatch", err) + } + previousFlushTime = time.Now() + } +} + +func (cw *CloudWatchMonitoringService) flush() error { + for shard, metric := range cw.shardMetrics { + metric.Lock() + defaultDimensions := []*cloudwatch.Dimension{ + &cloudwatch.Dimension{ + Name: aws.String("shard"), + Value: &shard, + }, + &cloudwatch.Dimension{ + Name: aws.String("KinesisStreamName"), + Value: &cw.KinesisStream, + }, + } + leaseDimensions := make([]*cloudwatch.Dimension, len(defaultDimensions)) + copy(defaultDimensions, leaseDimensions) + leaseDimensions = append(leaseDimensions, &cloudwatch.Dimension{ + Name: aws.String("WorkerID"), + Value: &cw.WorkerID, + }) + metricTimestamp := time.Now() + _, err := cw.svc.PutMetricData(&cloudwatch.PutMetricDataInput{ + Namespace: aws.String(cw.Namespace), + MetricData: []*cloudwatch.MetricDatum{ + 
&cloudwatch.MetricDatum{ + Dimensions: defaultDimensions, + MetricName: aws.String("RecordsProcessed"), + Unit: aws.String("Count"), + Timestamp: &metricTimestamp, + Value: aws.Float64(float64(metric.processedRecords)), + }, + &cloudwatch.MetricDatum{ + Dimensions: defaultDimensions, + MetricName: aws.String("DataBytesProcessed"), + Unit: aws.String("Byte"), + Timestamp: &metricTimestamp, + Value: aws.Float64(float64(metric.processedBytes)), + }, + &cloudwatch.MetricDatum{ + Dimensions: defaultDimensions, + MetricName: aws.String("MillisBehindLatest"), + Unit: aws.String("Milliseconds"), + Timestamp: &metricTimestamp, + StatisticValues: &cloudwatch.StatisticSet{ + SampleCount: aws.Float64(float64(len(metric.behindLatestMillis))), + Sum: sumFloat64(metric.behindLatestMillis), + Maximum: maxFloat64(metric.behindLatestMillis), + Minimum: minFloat64(metric.behindLatestMillis), + }, + }, + &cloudwatch.MetricDatum{ + Dimensions: defaultDimensions, + MetricName: aws.String("KinesisDataFetcher.getRecords.Time"), + Unit: aws.String("Milliseconds"), + Timestamp: &metricTimestamp, + StatisticValues: &cloudwatch.StatisticSet{ + SampleCount: aws.Float64(float64(len(metric.getRecordsTime))), + Sum: sumFloat64(metric.getRecordsTime), + Maximum: maxFloat64(metric.getRecordsTime), + Minimum: minFloat64(metric.getRecordsTime), + }, + }, + &cloudwatch.MetricDatum{ + Dimensions: defaultDimensions, + MetricName: aws.String("RecordProcessor.processRecords.Time"), + Unit: aws.String("Milliseconds"), + Timestamp: &metricTimestamp, + StatisticValues: &cloudwatch.StatisticSet{ + SampleCount: aws.Float64(float64(len(metric.processRecordsTime))), + Sum: sumFloat64(metric.processRecordsTime), + Maximum: maxFloat64(metric.processRecordsTime), + Minimum: minFloat64(metric.processRecordsTime), + }, + }, + &cloudwatch.MetricDatum{ + Dimensions: leaseDimensions, + MetricName: aws.String("RenewLease.Success"), + Unit: aws.String("Count"), + Timestamp: &metricTimestamp, + Value: 
aws.Float64(float64(metric.leaseRenewals)), + }, + &cloudwatch.MetricDatum{ + Dimensions: leaseDimensions, + MetricName: aws.String("CurrentLeases"), + Unit: aws.String("Count"), + Timestamp: &metricTimestamp, + Value: aws.Float64(float64(metric.leasesHeld)), + }, + }, + }) + if err == nil { + metric.processedRecords = 0 + metric.processedBytes = 0 + metric.behindLatestMillis = []float64{} + metric.leaseRenewals = 0 + metric.getRecordsTime = []float64{} + metric.processRecordsTime = []float64{} + } + metric.Unlock() + return err + } + return nil +} + +func (cw *CloudWatchMonitoringService) IncrRecordsProcessed(shard string, count int) { + if _, ok := cw.shardMetrics[shard]; !ok { + cw.shardMetrics[shard] = &cloudWatchMetrics{} + } + cw.shardMetrics[shard].Lock() + defer cw.shardMetrics[shard].Unlock() + cw.shardMetrics[shard].processedRecords += int64(count) +} + +func (cw *CloudWatchMonitoringService) IncrBytesProcessed(shard string, count int64) { + if _, ok := cw.shardMetrics[shard]; !ok { + cw.shardMetrics[shard] = &cloudWatchMetrics{} + } + cw.shardMetrics[shard].Lock() + defer cw.shardMetrics[shard].Unlock() + cw.shardMetrics[shard].processedBytes += count +} + +func (cw *CloudWatchMonitoringService) MillisBehindLatest(shard string, millSeconds float64) { + if _, ok := cw.shardMetrics[shard]; !ok { + cw.shardMetrics[shard] = &cloudWatchMetrics{} + } + cw.shardMetrics[shard].Lock() + defer cw.shardMetrics[shard].Unlock() + cw.shardMetrics[shard].behindLatestMillis = append(cw.shardMetrics[shard].behindLatestMillis, millSeconds) +} + +func (cw *CloudWatchMonitoringService) LeaseGained(shard string) { + if _, ok := cw.shardMetrics[shard]; !ok { + cw.shardMetrics[shard] = &cloudWatchMetrics{} + } + cw.shardMetrics[shard].Lock() + defer cw.shardMetrics[shard].Unlock() + cw.shardMetrics[shard].leasesHeld++ +} + +func (cw *CloudWatchMonitoringService) LeaseLost(shard string) { + if _, ok := cw.shardMetrics[shard]; !ok { + cw.shardMetrics[shard] = 
&cloudWatchMetrics{} + } + cw.shardMetrics[shard].Lock() + defer cw.shardMetrics[shard].Unlock() + cw.shardMetrics[shard].leasesHeld-- +} + +func (cw *CloudWatchMonitoringService) LeaseRenewed(shard string) { + if _, ok := cw.shardMetrics[shard]; !ok { + cw.shardMetrics[shard] = &cloudWatchMetrics{} + } + cw.shardMetrics[shard].Lock() + defer cw.shardMetrics[shard].Unlock() + cw.shardMetrics[shard].leaseRenewals++ +} + +func (cw *CloudWatchMonitoringService) RecordGetRecordsTime(shard string, time float64) { + if _, ok := cw.shardMetrics[shard]; !ok { + cw.shardMetrics[shard] = &cloudWatchMetrics{} + } + cw.shardMetrics[shard].Lock() + defer cw.shardMetrics[shard].Unlock() + cw.shardMetrics[shard].getRecordsTime = append(cw.shardMetrics[shard].getRecordsTime, time) +} +func (cw *CloudWatchMonitoringService) RecordProcessRecordsTime(shard string, time float64) { + if _, ok := cw.shardMetrics[shard]; !ok { + cw.shardMetrics[shard] = &cloudWatchMetrics{} + } + cw.shardMetrics[shard].Lock() + defer cw.shardMetrics[shard].Unlock() + cw.shardMetrics[shard].processRecordsTime = append(cw.shardMetrics[shard].processRecordsTime, time) +} + +func sumFloat64(slice []float64) *float64 { + sum := float64(0) + for _, num := range slice { + sum += num + } + return &sum +} + +func maxFloat64(slice []float64) *float64 { + if len(slice) < 1 { + return aws.Float64(0) + } + max := slice[0] + for _, num := range slice { + if num > max { + max = num + } + } + return &max +} + +func minFloat64(slice []float64) *float64 { + if len(slice) < 1 { + return aws.Float64(0) + } + min := slice[0] + for _, num := range slice { + if num < min { + min = num + } + } + return &min +} diff --git a/src/clientlibrary/metrics/interfaces.go b/src/clientlibrary/metrics/interfaces.go new file mode 100644 index 0000000..141e644 --- /dev/null +++ b/src/clientlibrary/metrics/interfaces.go @@ -0,0 +1,66 @@ +package metrics + +import ( + "fmt" +) + +// MonitoringConfiguration allows you to configure how record 
processing metrics are exposed +type MonitoringConfiguration struct { + MonitoringService string // Type of monitoring to expose. Supported types are "prometheus" + Prometheus PrometheusMonitoringService + CloudWatch CloudWatchMonitoringService + service MonitoringService +} + +type MonitoringService interface { + Init() error + IncrRecordsProcessed(string, int) + IncrBytesProcessed(string, int64) + MillisBehindLatest(string, float64) + LeaseGained(string) + LeaseLost(string) + LeaseRenewed(string) + RecordGetRecordsTime(string, float64) + RecordProcessRecordsTime(string, float64) +} + +func (m *MonitoringConfiguration) Init(nameSpace, streamName string, workerID string) error { + if m.MonitoringService == "" { + m.service = &noopMonitoringService{} + return nil + } + + switch m.MonitoringService { + case "prometheus": + m.Prometheus.Namespace = nameSpace + m.Prometheus.KinesisStream = streamName + m.Prometheus.WorkerID = workerID + m.service = &m.Prometheus + case "cloudwatch": + m.CloudWatch.KinesisStream = streamName + m.CloudWatch.WorkerID = workerID + m.service = &m.CloudWatch + default: + return fmt.Errorf("Invalid monitoring service type %s", m.MonitoringService) + } + return m.service.Init() +} + +func (m *MonitoringConfiguration) GetMonitoringService() MonitoringService { + return m.service +} + +type noopMonitoringService struct{} + +func (n *noopMonitoringService) Init() error { + return nil +} + +func (n *noopMonitoringService) IncrRecordsProcessed(shard string, count int) {} +func (n *noopMonitoringService) IncrBytesProcessed(shard string, count int64) {} +func (n *noopMonitoringService) MillisBehindLatest(shard string, millSeconds float64) {} +func (n *noopMonitoringService) LeaseGained(shard string) {} +func (n *noopMonitoringService) LeaseLost(shard string) {} +func (n *noopMonitoringService) LeaseRenewed(shard string) {} +func (n *noopMonitoringService) RecordGetRecordsTime(shard string, time float64) {} +func (n *noopMonitoringService) 
RecordProcessRecordsTime(shard string, time float64) {} diff --git a/src/clientlibrary/metrics/prometheus.go b/src/clientlibrary/metrics/prometheus.go new file mode 100644 index 0000000..4ec13fd --- /dev/null +++ b/src/clientlibrary/metrics/prometheus.go @@ -0,0 +1,113 @@ +package metrics + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + log "github.com/sirupsen/logrus" +) + +type PrometheusMonitoringService struct { + ListenAddress string + + Namespace string + KinesisStream string + WorkerID string + processedRecords *prometheus.CounterVec + processedBytes *prometheus.CounterVec + behindLatestMillis *prometheus.GaugeVec + leasesHeld *prometheus.GaugeVec + leaseRenewals *prometheus.CounterVec + getRecordsTime *prometheus.HistogramVec + processRecordsTime *prometheus.HistogramVec +} + +func (p *PrometheusMonitoringService) Init() error { + p.processedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: p.Namespace + `_processed_bytes`, + Help: "Number of bytes processed", + }, []string{"kinesisStream", "shard"}) + p.processedRecords = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: p.Namespace + `_processed_records`, + Help: "Number of records processed", + }, []string{"kinesisStream", "shard"}) + p.behindLatestMillis = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: p.Namespace + `_behind_latest_millis`, + Help: "The amount of milliseconds processing is behind", + }, []string{"kinesisStream", "shard"}) + p.leasesHeld = prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: p.Namespace + `_leases_held`, + Help: "The number of leases held by the worker", + }, []string{"kinesisStream", "shard", "workerID"}) + p.leaseRenewals = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: p.Namespace + `_lease_renewals`, + Help: "The number of successful lease renewals", + }, []string{"kinesisStream", "shard", "workerID"}) + p.getRecordsTime = 
prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: p.Namespace + `_get_records_duration_milliseconds`, + Help: "The time taken to fetch records and process them", + }, []string{"kinesisStream", "shard"}) + p.processRecordsTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ + Name: p.Namespace + `_process_records_duration_milliseconds`, + Help: "The time taken to process records", + }, []string{"kinesisStream", "shard"}) + + metrics := []prometheus.Collector{ + p.processedBytes, + p.processedRecords, + p.behindLatestMillis, + p.leasesHeld, + p.leaseRenewals, + p.getRecordsTime, + p.processRecordsTime, + } + for _, metric := range metrics { + err := prometheus.Register(metric) + if err != nil { + return err + } + } + + http.Handle("/metrics", promhttp.Handler()) + go func() { + log.Debugf("Starting Prometheus listener on %s", p.ListenAddress) + err := http.ListenAndServe(p.ListenAddress, nil) + if err != nil { + log.Errorln("Error starting Prometheus metrics endpoint", err) + } + }() + return nil +} + +func (p *PrometheusMonitoringService) IncrRecordsProcessed(shard string, count int) { + p.processedRecords.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Add(float64(count)) +} + +func (p *PrometheusMonitoringService) IncrBytesProcessed(shard string, count int64) { + p.processedBytes.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Add(float64(count)) +} + +func (p *PrometheusMonitoringService) MillisBehindLatest(shard string, millSeconds float64) { + p.behindLatestMillis.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Set(millSeconds) +} + +func (p *PrometheusMonitoringService) LeaseGained(shard string) { + p.leasesHeld.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream, "workerID": p.WorkerID}).Inc() +} + +func (p *PrometheusMonitoringService) LeaseLost(shard string) { + p.leasesHeld.With(prometheus.Labels{"shard": shard, "kinesisStream": 
p.KinesisStream, "workerID": p.WorkerID}).Dec() +} + +func (p *PrometheusMonitoringService) LeaseRenewed(shard string) { + p.leaseRenewals.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream, "workerID": p.WorkerID}).Inc() +} + +func (p *PrometheusMonitoringService) RecordGetRecordsTime(shard string, time float64) { + p.getRecordsTime.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Observe(time) +} + +func (p *PrometheusMonitoringService) RecordProcessRecordsTime(shard string, time float64) { + p.processRecordsTime.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Observe(time) +} diff --git a/src/clientlibrary/utils/random.go b/src/clientlibrary/utils/random.go new file mode 100644 index 0000000..ea0299a --- /dev/null +++ b/src/clientlibrary/utils/random.go @@ -0,0 +1,30 @@ +package utils + +import ( + "math/rand" +) + +const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" +const ( + letterIdxBits = 6 // 6 bits to represent a letter index + letterIdxMask = 1<= 0; { + if remain == 0 { + cache, remain = rand.Int63(), letterIdxMax + } + if idx := int(cache & letterIdxMask); idx < len(letterBytes) { + b[i] = letterBytes[idx] + i-- + } + cache >>= letterIdxBits + remain-- + } + + return string(b) +} diff --git a/src/clientlibrary/worker/checkpointer.go b/src/clientlibrary/worker/checkpointer.go new file mode 100644 index 0000000..2ca4dda --- /dev/null +++ b/src/clientlibrary/worker/checkpointer.go @@ -0,0 +1,276 @@ +package worker + +import ( + "errors" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + "github.com/matryer/try" + log "github.com/sirupsen/logrus" + + "clientlibrary/config" +) + +const ( + // ErrLeaseNotAquired is returned when we failed to get a lock on the shard + ErrLeaseNotAquired = "Lease is already held by another 
node" + // ErrInvalidDynamoDBSchema is returned when there are one or more fields missing from the table + ErrInvalidDynamoDBSchema = "The DynamoDB schema is invalid and may need to be re-created" +) + +// Checkpointer handles checkpointing when a record has been processed +type Checkpointer interface { + Init() error + GetLease(*shardStatus, string) error + CheckpointSequence(*shardStatus) error + FetchCheckpoint(*shardStatus) error +} + +// ErrSequenceIDNotFound is returned by FetchCheckpoint when no SequenceID is found +var ErrSequenceIDNotFound = errors.New("SequenceIDNotFoundForShard") + +// DynamoCheckpoint implements the Checkpoint interface using DynamoDB as a backend +type DynamoCheckpoint struct { + TableName string + leaseTableReadCapacity int64 + leaseTableWriteCapacity int64 + + LeaseDuration int + svc dynamodbiface.DynamoDBAPI + kclConfig *config.KinesisClientLibConfiguration + Retries int +} + +func NewDynamoCheckpoint(dynamo dynamodbiface.DynamoDBAPI, kclConfig *config.KinesisClientLibConfiguration) Checkpointer { + checkpointer := &DynamoCheckpoint{ + TableName: kclConfig.TableName, + leaseTableReadCapacity: int64(kclConfig.InitialLeaseTableReadCapacity), + leaseTableWriteCapacity: int64(kclConfig.InitialLeaseTableWriteCapacity), + LeaseDuration: kclConfig.FailoverTimeMillis, + svc: dynamo, + kclConfig: kclConfig, + Retries: 5, + } + return checkpointer +} + +// Init initialises the DynamoDB Checkpoint +func (checkpointer *DynamoCheckpoint) Init() error { + if !checkpointer.doesTableExist() { + return checkpointer.createTable() + } + return nil +} + +// GetLease attempts to gain a lock on the given shard +func (checkpointer *DynamoCheckpoint) GetLease(shard *shardStatus, newAssignTo string) error { + newLeaseTimeout := time.Now().Add(time.Duration(checkpointer.LeaseDuration) * time.Millisecond).UTC() + newLeaseTimeoutString := newLeaseTimeout.Format(time.RFC3339) + currentCheckpoint, err := checkpointer.getItem(shard.ID) + if err != nil { + return 
err + } + + assignedVar, assignedToOk := currentCheckpoint["AssignedTo"] + leaseVar, leaseTimeoutOk := currentCheckpoint["LeaseTimeout"] + var conditionalExpression string + var expressionAttributeValues map[string]*dynamodb.AttributeValue + + if !leaseTimeoutOk || !assignedToOk { + conditionalExpression = "attribute_not_exists(AssignedTo)" + } else { + assignedTo := *assignedVar.S + leaseTimeout := *leaseVar.S + + currentLeaseTimeout, err := time.Parse(time.RFC3339, leaseTimeout) + if err != nil { + return err + } + if !time.Now().UTC().After(currentLeaseTimeout) && assignedTo != newAssignTo { + return errors.New(ErrLeaseNotAquired) + } + log.Debugf("Attempting to get a lock for shard: %s, leaseTimeout: %s, assignedTo: %s", shard.ID, currentLeaseTimeout, assignedTo) + conditionalExpression = "ShardID = :id AND AssignedTo = :assigned_to AND LeaseTimeout = :lease_timeout" + expressionAttributeValues = map[string]*dynamodb.AttributeValue{ + ":id": { + S: &shard.ID, + }, + ":assigned_to": { + S: &assignedTo, + }, + ":lease_timeout": { + S: &leaseTimeout, + }, + } + } + + marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ + "ShardID": { + S: &shard.ID, + }, + "AssignedTo": { + S: &newAssignTo, + }, + "LeaseTimeout": { + S: &newLeaseTimeoutString, + }, + } + + if shard.Checkpoint != "" { + marshalledCheckpoint["Checkpoint"] = &dynamodb.AttributeValue{ + S: &shard.Checkpoint, + } + } + + err = checkpointer.conditionalUpdate(conditionalExpression, expressionAttributeValues, marshalledCheckpoint) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == dynamodb.ErrCodeConditionalCheckFailedException { + return errors.New(ErrLeaseNotAquired) + } + } + return err + } + + shard.mux.Lock() + shard.AssignedTo = newAssignTo + shard.LeaseTimeout = newLeaseTimeout + shard.mux.Unlock() + + return nil +} + +// CheckpointSequence writes a checkpoint at the designated sequence ID +func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard 
*shardStatus) error { + leaseTimeout := shard.LeaseTimeout.UTC().Format(time.RFC3339) + marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ + "ShardID": { + S: &shard.ID, + }, + "SequenceID": { + S: &shard.Checkpoint, + }, + "AssignedTo": { + S: &shard.AssignedTo, + }, + "LeaseTimeout": { + S: &leaseTimeout, + }, + } + return checkpointer.saveItem(marshalledCheckpoint) +} + +// FetchCheckpoint retrieves the checkpoint for the given shard +func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *shardStatus) error { + checkpoint, err := checkpointer.getItem(shard.ID) + if err != nil { + return err + } + + sequenceID, ok := checkpoint["SequenceID"] + if !ok { + return ErrSequenceIDNotFound + } + log.Debugf("Retrieved Shard Iterator %s", *sequenceID.S) + shard.mux.Lock() + defer shard.mux.Unlock() + shard.Checkpoint = *sequenceID.S + + if assignedTo, ok := checkpoint["Assignedto"]; ok { + shard.AssignedTo = *assignedTo.S + } + return nil +} + +func (checkpointer *DynamoCheckpoint) createTable() error { + input := &dynamodb.CreateTableInput{ + AttributeDefinitions: []*dynamodb.AttributeDefinition{ + { + AttributeName: aws.String("ShardID"), + AttributeType: aws.String("S"), + }, + }, + KeySchema: []*dynamodb.KeySchemaElement{ + { + AttributeName: aws.String("ShardID"), + KeyType: aws.String("HASH"), + }, + }, + ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + ReadCapacityUnits: aws.Int64(checkpointer.leaseTableReadCapacity), + WriteCapacityUnits: aws.Int64(checkpointer.leaseTableWriteCapacity), + }, + TableName: aws.String(checkpointer.TableName), + } + _, err := checkpointer.svc.CreateTable(input) + return err +} + +func (checkpointer *DynamoCheckpoint) doesTableExist() bool { + input := &dynamodb.DescribeTableInput{ + TableName: aws.String(checkpointer.TableName), + } + _, err := checkpointer.svc.DescribeTable(input) + return (err == nil) +} + +func (checkpointer *DynamoCheckpoint) saveItem(item map[string]*dynamodb.AttributeValue) error { + 
return checkpointer.putItem(&dynamodb.PutItemInput{ + TableName: aws.String(checkpointer.TableName), + Item: item, + }) +} + +func (checkpointer *DynamoCheckpoint) conditionalUpdate(conditionExpression string, expressionAttributeValues map[string]*dynamodb.AttributeValue, item map[string]*dynamodb.AttributeValue) error { + return checkpointer.putItem(&dynamodb.PutItemInput{ + ConditionExpression: aws.String(conditionExpression), + TableName: aws.String(checkpointer.TableName), + Item: item, + ExpressionAttributeValues: expressionAttributeValues, + }) +} + +func (checkpointer *DynamoCheckpoint) putItem(input *dynamodb.PutItemInput) error { + return try.Do(func(attempt int) (bool, error) { + _, err := checkpointer.svc.PutItem(input) + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException || + awsErr.Code() == dynamodb.ErrCodeInternalServerError && + attempt < checkpointer.Retries { + // Backoff time as recommended by https://docs.aws.amazon.com/general/latest/gr/api-retries.html + time.Sleep(time.Duration(2^attempt*100) * time.Millisecond) + return true, err + } + } + return false, err + }) +} + +func (checkpointer *DynamoCheckpoint) getItem(shardID string) (map[string]*dynamodb.AttributeValue, error) { + var item *dynamodb.GetItemOutput + err := try.Do(func(attempt int) (bool, error) { + var err error + item, err = checkpointer.svc.GetItem(&dynamodb.GetItemInput{ + TableName: aws.String(checkpointer.TableName), + Key: map[string]*dynamodb.AttributeValue{ + "ShardID": { + S: aws.String(shardID), + }, + }, + }) + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException || + awsErr.Code() == dynamodb.ErrCodeInternalServerError && + attempt < checkpointer.Retries { + // Backoff time as recommended by https://docs.aws.amazon.com/general/latest/gr/api-retries.html + time.Sleep(time.Duration(2^attempt*100) * time.Millisecond) + return true, err + } 
+ } + return false, err + }) + return item.Item, err +} diff --git a/src/clientlibrary/worker/record-processor-checkpointer.go b/src/clientlibrary/worker/record-processor-checkpointer.go new file mode 100644 index 0000000..4f624f2 --- /dev/null +++ b/src/clientlibrary/worker/record-processor-checkpointer.go @@ -0,0 +1,56 @@ +package worker + +import ( + "github.com/aws/aws-sdk-go/aws" + + kcl "clientlibrary/interfaces" +) + +type ( + + /* Objects of this class are prepared to checkpoint at a specific sequence number. They use an + * IRecordProcessorCheckpointer to do the actual checkpointing, so their checkpoint is subject to the same 'didn't go + * backwards' validation as a normal checkpoint. + */ + PreparedCheckpointer struct { + pendingCheckpointSequenceNumber *kcl.ExtendedSequenceNumber + checkpointer kcl.IRecordProcessorCheckpointer + } + + /** + * This class is used to enable RecordProcessors to checkpoint their progress. + * The Amazon Kinesis Client Library will instantiate an object and provide a reference to the application + * RecordProcessor instance. Amazon Kinesis Client Library will create one instance per shard assignment. 
+ */ + RecordProcessorCheckpointer struct { + shard *shardStatus + checkpoint Checkpointer + } +) + +func NewRecordProcessorCheckpoint(shard *shardStatus, checkpoint Checkpointer) kcl.IRecordProcessorCheckpointer { + return &RecordProcessorCheckpointer{ + shard: shard, + checkpoint: checkpoint, + } +} + +func (pc *PreparedCheckpointer) GetPendingCheckpoint() *kcl.ExtendedSequenceNumber { + return pc.pendingCheckpointSequenceNumber +} + +func (pc *PreparedCheckpointer) Checkpoint() error { + return pc.checkpointer.Checkpoint(pc.pendingCheckpointSequenceNumber.SequenceNumber) +} + +func (rc *RecordProcessorCheckpointer) Checkpoint(sequenceNumber *string) error { + rc.shard.mux.Lock() + rc.shard.Checkpoint = aws.StringValue(sequenceNumber) + rc.shard.mux.Unlock() + return rc.checkpoint.CheckpointSequence(rc.shard) +} + +func (rc *RecordProcessorCheckpointer) PrepareCheckpoint(sequenceNumber *string) (kcl.IPreparedCheckpointer, error) { + return &PreparedCheckpointer{}, nil + +} diff --git a/src/clientlibrary/worker/shard-consumer.go b/src/clientlibrary/worker/shard-consumer.go new file mode 100644 index 0000000..905b48b --- /dev/null +++ b/src/clientlibrary/worker/shard-consumer.go @@ -0,0 +1,195 @@ +package worker + +import ( + log "github.com/sirupsen/logrus" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + + "clientlibrary/config" + kcl "clientlibrary/interfaces" + "clientlibrary/metrics" +) + +const ( + // This is the initial state of a shard consumer. This causes the consumer to remain blocked until the all + // parent shards have been completed. + WAITING_ON_PARENT_SHARDS ShardConsumerState = iota + 1 + + // This state is responsible for initializing the record processor with the shard information. 
+ INITIALIZING + + // + PROCESSING + + SHUTDOWN_REQUESTED + + SHUTTING_DOWN + + SHUTDOWN_COMPLETE + + // ErrCodeKMSThrottlingException is defined in the API Reference https://docs.aws.amazon.com/sdk-for-go/api/service/kinesis/#Kinesis.GetRecords + // But it's not a constant? + ErrCodeKMSThrottlingException = "KMSThrottlingException" +) + +type ShardConsumerState int + +// ShardConsumer is responsible for consuming data records of a (specified) shard. +// Note: ShardConsumer only deal with one shard. +type ShardConsumer struct { + streamName string + shard *shardStatus + kc kinesisiface.KinesisAPI + checkpointer Checkpointer + recordProcessor kcl.IRecordProcessor + kclConfig *config.KinesisClientLibConfiguration + stop *chan struct{} + waitGroup *sync.WaitGroup + consumerID string + mService metrics.MonitoringService + state ShardConsumerState +} + +func (sc *ShardConsumer) getShardIterator(shard *shardStatus) (*string, error) { + err := sc.checkpointer.FetchCheckpoint(shard) + if err != nil && err != ErrSequenceIDNotFound { + return nil, err + } + + // If there isn't any checkpoint for the shard, use the configuration value. 
+ if shard.Checkpoint == "" { + initPos := sc.kclConfig.InitialPositionInStream + shardIterArgs := &kinesis.GetShardIteratorInput{ + ShardId: &shard.ID, + ShardIteratorType: config.InitalPositionInStreamToShardIteratorType(initPos), + StreamName: &sc.streamName, + } + iterResp, err := sc.kc.GetShardIterator(shardIterArgs) + if err != nil { + return nil, err + } + return iterResp.ShardIterator, nil + } + + shardIterArgs := &kinesis.GetShardIteratorInput{ + ShardId: &shard.ID, + ShardIteratorType: aws.String("AFTER_SEQUENCE_NUMBER"), + StartingSequenceNumber: &shard.Checkpoint, + StreamName: &sc.streamName, + } + iterResp, err := sc.kc.GetShardIterator(shardIterArgs) + if err != nil { + return nil, err + } + return iterResp.ShardIterator, nil +} + +func (sc *ShardConsumer) getRecords(shard *shardStatus) error { + defer sc.waitGroup.Done() + + shardIterator, err := sc.getShardIterator(shard) + if err != nil { + log.Errorf("Unable to get shard iterator for %s: %v", shard.ID, err) + return err + } + + recordCheckpointer := NewRecordProcessorCheckpoint(shard, sc.checkpointer) + var retriedErrors int + + for { + getRecordsStartTime := time.Now() + if time.Now().UTC().After(shard.LeaseTimeout.Add(-5 * time.Second)) { + log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID) + err = sc.checkpointer.GetLease(shard, sc.consumerID) + if err != nil { + if err.Error() == ErrLeaseNotAquired { + shard.setLeaseOwner("") + sc.mService.LeaseLost(shard.ID) + log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", shard.ID, sc.consumerID) + return nil + } + log.Fatal(err) + } + } + + log.Debugf("Trying to read %d record from iterator: %v", sc.kclConfig.MaxRecords, aws.StringValue(shardIterator)) + getRecordsArgs := &kinesis.GetRecordsInput{ + Limit: aws.Int64(int64(sc.kclConfig.MaxRecords)), + ShardIterator: shardIterator, + } + // Get records from stream and retry as needed + getResp, err := sc.kc.GetRecords(getRecordsArgs) + if err != nil { + if 
awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == kinesis.ErrCodeProvisionedThroughputExceededException || awsErr.Code() == ErrCodeKMSThrottlingException { + log.Errorf("Error getting records from shard %v: %v", shard.ID, err) + retriedErrors++ + // exponential backoff + time.Sleep(time.Duration(2^retriedErrors*100) * time.Millisecond) + continue + } + } + log.Fatalf("Error getting records from Kinesis that cannot be retried: %s\nRequest: %s", err, getRecordsArgs) + } + retriedErrors = 0 + + // IRecordProcessorCheckpointer + input := &kcl.ProcessRecordsInput{ + Records: getResp.Records, + MillisBehindLatest: aws.Int64Value(getResp.MillisBehindLatest), + Checkpointer: recordCheckpointer, + } + + recordLength := len(input.Records) + recordBytes := int64(0) + log.Debugf("Received %d records", recordLength) + + for _, r := range getResp.Records { + recordBytes += int64(len(r.Data)) + } + + if recordLength > 0 || sc.kclConfig.CallProcessRecordsEvenForEmptyRecordList { + processRecordsStartTime := time.Now() + + // Delivery the events to the record processor + sc.recordProcessor.ProcessRecords(input) + + // Convert from nanoseconds to milliseconds + processedRecordsTiming := time.Since(processRecordsStartTime) / 1000000 + sc.mService.RecordProcessRecordsTime(shard.ID, float64(processedRecordsTiming)) + } + + // Idle between each read, the user is responsible for checkpoint the progress + time.Sleep(time.Duration(sc.kclConfig.IdleTimeBetweenReadsInMillis) * time.Millisecond) + + sc.mService.IncrRecordsProcessed(shard.ID, recordLength) + sc.mService.IncrBytesProcessed(shard.ID, recordBytes) + sc.mService.MillisBehindLatest(shard.ID, float64(*getResp.MillisBehindLatest)) + + // Convert from nanoseconds to milliseconds + getRecordsTime := time.Since(getRecordsStartTime) / 1000000 + sc.mService.RecordGetRecordsTime(shard.ID, float64(getRecordsTime)) + + // The shard has been closed, so no new records can be read from it + if getResp.NextShardIterator == nil { + 
log.Infof("Shard %s closed", shard.ID) + shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.TERMINATE, Checkpointer: recordCheckpointer} + sc.recordProcessor.Shutdown(shutdownInput) + return nil + } + shardIterator = getResp.NextShardIterator + + select { + case <-*sc.stop: + shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.REQUESTED, Checkpointer: recordCheckpointer} + sc.recordProcessor.Shutdown(shutdownInput) + return nil + case <-time.After(1 * time.Nanosecond): + } + } +} diff --git a/src/clientlibrary/worker/worker.go b/src/clientlibrary/worker/worker.go new file mode 100644 index 0000000..39ed3d1 --- /dev/null +++ b/src/clientlibrary/worker/worker.go @@ -0,0 +1,289 @@ +package worker + +import ( + "errors" + log "github.com/sirupsen/logrus" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + + "clientlibrary/config" + kcl "clientlibrary/interfaces" + "clientlibrary/metrics" +) + +type shardStatus struct { + ID string + Checkpoint string + AssignedTo string + mux *sync.Mutex + LeaseTimeout time.Time +} + +func (ss *shardStatus) getLeaseOwner() string { + ss.mux.Lock() + defer ss.mux.Unlock() + return ss.AssignedTo +} + +func (ss *shardStatus) setLeaseOwner(owner string) { + ss.mux.Lock() + defer ss.mux.Unlock() + ss.AssignedTo = owner +} + +/** + * Worker is the high level class that Kinesis applications use to start processing data. It initializes and oversees + * different components (e.g. syncing shard and lease information, tracking shard assignments, and processing data from + * the shards). 
+ */ +type Worker struct { + streamName string + regionName string + workerID string + + processorFactory kcl.IRecordProcessorFactory + kclConfig *config.KinesisClientLibConfiguration + kc kinesisiface.KinesisAPI + dynamo dynamodbiface.DynamoDBAPI + checkpointer Checkpointer + + stop *chan struct{} + waitGroup *sync.WaitGroup + sigs *chan os.Signal + + shardStatus map[string]*shardStatus + + metricsConfig *metrics.MonitoringConfiguration + mService metrics.MonitoringService +} + +// NewWorker constructs a Worker instance for processing Kinesis stream data. +func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisClientLibConfiguration, metricsConfig *metrics.MonitoringConfiguration) *Worker { + w := &Worker{ + streamName: kclConfig.StreamName, + regionName: kclConfig.RegionName, + workerID: kclConfig.WorkerID, + processorFactory: factory, + kclConfig: kclConfig, + metricsConfig: metricsConfig, + } + + // create session for Kinesis + log.Info("Creating Kinesis session") + s := session.New(&aws.Config{Region: aws.String(w.regionName)}) + w.kc = kinesis.New(s) + + log.Info("Creating DynamoDB session") + s = session.New(&aws.Config{Region: aws.String(w.regionName)}) + w.dynamo = dynamodb.New(s) + w.checkpointer = NewDynamoCheckpoint(w.dynamo, kclConfig) + + if w.metricsConfig == nil { + w.metricsConfig = &metrics.MonitoringConfiguration{MonitoringService: ""} + } + return w +} + +// Run starts consuming data from the stream, and pass it to the application record processors. +func (w *Worker) Start() error { + if err := w.initialize(); err != nil { + log.Errorf("Failed to start Worker: %+v", err) + return err + } + + log.Info("Initialization complete. Starting worker event loop.") + + // entering event loop + go w.eventLoop() + return nil +} + +// Shutdown signals worker to shutdown. Worker will try initiating shutdown of all record processors. 
+func (w *Worker) Shutdown() { + log.Info("Worker shutdown in requested.") + + close(*w.stop) + w.waitGroup.Wait() + + log.Info("Worker loop is complete. Exiting from worker.") +} + +// Publish to write some data into stream. This function is mainly used for testing purpose. +func (w *Worker) Publish(streamName, partitionKey string, data []byte) error { + _, err := w.kc.PutRecord(&kinesis.PutRecordInput{ + Data: data, + StreamName: aws.String(streamName), + PartitionKey: aws.String(partitionKey), + }) + if err != nil { + log.Errorf("Error in publishing data to %s/%s. Error: %+v", streamName, partitionKey, err) + } + return err +} + +// initialize +func (w *Worker) initialize() error { + log.Info("Worker initialization in progress...") + + err := w.metricsConfig.Init(w.kclConfig.ApplicationName, w.streamName, w.workerID) + if err != nil { + log.Errorf("Failed to start monitoring service: %s", err) + } + w.mService = w.metricsConfig.GetMonitoringService() + + log.Info("Initializing Checkpointer") + if err := w.checkpointer.Init(); err != nil { + log.Errorf("Failed to start Checkpointer: %+v", err) + return err + } + + w.shardStatus = make(map[string]*shardStatus) + + sigs := make(chan os.Signal, 1) + w.sigs = &sigs + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + stopChan := make(chan struct{}) + w.stop = &stopChan + + wg := sync.WaitGroup{} + w.waitGroup = &wg + + err = w.getShardIDs("") + if err != nil { + log.Errorf("Error getting Kinesis shards: %s", err) + return err + } + + return nil +} + +// newShardConsumer to create a shard consumer instance +func (w *Worker) newShardConsumer(shard *shardStatus) *ShardConsumer { + return &ShardConsumer{ + streamName: w.streamName, + shard: shard, + kc: w.kc, + checkpointer: w.checkpointer, + recordProcessor: w.processorFactory.CreateProcessor(), + kclConfig: w.kclConfig, + consumerID: w.workerID, + stop: w.stop, + waitGroup: w.waitGroup, + mService: w.mService, + state: WAITING_ON_PARENT_SHARDS, + } +} + +// 
eventLoop +func (w *Worker) eventLoop() { + for { + err := w.getShardIDs("") + if err != nil { + log.Errorf("Error getting Kinesis shards: %v", err) + // Back-off? + time.Sleep(500 * time.Millisecond) + } + log.Infof("Found %d shards", len(w.shardStatus)) + + // Count the number of leases hold by this worker + counter := 0 + for _, shard := range w.shardStatus { + if shard.getLeaseOwner() == w.workerID { + counter++ + } + } + + // max number of lease has not been reached + if counter < w.kclConfig.MaxLeasesForWorker { + for _, shard := range w.shardStatus { + // We already own this shard so carry on + if shard.getLeaseOwner() == w.workerID { + continue + } + + err := w.checkpointer.FetchCheckpoint(shard) + if err != nil { + if err != ErrSequenceIDNotFound { + log.Fatal(err) + } + } + + err = w.checkpointer.GetLease(shard, w.workerID) + if err != nil { + if err.Error() == ErrLeaseNotAquired { + continue + } + log.Fatal(err) + } + + w.mService.LeaseGained(shard.ID) + + log.Infof("Start Shard Consumer for shard: %v", shard.ID) + sc := w.newShardConsumer(shard) + go sc.getRecords(shard) + w.waitGroup.Add(1) + } + } + + select { + case sig := <-*w.sigs: + log.Infof("Received signal %s. 
Exiting", sig) + w.Shutdown() + return + case <-*w.stop: + log.Info("Shutting down") + return + case <-time.After(time.Duration(w.kclConfig.ShardSyncIntervalMillis) * time.Millisecond): + } + } +} + +// List all ACTIVE shard and store them into shardStatus table +func (w *Worker) getShardIDs(startShardID string) error { + args := &kinesis.DescribeStreamInput{ + StreamName: aws.String(w.streamName), + } + if startShardID != "" { + args.ExclusiveStartShardId = aws.String(startShardID) + } + streamDesc, err := w.kc.DescribeStream(args) + if err != nil { + return err + } + + if *streamDesc.StreamDescription.StreamStatus != "ACTIVE" { + return errors.New("Stream not active") + } + + var lastShardID string + for _, s := range streamDesc.StreamDescription.Shards { + if _, ok := w.shardStatus[*s.ShardId]; !ok { + log.Debugf("Found shard with id %s", *s.ShardId) + w.shardStatus[*s.ShardId] = &shardStatus{ + ID: *s.ShardId, + mux: &sync.Mutex{}, + } + } + lastShardID = *s.ShardId + } + + if *streamDesc.StreamDescription.HasMoreShards { + err := w.getShardIDs(lastShardID) + if err != nil { + return err + } + } + + return nil +} diff --git a/src/clientlibrary/worker/worker_test.go b/src/clientlibrary/worker/worker_test.go new file mode 100644 index 0000000..ebcbc3d --- /dev/null +++ b/src/clientlibrary/worker/worker_test.go @@ -0,0 +1,108 @@ +package worker + +import ( + "os" + "testing" + "time" + + log "github.com/sirupsen/logrus" + + cfg "clientlibrary/config" + kc "clientlibrary/interfaces" + "clientlibrary/utils" + "github.com/stretchr/testify/assert" +) + +const ( + streamName = "kcl-test" + regionName = "us-west-2" + workerID = "test-worker" +) + +const specstr = `{"name":"kube-qQyhk","networking":{"containerNetworkCidr":"10.2.0.0/16"},"orgName":"BVT-Org-cLQch","projectName":"project-tDSJd","serviceLevel":"DEVELOPER","size":{"count":1},"version":"1.8.1-4"}` + +func TestWorker(t *testing.T) { + os.Setenv("AWS_ACCESS_KEY_ID", "your aws access key id") + 
os.Setenv("AWS_SECRET_ACCESS_KEY", "your aws secret access key") + defer os.Unsetenv("AWS_ACCESS_KEY_ID") + defer os.Unsetenv("AWS_SECRET_ACCESS_KEY") + kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(40). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000) + + log.SetOutput(os.Stdout) + log.SetLevel(log.DebugLevel) + + assert.Equal(t, regionName, kclConfig.RegionName) + assert.Equal(t, streamName, kclConfig.StreamName) + + worker := NewWorker(recordProcessorFactory(t), kclConfig, nil) + assert.Equal(t, regionName, worker.regionName) + assert.Equal(t, streamName, worker.streamName) + + err := worker.Start() + assert.Nil(t, err) + + // Put some data into stream. + for i := 0; i < 100; i++ { + // Use random string as partition key to ensure even distribution across shards + err := worker.Publish(streamName, utils.RandStringBytesMaskImpr(10), []byte(specstr)) + if err != nil { + t.Errorf("Errorin Publish. %+v", err) + } + } + + time.Sleep(10 * time.Second) + worker.Shutdown() +} + +// Record processor factory is used to create RecordProcessor +func recordProcessorFactory(t *testing.T) kc.IRecordProcessorFactory { + return &dumpRecordProcessorFactory{t: t} +} + +// simple record processor and dump everything +type dumpRecordProcessorFactory struct { + t *testing.T +} + +func (d *dumpRecordProcessorFactory) CreateProcessor() kc.IRecordProcessor { + return &dumpRecordProcessor{ + t: d.t, + } +} + +// Create a dump record processor for printing out all data from record. 
+type dumpRecordProcessor struct { + t *testing.T +} + +func (dd *dumpRecordProcessor) Initialize(input *kc.InitializationInput) { + dd.t.Logf("sharId=%v", input.ShardId) +} + +func (dd *dumpRecordProcessor) ProcessRecords(input *kc.ProcessRecordsInput) { + dd.t.Log("Processing Records...") + + // don't process empty record + if len(input.Records) == 0 { + return + } + + for _, v := range input.Records { + dd.t.Logf("Record = %s", v.Data) + assert.Equal(dd.t, specstr, string(v.Data)) + } + + dd.t.Logf("Checkpoint it and MillisBehindLatest = %v", input.MillisBehindLatest) + // checkpoint it after processing this batch + lastRecordSequenceNubmer := input.Records[len(input.Records)-1].SequenceNumber + input.Checkpointer.Checkpoint(lastRecordSequenceNubmer) +} + +func (dd *dumpRecordProcessor) Shutdown(input *kc.ShutdownInput) { + dd.t.Logf("Shutdown Reason = %v", input.ShutdownReason) + +} diff --git a/src/leases/impl/lease.go b/src/leases/impl/lease.go index b87ecf3..394475f 100644 --- a/src/leases/impl/lease.go +++ b/src/leases/impl/lease.go @@ -16,8 +16,11 @@ const ( // processing the corresponding unit of work, or until it fails. When the worker stops holding the lease, another worker will // take and hold the lease. type Lease struct { - leaseKey string - leaseOwner string + // shard-id + leaseKey string + // worker# + leaseOwner string + // ccounter incremented periodically leaseCounter int64 // This field is used to prevent updates to leases that we have lost and re-acquired. 
It is deliberately not diff --git a/support/scripts/functions.sh b/support/scripts/functions.sh index b7265ea..845e3c3 100644 --- a/support/scripts/functions.sh +++ b/support/scripts/functions.sh @@ -14,6 +14,7 @@ local_go_pkgs() { grep -Fv '/tmp/' | \ grep -Fv '/run/' | \ grep -Fv '/tests/' | \ + grep -Fv '/gokini/' | \ sed -r 's|(.+)/[^/]+\.go$|\1|g' | \ sort -u } From c05bfb7ac8834eb6e9e57d80cab58d9afa2eb57c Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Wed, 18 Apr 2018 15:50:15 -0700 Subject: [PATCH 08/90] KCL: Fixing checkpoint operation This change fixed the bug of not finding checkpoint when process restart. It also adds missing call to record processor for notifying the shard info and checkpoint when application first started. Test: Run hmake test and verify the log. Change-Id: I4bdf21ac10c5ee988a0860c140991f7d05975541 --- src/clientlibrary/interfaces/inputs.go | 11 +++++++ src/clientlibrary/worker/checkpointer.go | 35 ++++++++++++---------- src/clientlibrary/worker/shard-consumer.go | 23 +++++++++++--- src/clientlibrary/worker/worker.go | 1 + src/clientlibrary/worker/worker_test.go | 13 ++++---- 5 files changed, 58 insertions(+), 25 deletions(-) diff --git a/src/clientlibrary/interfaces/inputs.go b/src/clientlibrary/interfaces/inputs.go index 27590c3..8f7590d 100644 --- a/src/clientlibrary/interfaces/inputs.go +++ b/src/clientlibrary/interfaces/inputs.go @@ -3,6 +3,7 @@ package interfaces import ( "time" + "github.com/aws/aws-sdk-go/aws" ks "github.com/aws/aws-sdk-go/service/kinesis" ) @@ -60,3 +61,13 @@ type ( Checkpointer IRecordProcessorCheckpointer } ) + +var shutdownReasonMap = map[ShutdownReason]*string{ + REQUESTED: aws.String("REQUESTED"), + TERMINATE: aws.String("TERMINATE"), + ZOMBIE: aws.String("ZOMBIE"), +} + +func ShutdownReasonMessage(reason ShutdownReason) *string { + return shutdownReasonMap[reason] +} diff --git a/src/clientlibrary/worker/checkpointer.go b/src/clientlibrary/worker/checkpointer.go index 2ca4dda..39584db 100644 --- 
a/src/clientlibrary/worker/checkpointer.go +++ b/src/clientlibrary/worker/checkpointer.go @@ -15,6 +15,11 @@ import ( ) const ( + LEASE_KEY_KEY = "ShardID" + LEASE_OWNER_KEY = "AssignedTo" + LEASE_TIMEOUT_KEY = "LeaseTimeout" + CHECKPOINT_SEQUENCE_NUMBER_KEY = "Checkpoint" + // ErrLeaseNotAquired is returned when we failed to get a lock on the shard ErrLeaseNotAquired = "Lease is already held by another node" // ErrInvalidDynamoDBSchema is returned when there are one or more fields missing from the table @@ -74,8 +79,8 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *shardStatus, newAssignTo s return err } - assignedVar, assignedToOk := currentCheckpoint["AssignedTo"] - leaseVar, leaseTimeoutOk := currentCheckpoint["LeaseTimeout"] + assignedVar, assignedToOk := currentCheckpoint[LEASE_OWNER_KEY] + leaseVar, leaseTimeoutOk := currentCheckpoint[LEASE_TIMEOUT_KEY] var conditionalExpression string var expressionAttributeValues map[string]*dynamodb.AttributeValue @@ -108,19 +113,19 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *shardStatus, newAssignTo s } marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ - "ShardID": { + LEASE_KEY_KEY: { S: &shard.ID, }, - "AssignedTo": { + LEASE_OWNER_KEY: { S: &newAssignTo, }, - "LeaseTimeout": { + LEASE_TIMEOUT_KEY: { S: &newLeaseTimeoutString, }, } if shard.Checkpoint != "" { - marshalledCheckpoint["Checkpoint"] = &dynamodb.AttributeValue{ + marshalledCheckpoint[CHECKPOINT_SEQUENCE_NUMBER_KEY] = &dynamodb.AttributeValue{ S: &shard.Checkpoint, } } @@ -147,16 +152,16 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *shardStatus, newAssignTo s func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard *shardStatus) error { leaseTimeout := shard.LeaseTimeout.UTC().Format(time.RFC3339) marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ - "ShardID": { + LEASE_KEY_KEY: { S: &shard.ID, }, - "SequenceID": { + CHECKPOINT_SEQUENCE_NUMBER_KEY: { S: &shard.Checkpoint, }, - "AssignedTo": { + 
LEASE_OWNER_KEY: { S: &shard.AssignedTo, }, - "LeaseTimeout": { + LEASE_TIMEOUT_KEY: { S: &leaseTimeout, }, } @@ -170,7 +175,7 @@ func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *shardStatus) error return err } - sequenceID, ok := checkpoint["SequenceID"] + sequenceID, ok := checkpoint[CHECKPOINT_SEQUENCE_NUMBER_KEY] if !ok { return ErrSequenceIDNotFound } @@ -179,7 +184,7 @@ func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *shardStatus) error defer shard.mux.Unlock() shard.Checkpoint = *sequenceID.S - if assignedTo, ok := checkpoint["Assignedto"]; ok { + if assignedTo, ok := checkpoint[LEASE_OWNER_KEY]; ok { shard.AssignedTo = *assignedTo.S } return nil @@ -189,13 +194,13 @@ func (checkpointer *DynamoCheckpoint) createTable() error { input := &dynamodb.CreateTableInput{ AttributeDefinitions: []*dynamodb.AttributeDefinition{ { - AttributeName: aws.String("ShardID"), + AttributeName: aws.String(LEASE_KEY_KEY), AttributeType: aws.String("S"), }, }, KeySchema: []*dynamodb.KeySchemaElement{ { - AttributeName: aws.String("ShardID"), + AttributeName: aws.String(LEASE_KEY_KEY), KeyType: aws.String("HASH"), }, }, @@ -256,7 +261,7 @@ func (checkpointer *DynamoCheckpoint) getItem(shardID string) (map[string]*dynam item, err = checkpointer.svc.GetItem(&dynamodb.GetItemInput{ TableName: aws.String(checkpointer.TableName), Key: map[string]*dynamodb.AttributeValue{ - "ShardID": { + LEASE_KEY_KEY: { S: aws.String(shardID), }, }, diff --git a/src/clientlibrary/worker/shard-consumer.go b/src/clientlibrary/worker/shard-consumer.go index 905b48b..012eff9 100644 --- a/src/clientlibrary/worker/shard-consumer.go +++ b/src/clientlibrary/worker/shard-consumer.go @@ -56,6 +56,7 @@ type ShardConsumer struct { } func (sc *ShardConsumer) getShardIterator(shard *shardStatus) (*string, error) { + // Get checkpoint of the shard from dynamoDB err := sc.checkpointer.FetchCheckpoint(shard) if err != nil && err != ErrSequenceIDNotFound { return nil, err @@ -64,6 +65,8 @@ func 
(sc *ShardConsumer) getShardIterator(shard *shardStatus) (*string, error) { // If there isn't any checkpoint for the shard, use the configuration value. if shard.Checkpoint == "" { initPos := sc.kclConfig.InitialPositionInStream + log.Debugf("No checkpoint recorded for shard: %v, starting with: %v", shard.ID, + aws.StringValue(config.InitalPositionInStreamToShardIteratorType(initPos))) shardIterArgs := &kinesis.GetShardIteratorInput{ ShardId: &shard.ID, ShardIteratorType: config.InitalPositionInStreamToShardIteratorType(initPos), @@ -76,6 +79,7 @@ func (sc *ShardConsumer) getShardIterator(shard *shardStatus) (*string, error) { return iterResp.ShardIterator, nil } + log.Debugf("Start shard: %v at checkpoint: %v", shard.ID, shard.Checkpoint) shardIterArgs := &kinesis.GetShardIteratorInput{ ShardId: &shard.ID, ShardIteratorType: aws.String("AFTER_SEQUENCE_NUMBER"), @@ -98,6 +102,13 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { return err } + // Start processing events and notify record processor on shard and starting checkpoint + input := &kcl.InitializationInput{ + ShardId: shard.ID, + ExtendedSequenceNumber: &kcl.ExtendedSequenceNumber{SequenceNumber: aws.String(shard.Checkpoint)}, + } + sc.recordProcessor.Initialize(input) + recordCheckpointer := NewRecordProcessorCheckpoint(shard, sc.checkpointer) var retriedErrors int @@ -147,7 +158,7 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { recordLength := len(input.Records) recordBytes := int64(0) - log.Debugf("Received %d records", recordLength) + log.Debugf("Received %d records, MillisBehindLatest: %v", recordLength, input.MillisBehindLatest) for _, r := range getResp.Records { recordBytes += int64(len(r.Data)) @@ -164,9 +175,6 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { sc.mService.RecordProcessRecordsTime(shard.ID, float64(processedRecordsTiming)) } - // Idle between each read, the user is responsible for checkpoint the progress - 
time.Sleep(time.Duration(sc.kclConfig.IdleTimeBetweenReadsInMillis) * time.Millisecond) - sc.mService.IncrRecordsProcessed(shard.ID, recordLength) sc.mService.IncrBytesProcessed(shard.ID, recordBytes) sc.mService.MillisBehindLatest(shard.ID, float64(*getResp.MillisBehindLatest)) @@ -175,6 +183,13 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { getRecordsTime := time.Since(getRecordsStartTime) / 1000000 sc.mService.RecordGetRecordsTime(shard.ID, float64(getRecordsTime)) + // Idle between each read, the user is responsible for checkpoint the progress + // This value is only used when no records are returned; if records are returned, it should immediately + // retrieve the next set of records. + if recordLength == 0 && aws.Int64Value(getResp.MillisBehindLatest) < int64(sc.kclConfig.IdleTimeBetweenReadsInMillis) { + time.Sleep(time.Duration(sc.kclConfig.IdleTimeBetweenReadsInMillis) * time.Millisecond) + } + // The shard has been closed, so no new records can be read from it if getResp.NextShardIterator == nil { log.Infof("Shard %s closed", shard.ID) diff --git a/src/clientlibrary/worker/worker.go b/src/clientlibrary/worker/worker.go index 39ed3d1..7ecb0a4 100644 --- a/src/clientlibrary/worker/worker.go +++ b/src/clientlibrary/worker/worker.go @@ -268,6 +268,7 @@ func (w *Worker) getShardIDs(startShardID string) error { var lastShardID string for _, s := range streamDesc.StreamDescription.Shards { + // found new shard if _, ok := w.shardStatus[*s.ShardId]; !ok { log.Debugf("Found shard with id %s", *s.ShardId) w.shardStatus[*s.ShardId] = &shardStatus{ diff --git a/src/clientlibrary/worker/worker_test.go b/src/clientlibrary/worker/worker_test.go index ebcbc3d..88b3dac 100644 --- a/src/clientlibrary/worker/worker_test.go +++ b/src/clientlibrary/worker/worker_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/aws/aws-sdk-go/aws" log "github.com/sirupsen/logrus" cfg "clientlibrary/config" @@ -28,9 +29,10 @@ func TestWorker(t *testing.T) { 
defer os.Unsetenv("AWS_SECRET_ACCESS_KEY") kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). - WithMaxRecords(40). + WithMaxRecords(10). WithMaxLeasesForWorker(1). - WithShardSyncIntervalMillis(5000) + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000) log.SetOutput(os.Stdout) log.SetLevel(log.DebugLevel) @@ -80,7 +82,7 @@ type dumpRecordProcessor struct { } func (dd *dumpRecordProcessor) Initialize(input *kc.InitializationInput) { - dd.t.Logf("sharId=%v", input.ShardId) + dd.t.Logf("Processing SharId: %v at checkpoint: %v", input.ShardId, aws.StringValue(input.ExtendedSequenceNumber.SequenceNumber)) } func (dd *dumpRecordProcessor) ProcessRecords(input *kc.ProcessRecordsInput) { @@ -96,13 +98,12 @@ func (dd *dumpRecordProcessor) ProcessRecords(input *kc.ProcessRecordsInput) { assert.Equal(dd.t, specstr, string(v.Data)) } - dd.t.Logf("Checkpoint it and MillisBehindLatest = %v", input.MillisBehindLatest) // checkpoint it after processing this batch lastRecordSequenceNubmer := input.Records[len(input.Records)-1].SequenceNumber + dd.t.Logf("Checkpoint progress at: %v, MillisBehindLatest = %v", lastRecordSequenceNubmer, input.MillisBehindLatest) input.Checkpointer.Checkpoint(lastRecordSequenceNubmer) } func (dd *dumpRecordProcessor) Shutdown(input *kc.ShutdownInput) { - dd.t.Logf("Shutdown Reason = %v", input.ShutdownReason) - + dd.t.Logf("Shutdown Reason: %v", aws.StringValue(kc.ShutdownReasonMessage(input.ShutdownReason))) } From 869a8e4275caf4cc00fbb75eb4d3c504ee9e6c84 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Wed, 18 Apr 2018 20:09:52 -0700 Subject: [PATCH 09/90] KCL: Add support for handling shard split Add support for handling child/parent shard. When processing child shard, it has to wait until parent shard finished before processing itself. 
Change-Id: I8bbf104c22ae93409d856be9c6829988c1b2d7eb --- src/clientlibrary/worker/checkpointer.go | 13 ++++++ .../worker/record-processor-checkpointer.go | 9 ++++- src/clientlibrary/worker/shard-consumer.go | 31 ++++++++++++++ src/clientlibrary/worker/worker.go | 40 ++++++++++++++----- 4 files changed, 81 insertions(+), 12 deletions(-) diff --git a/src/clientlibrary/worker/checkpointer.go b/src/clientlibrary/worker/checkpointer.go index 39584db..4994e63 100644 --- a/src/clientlibrary/worker/checkpointer.go +++ b/src/clientlibrary/worker/checkpointer.go @@ -19,6 +19,10 @@ const ( LEASE_OWNER_KEY = "AssignedTo" LEASE_TIMEOUT_KEY = "LeaseTimeout" CHECKPOINT_SEQUENCE_NUMBER_KEY = "Checkpoint" + PARENT_SHARD_ID_KEY = "ParentShardId" + + // We've completely processed all records in this shard. + SHARD_END = "SHARD_END" // ErrLeaseNotAquired is returned when we failed to get a lock on the shard ErrLeaseNotAquired = "Lease is already held by another node" @@ -124,6 +128,10 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *shardStatus, newAssignTo s }, } + if len(shard.ParentShardId) > 0 { + marshalledCheckpoint[PARENT_SHARD_ID_KEY] = &dynamodb.AttributeValue{S: &shard.ParentShardId} + } + if shard.Checkpoint != "" { marshalledCheckpoint[CHECKPOINT_SEQUENCE_NUMBER_KEY] = &dynamodb.AttributeValue{ S: &shard.Checkpoint, @@ -165,6 +173,11 @@ func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard *shardStatus) err S: &leaseTimeout, }, } + + if len(shard.ParentShardId) > 0 { + marshalledCheckpoint[PARENT_SHARD_ID_KEY] = &dynamodb.AttributeValue{S: &shard.ParentShardId} + } + return checkpointer.saveItem(marshalledCheckpoint) } diff --git a/src/clientlibrary/worker/record-processor-checkpointer.go b/src/clientlibrary/worker/record-processor-checkpointer.go index 4f624f2..69a406e 100644 --- a/src/clientlibrary/worker/record-processor-checkpointer.go +++ b/src/clientlibrary/worker/record-processor-checkpointer.go @@ -45,7 +45,14 @@ func (pc *PreparedCheckpointer) 
Checkpoint() error { func (rc *RecordProcessorCheckpointer) Checkpoint(sequenceNumber *string) error { rc.shard.mux.Lock() - rc.shard.Checkpoint = aws.StringValue(sequenceNumber) + + // checkpoint the last sequence of a closed shard + if rc.shard.EndingSequenceNumber == aws.StringValue(sequenceNumber) { + rc.shard.Checkpoint = SHARD_END + } else { + rc.shard.Checkpoint = aws.StringValue(sequenceNumber) + } + rc.shard.mux.Unlock() return rc.checkpoint.CheckpointSequence(rc.shard) } diff --git a/src/clientlibrary/worker/shard-consumer.go b/src/clientlibrary/worker/shard-consumer.go index 012eff9..9b9f175 100644 --- a/src/clientlibrary/worker/shard-consumer.go +++ b/src/clientlibrary/worker/shard-consumer.go @@ -96,6 +96,12 @@ func (sc *ShardConsumer) getShardIterator(shard *shardStatus) (*string, error) { func (sc *ShardConsumer) getRecords(shard *shardStatus) error { defer sc.waitGroup.Done() + // If the shard is child shard, need to wait until the parent finished. + if err := sc.waitOnParentShard(shard); err != nil { + log.Errorf("Error in waiting for parent shard: %v to finish. Error: %+v", shard.ParentShardId, err) + return err + } + shardIterator, err := sc.getShardIterator(shard) if err != nil { log.Errorf("Unable to get shard iterator for %s: %v", shard.ID, err) @@ -208,3 +214,28 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { } } } + +// Need to wait until the parent shard finished +func (sc *ShardConsumer) waitOnParentShard(shard *shardStatus) error { + if len(shard.ParentShardId) == 0 { + return nil + } + + pshard := &shardStatus{ + ID: shard.ParentShardId, + mux: &sync.Mutex{}, + } + + for { + if err := sc.checkpointer.FetchCheckpoint(pshard); err != nil { + return err + } + + // Parent shard is finished. 
+ if pshard.Checkpoint == SHARD_END { + return nil + } + + time.Sleep(time.Duration(sc.kclConfig.ParentShardPollIntervalMillis) * time.Millisecond) + } +} diff --git a/src/clientlibrary/worker/worker.go b/src/clientlibrary/worker/worker.go index 7ecb0a4..d8402ec 100644 --- a/src/clientlibrary/worker/worker.go +++ b/src/clientlibrary/worker/worker.go @@ -22,11 +22,16 @@ import ( ) type shardStatus struct { - ID string - Checkpoint string - AssignedTo string - mux *sync.Mutex - LeaseTimeout time.Time + ID string + ParentShardId string + Checkpoint string + AssignedTo string + mux *sync.Mutex + LeaseTimeout time.Time + // Shard Range + StartingSequenceNumber string + // child shard doesn't have end sequence number + EndingSequenceNumber string } func (ss *shardStatus) getLeaseOwner() string { @@ -214,19 +219,29 @@ func (w *Worker) eventLoop() { err := w.checkpointer.FetchCheckpoint(shard) if err != nil { + // checkpoint may not existed yet if not an error condition. if err != ErrSequenceIDNotFound { - log.Fatal(err) + log.Error(err) + // move on to next shard + continue } } + // The shard is closed and we have processed all records + if shard.Checkpoint == SHARD_END { + continue + } + err = w.checkpointer.GetLease(shard, w.workerID) if err != nil { - if err.Error() == ErrLeaseNotAquired { - continue + // cannot get lease on the shard + if err.Error() != ErrLeaseNotAquired { + log.Error(err) } - log.Fatal(err) + continue } + // log metrics on got lease w.mService.LeaseGained(shard.ID) log.Infof("Start Shard Consumer for shard: %v", shard.ID) @@ -272,8 +287,11 @@ func (w *Worker) getShardIDs(startShardID string) error { if _, ok := w.shardStatus[*s.ShardId]; !ok { log.Debugf("Found shard with id %s", *s.ShardId) w.shardStatus[*s.ShardId] = &shardStatus{ - ID: *s.ShardId, - mux: &sync.Mutex{}, + ID: *s.ShardId, + ParentShardId: aws.StringValue(s.ParentShardId), + mux: &sync.Mutex{}, + StartingSequenceNumber: aws.StringValue(s.SequenceNumberRange.StartingSequenceNumber), 
+ EndingSequenceNumber: aws.StringValue(s.SequenceNumberRange.EndingSequenceNumber), } } lastShardID = *s.ShardId From 9d1993547f06868e93b821af13008fb450b2ee86 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Thu, 19 Apr 2018 11:53:12 -0700 Subject: [PATCH 10/90] KCL: Ignore Lint error on const go language doesn't like all-caps on const. Since KCL is mainly from Amazon's KCL, we'd like the constant to have exactly the same name as Amazon's KCL. Therefore, skip the lint check. Change-Id: Ib8a2f52a8f4b44d814eda264f62fdcd53cccc2a7 --- support/scripts/check.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/support/scripts/check.sh b/support/scripts/check.sh index fbab703..539bdf9 100755 --- a/support/scripts/check.sh +++ b/support/scripts/check.sh @@ -15,6 +15,7 @@ lint() { gometalinter \ --exclude=_mock.go \ --disable=gotype \ + --disable=golint \ --vendor \ --skip=test \ --fast \ From 2fea884212b2b179455c71043313a6c61370a918 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Fri, 20 Apr 2018 08:30:24 -0700 Subject: [PATCH 11/90] KCL: Enable Metrics This change enables metrics reporting and fixes a few bugs in metrics reporting. The current metrics reporting is quite limited. Will add more metrics in next cr. Tested with both prometheus and cloudwatch.
Jira CNA-702 Change-Id: I678b3f8a372d83f7b8adc419133c14cd10884f61 --- src/clientlibrary/metrics/cloudwatch.go | 74 ++++++++++++---------- src/clientlibrary/metrics/interfaces.go | 6 ++ src/clientlibrary/metrics/prometheus.go | 3 + src/clientlibrary/worker/shard-consumer.go | 2 + src/clientlibrary/worker/worker_test.go | 52 ++++++++++++++- 5 files changed, 104 insertions(+), 33 deletions(-) diff --git a/src/clientlibrary/metrics/cloudwatch.go b/src/clientlibrary/metrics/cloudwatch.go index f5a76d6..385438b 100644 --- a/src/clientlibrary/metrics/cloudwatch.go +++ b/src/clientlibrary/metrics/cloudwatch.go @@ -15,8 +15,8 @@ type CloudWatchMonitoringService struct { Namespace string KinesisStream string WorkerID string - // What granularity we should send metrics to CW at. Note setting this to 1 will cost quite a bit of money - // At the time of writing (March 2018) about US$200 per month + Region string + // how frequently to send data to cloudwatch ResolutionSec int svc cloudwatchiface.CloudWatchAPI shardMetrics map[string]*cloudWatchMetrics @@ -34,75 +34,82 @@ type cloudWatchMetrics struct { } func (cw *CloudWatchMonitoringService) Init() error { + // default to 1 min resolution if cw.ResolutionSec == 0 { cw.ResolutionSec = 60 } - session, err := session.NewSessionWithOptions( - session.Options{ - SharedConfigState: session.SharedConfigEnable, - }, - ) - if err != nil { - return err - } - - cw.svc = cloudwatch.New(session) + s := session.New(&aws.Config{Region: aws.String(cw.Region)}) + cw.svc = cloudwatch.New(s) cw.shardMetrics = make(map[string]*cloudWatchMetrics) + return nil } +// Start daemon to flush metrics periodically func (cw *CloudWatchMonitoringService) flushDaemon() { previousFlushTime := time.Now() resolutionDuration := time.Duration(cw.ResolutionSec) * time.Second for { time.Sleep(resolutionDuration - time.Now().Sub(previousFlushTime)) - err := cw.flush() + err := cw.Flush() if err != nil { - log.Errorln("Error sending metrics to CloudWatch", err) + 
log.Errorf("Error sending metrics to CloudWatch. %+v", err) } previousFlushTime = time.Now() } } -func (cw *CloudWatchMonitoringService) flush() error { +func (cw *CloudWatchMonitoringService) Flush() error { + // publish per shard metrics for shard, metric := range cw.shardMetrics { metric.Lock() defaultDimensions := []*cloudwatch.Dimension{ - &cloudwatch.Dimension{ - Name: aws.String("shard"), + { + Name: aws.String("Shard"), Value: &shard, }, - &cloudwatch.Dimension{ + { Name: aws.String("KinesisStreamName"), Value: &cw.KinesisStream, }, } - leaseDimensions := make([]*cloudwatch.Dimension, len(defaultDimensions)) - copy(defaultDimensions, leaseDimensions) - leaseDimensions = append(leaseDimensions, &cloudwatch.Dimension{ - Name: aws.String("WorkerID"), - Value: &cw.WorkerID, - }) + + leaseDimensions := []*cloudwatch.Dimension{ + { + Name: aws.String("Shard"), + Value: &shard, + }, + { + Name: aws.String("KinesisStreamName"), + Value: &cw.KinesisStream, + }, + { + Name: aws.String("WorkerID"), + Value: &cw.WorkerID, + }, + } metricTimestamp := time.Now() + + // Publish metrics data to cloud watch _, err := cw.svc.PutMetricData(&cloudwatch.PutMetricDataInput{ Namespace: aws.String(cw.Namespace), MetricData: []*cloudwatch.MetricDatum{ - &cloudwatch.MetricDatum{ + { Dimensions: defaultDimensions, MetricName: aws.String("RecordsProcessed"), Unit: aws.String("Count"), Timestamp: &metricTimestamp, Value: aws.Float64(float64(metric.processedRecords)), }, - &cloudwatch.MetricDatum{ + { Dimensions: defaultDimensions, MetricName: aws.String("DataBytesProcessed"), - Unit: aws.String("Byte"), + Unit: aws.String("Bytes"), Timestamp: &metricTimestamp, Value: aws.Float64(float64(metric.processedBytes)), }, - &cloudwatch.MetricDatum{ + { Dimensions: defaultDimensions, MetricName: aws.String("MillisBehindLatest"), Unit: aws.String("Milliseconds"), @@ -114,7 +121,7 @@ func (cw *CloudWatchMonitoringService) flush() error { Minimum: minFloat64(metric.behindLatestMillis), }, }, - 
&cloudwatch.MetricDatum{ + { Dimensions: defaultDimensions, MetricName: aws.String("KinesisDataFetcher.getRecords.Time"), Unit: aws.String("Milliseconds"), @@ -126,7 +133,7 @@ func (cw *CloudWatchMonitoringService) flush() error { Minimum: minFloat64(metric.getRecordsTime), }, }, - &cloudwatch.MetricDatum{ + { Dimensions: defaultDimensions, MetricName: aws.String("RecordProcessor.processRecords.Time"), Unit: aws.String("Milliseconds"), @@ -138,14 +145,14 @@ func (cw *CloudWatchMonitoringService) flush() error { Minimum: minFloat64(metric.processRecordsTime), }, }, - &cloudwatch.MetricDatum{ + { Dimensions: leaseDimensions, MetricName: aws.String("RenewLease.Success"), Unit: aws.String("Count"), Timestamp: &metricTimestamp, Value: aws.Float64(float64(metric.leaseRenewals)), }, - &cloudwatch.MetricDatum{ + { Dimensions: leaseDimensions, MetricName: aws.String("CurrentLeases"), Unit: aws.String("Count"), @@ -161,7 +168,10 @@ func (cw *CloudWatchMonitoringService) flush() error { metric.leaseRenewals = 0 metric.getRecordsTime = []float64{} metric.processRecordsTime = []float64{} + } else { + log.Errorf("Error in publishing cloudwatch metrics. Error: %+v", err) } + metric.Unlock() return err } diff --git a/src/clientlibrary/metrics/interfaces.go b/src/clientlibrary/metrics/interfaces.go index 141e644..57dfc11 100644 --- a/src/clientlibrary/metrics/interfaces.go +++ b/src/clientlibrary/metrics/interfaces.go @@ -7,6 +7,7 @@ import ( // MonitoringConfiguration allows you to configure how record processing metrics are exposed type MonitoringConfiguration struct { MonitoringService string // Type of monitoring to expose. 
Supported types are "prometheus" + Region string Prometheus PrometheusMonitoringService CloudWatch CloudWatchMonitoringService service MonitoringService @@ -22,6 +23,7 @@ type MonitoringService interface { LeaseRenewed(string) RecordGetRecordsTime(string, float64) RecordProcessRecordsTime(string, float64) + Flush() error } func (m *MonitoringConfiguration) Init(nameSpace, streamName string, workerID string) error { @@ -35,10 +37,13 @@ func (m *MonitoringConfiguration) Init(nameSpace, streamName string, workerID st m.Prometheus.Namespace = nameSpace m.Prometheus.KinesisStream = streamName m.Prometheus.WorkerID = workerID + m.Prometheus.Region = m.Region m.service = &m.Prometheus case "cloudwatch": + m.CloudWatch.Namespace = nameSpace m.CloudWatch.KinesisStream = streamName m.CloudWatch.WorkerID = workerID + m.CloudWatch.Region = m.Region m.service = &m.CloudWatch default: return fmt.Errorf("Invalid monitoring service type %s", m.MonitoringService) @@ -64,3 +69,4 @@ func (n *noopMonitoringService) LeaseLost(shard string) func (n *noopMonitoringService) LeaseRenewed(shard string) {} func (n *noopMonitoringService) RecordGetRecordsTime(shard string, time float64) {} func (n *noopMonitoringService) RecordProcessRecordsTime(shard string, time float64) {} +func (n *noopMonitoringService) Flush() error { return nil } diff --git a/src/clientlibrary/metrics/prometheus.go b/src/clientlibrary/metrics/prometheus.go index 4ec13fd..fc9ab28 100644 --- a/src/clientlibrary/metrics/prometheus.go +++ b/src/clientlibrary/metrics/prometheus.go @@ -14,6 +14,7 @@ type PrometheusMonitoringService struct { Namespace string KinesisStream string WorkerID string + Region string processedRecords *prometheus.CounterVec processedBytes *prometheus.CounterVec behindLatestMillis *prometheus.GaugeVec @@ -111,3 +112,5 @@ func (p *PrometheusMonitoringService) RecordGetRecordsTime(shard string, time fl func (p *PrometheusMonitoringService) RecordProcessRecordsTime(shard string, time float64) { 
p.processRecordsTime.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Observe(time) } + +func (p *PrometheusMonitoringService) Flush() error { return nil } diff --git a/src/clientlibrary/worker/shard-consumer.go b/src/clientlibrary/worker/shard-consumer.go index 9b9f175..07a3dc4 100644 --- a/src/clientlibrary/worker/shard-consumer.go +++ b/src/clientlibrary/worker/shard-consumer.go @@ -209,6 +209,8 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { case <-*sc.stop: shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.REQUESTED, Checkpointer: recordCheckpointer} sc.recordProcessor.Shutdown(shutdownInput) + // flush out the metrics data + sc.mService.Flush() return nil case <-time.After(1 * time.Nanosecond): } diff --git a/src/clientlibrary/worker/worker_test.go b/src/clientlibrary/worker/worker_test.go index 88b3dac..eb33bfe 100644 --- a/src/clientlibrary/worker/worker_test.go +++ b/src/clientlibrary/worker/worker_test.go @@ -1,15 +1,18 @@ package worker import ( + "net/http" "os" "testing" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/prometheus/common/expfmt" log "github.com/sirupsen/logrus" cfg "clientlibrary/config" kc "clientlibrary/interfaces" + "clientlibrary/metrics" "clientlibrary/utils" "github.com/stretchr/testify/assert" ) @@ -21,6 +24,7 @@ const ( ) const specstr = `{"name":"kube-qQyhk","networking":{"containerNetworkCidr":"10.2.0.0/16"},"orgName":"BVT-Org-cLQch","projectName":"project-tDSJd","serviceLevel":"DEVELOPER","size":{"count":1},"version":"1.8.1-4"}` +const metricsSystem = "cloudwatch" func TestWorker(t *testing.T) { os.Setenv("AWS_ACCESS_KEY_ID", "your aws access key id") @@ -40,7 +44,10 @@ func TestWorker(t *testing.T) { assert.Equal(t, regionName, kclConfig.RegionName) assert.Equal(t, streamName, kclConfig.StreamName) - worker := NewWorker(recordProcessorFactory(t), kclConfig, nil) + // configure cloudwatch as metrics system + metricsConfig := getMetricsConfig(metricsSystem) + + worker := 
NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig) assert.Equal(t, regionName, worker.regionName) assert.Equal(t, streamName, worker.streamName) @@ -56,10 +63,53 @@ func TestWorker(t *testing.T) { } } + // wait a few seconds before shutdown processing time.Sleep(10 * time.Second) + + if metricsConfig != nil && metricsConfig.MonitoringService == "prometheus" { + res, err := http.Get("http://localhost:8080/metrics") + if err != nil { + t.Fatalf("Error scraping Prometheus endpoint %s", err) + } + + var parser expfmt.TextParser + parsed, err := parser.TextToMetricFamilies(res.Body) + res.Body.Close() + if err != nil { + t.Errorf("Error reading monitoring response %s", err) + } + t.Logf("Prometheus: %+v", parsed) + + } + worker.Shutdown() } +// configure different metrics system +func getMetricsConfig(service string) *metrics.MonitoringConfiguration { + if service == "cloudwatch" { + return &metrics.MonitoringConfiguration{ + MonitoringService: "cloudwatch", + Region: regionName, + CloudWatch: metrics.CloudWatchMonitoringService{ + ResolutionSec: 1, + }, + } + } + + if service == "prometheus" { + return &metrics.MonitoringConfiguration{ + MonitoringService: "prometheus", + Region: regionName, + Prometheus: metrics.PrometheusMonitoringService{ + ListenAddress: ":8080", + }, + } + } + + return nil +} + // Record processor factory is used to create RecordProcessor func recordProcessorFactory(t *testing.T) kc.IRecordProcessorFactory { return &dumpRecordProcessorFactory{t: t} From e1071abc80cff2a851977e11b21b55f900385679 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Fri, 20 Apr 2018 21:07:11 -0700 Subject: [PATCH 12/90] KCL: Fix cloudwatch metrics This changes fixed cloudwatch metrics publishing by adding long running go routine to periodically publish cloudwatch metrics. Also, shutdown metrics publishing when KCL is shutdown. Test: Run hmake test and verified cloudwatch metrics has been published via AWS cloudwatch console. 
Jira CNA-702 Change-Id: I78b347cd12939447b0daf93f51acf620d18e2f49 --- src/clientlibrary/metrics/cloudwatch.go | 391 +++++++++++---------- src/clientlibrary/metrics/interfaces.go | 10 +- src/clientlibrary/metrics/prometheus.go | 29 +- src/clientlibrary/worker/shard-consumer.go | 2 - src/clientlibrary/worker/worker.go | 8 +- src/clientlibrary/worker/worker_test.go | 4 +- 6 files changed, 248 insertions(+), 196 deletions(-) diff --git a/src/clientlibrary/metrics/cloudwatch.go b/src/clientlibrary/metrics/cloudwatch.go index 385438b..6724e07 100644 --- a/src/clientlibrary/metrics/cloudwatch.go +++ b/src/clientlibrary/metrics/cloudwatch.go @@ -16,10 +16,15 @@ type CloudWatchMonitoringService struct { KinesisStream string WorkerID string Region string - // how frequently to send data to cloudwatch - ResolutionSec int - svc cloudwatchiface.CloudWatchAPI - shardMetrics map[string]*cloudWatchMetrics + + // control how often to pusblish to CloudWatch + MetricsBufferTimeMillis int + MetricsMaxQueueSize int + + stop *chan struct{} + waitGroup *sync.WaitGroup + svc cloudwatchiface.CloudWatchAPI + shardMetrics *sync.Map } type cloudWatchMetrics struct { @@ -34,219 +39,251 @@ type cloudWatchMetrics struct { } func (cw *CloudWatchMonitoringService) Init() error { - // default to 1 min resolution - if cw.ResolutionSec == 0 { - cw.ResolutionSec = 60 - } - s := session.New(&aws.Config{Region: aws.String(cw.Region)}) cw.svc = cloudwatch.New(s) - cw.shardMetrics = make(map[string]*cloudWatchMetrics) + cw.shardMetrics = new(sync.Map) + + stopChan := make(chan struct{}) + cw.stop = &stopChan + wg := sync.WaitGroup{} + cw.waitGroup = &wg return nil } +func (cw *CloudWatchMonitoringService) Start() error { + cw.waitGroup.Add(1) + // entering eventloop for sending metrics to CloudWatch + go cw.eventloop() + return nil +} + +func (cw *CloudWatchMonitoringService) Shutdown() { + log.Info("Shutting down cloudwatch metrics system...") + close(*cw.stop) + cw.waitGroup.Wait() + 
log.Info("Cloudwatch metrics system has been shutdown.") +} + // Start daemon to flush metrics periodically -func (cw *CloudWatchMonitoringService) flushDaemon() { - previousFlushTime := time.Now() - resolutionDuration := time.Duration(cw.ResolutionSec) * time.Second +func (cw *CloudWatchMonitoringService) eventloop() { + defer cw.waitGroup.Done() + for { - time.Sleep(resolutionDuration - time.Now().Sub(previousFlushTime)) - err := cw.Flush() + err := cw.flush() if err != nil { log.Errorf("Error sending metrics to CloudWatch. %+v", err) } - previousFlushTime = time.Now() + + select { + case <-*cw.stop: + log.Info("Shutting down monitoring system") + cw.flush() + return + case <-time.After(time.Duration(cw.MetricsBufferTimeMillis) * time.Millisecond): + } } } -func (cw *CloudWatchMonitoringService) Flush() error { - // publish per shard metrics - for shard, metric := range cw.shardMetrics { - metric.Lock() - defaultDimensions := []*cloudwatch.Dimension{ - { - Name: aws.String("Shard"), - Value: &shard, - }, - { - Name: aws.String("KinesisStreamName"), - Value: &cw.KinesisStream, - }, - } - - leaseDimensions := []*cloudwatch.Dimension{ - { - Name: aws.String("Shard"), - Value: &shard, - }, - { - Name: aws.String("KinesisStreamName"), - Value: &cw.KinesisStream, - }, - { - Name: aws.String("WorkerID"), - Value: &cw.WorkerID, - }, - } - metricTimestamp := time.Now() - - // Publish metrics data to cloud watch - _, err := cw.svc.PutMetricData(&cloudwatch.PutMetricDataInput{ - Namespace: aws.String(cw.Namespace), - MetricData: []*cloudwatch.MetricDatum{ - { - Dimensions: defaultDimensions, - MetricName: aws.String("RecordsProcessed"), - Unit: aws.String("Count"), - Timestamp: &metricTimestamp, - Value: aws.Float64(float64(metric.processedRecords)), - }, - { - Dimensions: defaultDimensions, - MetricName: aws.String("DataBytesProcessed"), - Unit: aws.String("Bytes"), - Timestamp: &metricTimestamp, - Value: aws.Float64(float64(metric.processedBytes)), - }, - { - Dimensions: 
defaultDimensions, - MetricName: aws.String("MillisBehindLatest"), - Unit: aws.String("Milliseconds"), - Timestamp: &metricTimestamp, - StatisticValues: &cloudwatch.StatisticSet{ - SampleCount: aws.Float64(float64(len(metric.behindLatestMillis))), - Sum: sumFloat64(metric.behindLatestMillis), - Maximum: maxFloat64(metric.behindLatestMillis), - Minimum: minFloat64(metric.behindLatestMillis), - }, - }, - { - Dimensions: defaultDimensions, - MetricName: aws.String("KinesisDataFetcher.getRecords.Time"), - Unit: aws.String("Milliseconds"), - Timestamp: &metricTimestamp, - StatisticValues: &cloudwatch.StatisticSet{ - SampleCount: aws.Float64(float64(len(metric.getRecordsTime))), - Sum: sumFloat64(metric.getRecordsTime), - Maximum: maxFloat64(metric.getRecordsTime), - Minimum: minFloat64(metric.getRecordsTime), - }, - }, - { - Dimensions: defaultDimensions, - MetricName: aws.String("RecordProcessor.processRecords.Time"), - Unit: aws.String("Milliseconds"), - Timestamp: &metricTimestamp, - StatisticValues: &cloudwatch.StatisticSet{ - SampleCount: aws.Float64(float64(len(metric.processRecordsTime))), - Sum: sumFloat64(metric.processRecordsTime), - Maximum: maxFloat64(metric.processRecordsTime), - Minimum: minFloat64(metric.processRecordsTime), - }, - }, - { - Dimensions: leaseDimensions, - MetricName: aws.String("RenewLease.Success"), - Unit: aws.String("Count"), - Timestamp: &metricTimestamp, - Value: aws.Float64(float64(metric.leaseRenewals)), - }, - { - Dimensions: leaseDimensions, - MetricName: aws.String("CurrentLeases"), - Unit: aws.String("Count"), - Timestamp: &metricTimestamp, - Value: aws.Float64(float64(metric.leasesHeld)), - }, - }, - }) - if err == nil { - metric.processedRecords = 0 - metric.processedBytes = 0 - metric.behindLatestMillis = []float64{} - metric.leaseRenewals = 0 - metric.getRecordsTime = []float64{} - metric.processRecordsTime = []float64{} - } else { - log.Errorf("Error in publishing cloudwatch metrics. 
Error: %+v", err) - } - - metric.Unlock() - return err +func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWatchMetrics) bool { + metric.Lock() + defaultDimensions := []*cloudwatch.Dimension{ + { + Name: aws.String("Shard"), + Value: &shard, + }, + { + Name: aws.String("KinesisStreamName"), + Value: &cw.KinesisStream, + }, } + + leaseDimensions := []*cloudwatch.Dimension{ + { + Name: aws.String("Shard"), + Value: &shard, + }, + { + Name: aws.String("KinesisStreamName"), + Value: &cw.KinesisStream, + }, + { + Name: aws.String("WorkerID"), + Value: &cw.WorkerID, + }, + } + metricTimestamp := time.Now() + + data := []*cloudwatch.MetricDatum{ + { + Dimensions: defaultDimensions, + MetricName: aws.String("RecordsProcessed"), + Unit: aws.String("Count"), + Timestamp: &metricTimestamp, + Value: aws.Float64(float64(metric.processedRecords)), + }, + { + Dimensions: defaultDimensions, + MetricName: aws.String("DataBytesProcessed"), + Unit: aws.String("Bytes"), + Timestamp: &metricTimestamp, + Value: aws.Float64(float64(metric.processedBytes)), + }, + { + Dimensions: leaseDimensions, + MetricName: aws.String("RenewLease.Success"), + Unit: aws.String("Count"), + Timestamp: &metricTimestamp, + Value: aws.Float64(float64(metric.leaseRenewals)), + }, + { + Dimensions: leaseDimensions, + MetricName: aws.String("CurrentLeases"), + Unit: aws.String("Count"), + Timestamp: &metricTimestamp, + Value: aws.Float64(float64(metric.leasesHeld)), + }, + } + + if len(metric.behindLatestMillis) > 0 { + data = append(data, &cloudwatch.MetricDatum{ + Dimensions: defaultDimensions, + MetricName: aws.String("MillisBehindLatest"), + Unit: aws.String("Milliseconds"), + Timestamp: &metricTimestamp, + StatisticValues: &cloudwatch.StatisticSet{ + SampleCount: aws.Float64(float64(len(metric.behindLatestMillis))), + Sum: sumFloat64(metric.behindLatestMillis), + Maximum: maxFloat64(metric.behindLatestMillis), + Minimum: minFloat64(metric.behindLatestMillis), + }}) + } + + if 
len(metric.getRecordsTime) > 0 { + data = append(data, &cloudwatch.MetricDatum{ + Dimensions: defaultDimensions, + MetricName: aws.String("KinesisDataFetcher.getRecords.Time"), + Unit: aws.String("Milliseconds"), + Timestamp: &metricTimestamp, + StatisticValues: &cloudwatch.StatisticSet{ + SampleCount: aws.Float64(float64(len(metric.getRecordsTime))), + Sum: sumFloat64(metric.getRecordsTime), + Maximum: maxFloat64(metric.getRecordsTime), + Minimum: minFloat64(metric.getRecordsTime), + }}) + } + + if len(metric.processRecordsTime) > 0 { + data = append(data, &cloudwatch.MetricDatum{ + Dimensions: defaultDimensions, + MetricName: aws.String("RecordProcessor.processRecords.Time"), + Unit: aws.String("Milliseconds"), + Timestamp: &metricTimestamp, + StatisticValues: &cloudwatch.StatisticSet{ + SampleCount: aws.Float64(float64(len(metric.processRecordsTime))), + Sum: sumFloat64(metric.processRecordsTime), + Maximum: maxFloat64(metric.processRecordsTime), + Minimum: minFloat64(metric.processRecordsTime), + }}) + } + + // Publish metrics data to cloud watch + _, err := cw.svc.PutMetricData(&cloudwatch.PutMetricDataInput{ + Namespace: aws.String(cw.Namespace), + MetricData: data, + }) + + if err == nil { + metric.processedRecords = 0 + metric.processedBytes = 0 + metric.behindLatestMillis = []float64{} + metric.leaseRenewals = 0 + metric.getRecordsTime = []float64{} + metric.processRecordsTime = []float64{} + } else { + log.Errorf("Error in publishing cloudwatch metrics. Error: %+v", err) + } + + metric.Unlock() + return true +} + +func (cw *CloudWatchMonitoringService) flush() error { + log.Debugf("Flushing metrics data. 
Stream: %s, Worker: %s", cw.KinesisStream, cw.WorkerID) + // publish per shard metrics + cw.shardMetrics.Range(func(k, v interface{}) bool { + shard, metric := k.(string), v.(*cloudWatchMetrics) + return cw.flushShard(shard, metric) + }) + return nil } func (cw *CloudWatchMonitoringService) IncrRecordsProcessed(shard string, count int) { - if _, ok := cw.shardMetrics[shard]; !ok { - cw.shardMetrics[shard] = &cloudWatchMetrics{} - } - cw.shardMetrics[shard].Lock() - defer cw.shardMetrics[shard].Unlock() - cw.shardMetrics[shard].processedRecords += int64(count) + m := cw.getOrCreatePerShardMetrics(shard) + m.Lock() + defer m.Unlock() + m.processedRecords += int64(count) } func (cw *CloudWatchMonitoringService) IncrBytesProcessed(shard string, count int64) { - if _, ok := cw.shardMetrics[shard]; !ok { - cw.shardMetrics[shard] = &cloudWatchMetrics{} - } - cw.shardMetrics[shard].Lock() - defer cw.shardMetrics[shard].Unlock() - cw.shardMetrics[shard].processedBytes += count + m := cw.getOrCreatePerShardMetrics(shard) + m.Lock() + defer m.Unlock() + m.processedBytes += count } func (cw *CloudWatchMonitoringService) MillisBehindLatest(shard string, millSeconds float64) { - if _, ok := cw.shardMetrics[shard]; !ok { - cw.shardMetrics[shard] = &cloudWatchMetrics{} - } - cw.shardMetrics[shard].Lock() - defer cw.shardMetrics[shard].Unlock() - cw.shardMetrics[shard].behindLatestMillis = append(cw.shardMetrics[shard].behindLatestMillis, millSeconds) + m := cw.getOrCreatePerShardMetrics(shard) + m.Lock() + defer m.Unlock() + m.behindLatestMillis = append(m.behindLatestMillis, millSeconds) } func (cw *CloudWatchMonitoringService) LeaseGained(shard string) { - if _, ok := cw.shardMetrics[shard]; !ok { - cw.shardMetrics[shard] = &cloudWatchMetrics{} - } - cw.shardMetrics[shard].Lock() - defer cw.shardMetrics[shard].Unlock() - cw.shardMetrics[shard].leasesHeld++ + m := cw.getOrCreatePerShardMetrics(shard) + m.Lock() + defer m.Unlock() + m.leasesHeld++ } func (cw 
*CloudWatchMonitoringService) LeaseLost(shard string) { - if _, ok := cw.shardMetrics[shard]; !ok { - cw.shardMetrics[shard] = &cloudWatchMetrics{} - } - cw.shardMetrics[shard].Lock() - defer cw.shardMetrics[shard].Unlock() - cw.shardMetrics[shard].leasesHeld-- + m := cw.getOrCreatePerShardMetrics(shard) + m.Lock() + defer m.Unlock() + m.leasesHeld-- } func (cw *CloudWatchMonitoringService) LeaseRenewed(shard string) { - if _, ok := cw.shardMetrics[shard]; !ok { - cw.shardMetrics[shard] = &cloudWatchMetrics{} - } - cw.shardMetrics[shard].Lock() - defer cw.shardMetrics[shard].Unlock() - cw.shardMetrics[shard].leaseRenewals++ + m := cw.getOrCreatePerShardMetrics(shard) + m.Lock() + defer m.Unlock() + m.leaseRenewals++ } func (cw *CloudWatchMonitoringService) RecordGetRecordsTime(shard string, time float64) { - if _, ok := cw.shardMetrics[shard]; !ok { - cw.shardMetrics[shard] = &cloudWatchMetrics{} - } - cw.shardMetrics[shard].Lock() - defer cw.shardMetrics[shard].Unlock() - cw.shardMetrics[shard].getRecordsTime = append(cw.shardMetrics[shard].getRecordsTime, time) + m := cw.getOrCreatePerShardMetrics(shard) + m.Lock() + defer m.Unlock() + m.getRecordsTime = append(m.getRecordsTime, time) } func (cw *CloudWatchMonitoringService) RecordProcessRecordsTime(shard string, time float64) { - if _, ok := cw.shardMetrics[shard]; !ok { - cw.shardMetrics[shard] = &cloudWatchMetrics{} + m := cw.getOrCreatePerShardMetrics(shard) + m.Lock() + defer m.Unlock() + m.processRecordsTime = append(m.processRecordsTime, time) +} + +func (cw *CloudWatchMonitoringService) getOrCreatePerShardMetrics(shard string) *cloudWatchMetrics { + var i interface{} + var ok bool + if i, ok = cw.shardMetrics.Load(shard); !ok { + m := &cloudWatchMetrics{} + cw.shardMetrics.Store(shard, m) + return m } - cw.shardMetrics[shard].Lock() - defer cw.shardMetrics[shard].Unlock() - cw.shardMetrics[shard].processRecordsTime = append(cw.shardMetrics[shard].processRecordsTime, time) + + return i.(*cloudWatchMetrics) 
} func sumFloat64(slice []float64) *float64 { diff --git a/src/clientlibrary/metrics/interfaces.go b/src/clientlibrary/metrics/interfaces.go index 57dfc11..809089e 100644 --- a/src/clientlibrary/metrics/interfaces.go +++ b/src/clientlibrary/metrics/interfaces.go @@ -15,6 +15,7 @@ type MonitoringConfiguration struct { type MonitoringService interface { Init() error + Start() error IncrRecordsProcessed(string, int) IncrBytesProcessed(string, int64) MillisBehindLatest(string, float64) @@ -23,7 +24,7 @@ type MonitoringService interface { LeaseRenewed(string) RecordGetRecordsTime(string, float64) RecordProcessRecordsTime(string, float64) - Flush() error + Shutdown() } func (m *MonitoringConfiguration) Init(nameSpace, streamName string, workerID string) error { @@ -57,9 +58,9 @@ func (m *MonitoringConfiguration) GetMonitoringService() MonitoringService { type noopMonitoringService struct{} -func (n *noopMonitoringService) Init() error { - return nil -} +func (n *noopMonitoringService) Init() error { return nil } +func (n *noopMonitoringService) Start() error { return nil } +func (n *noopMonitoringService) Shutdown() {} func (n *noopMonitoringService) IncrRecordsProcessed(shard string, count int) {} func (n *noopMonitoringService) IncrBytesProcessed(shard string, count int64) {} @@ -69,4 +70,3 @@ func (n *noopMonitoringService) LeaseLost(shard string) func (n *noopMonitoringService) LeaseRenewed(shard string) {} func (n *noopMonitoringService) RecordGetRecordsTime(shard string, time float64) {} func (n *noopMonitoringService) RecordProcessRecordsTime(shard string, time float64) {} -func (n *noopMonitoringService) Flush() error { return nil } diff --git a/src/clientlibrary/metrics/prometheus.go b/src/clientlibrary/metrics/prometheus.go index fc9ab28..bdf3ab0 100644 --- a/src/clientlibrary/metrics/prometheus.go +++ b/src/clientlibrary/metrics/prometheus.go @@ -8,6 +8,9 @@ import ( log "github.com/sirupsen/logrus" ) +// PrometheusMonitoringService to start Prometheus as 
metrics system. +// It might be trick if the service onboarding with KCL also uses Prometheus. +// Therefore, we should start cloudwatch metrics by default instead. type PrometheusMonitoringService struct { ListenAddress string @@ -70,17 +73,25 @@ func (p *PrometheusMonitoringService) Init() error { } } - http.Handle("/metrics", promhttp.Handler()) - go func() { - log.Debugf("Starting Prometheus listener on %s", p.ListenAddress) - err := http.ListenAndServe(p.ListenAddress, nil) - if err != nil { - log.Errorln("Error starting Prometheus metrics endpoint", err) - } - }() return nil } +func (p *PrometheusMonitoringService) Start() error { + http.Handle("/metrics", promhttp.Handler()) + go func() { + log.Infof("Starting Prometheus listener on %s", p.ListenAddress) + err := http.ListenAndServe(p.ListenAddress, nil) + if err != nil { + log.Errorf("Error starting Prometheus metrics endpoint. %+v", err) + } + log.Info("Stopped metrics server") + }() + + return nil +} + +func (p *PrometheusMonitoringService) Shutdown() {} + func (p *PrometheusMonitoringService) IncrRecordsProcessed(shard string, count int) { p.processedRecords.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Add(float64(count)) } @@ -112,5 +123,3 @@ func (p *PrometheusMonitoringService) RecordGetRecordsTime(shard string, time fl func (p *PrometheusMonitoringService) RecordProcessRecordsTime(shard string, time float64) { p.processRecordsTime.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Observe(time) } - -func (p *PrometheusMonitoringService) Flush() error { return nil } diff --git a/src/clientlibrary/worker/shard-consumer.go b/src/clientlibrary/worker/shard-consumer.go index 07a3dc4..9b9f175 100644 --- a/src/clientlibrary/worker/shard-consumer.go +++ b/src/clientlibrary/worker/shard-consumer.go @@ -209,8 +209,6 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { case <-*sc.stop: shutdownInput := &kcl.ShutdownInput{ShutdownReason: 
kcl.REQUESTED, Checkpointer: recordCheckpointer} sc.recordProcessor.Shutdown(shutdownInput) - // flush out the metrics data - sc.mService.Flush() return nil case <-time.After(1 * time.Nanosecond): } diff --git a/src/clientlibrary/worker/worker.go b/src/clientlibrary/worker/worker.go index d8402ec..9a7e27c 100644 --- a/src/clientlibrary/worker/worker.go +++ b/src/clientlibrary/worker/worker.go @@ -106,8 +106,11 @@ func (w *Worker) Start() error { return err } - log.Info("Initialization complete. Starting worker event loop.") + // Start monitoring service + log.Info("Starting monitoring service.") + w.mService.Start() + log.Info("Starting worker event loop.") // entering event loop go w.eventLoop() return nil @@ -120,6 +123,7 @@ func (w *Worker) Shutdown() { close(*w.stop) w.waitGroup.Wait() + w.mService.Shutdown() log.Info("Worker loop is complete. Exiting from worker.") } @@ -170,6 +174,8 @@ func (w *Worker) initialize() error { return err } + log.Info("Initialization complete.") + return nil } diff --git a/src/clientlibrary/worker/worker_test.go b/src/clientlibrary/worker/worker_test.go index eb33bfe..e1dee71 100644 --- a/src/clientlibrary/worker/worker_test.go +++ b/src/clientlibrary/worker/worker_test.go @@ -92,7 +92,9 @@ func getMetricsConfig(service string) *metrics.MonitoringConfiguration { MonitoringService: "cloudwatch", Region: regionName, CloudWatch: metrics.CloudWatchMonitoringService{ - ResolutionSec: 1, + // Those value should come from kclConfig + MetricsBufferTimeMillis: 10000, + MetricsMaxQueueSize: 20, }, } } From 5ef4338a22b617fe3ae69a45d1a2428070421adc Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Sat, 21 Apr 2018 19:58:51 -0700 Subject: [PATCH 13/90] KCL: Update shard sync to remove not existed shard Need to remove shard not longer existed in Kinesis from shardStatus cache. 
Change-Id: I09b4a4c3c6480b8300fa937e6073dcd578156b29 --- src/clientlibrary/worker/worker.go | 32 +++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/src/clientlibrary/worker/worker.go b/src/clientlibrary/worker/worker.go index 9a7e27c..1603ff4 100644 --- a/src/clientlibrary/worker/worker.go +++ b/src/clientlibrary/worker/worker.go @@ -168,12 +168,6 @@ func (w *Worker) initialize() error { wg := sync.WaitGroup{} w.waitGroup = &wg - err = w.getShardIDs("") - if err != nil { - log.Errorf("Error getting Kinesis shards: %s", err) - return err - } - log.Info("Initialization complete.") return nil @@ -199,12 +193,13 @@ func (w *Worker) newShardConsumer(shard *shardStatus) *ShardConsumer { // eventLoop func (w *Worker) eventLoop() { for { - err := w.getShardIDs("") + err := w.syncShard() if err != nil { log.Errorf("Error getting Kinesis shards: %v", err) // Back-off? time.Sleep(500 * time.Millisecond) } + log.Infof("Found %d shards", len(w.shardStatus)) // Count the number of leases hold by this worker @@ -271,7 +266,9 @@ func (w *Worker) eventLoop() { } // List all ACTIVE shard and store them into shardStatus table -func (w *Worker) getShardIDs(startShardID string) error { +// If shard has been removed, need to exclude it from cached shard status. +func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) error { + // The default pagination limit is 100. 
args := &kinesis.DescribeStreamInput{ StreamName: aws.String(w.streamName), } @@ -289,6 +286,8 @@ func (w *Worker) getShardIDs(startShardID string) error { var lastShardID string for _, s := range streamDesc.StreamDescription.Shards { + // record avail shardId from fresh reading from Kinesis + shardInfo[*s.ShardId] = true // found new shard if _, ok := w.shardStatus[*s.ShardId]; !ok { log.Debugf("Found shard with id %s", *s.ShardId) @@ -304,7 +303,7 @@ func (w *Worker) getShardIDs(startShardID string) error { } if *streamDesc.StreamDescription.HasMoreShards { - err := w.getShardIDs(lastShardID) + err := w.getShardIDs(lastShardID, shardInfo) if err != nil { return err } @@ -312,3 +311,18 @@ func (w *Worker) getShardIDs(startShardID string) error { return nil } + +// syncShard to sync the cached shard info with actual shard info from Kinesis +func (w *Worker) syncShard() error { + shardInfo := make(map[string]bool) + err := w.getShardIDs("", shardInfo) + + for _, shard := range w.shardStatus { + // The cached shard no longer existed, remove it. + if _, ok := shardInfo[shard.ID]; !ok { + delete(w.shardStatus, shard.ID) + } + } + + return err +} From 6384d897481cf0523e6af075f5263b24312a3381 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Sun, 22 Apr 2018 07:09:16 -0700 Subject: [PATCH 14/90] KCL: Organize the folder structure Organize the folder structure in order to support imported as submodule for other services. 
Jira CNA-701 Change-Id: I1dda27934642bb8a7755df07dc4a5048449afc86 --- src/leases/dynamoutils/dynamoutils.go | 78 ---- src/leases/impl/kinesis-client-lease.go | 116 ----- src/leases/impl/lease-manager.go | 440 ------------------ src/leases/impl/lease-serializer.go | 184 -------- src/leases/impl/lease.go | 116 ----- src/leases/interfaces/lease-manager.go | 162 ------- src/leases/interfaces/lease-renewer.go | 78 ---- src/leases/interfaces/lease-serializer.go | 86 ---- src/leases/interfaces/lease-taker.go | 28 -- src/leases/interfaces/lease.go | 21 - .../clientlibrary/common/errors.go | 0 .../clientlibrary/config/config.go | 0 .../clientlibrary/config/config_test.go | 0 .../config/initial-stream-pos.go | 0 .../clientlibrary/config/kcl-config.go | 2 +- .../clientlibrary/interfaces/inputs.go | 0 .../record-processor-checkpointer.go | 0 .../interfaces/record-processor.go | 0 .../interfaces/sequence-number.go | 0 .../clientlibrary/metrics/cloudwatch.go | 0 .../clientlibrary/metrics/interfaces.go | 0 .../clientlibrary/metrics/prometheus.go | 0 .../clientlibrary/utils/random.go | 0 .../clientlibrary/utils/uuid.go | 0 .../clientlibrary/worker/checkpointer.go | 2 +- .../worker/record-processor-checkpointer.go | 2 +- .../clientlibrary/worker/shard-consumer.go | 6 +- .../clientlibrary/worker/worker.go | 6 +- .../clientlibrary/worker/worker_test.go | 12 +- 29 files changed, 13 insertions(+), 1326 deletions(-) delete mode 100644 src/leases/dynamoutils/dynamoutils.go delete mode 100644 src/leases/impl/kinesis-client-lease.go delete mode 100644 src/leases/impl/lease-manager.go delete mode 100644 src/leases/impl/lease-serializer.go delete mode 100644 src/leases/impl/lease.go delete mode 100644 src/leases/interfaces/lease-manager.go delete mode 100644 src/leases/interfaces/lease-renewer.go delete mode 100644 src/leases/interfaces/lease-serializer.go delete mode 100644 src/leases/interfaces/lease-taker.go delete mode 100644 src/leases/interfaces/lease.go rename src/{ => 
vmware.com/cascade-kinesis-client}/clientlibrary/common/errors.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/config/config.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/config/config_test.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/config/initial-stream-pos.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/config/kcl-config.go (99%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/interfaces/inputs.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/interfaces/record-processor-checkpointer.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/interfaces/record-processor.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/interfaces/sequence-number.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/metrics/cloudwatch.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/metrics/interfaces.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/metrics/prometheus.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/utils/random.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/utils/uuid.go (100%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/worker/checkpointer.go (99%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/worker/record-processor-checkpointer.go (96%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/worker/shard-consumer.go (97%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/worker/worker.go (97%) rename src/{ => vmware.com/cascade-kinesis-client}/clientlibrary/worker/worker_test.go (93%) diff --git a/src/leases/dynamoutils/dynamoutils.go b/src/leases/dynamoutils/dynamoutils.go deleted file mode 100644 index 8d286be..0000000 --- a/src/leases/dynamoutils/dynamoutils.go +++ /dev/null 
@@ -1,78 +0,0 @@ -package util - -import ( - "strconv" - - "clientlibrary/common" - "github.com/aws/aws-sdk-go/service/dynamodb" -) - -/** - * Some static utility functions used by our LeaseSerializers. - */ - -func CreateAttributeValueFromSS(collectionValue []*string) (*dynamodb.AttributeValue, error) { - if len(collectionValue) == 0 { - return nil, common.IllegalArgumentError.MakeErr().WithDetail("Collection attributeValues cannot be null or empty.") - } - - attrib := &dynamodb.AttributeValue{} - attrib.SetSS(collectionValue) - - return attrib, nil -} - -func CreateAttributeValueFromString(stringValue string) (*dynamodb.AttributeValue, error) { - if len(stringValue) == 0 { - return nil, common.IllegalArgumentError.MakeErr().WithDetail("String attributeValues cannot be null or empty.") - } - - attrib := &dynamodb.AttributeValue{} - attrib.SetS(stringValue) - - return attrib, nil -} - -func CreateAttributeValueFromLong(longValue int64) (*dynamodb.AttributeValue, error) { - attrib := &dynamodb.AttributeValue{} - attrib.SetN(strconv.FormatInt(longValue, 10)) - - return attrib, nil -} - -func SafeGetLong(dynamoRecord map[string]*dynamodb.AttributeValue, key string) int64 { - av := dynamoRecord[key] - - if av == nil || av.N == nil { - return 0 - } - - var val int64 - val, err := strconv.ParseInt(*av.N, 10, 64) - - if err != nil { - return 0 - } - - return val -} - -func SafeGetString(dynamoRecord map[string]*dynamodb.AttributeValue, key string) *string { - av := dynamoRecord[key] - if av == nil { - return nil - } - - return av.S -} - -func SafeGetSS(dynamoRecord map[string]*dynamodb.AttributeValue, key string) []*string { - av := dynamoRecord[key] - - if av == nil { - var emptyslice []*string - return emptyslice - } - - return av.SS -} diff --git a/src/leases/impl/kinesis-client-lease.go b/src/leases/impl/kinesis-client-lease.go deleted file mode 100644 index abe049a..0000000 --- a/src/leases/impl/kinesis-client-lease.go +++ /dev/null @@ -1,116 +0,0 @@ -package impl - 
-import ( - . "clientlibrary/interfaces" -) - -// KinesisClientLease is a Lease subclass containing KinesisClientLibrary related fields for checkpoints. -type KinesisClientLease struct { - checkpoint *ExtendedSequenceNumber - pendingCheckpoint *ExtendedSequenceNumber - ownerSwitchesSinceCheckpoint int64 - parentShardIds *[]string - - // coreLease to hold lease information - // Note: golang doesn't support inheritance, use composition instead. - coreLease Lease -} - -// GetCheckpoint returns most recently application-supplied checkpoint value. During fail over, the new worker -// will pick up after the old worker's last checkpoint. -func (l *KinesisClientLease) GetCheckpoint() *ExtendedSequenceNumber { - return l.checkpoint -} - -// GetPendingCheckpoint returns pending checkpoint, possibly null. -func (l *KinesisClientLease) GetPendingCheckpoint() *ExtendedSequenceNumber { - return l.pendingCheckpoint -} - -// GetOwnerSwitchesSinceCheckpoint counts of distinct lease holders between checkpoints. -func (l *KinesisClientLease) GetOwnerSwitchesSinceCheckpoint() int64 { - return l.ownerSwitchesSinceCheckpoint -} - -// GetParentShardIds returns shardIds that parent this lease. Used for resharding. 
-func (l *KinesisClientLease) GetParentShardIds() *[]string { - return l.parentShardIds -} - -// SetCheckpoint -func (l *KinesisClientLease) SetCheckpoint(checkpoint *ExtendedSequenceNumber) { - l.checkpoint = checkpoint -} - -// SetPendingCheckpoint -func (l *KinesisClientLease) SetPendingCheckpoint(pendingCheckpoint *ExtendedSequenceNumber) { - l.pendingCheckpoint = pendingCheckpoint -} - -// SetOwnerSwitchesSinceCheckpoint -func (l *KinesisClientLease) SetOwnerSwitchesSinceCheckpoint(ownerSwitchesSinceCheckpoint int64) { - l.ownerSwitchesSinceCheckpoint = ownerSwitchesSinceCheckpoint -} - -// SetParentShardIds -func (l *KinesisClientLease) SetParentShardIds(parentShardIds *[]string) { - l.parentShardIds = parentShardIds -} - -// GetLeaseKey retrieves leaseKey - identifies the unit of work associated with this lease. -func (l *KinesisClientLease) GetLeaseKey() string { - return l.coreLease.GetLeaseKey() -} - -// GetLeaseOwner gets current owner of the lease, may be "". -func (l *KinesisClientLease) GetLeaseOwner() string { - return l.coreLease.GetLeaseOwner() -} - -// GetLeaseCounter retrieves leaseCounter which is incremented periodically by the holder of the lease. Used for optimistic locking. -func (l *KinesisClientLease) GetLeaseCounter() int64 { - return l.coreLease.GetLeaseCounter() -} - -// GetConcurrencyToken returns concurrency token -func (l *KinesisClientLease) GetConcurrencyToken() string { - return l.coreLease.GetConcurrencyToken() -} - -// GetLastCounterIncrementNanos returns concurrency token -func (l *KinesisClientLease) GetLastCounterIncrementNanos() int64 { - return l.coreLease.GetLastCounterIncrementNanos() -} - -// SetLeaseKey sets leaseKey - LeaseKey is immutable once set. -func (l *KinesisClientLease) SetLeaseKey(leaseKey string) error { - return l.coreLease.SetLeaseKey(leaseKey) -} - -// SetLeaseOwner set current owner of the lease, may be "". 
-func (l *KinesisClientLease) SetLeaseOwner(leaseOwner string) { - l.coreLease.SetLeaseOwner(leaseOwner) -} - -// SetLeaseCounter sets leaseCounter which is incremented periodically by the holder of the lease. Used for optimistic locking. -func (l *KinesisClientLease) SetLeaseCounter(leaseCounter int64) { - l.coreLease.SetLeaseCounter(leaseCounter) -} - -// SetConcurrencyToken -func (l *KinesisClientLease) SetConcurrencyToken(concurrencyToken string) { - l.coreLease.SetConcurrencyToken(concurrencyToken) -} - -// SetLastCounterIncrementNanos returns concurrency token -func (l *KinesisClientLease) SetLastCounterIncrementNanos(lastCounterIncrementNanos int64) { - l.coreLease.SetLastCounterIncrementNanos(lastCounterIncrementNanos) -} - -// IsExpired to check whether lease expired using -// @param leaseDurationNanos duration of lease in nanoseconds -// @param asOfNanos time in nanoseconds to check expiration as-of -// @return true if lease is expired as-of given time, false otherwise -func (l *KinesisClientLease) IsExpired(leaseDurationNanos, asOfNanos int64) bool { - return l.coreLease.IsExpired(leaseDurationNanos, asOfNanos) -} diff --git a/src/leases/impl/lease-manager.go b/src/leases/impl/lease-manager.go deleted file mode 100644 index a447e11..0000000 --- a/src/leases/impl/lease-manager.go +++ /dev/null @@ -1,440 +0,0 @@ -package impl - -import ( - "log" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" - - . "leases/interfaces" -) - -const ( - // CREATING - The table is being created. - TABLE_CREATING = "CREATING" - - // UPDATING - The table is being updated. - TABLE_UPDATING = "UPDATING" - - // DELETING - The table is being deleted. - TABLE_DELETING = "DELETING" - - // ACTIVE - The table is ready for use. - TABLE_ACTIVE = "ACTIVE" -) - -// LeaseManager is an implementation of ILeaseManager that uses DynamoDB. 
-type LeaseManager struct { - tableName string - dynamoDBClient dynamodbiface.DynamoDBAPI - serializer ILeaseSerializer - consistentReads bool -} - -func NewLeaseManager(tableName string, dynamoDBClient dynamodbiface.DynamoDBAPI, serializer ILeaseSerializer) *LeaseManager { - return &LeaseManager{ - tableName: tableName, - dynamoDBClient: dynamoDBClient, - serializer: serializer, - consistentReads: false, - } -} - -/** - * Creates the table that will store leases. Succeeds if table already exists. - * - * @param readCapacity - * @param writeCapacity - * - * @return true if we created a new table (table didn't exist before) - * - * @error ProvisionedThroughputError if we cannot create the lease table due to per-AWS-account capacity - * restrictions. - * @error LeasingDependencyError if DynamoDB createTable fails in an unexpected way - */ -func (l *LeaseManager) CreateLeaseTableIfNotExists(readCapacity, writeCapacity int64) (bool, error) { - status, _ := l.tableStatus() - - if status != nil { - return false, nil - } - - input := &dynamodb.CreateTableInput{ - AttributeDefinitions: l.serializer.GetAttributeDefinitions(), - KeySchema: l.serializer.GetKeySchema(), - ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ - ReadCapacityUnits: aws.Int64(readCapacity), - WriteCapacityUnits: aws.Int64(writeCapacity), - }, - TableName: aws.String(l.tableName), - } - _, err := l.dynamoDBClient.CreateTable(input) - - if err != nil { - return false, err - } - return true, nil -} - -/** - * @return true if the lease table already exists. - * - * @error LeasingDependencyError if DynamoDB describeTable fails in an unexpected way - */ -func (l *LeaseManager) LeaseTableExists() (bool, error) { - status, _ := l.tableStatus() - - if status != nil || aws.StringValue(status) == TABLE_ACTIVE { - return true, nil - } - return false, nil -} - -/** - * Blocks until the lease table exists by polling leaseTableExists. 
- * - * @param secondsBetweenPolls time to wait between polls in seconds - * @param timeoutSeconds total time to wait in seconds - * - * @return true if table exists, false if timeout was reached - * - * @error LeasingDependencyError if DynamoDB describeTable fails in an unexpected way - */ -func (l *LeaseManager) WaitUntilLeaseTableExists(secondsBetweenPolls, timeoutSeconds int64) (bool, error) { - delay := time.Duration(secondsBetweenPolls) * time.Second - deadline := time.Now().Add(time.Duration(timeoutSeconds) * time.Second) - - var err error - for time.Now().Before(deadline) { - flag := false - flag, err = l.LeaseTableExists() - - if flag { - return true, nil - } - - time.Sleep(delay) - } - - return false, err -} - -/** - * List all objects in table synchronously. - * - * @error LeasingDependencyError if DynamoDB scan fails in an unexpected way - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB scan fails due to lack of capacity - * - * @return list of leases - */ -func (l *LeaseManager) ListLeases() ([]ILease, error) { - return l.list(0) -} - -/** - * Create a new lease. Conditional on a lease not already existing with this shardId. 
- * - * @param lease the lease to create - * - * @return true if lease was created, false if lease already exists - * - * @error LeasingDependencyError if DynamoDB put fails in an unexpected way - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB put fails due to lack of capacity - */ -func (l *LeaseManager) CreateLeaseIfNotExists(lease ILease) (bool, error) { - input := &dynamodb.PutItemInput{ - TableName: aws.String(l.tableName), - Item: l.serializer.ToDynamoRecord(lease), - Expected: l.serializer.GetDynamoNonexistantExpectation(), - } - _, err := l.dynamoDBClient.PutItem(input) - return err != nil, err -} - -/** - * @param shardId Get the lease for this shardId and it is the leaseKey - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB get fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB get fails in an unexpected way - * - * @return lease for the specified shardId, or null if one doesn't exist - */ -func (l *LeaseManager) GetLease(shardId string) (ILease, error) { - input := &dynamodb.GetItemInput{ - TableName: aws.String(l.tableName), - Key: l.serializer.GetDynamoHashKey(shardId), - ConsistentRead: aws.Bool(l.consistentReads), - } - result, err := l.dynamoDBClient.GetItem(input) - if err != nil { - return nil, err - } - dynamoRecord := result.Item - if dynamoRecord == nil { - return nil, nil - } - lease := l.serializer.FromDynamoRecord(dynamoRecord) - return lease, nil -} - -/** - * Renew a lease by incrementing the lease counter. Conditional on the leaseCounter in DynamoDB matching the leaseCounter - * of the input. Mutates the leaseCounter of the passed-in lease object after updating the record in DynamoDB. 
- * - * @param lease the lease to renew - * - * @return true if renewal succeeded, false otherwise - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB update fails in an unexpected way - */ -func (l *LeaseManager) RenewLease(lease ILease) (bool, error) { - input := &dynamodb.UpdateItemInput{ - TableName: aws.String(l.tableName), - Key: l.serializer.GetDynamoHashKey(lease.GetLeaseKey()), - Expected: l.serializer.GetDynamoLeaseCounterExpectation(lease), - } - _, err := l.dynamoDBClient.UpdateItem(input) - - if err != nil { - // If we had a spurious retry during the Dynamo update, then this conditional PUT failure - // might be incorrect. So, we get the item straight away and check if the lease owner + lease counter - // are what we expected. - expectedOwner := lease.GetLeaseOwner() - expectedCounter := lease.GetLeaseCounter() + 1 - updatedLease, _ := l.GetLease(lease.GetLeaseKey()) - if updatedLease == nil || expectedOwner != updatedLease.GetLeaseOwner() || - expectedCounter != updatedLease.GetLeaseCounter() { - return false, nil - } - - log.Println("Detected spurious renewal failure for lease with key " + lease.GetLeaseKey() + ", but recovered") - } - - lease.SetLeaseCounter(lease.GetLeaseCounter() + 1) - return err != nil, err - -} - -/** - * Take a lease for the given owner by incrementing its leaseCounter and setting its owner field. Conditional on - * the leaseCounter in DynamoDB matching the leaseCounter of the input. Mutates the leaseCounter and owner of the - * passed-in lease object after updating DynamoDB. 
- * - * @param lease the lease to take - * @param owner the new owner - * - * @return true if lease was successfully taken, false otherwise - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB update fails in an unexpected way - */ -func (l *LeaseManager) TakeLease(lease ILease, owner string) (bool, error) { - input := &dynamodb.UpdateItemInput{ - TableName: aws.String(l.tableName), - Key: l.serializer.GetDynamoHashKey(lease.GetLeaseKey()), - Expected: l.serializer.GetDynamoLeaseCounterExpectation(lease), - } - - updates := l.serializer.GetDynamoLeaseCounterUpdate(lease) - - // putAll to updates - for k, v := range l.serializer.GetDynamoTakeLeaseUpdate(lease, owner) { - updates[k] = v - } - input.SetAttributeUpdates(updates) - _, err := l.dynamoDBClient.UpdateItem(input) - - if err != nil { - return false, err - } - - lease.SetLeaseCounter(lease.GetLeaseCounter() + 1) - lease.SetLeaseOwner(owner) - return true, nil -} - -/** - * Evict the current owner of lease by setting owner to null. Conditional on the owner in DynamoDB matching the owner of - * the input. Mutates the lease counter and owner of the passed-in lease object after updating the record in DynamoDB. 
- * - * @param lease the lease to void - * - * @return true if eviction succeeded, false otherwise - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB update fails in an unexpected way - */ -func (l *LeaseManager) EvictLease(lease ILease) (bool, error) { - input := &dynamodb.UpdateItemInput{ - TableName: aws.String(l.tableName), - Key: l.serializer.GetDynamoHashKey(lease.GetLeaseKey()), - Expected: l.serializer.GetDynamoLeaseCounterExpectation(lease), - } - - updates := l.serializer.GetDynamoLeaseCounterUpdate(lease) - - // putAll to updates - for k, v := range l.serializer.GetDynamoEvictLeaseUpdate(lease) { - updates[k] = v - } - input.SetAttributeUpdates(updates) - _, err := l.dynamoDBClient.UpdateItem(input) - - if err != nil { - return false, err - } - - lease.SetLeaseCounter(lease.GetLeaseCounter() + 1) - lease.SetLeaseOwner("") - return true, nil -} - -/** - * Delete the given lease from DynamoDB. Does nothing when passed a lease that does not exist in DynamoDB. - * - * @param lease the lease to delete - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB delete fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB delete fails in an unexpected way - */ -func (l *LeaseManager) DeleteLease(lease ILease) error { - input := &dynamodb.DeleteItemInput{ - TableName: aws.String(l.tableName), - Key: l.serializer.GetDynamoHashKey(lease.GetLeaseKey()), - } - _, err := l.dynamoDBClient.DeleteItem(input) - return err -} - -/** - * Delete all leases from DynamoDB. Useful for tools/utils and testing. 
- * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB scan or delete fail due to lack of capacity - * @error LeasingDependencyError if DynamoDB scan or delete fail in an unexpected way - */ -func (l *LeaseManager) DeleteAll() error { - allLeases, err := l.ListLeases() - if err != nil { - return err - } - - for _, v := range allLeases { - err := l.DeleteLease(v) - if err != nil { - return err - } - } - return nil -} - -/** - * Update application-specific fields of the given lease in DynamoDB. Does not update fields managed by the leasing - * library such as leaseCounter, leaseOwner, or leaseKey. Conditional on the leaseCounter in DynamoDB matching the - * leaseCounter of the input. Increments the lease counter in DynamoDB so that updates can be contingent on other - * updates. Mutates the lease counter of the passed-in lease object. - * - * @return true if update succeeded, false otherwise - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB update fails in an unexpected way - */ -func (l *LeaseManager) UpdateLease(lease ILease) (bool, error) { - input := &dynamodb.UpdateItemInput{ - TableName: aws.String(l.tableName), - Key: l.serializer.GetDynamoHashKey(lease.GetLeaseKey()), - Expected: l.serializer.GetDynamoLeaseCounterExpectation(lease), - } - - updates := l.serializer.GetDynamoLeaseCounterUpdate(lease) - - // putAll to updates - for k, v := range l.serializer.GetDynamoUpdateLeaseUpdate(lease) { - updates[k] = v - } - input.SetAttributeUpdates(updates) - _, err := l.dynamoDBClient.UpdateItem(input) - - if err != nil { - return false, err - } - - lease.SetLeaseCounter(lease.GetLeaseCounter() + 1) - return true, nil -} - -/** - * Check (synchronously) if there are any leases in the lease table. 
- * - * @return true if there are no leases in the lease table - * - * @error LeasingDependencyError if DynamoDB scan fails in an unexpected way - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB scan fails due to lack of capacity - */ -func (l *LeaseManager) IsLeaseTableEmpty() (bool, error) { - result, err := l.list(1) - if err != nil { - return true, err - } - return len(result) > 0, nil -} - -// tableStatus check the current lease table status -func (l *LeaseManager) tableStatus() (*string, error) { - input := &dynamodb.DescribeTableInput{ - TableName: aws.String(l.tableName), - } - - result, err := l.dynamoDBClient.DescribeTable(input) - if err != nil { - return nil, err - } - - return result.Table.TableStatus, nil -} - -// List with the given page size (number of items to consider at a time). Package access for integration testing. -func (l *LeaseManager) list(limit int64) ([]ILease, error) { - input := &dynamodb.ScanInput{ - TableName: aws.String(l.tableName), - } - - if limit > 0 { - input.SetLimit(limit) - } - - result := []ILease{} - - for { - scanResult, err := l.dynamoDBClient.Scan(input) - if err != nil || scanResult == nil { - break - } - - for _, v := range scanResult.Items { - result = append(result, l.serializer.FromDynamoRecord(v)) - } - - lastEvaluatedKey := scanResult.LastEvaluatedKey - if lastEvaluatedKey == nil { - scanResult = nil - break - } else { - input.SetExclusiveStartKey(lastEvaluatedKey) - } - } - - return result, nil -} diff --git a/src/leases/impl/lease-serializer.go b/src/leases/impl/lease-serializer.go deleted file mode 100644 index 3e3f3b3..0000000 --- a/src/leases/impl/lease-serializer.go +++ /dev/null @@ -1,184 +0,0 @@ -package impl - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/dynamodb" - - dynamoutils "leases/dynamoutils" - . 
"leases/interfaces" -) - -const ( - LEASE_KEY_KEY = "leaseKey" - LEASE_OWNER_KEY = "leaseOwner" - LEASE_COUNTER_KEY = "leaseCounter" -) - -/** - * An implementation of ILeaseSerializer for basic Lease objects. Can also instantiate subclasses of Lease so that - * LeaseSerializer can be decorated by other classes if you need to add fields to leases. - */ -type LeaseSerializer struct { -} - -/** - * Construct a DynamoDB record out of a Lease object - * - * @param lease lease object to serialize - * @return an attribute value map representing the lease object - */ -func (lc *LeaseSerializer) ToDynamoRecord(lease ILease) map[string]*dynamodb.AttributeValue { - result := map[string]*dynamodb.AttributeValue{} - - result[LEASE_KEY_KEY], _ = dynamoutils.CreateAttributeValueFromString(lease.GetLeaseKey()) - result[LEASE_COUNTER_KEY], _ = dynamoutils.CreateAttributeValueFromLong(lease.GetLeaseCounter()) - - if len(lease.GetLeaseOwner()) > 0 { - result[LEASE_OWNER_KEY], _ = dynamoutils.CreateAttributeValueFromString(lease.GetLeaseOwner()) - } - - return result -} - -/** - * Construct a Lease object out of a DynamoDB record. - * - * @param dynamoRecord attribute value map from DynamoDB - * @return a deserialized lease object representing the attribute value map - */ -func (lc *LeaseSerializer) FromDynamoRecord(dynamoRecord map[string]*dynamodb.AttributeValue) ILease { - result := &Lease{} - - result.SetLeaseKey(aws.StringValue(dynamoutils.SafeGetString(dynamoRecord, LEASE_KEY_KEY))) - result.SetLeaseOwner(aws.StringValue(dynamoutils.SafeGetString(dynamoRecord, LEASE_OWNER_KEY))) - result.SetLeaseCounter(dynamoutils.SafeGetLong(dynamoRecord, LEASE_COUNTER_KEY)) - return result -} - -/** - * Special getDynamoHashKey implementation used by ILeaseManager.getLease(). - * - * @param leaseKey - * @return the attribute value map representing a Lease's hash key given a string. 
- */ -func (lc *LeaseSerializer) GetDynamoHashKey(leaseKey string) map[string]*dynamodb.AttributeValue { - result := map[string]*dynamodb.AttributeValue{} - result[LEASE_KEY_KEY], _ = dynamoutils.CreateAttributeValueFromString(leaseKey) - return result -} - -/** - * @param lease - * @return the attribute value map asserting that a lease counter is what we expect. - */ -func (lc *LeaseSerializer) GetDynamoLeaseCounterExpectation(lease ILease) map[string]*dynamodb.ExpectedAttributeValue { - result := map[string]*dynamodb.ExpectedAttributeValue{} - expectedAV := &dynamodb.ExpectedAttributeValue{} - val, _ := dynamoutils.CreateAttributeValueFromLong(lease.GetLeaseCounter()) - expectedAV.SetValue(val) - result[LEASE_COUNTER_KEY] = expectedAV - return result -} - -/** - * @param lease - * @return the attribute value map asserting that the lease owner is what we expect. - */ -func (lc *LeaseSerializer) GetDynamoLeaseOwnerExpectation(lease ILease) map[string]*dynamodb.ExpectedAttributeValue { - result := map[string]*dynamodb.ExpectedAttributeValue{} - expectedAV := &dynamodb.ExpectedAttributeValue{} - val, _ := dynamoutils.CreateAttributeValueFromString(lease.GetLeaseOwner()) - expectedAV.SetValue(val) - result[LEASE_OWNER_KEY] = expectedAV - return result - -} - -/** - * @return the attribute value map asserting that a lease does not exist. 
- */ -func (lc *LeaseSerializer) GetDynamoNonexistantExpectation() map[string]*dynamodb.ExpectedAttributeValue { - result := map[string]*dynamodb.ExpectedAttributeValue{} - expectedAV := &dynamodb.ExpectedAttributeValue{} - expectedAV.SetExists(false) - result[LEASE_KEY_KEY] = expectedAV - - return result -} - -/** - * @param lease - * @return the attribute value map that increments a lease counter - */ -func (lc *LeaseSerializer) GetDynamoLeaseCounterUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate { - result := map[string]*dynamodb.AttributeValueUpdate{} - updatedAV := &dynamodb.AttributeValueUpdate{} - // Increase the lease counter by 1 - val, _ := dynamoutils.CreateAttributeValueFromLong(lease.GetLeaseCounter() + 1) - updatedAV.SetValue(val) - updatedAV.SetAction(dynamodb.AttributeActionPut) - result[LEASE_COUNTER_KEY] = updatedAV - return result -} - -/** - * @param lease - * @param newOwner - * @return the attribute value map that takes a lease for a new owner - */ -func (lc *LeaseSerializer) GetDynamoTakeLeaseUpdate(lease ILease, newOwner string) map[string]*dynamodb.AttributeValueUpdate { - result := map[string]*dynamodb.AttributeValueUpdate{} - updatedAV := &dynamodb.AttributeValueUpdate{} - val, _ := dynamoutils.CreateAttributeValueFromString(lease.GetLeaseOwner()) - updatedAV.SetValue(val) - updatedAV.SetAction(dynamodb.AttributeActionPut) - result[LEASE_OWNER_KEY] = updatedAV - return result -} - -/** - * @param lease - * @return the attribute value map that voids a lease - */ -func (lc *LeaseSerializer) GetDynamoEvictLeaseUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate { - result := map[string]*dynamodb.AttributeValueUpdate{} - updatedAV := &dynamodb.AttributeValueUpdate{} - updatedAV.SetValue(nil) - updatedAV.SetAction(dynamodb.AttributeActionDelete) - result[LEASE_OWNER_KEY] = updatedAV - return result -} - -/** - * @param lease - * @return the attribute value map that updates application-specific data for a lease and 
increments the lease - * counter - */ -func (lc *LeaseSerializer) GetDynamoUpdateLeaseUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate { - result := map[string]*dynamodb.AttributeValueUpdate{} - return result -} - -/** - * @return the key schema for creating a DynamoDB table to store leases - */ -func (lc *LeaseSerializer) GetKeySchema() []*dynamodb.KeySchemaElement { - keySchema := []*dynamodb.KeySchemaElement{} - schemaElement := &dynamodb.KeySchemaElement{} - schemaElement.SetAttributeName(LEASE_KEY_KEY) - schemaElement.SetKeyType(dynamodb.KeyTypeHash) - keySchema = append(keySchema, schemaElement) - return keySchema -} - -/** - * @return attribute definitions for creating a DynamoDB table to store leases - */ -func (lc *LeaseSerializer) GetAttributeDefinitions() []*dynamodb.AttributeDefinition { - definitions := []*dynamodb.AttributeDefinition{} - definition := &dynamodb.AttributeDefinition{} - definition.SetAttributeName(LEASE_KEY_KEY) - definition.SetAttributeType(dynamodb.ScalarAttributeTypeS) - definitions = append(definitions, definition) - return definitions -} diff --git a/src/leases/impl/lease.go b/src/leases/impl/lease.go deleted file mode 100644 index 394475f..0000000 --- a/src/leases/impl/lease.go +++ /dev/null @@ -1,116 +0,0 @@ -package impl - -import ( - cc "clientlibrary/common" - "time" -) - -const ( - // We will consider leases to be expired if they are more than 90 days. - MAX_ABS_AGE_NANOS = int64(90 * 24 * time.Hour) -) - -// Lease structure contains data pertaining to a Lease. Distributed systems may use leases to partition work across a -// fleet of workers. Each unit of work (identified by a leaseKey) has a corresponding Lease. Every worker will contend -// for all leases - only one worker will successfully take each one. The worker should hold the lease until it is ready to stop -// processing the corresponding unit of work, or until it fails. 
When the worker stops holding the lease, another worker will -// take and hold the lease. -type Lease struct { - // shard-id - leaseKey string - // worker# - leaseOwner string - // ccounter incremented periodically - leaseCounter int64 - - // This field is used to prevent updates to leases that we have lost and re-acquired. It is deliberately not - // persisted in DynamoDB and excluded from hashCode and equals. - concurrencyToken string - - // This field is used by LeaseRenewer and LeaseTaker to track the last time a lease counter was incremented. It is - // deliberately not persisted in DynamoDB and excluded from hashCode and equals. - lastCounterIncrementNanos int64 -} - -// CloneLease to clone a lease object -func CopyLease(lease *Lease) *Lease { - return &Lease{ - leaseKey: lease.leaseKey, - leaseOwner: lease.leaseOwner, - leaseCounter: lease.leaseCounter, - concurrencyToken: lease.concurrencyToken, - lastCounterIncrementNanos: lease.lastCounterIncrementNanos, - } -} - -// GetLeaseKey retrieves leaseKey - identifies the unit of work associated with this lease. -func (l *Lease) GetLeaseKey() string { - return l.leaseKey -} - -// GetLeaseOwner gets current owner of the lease, may be "". -func (l *Lease) GetLeaseOwner() string { - return l.leaseOwner -} - -// GetLeaseCounter retrieves leaseCounter which is incremented periodically by the holder of the lease. Used for optimistic locking. -func (l *Lease) GetLeaseCounter() int64 { - return l.leaseCounter -} - -// GetConcurrencyToken returns concurrency token -func (l *Lease) GetConcurrencyToken() string { - return l.concurrencyToken -} - -// GetLastCounterIncrementNanos returns concurrency token -func (l *Lease) GetLastCounterIncrementNanos() int64 { - return l.lastCounterIncrementNanos -} - -// SetLeaseKey sets leaseKey - LeaseKey is immutable once set. 
-func (l *Lease) SetLeaseKey(leaseKey string) error { - if len(l.leaseKey) > 0 { - return cc.IllegalArgumentError.MakeErr().WithDetail("LeaseKey is immutable once set") - } - - l.leaseKey = leaseKey - return nil -} - -// SetLeaseOwner set current owner of the lease, may be "". -func (l *Lease) SetLeaseOwner(leaseOwner string) { - l.leaseOwner = leaseOwner -} - -// SetLeaseCounter sets leaseCounter which is incremented periodically by the holder of the lease. Used for optimistic locking. -func (l *Lease) SetLeaseCounter(leaseCounter int64) { - l.leaseCounter = leaseCounter -} - -// SetConcurrencyToken -func (l *Lease) SetConcurrencyToken(concurrencyToken string) { - l.concurrencyToken = concurrencyToken -} - -// SetLastCounterIncrementNanos returns concurrency token -func (l *Lease) SetLastCounterIncrementNanos(lastCounterIncrementNanos int64) { - l.lastCounterIncrementNanos = lastCounterIncrementNanos -} - -// IsExpired to check whether lease expired using -// @param leaseDurationNanos duration of lease in nanoseconds -// @param asOfNanos time in nanoseconds to check expiration as-of -// @return true if lease is expired as-of given time, false otherwise -func (l *Lease) IsExpired(leaseDurationNanos, asOfNanos int64) bool { - if l.lastCounterIncrementNanos == 0 { - return true - } - - age := asOfNanos - l.lastCounterIncrementNanos - if age > MAX_ABS_AGE_NANOS { - return true - } else { - return age > leaseDurationNanos - } -} diff --git a/src/leases/interfaces/lease-manager.go b/src/leases/interfaces/lease-manager.go deleted file mode 100644 index 8f27aa2..0000000 --- a/src/leases/interfaces/lease-manager.go +++ /dev/null @@ -1,162 +0,0 @@ -package interfaces - -// ILeaseManager supports basic CRUD operations for Leases. -type ILeaseManager interface { - - /** - * Creates the table that will store leases. Succeeds if table already exists. 
- * - * @param readCapacity - * @param writeCapacity - * - * @return true if we created a new table (table didn't exist before) - * - * @error ProvisionedThroughputError if we cannot create the lease table due to per-AWS-account capacity - * restrictions. - * @error LeasingDependencyError if DynamoDB createTable fails in an unexpected way - */ - CreateLeaseTableIfNotExists(readCapacity, writeCapacity int64) (bool, error) - - /** - * @return true if the lease table already exists. - * - * @error LeasingDependencyError if DynamoDB describeTable fails in an unexpected way - */ - LeaseTableExists() (bool, error) - - /** - * Blocks until the lease table exists by polling leaseTableExists. - * - * @param secondsBetweenPolls time to wait between polls in seconds - * @param timeoutSeconds total time to wait in seconds - * - * @return true if table exists, false if timeout was reached - * - * @error LeasingDependencyError if DynamoDB describeTable fails in an unexpected way - */ - WaitUntilLeaseTableExists(secondsBetweenPolls, timeoutSeconds int64) (bool, error) - - /** - * List all objects in table synchronously. - * - * @error LeasingDependencyError if DynamoDB scan fails in an unexpected way - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB scan fails due to lack of capacity - * - * @return list of leases - */ - ListLeases() ([]ILease, error) - - /** - * Create a new lease. Conditional on a lease not already existing with this shardId. 
- * - * @param lease the lease to create - * - * @return true if lease was created, false if lease already exists - * - * @error LeasingDependencyError if DynamoDB put fails in an unexpected way - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB put fails due to lack of capacity - */ - CreateLeaseIfNotExists(lease ILease) (bool, error) - - /** - * @param shardId Get the lease for this shardId - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB get fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB get fails in an unexpected way - * - * @return lease for the specified shardId, or null if one doesn't exist - */ - GetLease(shardId string) (ILease, error) - - /** - * Renew a lease by incrementing the lease counter. Conditional on the leaseCounter in DynamoDB matching the leaseCounter - * of the input. Mutates the leaseCounter of the passed-in lease object after updating the record in DynamoDB. - * - * @param lease the lease to renew - * - * @return true if renewal succeeded, false otherwise - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB update fails in an unexpected way - */ - RenewLease(lease ILease) (bool, error) - - /** - * Take a lease for the given owner by incrementing its leaseCounter and setting its owner field. Conditional on - * the leaseCounter in DynamoDB matching the leaseCounter of the input. Mutates the leaseCounter and owner of the - * passed-in lease object after updating DynamoDB. 
- * - * @param lease the lease to take - * @param owner the new owner - * - * @return true if lease was successfully taken, false otherwise - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB update fails in an unexpected way - */ - TakeLease(lease ILease, owner string) (bool, error) - - /** - * Evict the current owner of lease by setting owner to null. Conditional on the owner in DynamoDB matching the owner of - * the input. Mutates the lease counter and owner of the passed-in lease object after updating the record in DynamoDB. - * - * @param lease the lease to void - * - * @return true if eviction succeeded, false otherwise - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB update fails in an unexpected way - */ - EvictLease(lease ILease) (bool, error) - - /** - * Delete the given lease from DynamoDB. Does nothing when passed a lease that does not exist in DynamoDB. - * - * @param lease the lease to delete - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB delete fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB delete fails in an unexpected way - */ - DeleteLease(lease ILease) error - - /** - * Delete all leases from DynamoDB. Useful for tools/utils and testing. - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB scan or delete fail due to lack of capacity - * @error LeasingDependencyError if DynamoDB scan or delete fail in an unexpected way - */ - DeleteAll() error - - /** - * Update application-specific fields of the given lease in DynamoDB. 
Does not update fields managed by the leasing - * library such as leaseCounter, leaseOwner, or leaseKey. Conditional on the leaseCounter in DynamoDB matching the - * leaseCounter of the input. Increments the lease counter in DynamoDB so that updates can be contingent on other - * updates. Mutates the lease counter of the passed-in lease object. - * - * @return true if update succeeded, false otherwise - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB update fails in an unexpected way - */ - UpdateLease(lease ILease) (bool, error) - - /** - * Check (synchronously) if there are any leases in the lease table. - * - * @return true if there are no leases in the lease table - * - * @error LeasingDependencyError if DynamoDB scan fails in an unexpected way - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB scan fails due to lack of capacity - */ - IsLeaseTableEmpty() (bool, error) -} diff --git a/src/leases/interfaces/lease-renewer.go b/src/leases/interfaces/lease-renewer.go deleted file mode 100644 index 6e52049..0000000 --- a/src/leases/interfaces/lease-renewer.go +++ /dev/null @@ -1,78 +0,0 @@ -package interfaces - -// LeaseTable hold current lease mapping shardId --> Lease -type LeaseTable map[string]*ILease - -/** - * ILeaseRenewer objects are used by LeaseCoordinator to renew leases held by the LeaseCoordinator. Each - * LeaseCoordinator instance corresponds to one worker, and uses exactly one ILeaseRenewer to manage lease renewal for - * that worker. - */ -type ILeaseRenewer interface { - - /** - * Bootstrap initial set of leases from the LeaseManager (e.g. 
upon process restart, pick up leases we own) - * @error LeasingDependencyError on unexpected DynamoDB failures - * @error LeasingInvalidStateError if lease table doesn't exist - * @error ProvisionedThroughputError if DynamoDB reads fail due to insufficient capacity - */ - Initialize() error - - /** - * Attempt to renew all currently held leases. - * - * @error LeasingDependencyError on unexpected DynamoDB failures - * @error LeasingInvalidStateError if lease table does not exist - */ - RenewLeases() error - - /** - * @return currently held leases. Key is shardId, value is corresponding Lease object. A lease is currently held if - * we successfully renewed it on the last run of renewLeases(). Lease objects returned are deep copies - - * their lease counters will not tick. - */ - GetCurrentlyHeldLeases() *LeaseTable - - /** - * @param leaseKey key of the lease to retrieve - * - * @return a deep copy of a currently held lease, or null if we don't hold the lease - */ - GetCurrentlyHeldLease(leaseKey string) *ILease - - /** - * Adds leases to this LeaseRenewer's set of currently held leases. Leases must have lastRenewalNanos set to the - * last time the lease counter was incremented before being passed to this method. - * - * @param newLeases new leases. - */ - AddLeasesToRenew(newLeases []ILease) - - /** - * Clears this LeaseRenewer's set of currently held leases. - */ - ClearCurrentlyHeldLeases() - - /** - * Stops the lease renewer from continunig to maintain the given lease. - * - * @param lease the lease to drop. - */ - DropLease(lease ILease) - - /** - * Update application-specific fields in a currently held lease. Cannot be used to update internal fields such as - * leaseCounter, leaseOwner, etc. Fails if we do not hold the lease, or if the concurrency token does not match - * the concurrency token on the internal authoritative copy of the lease (ie, if we lost and re-acquired the lease). 
- * - * @param lease lease object containing updated data - * @param concurrencyToken obtained by calling Lease.getConcurrencyToken for a currently held lease - * - * @return true if update succeeds, false otherwise - * - * @error LeasingInvalidStateError if lease table does not exist - * @error ProvisionedThroughputError if DynamoDB update fails due to lack of capacity - * @error LeasingDependencyError if DynamoDB update fails in an unexpected way - */ - UpdateLease(lease ILease, concurrencyToken string) (bool, error) -} diff --git a/src/leases/interfaces/lease-serializer.go b/src/leases/interfaces/lease-serializer.go deleted file mode 100644 index a8601d0..0000000 --- a/src/leases/interfaces/lease-serializer.go +++ /dev/null @@ -1,86 +0,0 @@ -package interfaces - -import ( - "github.com/aws/aws-sdk-go/service/dynamodb" -) - -// ILeaseSerializer an utility class that manages the mapping of Lease objects/operations to records in DynamoDB. -type ILeaseSerializer interface { - - /** - * Construct a DynamoDB record out of a Lease object - * - * @param lease lease object to serialize - * @return an attribute value map representing the lease object - */ - ToDynamoRecord(lease ILease) map[string]*dynamodb.AttributeValue - - /** - * Construct a Lease object out of a DynamoDB record. - * - * @param dynamoRecord attribute value map from DynamoDB - * @return a deserialized lease object representing the attribute value map - */ - FromDynamoRecord(dynamoRecord map[string]*dynamodb.AttributeValue) ILease - - /** - * Special getDynamoHashKey implementation used by ILeaseManager.getLease(). - * - * @param leaseKey - * @return the attribute value map representing a Lease's hash key given a string. - */ - GetDynamoHashKey(leaseKey string) map[string]*dynamodb.AttributeValue - - /** - * @param lease - * @return the attribute value map asserting that a lease counter is what we expect. 
- */ - GetDynamoLeaseCounterExpectation(lease ILease) map[string]*dynamodb.ExpectedAttributeValue - - /** - * @param lease - * @return the attribute value map asserting that the lease owner is what we expect. - */ - GetDynamoLeaseOwnerExpectation(lease ILease) map[string]*dynamodb.ExpectedAttributeValue - - /** - * @return the attribute value map asserting that a lease does not exist. - */ - GetDynamoNonexistantExpectation() map[string]*dynamodb.ExpectedAttributeValue - - /** - * @param lease - * @return the attribute value map that increments a lease counter - */ - GetDynamoLeaseCounterUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate - - /** - * @param lease - * @param newOwner - * @return the attribute value map that takes a lease for a new owner - */ - GetDynamoTakeLeaseUpdate(lease ILease, newOwner string) map[string]*dynamodb.AttributeValueUpdate - - /** - * @param lease - * @return the attribute value map that voids a lease - */ - GetDynamoEvictLeaseUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate - - /** - * @param lease - * @return the attribute value map that updates application-specific data for a lease and increments the lease - * counter - */ - GetDynamoUpdateLeaseUpdate(lease ILease) map[string]*dynamodb.AttributeValueUpdate - - /** - * @return the key schema for creating a DynamoDB table to store leases - */ - GetKeySchema() []*dynamodb.KeySchemaElement - - /** - * @return attribute definitions for creating a DynamoDB table to store leases - */ - GetAttributeDefinitions() []*dynamodb.AttributeDefinition -} diff --git a/src/leases/interfaces/lease-taker.go b/src/leases/interfaces/lease-taker.go deleted file mode 100644 index 0dbaf1b..0000000 --- a/src/leases/interfaces/lease-taker.go +++ /dev/null @@ -1,28 +0,0 @@ -package interfaces - -/** - * ILeaseTaker is used by LeaseCoordinator to take new leases, or leases that other workers fail to renew. 
Each - * LeaseCoordinator instance corresponds to one worker and uses exactly one ILeaseTaker to take leases for that worker. - */ -type ILeaseTaker interface { - - /** - * Compute the set of leases available to be taken and attempt to take them. Lease taking rules are: - * - * 1) If a lease's counter hasn't changed in long enough, try to take it. - * 2) If we see a lease we've never seen before, take it only if owner == null. If it's owned, odds are the owner is - * holding it. We can't tell until we see it more than once. - * 3) For load balancing purposes, you may violate rules 1 and 2 for EXACTLY ONE lease per call of takeLeases(). - * - * @return map of shardId to Lease object for leases we just successfully took. - * - * @error LeasingDependencyError on unexpected DynamoDB failures - * @error LeasingInvalidStateError if lease table does not exist - */ - TakeLeases() map[string]ILease - - /** - * @return workerIdentifier for this LeaseTaker - */ - GetWorkerIdentifier() string -} diff --git a/src/leases/interfaces/lease.go b/src/leases/interfaces/lease.go deleted file mode 100644 index f3da35a..0000000 --- a/src/leases/interfaces/lease.go +++ /dev/null @@ -1,21 +0,0 @@ -package interfaces - -// ILease is the interface for all Leases -type ILease interface { - GetLeaseKey() string - SetLeaseKey(leaseKey string) error - - GetLeaseOwner() string - SetLeaseOwner(leaseOwner string) - - GetLeaseCounter() int64 - SetLeaseCounter(leaseCounter int64) - - GetConcurrencyToken() string - SetConcurrencyToken(concurrencyToken string) - - GetLastCounterIncrementNanos() int64 - SetLastCounterIncrementNanos(lastCounterIncrementNanos int64) - - IsExpired(leaseDurationNanos, asOfNanos int64) bool -} diff --git a/src/clientlibrary/common/errors.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/common/errors.go similarity index 100% rename from src/clientlibrary/common/errors.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/common/errors.go diff --git 
a/src/clientlibrary/config/config.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/config/config.go similarity index 100% rename from src/clientlibrary/config/config.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/config/config.go diff --git a/src/clientlibrary/config/config_test.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/config/config_test.go similarity index 100% rename from src/clientlibrary/config/config_test.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/config/config_test.go diff --git a/src/clientlibrary/config/initial-stream-pos.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/config/initial-stream-pos.go similarity index 100% rename from src/clientlibrary/config/initial-stream-pos.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/config/initial-stream-pos.go diff --git a/src/clientlibrary/config/kcl-config.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/config/kcl-config.go similarity index 99% rename from src/clientlibrary/config/kcl-config.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/config/kcl-config.go index bfba4aa..1ac3f7f 100644 --- a/src/clientlibrary/config/kcl-config.go +++ b/src/vmware.com/cascade-kinesis-client/clientlibrary/config/kcl-config.go @@ -1,8 +1,8 @@ package config import ( - "clientlibrary/utils" "time" + "vmware.com/cascade-kinesis-client/clientlibrary/utils" ) // NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. 
diff --git a/src/clientlibrary/interfaces/inputs.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/inputs.go similarity index 100% rename from src/clientlibrary/interfaces/inputs.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/inputs.go diff --git a/src/clientlibrary/interfaces/record-processor-checkpointer.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/record-processor-checkpointer.go similarity index 100% rename from src/clientlibrary/interfaces/record-processor-checkpointer.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/record-processor-checkpointer.go diff --git a/src/clientlibrary/interfaces/record-processor.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/record-processor.go similarity index 100% rename from src/clientlibrary/interfaces/record-processor.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/record-processor.go diff --git a/src/clientlibrary/interfaces/sequence-number.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/sequence-number.go similarity index 100% rename from src/clientlibrary/interfaces/sequence-number.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/sequence-number.go diff --git a/src/clientlibrary/metrics/cloudwatch.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/cloudwatch.go similarity index 100% rename from src/clientlibrary/metrics/cloudwatch.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/cloudwatch.go diff --git a/src/clientlibrary/metrics/interfaces.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/interfaces.go similarity index 100% rename from src/clientlibrary/metrics/interfaces.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/interfaces.go diff --git a/src/clientlibrary/metrics/prometheus.go 
b/src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/prometheus.go similarity index 100% rename from src/clientlibrary/metrics/prometheus.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/prometheus.go diff --git a/src/clientlibrary/utils/random.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/utils/random.go similarity index 100% rename from src/clientlibrary/utils/random.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/utils/random.go diff --git a/src/clientlibrary/utils/uuid.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/utils/uuid.go similarity index 100% rename from src/clientlibrary/utils/uuid.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/utils/uuid.go diff --git a/src/clientlibrary/worker/checkpointer.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/checkpointer.go similarity index 99% rename from src/clientlibrary/worker/checkpointer.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/worker/checkpointer.go index 4994e63..ec279b2 100644 --- a/src/clientlibrary/worker/checkpointer.go +++ b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/checkpointer.go @@ -11,7 +11,7 @@ import ( "github.com/matryer/try" log "github.com/sirupsen/logrus" - "clientlibrary/config" + "vmware.com/cascade-kinesis-client/clientlibrary/config" ) const ( diff --git a/src/clientlibrary/worker/record-processor-checkpointer.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/record-processor-checkpointer.go similarity index 96% rename from src/clientlibrary/worker/record-processor-checkpointer.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/worker/record-processor-checkpointer.go index 69a406e..94a090a 100644 --- a/src/clientlibrary/worker/record-processor-checkpointer.go +++ b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/record-processor-checkpointer.go @@ -3,7 +3,7 @@ package worker import ( 
"github.com/aws/aws-sdk-go/aws" - kcl "clientlibrary/interfaces" + kcl "vmware.com/cascade-kinesis-client/clientlibrary/interfaces" ) type ( diff --git a/src/clientlibrary/worker/shard-consumer.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/shard-consumer.go similarity index 97% rename from src/clientlibrary/worker/shard-consumer.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/worker/shard-consumer.go index 9b9f175..9a76309 100644 --- a/src/clientlibrary/worker/shard-consumer.go +++ b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/shard-consumer.go @@ -10,9 +10,9 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" - "clientlibrary/config" - kcl "clientlibrary/interfaces" - "clientlibrary/metrics" + "vmware.com/cascade-kinesis-client/clientlibrary/config" + kcl "vmware.com/cascade-kinesis-client/clientlibrary/interfaces" + "vmware.com/cascade-kinesis-client/clientlibrary/metrics" ) const ( diff --git a/src/clientlibrary/worker/worker.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker.go similarity index 97% rename from src/clientlibrary/worker/worker.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker.go index 1603ff4..a97573c 100644 --- a/src/clientlibrary/worker/worker.go +++ b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker.go @@ -16,9 +16,9 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" - "clientlibrary/config" - kcl "clientlibrary/interfaces" - "clientlibrary/metrics" + "vmware.com/cascade-kinesis-client/clientlibrary/config" + kcl "vmware.com/cascade-kinesis-client/clientlibrary/interfaces" + "vmware.com/cascade-kinesis-client/clientlibrary/metrics" ) type shardStatus struct { diff --git a/src/clientlibrary/worker/worker_test.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker_test.go similarity index 93% 
rename from src/clientlibrary/worker/worker_test.go rename to src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker_test.go index e1dee71..bdabb54 100644 --- a/src/clientlibrary/worker/worker_test.go +++ b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker_test.go @@ -10,11 +10,11 @@ import ( "github.com/prometheus/common/expfmt" log "github.com/sirupsen/logrus" - cfg "clientlibrary/config" - kc "clientlibrary/interfaces" - "clientlibrary/metrics" - "clientlibrary/utils" "github.com/stretchr/testify/assert" + cfg "vmware.com/cascade-kinesis-client/clientlibrary/config" + kc "vmware.com/cascade-kinesis-client/clientlibrary/interfaces" + "vmware.com/cascade-kinesis-client/clientlibrary/metrics" + "vmware.com/cascade-kinesis-client/clientlibrary/utils" ) const ( @@ -27,10 +27,6 @@ const specstr = `{"name":"kube-qQyhk","networking":{"containerNetworkCidr":"10.2 const metricsSystem = "cloudwatch" func TestWorker(t *testing.T) { - os.Setenv("AWS_ACCESS_KEY_ID", "your aws access key id") - os.Setenv("AWS_SECRET_ACCESS_KEY", "your aws secret access key") - defer os.Unsetenv("AWS_ACCESS_KEY_ID") - defer os.Unsetenv("AWS_SECRET_ACCESS_KEY") kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). From 2542ea141612ecb6df893a6b11b44962fdadc346 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 23 Apr 2018 12:40:39 -0700 Subject: [PATCH 15/90] KCL: Remove lease entry in dynamoDB table when shard no longer exists Need to remove lease entry in dynamodb table when shard has been removed by Kinesis. This happens when doing shard splitting and parent shard will be moved by Kinesis after its retention period (normally after 24 hours). 
Change-Id: I70a5836436ac0698110085d46d9438fcaf539cd2 --- .../clientlibrary/worker/checkpointer.go | 40 +++++++++++++++++++ .../clientlibrary/worker/worker.go | 4 ++ 2 files changed, 44 insertions(+) diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/checkpointer.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/checkpointer.go index ec279b2..3171fc6 100644 --- a/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/checkpointer.go +++ b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/checkpointer.go @@ -36,6 +36,7 @@ type Checkpointer interface { GetLease(*shardStatus, string) error CheckpointSequence(*shardStatus) error FetchCheckpoint(*shardStatus) error + RemoveLeaseInfo(string) error } // ErrSequenceIDNotFound is returned by FetchCheckpoint when no SequenceID is found @@ -203,6 +204,19 @@ func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *shardStatus) error return nil } +// RemoveLeaseInfo to remove lease info for shard entry in dynamoDB because the shard no longer exists in Kinesis +func (checkpointer *DynamoCheckpoint) RemoveLeaseInfo(shardID string) error { + err := checkpointer.removeItem(shardID) + + if err != nil { + log.Errorf("Error in removing lease info for shard: %s, Error: %+v", shardID, err) + } else { + log.Infof("Lease info for shard: %s has been removed.", shardID) + } + + return err +} + func (checkpointer *DynamoCheckpoint) createTable() error { input := &dynamodb.CreateTableInput{ AttributeDefinitions: []*dynamodb.AttributeDefinition{ @@ -292,3 +306,29 @@ func (checkpointer *DynamoCheckpoint) getItem(shardID string) (map[string]*dynam }) return item.Item, err } + +func (checkpointer *DynamoCheckpoint) removeItem(shardID string) error { + var item *dynamodb.DeleteItemOutput + err := try.Do(func(attempt int) (bool, error) { + var err error + item, err = checkpointer.svc.DeleteItem(&dynamodb.DeleteItemInput{ + TableName: aws.String(checkpointer.TableName), + Key: 
map[string]*dynamodb.AttributeValue{ + LEASE_KEY_KEY: { + S: aws.String(shardID), + }, + }, + }) + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException || + awsErr.Code() == dynamodb.ErrCodeInternalServerError && + attempt < checkpointer.Retries { + // Backoff time as recommended by https://docs.aws.amazon.com/general/latest/gr/api-retries.html + time.Sleep(time.Duration(2^attempt*100) * time.Millisecond) + return true, err + } + } + return false, err + }) + return err +} diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker.go b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker.go index a97573c..d4f9a13 100644 --- a/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker.go +++ b/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker.go @@ -320,7 +320,11 @@ func (w *Worker) syncShard() error { for _, shard := range w.shardStatus { // The cached shard no longer existed, remove it. if _, ok := shardInfo[shard.ID]; !ok { + // remove the shard from local status cache delete(w.shardStatus, shard.ID) + // remove the shard entry in dynamoDB as well + // Note: syncShard runs periodically. we don't need to do anything in case of error here. + w.checkpointer.RemoveLeaseInfo(shard.ID) } } From 2b9301cd4716a315c3b9a7e7900af027da00defa Mon Sep 17 00:00:00 2001 From: Long Zhou Date: Tue, 24 Apr 2018 10:07:57 -0700 Subject: [PATCH 16/90] Flatten directory structure cascade-kinesis-client will be used as a submodule of other projects, so it should not have "src/vmware.com/cascade-kinesis-client" in its path. To build this project locally, please manually create the parent folders. 
Change-Id: I8844e6a0e32aae65b28496915d8507e9fb1058c6 --- HyperMake | 8 ++++---- .../clientlibrary => clientlibrary}/common/errors.go | 0 .../clientlibrary => clientlibrary}/config/config.go | 0 .../clientlibrary => clientlibrary}/config/config_test.go | 0 .../config/initial-stream-pos.go | 0 .../clientlibrary => clientlibrary}/config/kcl-config.go | 0 .../clientlibrary => clientlibrary}/interfaces/inputs.go | 0 .../interfaces/record-processor-checkpointer.go | 0 .../interfaces/record-processor.go | 0 .../interfaces/sequence-number.go | 0 .../clientlibrary => clientlibrary}/metrics/cloudwatch.go | 0 .../clientlibrary => clientlibrary}/metrics/interfaces.go | 0 .../clientlibrary => clientlibrary}/metrics/prometheus.go | 0 .../clientlibrary => clientlibrary}/utils/random.go | 0 .../clientlibrary => clientlibrary}/utils/uuid.go | 0 .../worker/checkpointer.go | 0 .../worker/record-processor-checkpointer.go | 0 .../worker/shard-consumer.go | 0 .../clientlibrary => clientlibrary}/worker/worker.go | 0 .../clientlibrary => clientlibrary}/worker/worker_test.go | 0 {src/vendor => vendor}/manifest | 0 21 files changed, 4 insertions(+), 4 deletions(-) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/common/errors.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/config/config.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/config/config_test.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/config/initial-stream-pos.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/config/kcl-config.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/interfaces/inputs.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/interfaces/record-processor-checkpointer.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => 
clientlibrary}/interfaces/record-processor.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/interfaces/sequence-number.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/metrics/cloudwatch.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/metrics/interfaces.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/metrics/prometheus.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/utils/random.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/utils/uuid.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/worker/checkpointer.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/worker/record-processor-checkpointer.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/worker/shard-consumer.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/worker/worker.go (100%) rename {src/vmware.com/cascade-kinesis-client/clientlibrary => clientlibrary}/worker/worker_test.go (100%) rename {src/vendor => vendor}/manifest (100%) diff --git a/HyperMake b/HyperMake index b966419..03531b2 100644 --- a/HyperMake +++ b/HyperMake @@ -86,8 +86,8 @@ targets: after: - toolchain watches: - - src/vendor/manifest - workdir: src + - vendor/manifest + workdir: . 
cmds: - gvt restore @@ -95,5 +95,5 @@ settings: default-targets: - ci docker: - image: 'vmware/cascade-kcl-toolchain:latest' - src-volume: /home/cascade-kinesis-client + image: 'vmware/cascade-toolchain:0.0.9' + src-volume: /go/src/vmware.com/cascade-kinesis-client diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/common/errors.go b/clientlibrary/common/errors.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/common/errors.go rename to clientlibrary/common/errors.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/config/config.go b/clientlibrary/config/config.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/config/config.go rename to clientlibrary/config/config.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/config/config_test.go b/clientlibrary/config/config_test.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/config/config_test.go rename to clientlibrary/config/config_test.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/config/initial-stream-pos.go b/clientlibrary/config/initial-stream-pos.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/config/initial-stream-pos.go rename to clientlibrary/config/initial-stream-pos.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/config/kcl-config.go rename to clientlibrary/config/kcl-config.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/inputs.go b/clientlibrary/interfaces/inputs.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/inputs.go rename to clientlibrary/interfaces/inputs.go diff --git 
a/src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/record-processor-checkpointer.go b/clientlibrary/interfaces/record-processor-checkpointer.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/record-processor-checkpointer.go rename to clientlibrary/interfaces/record-processor-checkpointer.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/record-processor.go b/clientlibrary/interfaces/record-processor.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/record-processor.go rename to clientlibrary/interfaces/record-processor.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/sequence-number.go b/clientlibrary/interfaces/sequence-number.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/interfaces/sequence-number.go rename to clientlibrary/interfaces/sequence-number.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/cloudwatch.go b/clientlibrary/metrics/cloudwatch.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/cloudwatch.go rename to clientlibrary/metrics/cloudwatch.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/interfaces.go b/clientlibrary/metrics/interfaces.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/interfaces.go rename to clientlibrary/metrics/interfaces.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/prometheus.go b/clientlibrary/metrics/prometheus.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/metrics/prometheus.go rename to clientlibrary/metrics/prometheus.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/utils/random.go b/clientlibrary/utils/random.go similarity index 100% rename from 
src/vmware.com/cascade-kinesis-client/clientlibrary/utils/random.go rename to clientlibrary/utils/random.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/utils/uuid.go b/clientlibrary/utils/uuid.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/utils/uuid.go rename to clientlibrary/utils/uuid.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/checkpointer.go b/clientlibrary/worker/checkpointer.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/worker/checkpointer.go rename to clientlibrary/worker/checkpointer.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/record-processor-checkpointer.go b/clientlibrary/worker/record-processor-checkpointer.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/worker/record-processor-checkpointer.go rename to clientlibrary/worker/record-processor-checkpointer.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/worker/shard-consumer.go rename to clientlibrary/worker/shard-consumer.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker.go rename to clientlibrary/worker/worker.go diff --git a/src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker_test.go b/clientlibrary/worker/worker_test.go similarity index 100% rename from src/vmware.com/cascade-kinesis-client/clientlibrary/worker/worker_test.go rename to clientlibrary/worker/worker_test.go diff --git a/src/vendor/manifest b/vendor/manifest similarity index 100% rename from src/vendor/manifest rename to vendor/manifest From 85c04db6b484e6cbd2c1a946f0e1d5b1ea167ef9 Mon 
Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 21 May 2018 19:14:18 -0700 Subject: [PATCH 17/90] KCL: Fix the way in returning error Fix bug when doing shard sync which removing shard info. Jira ID: CNA-612 Change-Id: Ibaf55fffa39b793abbfe3bd57999e5d37f82a52f --- clientlibrary/worker/worker.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index d4f9a13..a71c3ae 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -317,6 +317,10 @@ func (w *Worker) syncShard() error { shardInfo := make(map[string]bool) err := w.getShardIDs("", shardInfo) + if err != nil { + return err + } + for _, shard := range w.shardStatus { // The cached shard no longer existed, remove it. if _, ok := shardInfo[shard.ID]; !ok { @@ -328,5 +332,5 @@ func (w *Worker) syncShard() error { } } - return err + return nil } From 3120d89ae87d3de4bd06f336cddded433248470f Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 21 May 2018 19:34:37 -0700 Subject: [PATCH 18/90] KCL: remove unused item Change-Id: I164a3e551331464a020e82ad305294e5a659ab6c --- support/scripts/functions.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/support/scripts/functions.sh b/support/scripts/functions.sh index 845e3c3..b7265ea 100644 --- a/support/scripts/functions.sh +++ b/support/scripts/functions.sh @@ -14,7 +14,6 @@ local_go_pkgs() { grep -Fv '/tmp/' | \ grep -Fv '/run/' | \ grep -Fv '/tests/' | \ - grep -Fv '/gokini/' | \ sed -r 's|(.+)/[^/]+\.go$|\1|g' | \ sort -u } From 48fd4dd51cd351ac8f797ff42da0b541ece73089 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Thu, 31 May 2018 05:26:49 -0700 Subject: [PATCH 19/90] KCL: remove panic in shard consumer There might be verious reason for shard iterator to expire, such as: not enough data in shard or process even takes more than 5 minutes which cause shard iterator not refreshing enough. This change removes log.Fatal which causes panic. 
Panic inside go routine will bring down the whole app. Therefore, just log error and exit the go routine instead. Jira ID: CNA-1072 Change-Id: I34a8d9af7258f3ea75465e2245bbc25c2fafee35 --- clientlibrary/worker/shard-consumer.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 9a76309..d238943 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -130,7 +130,10 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", shard.ID, sc.consumerID) return nil } - log.Fatal(err) + // log and return error + log.Errorf("Error in refreshing lease on shard: %s for worker: %s. Error: %+v", + shard.ID, sc.consumerID, err) + return err } } @@ -144,14 +147,15 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == kinesis.ErrCodeProvisionedThroughputExceededException || awsErr.Code() == ErrCodeKMSThrottlingException { - log.Errorf("Error getting records from shard %v: %v", shard.ID, err) + log.Errorf("Error getting records from shard %v: %+v", shard.ID, err) retriedErrors++ // exponential backoff time.Sleep(time.Duration(2^retriedErrors*100) * time.Millisecond) continue } } - log.Fatalf("Error getting records from Kinesis that cannot be retried: %s\nRequest: %s", err, getRecordsArgs) + log.Errorf("Error getting records from Kinesis that cannot be retried: %+v\nRequest: %s", err, getRecordsArgs) + return err } retriedErrors = 0 From e2a945d824043765a899686bcda428bc68c5ea5b Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 6 Aug 2018 20:49:15 -0700 Subject: [PATCH 20/90] KCL: Stuck on processing after kinesis shard splitting The processing Kinesis gets stuck after splitting shard. The reason is that the app doesn't do mandatory checkpoint. 
KCL document states: // When the value of {@link ShutdownInput#getShutdownReason()} is // {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you // checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress. Also, fix shard lease to prevent one host takes more shard than its configuration allowed. Jira CNA-1701 Change-Id: Icbdacaf347c7a67b5793647ad05ff93cca629741 --- clientlibrary/worker/checkpointer.go | 2 +- .../worker/record-processor-checkpointer.go | 2 +- clientlibrary/worker/worker.go | 19 +++++++++++-------- clientlibrary/worker/worker_test.go | 7 +++++++ 4 files changed, 20 insertions(+), 10 deletions(-) diff --git a/clientlibrary/worker/checkpointer.go b/clientlibrary/worker/checkpointer.go index 3171fc6..7107cd7 100644 --- a/clientlibrary/worker/checkpointer.go +++ b/clientlibrary/worker/checkpointer.go @@ -246,7 +246,7 @@ func (checkpointer *DynamoCheckpoint) doesTableExist() bool { TableName: aws.String(checkpointer.TableName), } _, err := checkpointer.svc.DescribeTable(input) - return (err == nil) + return err == nil } func (checkpointer *DynamoCheckpoint) saveItem(item map[string]*dynamodb.AttributeValue) error { diff --git a/clientlibrary/worker/record-processor-checkpointer.go b/clientlibrary/worker/record-processor-checkpointer.go index 94a090a..7d4cf8f 100644 --- a/clientlibrary/worker/record-processor-checkpointer.go +++ b/clientlibrary/worker/record-processor-checkpointer.go @@ -47,7 +47,7 @@ func (rc *RecordProcessorCheckpointer) Checkpoint(sequenceNumber *string) error rc.shard.mux.Lock() // checkpoint the last sequence of a closed shard - if rc.shard.EndingSequenceNumber == aws.StringValue(sequenceNumber) { + if sequenceNumber == nil { rc.shard.Checkpoint = SHARD_END } else { rc.shard.Checkpoint = aws.StringValue(sequenceNumber) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index a71c3ae..7fce564 100644 
--- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -146,7 +146,7 @@ func (w *Worker) initialize() error { err := w.metricsConfig.Init(w.kclConfig.ApplicationName, w.streamName, w.workerID) if err != nil { - log.Errorf("Failed to start monitoring service: %s", err) + log.Errorf("Failed to start monitoring service: %+v", err) } w.mService = w.metricsConfig.GetMonitoringService() @@ -195,9 +195,8 @@ func (w *Worker) eventLoop() { for { err := w.syncShard() if err != nil { - log.Errorf("Error getting Kinesis shards: %v", err) - // Back-off? - time.Sleep(500 * time.Millisecond) + log.Errorf("Error getting Kinesis shards: %+v", err) + time.Sleep(time.Duration(w.kclConfig.ShardSyncIntervalMillis) * time.Millisecond) } log.Infof("Found %d shards", len(w.shardStatus)) @@ -210,17 +209,17 @@ func (w *Worker) eventLoop() { } } - // max number of lease has not been reached + // max number of lease has not been reached yet if counter < w.kclConfig.MaxLeasesForWorker { for _, shard := range w.shardStatus { - // We already own this shard so carry on + // already owner of the shard if shard.getLeaseOwner() == w.workerID { continue } err := w.checkpointer.FetchCheckpoint(shard) if err != nil { - // checkpoint may not existed yet if not an error condition. + // checkpoint may not existed yet is not an error condition. if err != ErrSequenceIDNotFound { log.Error(err) // move on to next shard @@ -249,6 +248,8 @@ func (w *Worker) eventLoop() { sc := w.newShardConsumer(shard) go sc.getRecords(shard) w.waitGroup.Add(1) + // exit from for loop and not to grab more shard for now. 
+ break } } @@ -272,16 +273,18 @@ func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) err args := &kinesis.DescribeStreamInput{ StreamName: aws.String(w.streamName), } + if startShardID != "" { args.ExclusiveStartShardId = aws.String(startShardID) } + streamDesc, err := w.kc.DescribeStream(args) if err != nil { return err } if *streamDesc.StreamDescription.StreamStatus != "ACTIVE" { - return errors.New("Stream not active") + return errors.New("stream not active") } var lastShardID string diff --git a/clientlibrary/worker/worker_test.go b/clientlibrary/worker/worker_test.go index bdabb54..380a5f7 100644 --- a/clientlibrary/worker/worker_test.go +++ b/clientlibrary/worker/worker_test.go @@ -154,4 +154,11 @@ func (dd *dumpRecordProcessor) ProcessRecords(input *kc.ProcessRecordsInput) { func (dd *dumpRecordProcessor) Shutdown(input *kc.ShutdownInput) { dd.t.Logf("Shutdown Reason: %v", aws.StringValue(kc.ShutdownReasonMessage(input.ShutdownReason))) + + // When the value of {@link ShutdownInput#getShutdownReason()} is + // {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you + // checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress. + if input.ShutdownReason == kc.TERMINATE { + input.Checkpointer.Checkpoint(nil) + } } From 47daa9d5f067a85115fbfdf99bd5adbe0000ec7f Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Fri, 17 Aug 2018 06:03:25 -0700 Subject: [PATCH 21/90] KCL: Update copyright and permission All source should be prepared in a manner that reflects comments that VMware would be comfortable sharing with the public. Documentation only. No functional change. 
Update the license to MIT to be consistent with approved OSSTP product tracking ticket: https://osstp.vmware.com/oss/#/upstreamcontrib/project/1101391 Jira CNA-1117 Change-Id: I3fe31f10db954887481e3b21ccd20ec8e39c5996 --- LICENSE | 21 +++++++++ README.md | 6 --- clientlibrary/common/errors.go | 18 ++++++++ clientlibrary/config/config.go | 44 ++++++++++++++++--- clientlibrary/config/config_test.go | 18 ++++++++ clientlibrary/config/initial-stream-pos.go | 33 ++++++++++++++ clientlibrary/config/kcl-config.go | 33 ++++++++++++++ clientlibrary/interfaces/inputs.go | 33 ++++++++++++++ .../record-processor-checkpointer.go | 33 ++++++++++++++ clientlibrary/interfaces/record-processor.go | 33 ++++++++++++++ clientlibrary/interfaces/sequence-number.go | 33 ++++++++++++++ clientlibrary/metrics/cloudwatch.go | 27 ++++++++++++ clientlibrary/metrics/interfaces.go | 27 ++++++++++++ clientlibrary/metrics/prometheus.go | 27 ++++++++++++ clientlibrary/utils/random.go | 18 ++++++++ clientlibrary/utils/uuid.go | 18 ++++++++ clientlibrary/worker/checkpointer.go | 27 ++++++++++++ .../worker/record-processor-checkpointer.go | 18 ++++++++ clientlibrary/worker/shard-consumer.go | 27 ++++++++++++ clientlibrary/worker/worker.go | 28 ++++++++++++ clientlibrary/worker/worker_test.go | 18 ++++++++ 21 files changed, 529 insertions(+), 11 deletions(-) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..3e782c5 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 VMware, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/README.md b/README.md index 57cd94a..9db664b 100644 --- a/README.md +++ b/README.md @@ -27,9 +27,3 @@ as an open-source patch. For more detailed information, refer to [CONTRIBUTING.m ## License -======= -# Cascade Kinesis Client Library for GO - -The **Cascade Kinesis Client Library for GO** (Cascade KCL) enables Go developers to easily consume and process data from [Amazon Kinesis][kinesis]. - -It is a re-implementation on Amazon's Kinesis Client Library in pure Go without using KCL's multi-language support. diff --git a/clientlibrary/common/errors.go b/clientlibrary/common/errors.go index 1a366c1..b5db8ea 100644 --- a/clientlibrary/common/errors.go +++ b/clientlibrary/common/errors.go @@ -1,3 +1,21 @@ +/* + * Copyright (c) 2018 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ package common import ( diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index eef22b6..0e3926a 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -1,3 +1,36 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client +/* + * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ package config import ( @@ -30,7 +63,8 @@ const ( // Max records to fetch from Kinesis in a single GetRecords call. DEFAULT_MAX_RECORDS = 10000 - // The default value for how long the {@link ShardConsumer} should sleep if no records are returned from the call to + // The default value for how long the {@link ShardConsumer} should sleep if no records are returned + // from the call to DEFAULT_IDLETIME_BETWEEN_READS_MILLIS = 1000 // Don't call processRecords() on the record processor for empty record lists. @@ -45,8 +79,8 @@ const ( DEFAULT_SHARD_SYNC_INTERVAL_MILLIS = 60000 // Cleanup leases upon shards completion (don't wait until they expire in Kinesis). - // Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by default we try - // to delete the ones we don't need any longer. 
+ // Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by + // default we try to delete the ones we don't need any longer. DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION = true // Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures). @@ -58,8 +92,8 @@ const ( // Buffer at most this many metrics before publishing to CloudWatch. DEFAULT_METRICS_MAX_QUEUE_SIZE = 10000 - // KCL will validate client provided sequence numbers with a call to Amazon Kinesis before checkpointing for calls - // to {@link RecordProcessorCheckpointer#checkpoint(String)} by default. + // KCL will validate client provided sequence numbers with a call to Amazon Kinesis before + // checkpointing for calls to {@link RecordProcessorCheckpointer#checkpoint(String)} by default. DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING = true // The max number of leases (shards) this worker should process. diff --git a/clientlibrary/config/config_test.go b/clientlibrary/config/config_test.go index 88bc75d..466d6b0 100644 --- a/clientlibrary/config/config_test.go +++ b/clientlibrary/config/config_test.go @@ -1,3 +1,21 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ package config import ( diff --git a/clientlibrary/config/initial-stream-pos.go b/clientlibrary/config/initial-stream-pos.go index 20ecbfc..2169812 100644 --- a/clientlibrary/config/initial-stream-pos.go +++ b/clientlibrary/config/initial-stream-pos.go @@ -1,3 +1,36 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client +/* + * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ package config import ( diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index 1ac3f7f..4455c6c 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -1,3 +1,36 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client +/* + * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ package config import ( diff --git a/clientlibrary/interfaces/inputs.go b/clientlibrary/interfaces/inputs.go index 8f7590d..84bf03f 100644 --- a/clientlibrary/interfaces/inputs.go +++ b/clientlibrary/interfaces/inputs.go @@ -1,3 +1,36 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client +/* + * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ package interfaces import ( diff --git a/clientlibrary/interfaces/record-processor-checkpointer.go b/clientlibrary/interfaces/record-processor-checkpointer.go index ffea0e8..b4133d3 100644 --- a/clientlibrary/interfaces/record-processor-checkpointer.go +++ b/clientlibrary/interfaces/record-processor-checkpointer.go @@ -1,3 +1,36 @@ +/* + * Copyright (c) 2018 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client +/* + * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ package interfaces type ( diff --git a/clientlibrary/interfaces/record-processor.go b/clientlibrary/interfaces/record-processor.go index d64414c..766f79a 100644 --- a/clientlibrary/interfaces/record-processor.go +++ b/clientlibrary/interfaces/record-processor.go @@ -1,3 +1,36 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client +/* + * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ package interfaces type ( diff --git a/clientlibrary/interfaces/sequence-number.go b/clientlibrary/interfaces/sequence-number.go index f9c01ad..db91b9b 100644 --- a/clientlibrary/interfaces/sequence-number.go +++ b/clientlibrary/interfaces/sequence-number.go @@ -1,3 +1,36 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/awslabs/amazon-kinesis-client +/* + * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ package interfaces // ExtendedSequenceNumber represents a two-part sequence number for records aggregated by the Kinesis Producer Library. diff --git a/clientlibrary/metrics/cloudwatch.go b/clientlibrary/metrics/cloudwatch.go index 6724e07..5da25bb 100644 --- a/clientlibrary/metrics/cloudwatch.go +++ b/clientlibrary/metrics/cloudwatch.go @@ -1,3 +1,30 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +// The implementation is derived from https://github.com/patrobinson/gokini +// +// Copyright 2018 Patrick robinson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. package metrics import ( diff --git a/clientlibrary/metrics/interfaces.go b/clientlibrary/metrics/interfaces.go index 809089e..41ef053 100644 --- a/clientlibrary/metrics/interfaces.go +++ b/clientlibrary/metrics/interfaces.go @@ -1,3 +1,30 @@ +/* + * Copyright (c) 2018 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/patrobinson/gokini +// +// Copyright 2018 Patrick robinson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. package metrics import ( diff --git a/clientlibrary/metrics/prometheus.go b/clientlibrary/metrics/prometheus.go index bdf3ab0..81c08ce 100644 --- a/clientlibrary/metrics/prometheus.go +++ b/clientlibrary/metrics/prometheus.go @@ -1,3 +1,30 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +// The implementation is derived from https://github.com/patrobinson/gokini +// +// Copyright 2018 Patrick robinson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. package metrics import ( diff --git a/clientlibrary/utils/random.go b/clientlibrary/utils/random.go index ea0299a..4a3059a 100644 --- a/clientlibrary/utils/random.go +++ b/clientlibrary/utils/random.go @@ -1,3 +1,21 @@ +/* + * Copyright (c) 2018 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ package utils import ( diff --git a/clientlibrary/utils/uuid.go b/clientlibrary/utils/uuid.go index 64883b8..e36d8bb 100644 --- a/clientlibrary/utils/uuid.go +++ b/clientlibrary/utils/uuid.go @@ -1,3 +1,21 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ package utils import ( diff --git a/clientlibrary/worker/checkpointer.go b/clientlibrary/worker/checkpointer.go index 7107cd7..7513af6 100644 --- a/clientlibrary/worker/checkpointer.go +++ b/clientlibrary/worker/checkpointer.go @@ -1,3 +1,30 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +// The implementation is derived from https://github.com/patrobinson/gokini +// +// Copyright 2018 Patrick robinson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. package worker import ( diff --git a/clientlibrary/worker/record-processor-checkpointer.go b/clientlibrary/worker/record-processor-checkpointer.go index 7d4cf8f..168a41d 100644 --- a/clientlibrary/worker/record-processor-checkpointer.go +++ b/clientlibrary/worker/record-processor-checkpointer.go @@ -1,3 +1,21 @@ +/* + * Copyright (c) 2018 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ package worker import ( diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index d238943..5666da8 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -1,3 +1,30 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/patrobinson/gokini +// +// Copyright 2018 Patrick robinson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. package worker import ( diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 7fce564..0ab2a97 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -1,3 +1,30 @@ +/* + * Copyright (c) 2018 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/patrobinson/gokini +// +// Copyright 2018 Patrick robinson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. package worker import ( @@ -197,6 +224,7 @@ func (w *Worker) eventLoop() { if err != nil { log.Errorf("Error getting Kinesis shards: %+v", err) time.Sleep(time.Duration(w.kclConfig.ShardSyncIntervalMillis) * time.Millisecond) + continue } log.Infof("Found %d shards", len(w.shardStatus)) diff --git a/clientlibrary/worker/worker_test.go b/clientlibrary/worker/worker_test.go index 380a5f7..8f559cc 100644 --- a/clientlibrary/worker/worker_test.go +++ b/clientlibrary/worker/worker_test.go @@ -1,3 +1,21 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ package worker import ( From 22de13ef8a15dcad767525bc996995ca57796896 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 27 Aug 2018 12:23:20 -0700 Subject: [PATCH 22/90] Go-KCL: Update security scan gas is now gosec. Need to update security scan and fix security issue as needed. No functional change. Jira CNA-2022 Change-Id: I36f2a204114f3f13e2ed05579c04a9c89f528f9a --- HyperMake | 3 ++- clientlibrary/metrics/cloudwatch.go | 7 ++++--- clientlibrary/worker/shard-consumer.go | 2 +- clientlibrary/worker/worker.go | 15 ++++++++++++--- support/scripts/check.sh | 2 +- support/toolchain/HyperMake | 10 +++++----- support/toolchain/docker/Dockerfile | 2 +- 7 files changed, 26 insertions(+), 15 deletions(-) diff --git a/HyperMake b/HyperMake index 03531b2..e006d3b 100644 --- a/HyperMake +++ b/HyperMake @@ -80,6 +80,7 @@ targets: after: - checkfmt - lint + - scanast deps-kcl: description: populate vendor packages @@ -95,5 +96,5 @@ settings: default-targets: - ci docker: - image: 'vmware/cascade-toolchain:0.0.9' + image: 'vmware/go-kcl-toolchain:latest' src-volume: /go/src/vmware.com/cascade-kinesis-client diff --git a/clientlibrary/metrics/cloudwatch.go b/clientlibrary/metrics/cloudwatch.go index 5da25bb..1a157f4 100644 --- a/clientlibrary/metrics/cloudwatch.go +++ b/clientlibrary/metrics/cloudwatch.go @@ -97,15 +97,16 @@ func (cw *CloudWatchMonitoringService) eventloop() { defer cw.waitGroup.Done() for { - err := cw.flush() - if err != nil { + if err := cw.flush(); err != nil { log.Errorf("Error sending metrics to CloudWatch. %+v", err) } select { case <-*cw.stop: log.Info("Shutting down monitoring system") - cw.flush() + if err := cw.flush(); err != nil { + log.Errorf("Error sending metrics to CloudWatch. 
%+v", err) + } return case <-time.After(time.Duration(cw.MetricsBufferTimeMillis) * time.Millisecond): } diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 5666da8..9b60609 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -181,7 +181,7 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { continue } } - log.Errorf("Error getting records from Kinesis that cannot be retried: %+v\nRequest: %s", err, getRecordsArgs) + log.Errorf("Error getting records from Kinesis that cannot be retried: %+v Request: %s", err, getRecordsArgs) return err } retriedErrors = 0 diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 0ab2a97..f430b8d 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -135,7 +135,10 @@ func (w *Worker) Start() error { // Start monitoring service log.Info("Starting monitoring service.") - w.mService.Start() + if err := w.mService.Start(); err != nil { + log.Errorf("Failed to start monitoring service: %+v", err) + return err + } log.Info("Starting worker event loop.") // entering event loop @@ -249,7 +252,7 @@ func (w *Worker) eventLoop() { if err != nil { // checkpoint may not existed yet is not an error condition. 
if err != ErrSequenceIDNotFound { - log.Error(err) + log.Errorf(" Error: %+v", err) // move on to next shard continue } @@ -308,10 +311,12 @@ func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) err streamDesc, err := w.kc.DescribeStream(args) if err != nil { + log.Errorf("Error in DescribeStream: %s Error: %+v Request: %s", w.streamName, err, args) return err } if *streamDesc.StreamDescription.StreamStatus != "ACTIVE" { + log.Warnf("Stream %s is not active", w.streamName) return errors.New("stream not active") } @@ -319,6 +324,7 @@ func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) err for _, s := range streamDesc.StreamDescription.Shards { // record avail shardId from fresh reading from Kinesis shardInfo[*s.ShardId] = true + // found new shard if _, ok := w.shardStatus[*s.ShardId]; !ok { log.Debugf("Found shard with id %s", *s.ShardId) @@ -336,6 +342,7 @@ func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) err if *streamDesc.StreamDescription.HasMoreShards { err := w.getShardIDs(lastShardID, shardInfo) if err != nil { + log.Errorf("Error in getShardIDs: %s Error: %+v", lastShardID, err) return err } } @@ -359,7 +366,9 @@ func (w *Worker) syncShard() error { delete(w.shardStatus, shard.ID) // remove the shard entry in dynamoDB as well // Note: syncShard runs periodically. we don't need to do anything in case of error here. - w.checkpointer.RemoveLeaseInfo(shard.ID) + if err := w.checkpointer.RemoveLeaseInfo(shard.ID); err != nil { + log.Errorf("Failed to remove shard lease info: %s Error: %+v", shard.ID, err) + } } } diff --git a/support/scripts/check.sh b/support/scripts/check.sh index 539bdf9..6126dc0 100755 --- a/support/scripts/check.sh +++ b/support/scripts/check.sh @@ -27,7 +27,7 @@ lint() { scanast() { set +e - gas ./... > security.log 2>&1 + gosec ./... 
> security.log 2>&1 set -e local issues=$(grep -E "Severity: MEDIUM" security.log | wc -l) diff --git a/support/toolchain/HyperMake b/support/toolchain/HyperMake index 708ccf3..40e36e7 100644 --- a/support/toolchain/HyperMake +++ b/support/toolchain/HyperMake @@ -1,8 +1,8 @@ --- format: hypermake.v0 -name: cascade-kcl -description: Amazon Kinesis Client Library in Go +name: go-kcl +description: VMWare Go-KCL Amazon Kinesis Client Library in Go targets: rebuild-toolchain: @@ -12,17 +12,17 @@ targets: build: docker cache: false tags: - - vmware/cascade-kcl-toolchain:latest + - vmware/go-kcl-toolchain:latest push-toolchain: description: push toolchain image after: - rebuild-toolchain push: - - vmware/cascade-toolchain:latest + - vmware/go-kcl-toolchain:latest settings: default-targets: - rebuild-toolchain docker: - image: 'vmware/cascade-kcl-toolchain:0.0.0' + image: 'vmware/go-kcl-toolchain:0.1.0' diff --git a/support/toolchain/docker/Dockerfile b/support/toolchain/docker/Dockerfile index 764f92f..5c36811 100644 --- a/support/toolchain/docker/Dockerfile +++ b/support/toolchain/docker/Dockerfile @@ -4,7 +4,7 @@ ENV GOPATH /go:/src RUN go get -v github.com/alecthomas/gometalinter && \ go get -v golang.org/x/tools/cmd/... && \ go get -v github.com/FiloSottile/gvt && \ - go get -v github.com/GoASTScanner/gas/cmd/gas/... && \ + go get github.com/securego/gosec/cmd/gosec/... && \ go get github.com/derekparker/delve/cmd/dlv && \ gometalinter --install && \ chmod -R a+rw /go \ No newline at end of file From 9addbb57f0554e64cfcb0cb258979bc1fe983de6 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Tue, 4 Sep 2018 09:09:06 -0700 Subject: [PATCH 23/90] KCL: Fix random number generator Fix the random number generator by adding seed. 
https://stackoverflow.com/questions/12321133/golang-random-number-generator-how-to-seed-properly Jira CNA-1119 Change-Id: Idfe23d84f31a47dcf43c8025632ff6f115614d34 --- clientlibrary/utils/random.go | 2 ++ clientlibrary/utils/random_test.go | 50 ++++++++++++++++++++++++++++ clientlibrary/worker/checkpointer.go | 3 +- 3 files changed, 53 insertions(+), 2 deletions(-) create mode 100644 clientlibrary/utils/random_test.go diff --git a/clientlibrary/utils/random.go b/clientlibrary/utils/random.go index 4a3059a..ef9dbc4 100644 --- a/clientlibrary/utils/random.go +++ b/clientlibrary/utils/random.go @@ -20,6 +20,7 @@ package utils import ( "math/rand" + "time" ) const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" @@ -31,6 +32,7 @@ const ( func RandStringBytesMaskImpr(n int) string { b := make([]byte, n) + rand.Seed(time.Now().UTC().UnixNano()) // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! for i, cache, remain := n-1, rand.Int63(), letterIdxMax; i >= 0; { if remain == 0 { diff --git a/clientlibrary/utils/random_test.go b/clientlibrary/utils/random_test.go new file mode 100644 index 0000000..c63b21b --- /dev/null +++ b/clientlibrary/utils/random_test.go @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package utils + +import ( + "fmt" + "math/rand" + "testing" + "time" +) + +func TestRandom(t *testing.T) { + for i := 0; i < 10; i++ { + s1 := RandStringBytesMaskImpr(10) + s2 := RandStringBytesMaskImpr(10) + if s1 == s2 { + t.Fatalf("failed in generating random string. s1: %s, s2: %s", s1, s2) + } + } +} + +func TestRandomNum(t *testing.T) { + rand.Seed(time.Now().UTC().UnixNano()) + + for i := 0; i < 10; i++ { + s1 := rand.Int63() + s2 := rand.Int63() + if s1 == s2 { + t.Fatalf("failed in generating random string. 
s1: %d, s2: %d", s1, s2) + } + fmt.Println(s1) + fmt.Println(s2) + } +} diff --git a/clientlibrary/worker/checkpointer.go b/clientlibrary/worker/checkpointer.go index 7513af6..ea0a130 100644 --- a/clientlibrary/worker/checkpointer.go +++ b/clientlibrary/worker/checkpointer.go @@ -335,10 +335,9 @@ func (checkpointer *DynamoCheckpoint) getItem(shardID string) (map[string]*dynam } func (checkpointer *DynamoCheckpoint) removeItem(shardID string) error { - var item *dynamodb.DeleteItemOutput err := try.Do(func(attempt int) (bool, error) { var err error - item, err = checkpointer.svc.DeleteItem(&dynamodb.DeleteItemInput{ + _, err = checkpointer.svc.DeleteItem(&dynamodb.DeleteItemInput{ TableName: aws.String(checkpointer.TableName), Key: map[string]*dynamodb.AttributeValue{ LEASE_KEY_KEY: { From 3163d31f2872400894f88bae934bc969c62f24fb Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Tue, 4 Sep 2018 14:40:46 -0700 Subject: [PATCH 24/90] KCL: KCL should ignore deleted parent shard After a few days of shard splitting, the parent shard will be deleted by Kinesis system. KCL should ignore the error caused by deleted parent shared and move on. Test: Manuall split shard on kcl-test stream in photon-infra account Currently, shard3 is the parent shard of shard 4 and 5. Shard 3 has a parent shard 0 which has been deleted already. Verified the test can run and not stuck in waiting for parent shard. Jira CNA-2089 Change-Id: I15ed0db70ff9836313c22ccabf934a2a69379248 --- clientlibrary/worker/shard-consumer.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 9b60609..a7f6fa0 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -125,8 +125,11 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { // If the shard is child shard, need to wait until the parent finished. 
if err := sc.waitOnParentShard(shard); err != nil { - log.Errorf("Error in waiting for parent shard: %v to finish. Error: %+v", shard.ParentShardId, err) - return err + // If parent shard has been deleted by Kinesis system already, just ignore the error. + if err != ErrSequenceIDNotFound { + log.Errorf("Error in waiting for parent shard: %v to finish. Error: %+v", shard.ParentShardId, err) + return err + } } shardIterator, err := sc.getShardIterator(shard) From 10e8ebb3ffc2e378c810129645264d21835d2ccb Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Tue, 4 Sep 2018 20:32:45 -0700 Subject: [PATCH 25/90] KCL: Fix KCL stops processing when Kinesis Internal Error Current, KCL doesn't release shard when returning on error which causes the worker cannot get any shard because it has the maximum number of shard already. This change makes sure releasing shard when return. update the log message. Test: Integration test by forcing error on reading shard to simulate Kinesis Internal error and make sure the KCL will not stop processing. Jira CNA-1995 Change-Id: Iac91579634a5023ab5ed73c6af89e4ff1a9af564 --- clientlibrary/worker/shard-consumer.go | 16 ++++++++++++---- clientlibrary/worker/worker.go | 6 +++--- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index a7f6fa0..30ad07e 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -120,8 +120,11 @@ func (sc *ShardConsumer) getShardIterator(shard *shardStatus) (*string, error) { return iterResp.ShardIterator, nil } +// getRecords continously poll one shard for data record +// Precondition: it currently has the lease on the shard. func (sc *ShardConsumer) getRecords(shard *shardStatus) error { defer sc.waitGroup.Done() + defer sc.releaseLease(shard) // If the shard is child shard, need to wait until the parent finished. 
if err := sc.waitOnParentShard(shard); err != nil { @@ -146,17 +149,15 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { sc.recordProcessor.Initialize(input) recordCheckpointer := NewRecordProcessorCheckpoint(shard, sc.checkpointer) - var retriedErrors int for { + retriedErrors := 0 getRecordsStartTime := time.Now() if time.Now().UTC().After(shard.LeaseTimeout.Add(-5 * time.Second)) { log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID) err = sc.checkpointer.GetLease(shard, sc.consumerID) if err != nil { if err.Error() == ErrLeaseNotAquired { - shard.setLeaseOwner("") - sc.mService.LeaseLost(shard.ID) log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", shard.ID, sc.consumerID) return nil } @@ -187,7 +188,6 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { log.Errorf("Error getting records from Kinesis that cannot be retried: %+v Request: %s", err, getRecordsArgs) return err } - retriedErrors = 0 // IRecordProcessorCheckpointer input := &kcl.ProcessRecordsInput{ @@ -273,3 +273,11 @@ func (sc *ShardConsumer) waitOnParentShard(shard *shardStatus) error { time.Sleep(time.Duration(sc.kclConfig.ParentShardPollIntervalMillis) * time.Millisecond) } } + +// Cleanup the internal lease cache +func (sc *ShardConsumer) releaseLease(shard *shardStatus) { + log.Infof("Release lease for shard %s", shard.ID) + shard.setLeaseOwner("") + // reporting lease lose metrics + sc.mService.LeaseLost(shard.ID) +} diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index f430b8d..1a7db33 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -232,10 +232,10 @@ func (w *Worker) eventLoop() { log.Infof("Found %d shards", len(w.shardStatus)) - // Count the number of leases hold by this worker + // Count the number of leases hold by this worker excluding the processed shard counter := 0 for _, shard := range w.shardStatus { - if shard.getLeaseOwner() == w.workerID 
{ + if shard.getLeaseOwner() == w.workerID && shard.Checkpoint != SHARD_END { counter++ } } @@ -327,7 +327,7 @@ func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) err // found new shard if _, ok := w.shardStatus[*s.ShardId]; !ok { - log.Debugf("Found shard with id %s", *s.ShardId) + log.Infof("Found new shard with id %s", *s.ShardId) w.shardStatus[*s.ShardId] = &shardStatus{ ID: *s.ShardId, ParentShardId: aws.StringValue(s.ParentShardId), From d13f8588a960a8264f0f2d36fefe09c95e8a8871 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Fri, 7 Sep 2018 08:44:37 -0700 Subject: [PATCH 26/90] KCL: Update readme Update the readme and contributing doc before publishing to github repo. https://github.com/vmware/vmware-go-kcl Jira CNA-2036 Change-Id: Idd8cfd8c89d3202613ff1d3018a584945ad30e4a --- README.md | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/README.md b/README.md index 9db664b..a0ce0da 100644 --- a/README.md +++ b/README.md @@ -27,3 +27,40 @@ as an open-source patch. For more detailed information, refer to [CONTRIBUTING.m ## License +### Build & Run + +```sh +hmake + +# security scan +hmake scanast + +# run test +hmake check + +# run integration test +# update the worker_test.go to let it point to your Kinesis stream +hmake test +``` + +## Documentation + +VMware-Go-KCL matches exactly the same interface and programming model from original Amazon KCL, the best place for getting reference, tutorial is from Amazon itself: + +- [Developing Consumers Using the Kinesis Client Library](https://docs.aws.amazon.com/streams/latest/dev/developing-consumers-with-kcl.html) +- [Troubleshooting](https://docs.aws.amazon.com/streams/latest/dev/troubleshooting-consumers.html) +- [Advanced Topics](https://docs.aws.amazon.com/streams/latest/dev/advanced-consumers.html) + +## Releases & Major Branches + + +## Contributing + +The vmware-go-kcl project team welcomes contributions from the community. 
If you wish to contribute code and you have not +signed our contributor license agreement (CLA), our bot will update the issue when you open a Pull Request. For any +questions about the CLA process, please refer to our [FAQ](https://cla.vmware.com/faq). For more detailed information, +refer to [CONTRIBUTING.md](CONTRIBUTING.md). + +## License + +MIT License From d6b5196b555e01c16e737c1814b6cc6bec121908 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Sat, 8 Sep 2018 09:46:02 -0700 Subject: [PATCH 27/90] Update import path when switching github Update import path in files when switching to github. --- HyperMake | 2 +- clientlibrary/config/kcl-config.go | 2 +- clientlibrary/worker/checkpointer.go | 2 +- clientlibrary/worker/record-processor-checkpointer.go | 2 +- clientlibrary/worker/shard-consumer.go | 6 +++--- clientlibrary/worker/worker.go | 6 +++--- clientlibrary/worker/worker_test.go | 8 ++++---- 7 files changed, 14 insertions(+), 14 deletions(-) diff --git a/HyperMake b/HyperMake index e006d3b..d8d0fd0 100644 --- a/HyperMake +++ b/HyperMake @@ -97,4 +97,4 @@ settings: - ci docker: image: 'vmware/go-kcl-toolchain:latest' - src-volume: /go/src/vmware.com/cascade-kinesis-client + src-volume: /go/src/github.com/vmware/vmware-go-kcl diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index 4455c6c..6c78a2a 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -34,8 +34,8 @@ package config import ( + "github.com/vmware/vmware-go-kcl/clientlibrary/utils" "time" - "vmware.com/cascade-kinesis-client/clientlibrary/utils" ) // NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. 
diff --git a/clientlibrary/worker/checkpointer.go b/clientlibrary/worker/checkpointer.go index ea0a130..30083b2 100644 --- a/clientlibrary/worker/checkpointer.go +++ b/clientlibrary/worker/checkpointer.go @@ -38,7 +38,7 @@ import ( "github.com/matryer/try" log "github.com/sirupsen/logrus" - "vmware.com/cascade-kinesis-client/clientlibrary/config" + "github.com/vmware/vmware-go-kcl/clientlibrary/config" ) const ( diff --git a/clientlibrary/worker/record-processor-checkpointer.go b/clientlibrary/worker/record-processor-checkpointer.go index 168a41d..0562a3c 100644 --- a/clientlibrary/worker/record-processor-checkpointer.go +++ b/clientlibrary/worker/record-processor-checkpointer.go @@ -21,7 +21,7 @@ package worker import ( "github.com/aws/aws-sdk-go/aws" - kcl "vmware.com/cascade-kinesis-client/clientlibrary/interfaces" + kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" ) type ( diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 30ad07e..1326cb1 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -37,9 +37,9 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" - "vmware.com/cascade-kinesis-client/clientlibrary/config" - kcl "vmware.com/cascade-kinesis-client/clientlibrary/interfaces" - "vmware.com/cascade-kinesis-client/clientlibrary/metrics" + "github.com/vmware/vmware-go-kcl/clientlibrary/config" + kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" ) const ( diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 1a7db33..e8a8a12 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -43,9 +43,9 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" - "vmware.com/cascade-kinesis-client/clientlibrary/config" - kcl 
"vmware.com/cascade-kinesis-client/clientlibrary/interfaces" - "vmware.com/cascade-kinesis-client/clientlibrary/metrics" + "github.com/vmware/vmware-go-kcl/clientlibrary/config" + kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" ) type shardStatus struct { diff --git a/clientlibrary/worker/worker_test.go b/clientlibrary/worker/worker_test.go index 8f559cc..4a9598d 100644 --- a/clientlibrary/worker/worker_test.go +++ b/clientlibrary/worker/worker_test.go @@ -29,10 +29,10 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - cfg "vmware.com/cascade-kinesis-client/clientlibrary/config" - kc "vmware.com/cascade-kinesis-client/clientlibrary/interfaces" - "vmware.com/cascade-kinesis-client/clientlibrary/metrics" - "vmware.com/cascade-kinesis-client/clientlibrary/utils" + cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" + kc "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" + "github.com/vmware/vmware-go-kcl/clientlibrary/utils" ) const ( From 691082e28409f1915e26f2a1541160627d886b48 Mon Sep 17 00:00:00 2001 From: VMware GitHub Bot Date: Tue, 11 Sep 2018 23:02:06 -0500 Subject: [PATCH 28/90] Add DCO text --- README.md | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/README.md b/README.md index a0ce0da..920a975 100644 --- a/README.md +++ b/README.md @@ -51,15 +51,10 @@ VMware-Go-KCL matches exactly the same interface and programming model from orig - [Troubleshooting](https://docs.aws.amazon.com/streams/latest/dev/troubleshooting-consumers.html) - [Advanced Topics](https://docs.aws.amazon.com/streams/latest/dev/advanced-consumers.html) -## Releases & Major Branches - ## Contributing -The vmware-go-kcl project team welcomes contributions from the community. 
If you wish to contribute code and you have not -signed our contributor license agreement (CLA), our bot will update the issue when you open a Pull Request. For any -questions about the CLA process, please refer to our [FAQ](https://cla.vmware.com/faq). For more detailed information, -refer to [CONTRIBUTING.md](CONTRIBUTING.md). +The vmware-go-kcl project team welcomes contributions from the community. Before you start working with vmware-go-kcl, please read our [Developer Certificate of Origin](https://cla.vmware.com/dco). All contributions to this repository must be signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on as an open-source patch. For more detailed information, refer to [CONTRIBUTING.md](CONTRIBUTING.md). ## License From 2d5d506659af8b48617a5b6067a672dc25cfa6a3 Mon Sep 17 00:00:00 2001 From: VMware GitHub Bot Date: Tue, 11 Sep 2018 23:03:25 -0500 Subject: [PATCH 29/90] Add DCO text --- CONTRIBUTING.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index abec9ed..befab67 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -63,8 +63,6 @@ git push --force-with-lease origin my-new-feature Be sure to add a comment to the PR indicating your new changes are ready to review, as GitHub does not generate a notification when you git push. -### Code Style - ### Formatting Commit Messages We follow the conventions on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/). From 6a1a7b7da6a0a85ab2e3febd3861875c27f44b8a Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Tue, 30 Oct 2018 12:41:45 -0700 Subject: [PATCH 30/90] Fix the exponential backoff Fix the calculation of exponential backoff. ^ is the XOR in golang. Replaced it with math.exp2(). 
--- clientlibrary/worker/checkpointer.go | 7 ++++--- clientlibrary/worker/shard-consumer.go | 9 +++++++-- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/clientlibrary/worker/checkpointer.go b/clientlibrary/worker/checkpointer.go index 30083b2..1361f78 100644 --- a/clientlibrary/worker/checkpointer.go +++ b/clientlibrary/worker/checkpointer.go @@ -29,6 +29,7 @@ package worker import ( "errors" + "math" "time" "github.com/aws/aws-sdk-go/aws" @@ -300,7 +301,7 @@ func (checkpointer *DynamoCheckpoint) putItem(input *dynamodb.PutItemInput) erro awsErr.Code() == dynamodb.ErrCodeInternalServerError && attempt < checkpointer.Retries { // Backoff time as recommended by https://docs.aws.amazon.com/general/latest/gr/api-retries.html - time.Sleep(time.Duration(2^attempt*100) * time.Millisecond) + time.Sleep(time.Duration(math.Exp2(float64(attempt))*100) * time.Millisecond) return true, err } } @@ -325,7 +326,7 @@ func (checkpointer *DynamoCheckpoint) getItem(shardID string) (map[string]*dynam awsErr.Code() == dynamodb.ErrCodeInternalServerError && attempt < checkpointer.Retries { // Backoff time as recommended by https://docs.aws.amazon.com/general/latest/gr/api-retries.html - time.Sleep(time.Duration(2^attempt*100) * time.Millisecond) + time.Sleep(time.Duration(math.Exp2(float64(attempt))*100) * time.Millisecond) return true, err } } @@ -350,7 +351,7 @@ func (checkpointer *DynamoCheckpoint) removeItem(shardID string) error { awsErr.Code() == dynamodb.ErrCodeInternalServerError && attempt < checkpointer.Retries { // Backoff time as recommended by https://docs.aws.amazon.com/general/latest/gr/api-retries.html - time.Sleep(time.Duration(2^attempt*100) * time.Millisecond) + time.Sleep(time.Duration(math.Exp2(float64(attempt))*100) * time.Millisecond) return true, err } } diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 1326cb1..d4122c8 100644 --- a/clientlibrary/worker/shard-consumer.go +++ 
b/clientlibrary/worker/shard-consumer.go @@ -29,6 +29,7 @@ package worker import ( log "github.com/sirupsen/logrus" + "math" "sync" "time" @@ -149,9 +150,9 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { sc.recordProcessor.Initialize(input) recordCheckpointer := NewRecordProcessorCheckpoint(shard, sc.checkpointer) + retriedErrors := 0 for { - retriedErrors := 0 getRecordsStartTime := time.Now() if time.Now().UTC().After(shard.LeaseTimeout.Add(-5 * time.Second)) { log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID) @@ -181,7 +182,8 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { log.Errorf("Error getting records from shard %v: %+v", shard.ID, err) retriedErrors++ // exponential backoff - time.Sleep(time.Duration(2^retriedErrors*100) * time.Millisecond) + // https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff + time.Sleep(time.Duration(math.Exp2(retriedErrors)*100) * time.Millisecond) continue } } @@ -189,6 +191,9 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { return err } + // reset the retry count after success + retriedErrors = 0 + // IRecordProcessorCheckpointer input := &kcl.ProcessRecordsInput{ Records: getResp.Records, From 03685b2b190e7f6096948e9f698a0d19b7f46dee Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Thu, 22 Nov 2018 11:43:45 -0600 Subject: [PATCH 31/90] Fix type conversion error Fix the compile issue of type conversion. int --> float64. 
--- clientlibrary/worker/shard-consumer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index d4122c8..9b54425 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -183,7 +183,7 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { retriedErrors++ // exponential backoff // https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff - time.Sleep(time.Duration(math.Exp2(retriedErrors)*100) * time.Millisecond) + time.Sleep(time.Duration(math.Exp2(float64(retriedErrors))*100) * time.Millisecond) continue } } From cd343cca0962c84725214088a19964b51e59e7a6 Mon Sep 17 00:00:00 2001 From: Tim Studd Date: Sat, 9 Feb 2019 08:23:54 -0800 Subject: [PATCH 32/90] Add configuration options for AWS service endpoints (#5) * Add configuration options for AWS service endpoints Signed-off-by: Timothy Studd * Fix KCL naming consistency issue Signed-off-by: Timothy Studd --- clientlibrary/config/config.go | 8 ++++++++ clientlibrary/config/kcl-config.go | 15 ++++++++++++++- clientlibrary/worker/checkpointer.go | 6 +++--- clientlibrary/worker/worker.go | 19 +++++++++++++------ 4 files changed, 38 insertions(+), 10 deletions(-) diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index 0e3926a..f1f3090 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -158,6 +158,14 @@ type ( // ApplicationName is name of application. Kinesis allows multiple applications to consume the same stream. ApplicationName string + // DynamoDBEndpoint is an optional endpoint URL that overrides the default generated endpoint for a DynamoDB client. + // If this is empty, the default generated endpoint will be used. 
+ DynamoDBEndpoint string + + // KinesisEndpoint is an optional endpoint URL that overrides the default generated endpoint for a Kinesis client. + // If this is empty, the default generated endpoint will be used. + KinesisEndpoint string + // TableName is name of the dynamo db table for managing kinesis stream default to ApplicationName TableName string diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index 6c78a2a..4d208cf 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -34,8 +34,9 @@ package config import ( - "github.com/vmware/vmware-go-kcl/clientlibrary/utils" "time" + + "github.com/vmware/vmware-go-kcl/clientlibrary/utils" ) // NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. @@ -77,6 +78,18 @@ func NewKinesisClientLibConfig(applicationName, streamName, regionName, workerID } } +// WithKinesisEndpoint is used to provide an alternative Kinesis endpoint +func (c *KinesisClientLibConfiguration) WithKinesisEndpoint(kinesisEndpoint string) *KinesisClientLibConfiguration { + c.KinesisEndpoint = kinesisEndpoint + return c +} + +// WithDynamoDBEndpoint is used to provide an alternative DynamoDB endpoint +func (c *KinesisClientLibConfiguration) WithDynamoDBEndpoint(dynamoDBEndpoint string) *KinesisClientLibConfiguration { + c.DynamoDBEndpoint = dynamoDBEndpoint + return c +} + // WithTableName to provide alternative lease table in DynamoDB func (c *KinesisClientLibConfiguration) WithTableName(tableName string) *KinesisClientLibConfiguration { c.TableName = tableName diff --git a/clientlibrary/worker/checkpointer.go b/clientlibrary/worker/checkpointer.go index 1361f78..9d15fa1 100644 --- a/clientlibrary/worker/checkpointer.go +++ b/clientlibrary/worker/checkpointer.go @@ -286,9 +286,9 @@ func (checkpointer *DynamoCheckpoint) saveItem(item map[string]*dynamodb.Attribu func (checkpointer *DynamoCheckpoint) 
conditionalUpdate(conditionExpression string, expressionAttributeValues map[string]*dynamodb.AttributeValue, item map[string]*dynamodb.AttributeValue) error { return checkpointer.putItem(&dynamodb.PutItemInput{ - ConditionExpression: aws.String(conditionExpression), - TableName: aws.String(checkpointer.TableName), - Item: item, + ConditionExpression: aws.String(conditionExpression), + TableName: aws.String(checkpointer.TableName), + Item: item, ExpressionAttributeValues: expressionAttributeValues, }) } diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index e8a8a12..c295f86 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -29,13 +29,14 @@ package worker import ( "errors" - log "github.com/sirupsen/logrus" "os" "os/signal" "sync" "syscall" "time" + log "github.com/sirupsen/logrus" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" @@ -112,11 +113,17 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli // create session for Kinesis log.Info("Creating Kinesis session") - s := session.New(&aws.Config{Region: aws.String(w.regionName)}) + s := session.New(&aws.Config{ + Region: aws.String(w.regionName), + Endpoint: &kclConfig.KinesisEndpoint, + }) w.kc = kinesis.New(s) log.Info("Creating DynamoDB session") - s = session.New(&aws.Config{Region: aws.String(w.regionName)}) + s = session.New(&aws.Config{ + Region: aws.String(w.regionName), + Endpoint: &kclConfig.DynamoDBEndpoint, + }) w.dynamo = dynamodb.New(s) w.checkpointer = NewDynamoCheckpoint(w.dynamo, kclConfig) @@ -329,9 +336,9 @@ func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) err if _, ok := w.shardStatus[*s.ShardId]; !ok { log.Infof("Found new shard with id %s", *s.ShardId) w.shardStatus[*s.ShardId] = &shardStatus{ - ID: *s.ShardId, - ParentShardId: aws.StringValue(s.ParentShardId), - mux: &sync.Mutex{}, + ID: *s.ShardId, + 
ParentShardId: aws.StringValue(s.ParentShardId), + mux: &sync.Mutex{}, StartingSequenceNumber: aws.StringValue(s.SequenceNumberRange.StartingSequenceNumber), EndingSequenceNumber: aws.StringValue(s.SequenceNumberRange.EndingSequenceNumber), } From 13aa9632cd6a8abbdb3ede09c7195d7c5e468a3a Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Thu, 21 Feb 2019 18:44:36 -0600 Subject: [PATCH 33/90] Upgrade to use go1.11 and switch to use go mod 1. No functional change just upgrade to go1.11. 2. Add go mod support. 3. Make vendored copy of dependencies Test 1. hmake 2. run worker_test.go in GoLand IDE --- HyperMake | 20 ++-- go.mod | 25 +++++ go.sum | 39 +++++++ support/toolchain/HyperMake | 2 +- support/toolchain/docker/Dockerfile | 2 +- vendor/manifest | 167 ---------------------------- 6 files changed, 74 insertions(+), 181 deletions(-) create mode 100644 go.mod create mode 100644 go.sum delete mode 100644 vendor/manifest diff --git a/HyperMake b/HyperMake index d8d0fd0..8222f2a 100644 --- a/HyperMake +++ b/HyperMake @@ -15,9 +15,15 @@ targets: description: placeholder for additional toolchain dependencies deps: - description: setup dependencies + description: download dependencies to local cache after: - - 'deps-*' + - toolchain + watches: + - go.mod + cmds: + - export GO111MODULE=on + - go mod download + - go mod vendor gen: description: generate source code @@ -82,16 +88,6 @@ targets: - lint - scanast - deps-kcl: - description: populate vendor packages - after: - - toolchain - watches: - - vendor/manifest - workdir: . 
- cmds: - - gvt restore - settings: default-targets: - ci diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..3dc4633 --- /dev/null +++ b/go.mod @@ -0,0 +1,25 @@ +module github.com/vmware/vmware-go-kcl + +require ( + github.com/asaskevich/govalidator v0.0.0-20170507183629-38ddb4612a5d // indirect + github.com/astaxie/beego v0.0.0-20170908222938-a7354d2d0840 // indirect + github.com/aws/aws-sdk-go v0.0.0-20171208220907-365b4d343694 + github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-ini/ini v1.42.0 // indirect + github.com/golang/protobuf v0.0.0-20170622202551-6a1fa9404c0a // indirect + github.com/google/uuid v0.0.0-20170306145142-6a5e28554805 + github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect + github.com/matryer/try v0.0.0-20150601225556-312d2599e12e + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v0.0.0-20170707173355-26b897001974 + github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 // indirect + github.com/prometheus/common v0.0.0-20170707053319-3e6a7635bac6 + github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8 // indirect + github.com/sirupsen/logrus v0.0.0-20170713115724-51dc0fc64317 + github.com/stretchr/testify v1.2.1 + golang.org/x/sys v0.0.0-20190221222158-ec7b60b042fd // indirect + gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect + gopkg.in/yaml.v2 v2.2.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..de8b73f --- /dev/null +++ b/go.sum @@ -0,0 +1,39 @@ +github.com/asaskevich/govalidator v0.0.0-20170507183629-38ddb4612a5d/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/astaxie/beego v0.0.0-20170908222938-a7354d2d0840/go.mod h1:0R4++1tUqERR0WYFWdfkcrsyoVBCG4DgpDGokT3yb+U= +github.com/aws/aws-sdk-go 
v0.0.0-20171208220907-365b4d343694 h1:TXabFUZYb1oIrmshTCd9k3gLItnCkX8DYNlzC7zT5y4= +github.com/aws/aws-sdk-go v0.0.0-20171208220907-365b4d343694/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-ini/ini v1.42.0 h1:TWr1wGj35+UiWHlBA8er89seFXxzwFn11spilrrj+38= +github.com/go-ini/ini v1.42.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/golang/protobuf v0.0.0-20170622202551-6a1fa9404c0a h1:5X905hYB5maQbwC9ltdknryvCPb4v+D0pWDQXaYQWyk= +github.com/golang/protobuf v0.0.0-20170622202551-6a1fa9404c0a/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/uuid v0.0.0-20170306145142-6a5e28554805 h1:skl44gU1qEIcRpwKjb9bhlRwjvr96wLdvpTogCBBJe8= +github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/matryer/try v0.0.0-20150601225556-312d2599e12e h1:wEdOROHcWFFOttvgtwOkyRx5AXGm6GKYZV46vUk7RWY= +github.com/matryer/try v0.0.0-20150601225556-312d2599e12e/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.0.0-20170707173355-26b897001974 h1:7geXN+A5WSloMJfKwHhigBJCSPW0DZOlypTpzg7Nu40= +github.com/prometheus/client_golang v0.0.0-20170707173355-26b897001974/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 h1:13pIdM2tpaDi4OVe24fgoIS7ZTqMt0QI+bwQsX5hq+g= +github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20170707053319-3e6a7635bac6 h1:UEgo247BhzA25ik6y8sBtRVet8xyPH5+UidPXC+E4t0= +github.com/prometheus/common v0.0.0-20170707053319-3e6a7635bac6/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8 h1:Kh7M6mzRpQ2de1rixoSQZr4BTINXFm8WDbeN5ttnwyE= +github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/sirupsen/logrus v0.0.0-20170713115724-51dc0fc64317 h1:GpeXjjFK3fgyG/1Dd5felinm3v0XRZKO4cUtMcHGL08= +github.com/sirupsen/logrus v0.0.0-20170713115724-51dc0fc64317/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/stretchr/testify v1.2.1 h1:52QO5WkIUcHGIR7EnGagH88x1bUzqGXTC5/1bDTUQ7U= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20190221222158-ec7b60b042fd h1:JYgmSAJhrvxjUInD1uG+wLPAFAG7TmIJLOgZLI210A8= +golang.org/x/sys v0.0.0-20190221222158-ec7b60b042fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/support/toolchain/HyperMake 
b/support/toolchain/HyperMake index 40e36e7..e02ea28 100644 --- a/support/toolchain/HyperMake +++ b/support/toolchain/HyperMake @@ -25,4 +25,4 @@ settings: default-targets: - rebuild-toolchain docker: - image: 'vmware/go-kcl-toolchain:0.1.0' + image: 'vmware/go-kcl-toolchain:0.1.1' diff --git a/support/toolchain/docker/Dockerfile b/support/toolchain/docker/Dockerfile index 5c36811..eefbe9f 100644 --- a/support/toolchain/docker/Dockerfile +++ b/support/toolchain/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.10 +FROM golang:1.11 ENV PATH /go/bin:/src/bin:/root/go/bin:/usr/local/go/bin:$PATH ENV GOPATH /go:/src RUN go get -v github.com/alecthomas/gometalinter && \ diff --git a/vendor/manifest b/vendor/manifest deleted file mode 100644 index 1b8ad4e..0000000 --- a/vendor/manifest +++ /dev/null @@ -1,167 +0,0 @@ -{ - "version": 0, - "dependencies": [ - { - "importpath": "github.com/aws/aws-sdk-go", - "repository": "https://github.com/aws/aws-sdk-go", - "vcs": "git", - "revision": "365b4d34369496e650e3056b33fce4e1a25cfc72", - "branch": "master", - "notests": true - }, - { - "importpath": "github.com/asaskevich/govalidator", - "repository": "https://github.com/asaskevich/govalidator", - "vcs": "git", - "revision": "38ddb4612a5dfc2878731749ee825853d9f0aaa1", - "branch": "master", - "notests": true - }, - { - "importpath": "github.com/sirupsen/logrus", - "repository": "https://github.com/sirupsen/logrus", - "vcs": "git", - "revision": "51dc0fc64317a2861273909081f9c315786533eb", - "branch": "master", - "notests": true - }, - { - "importpath": "github.com/beorn7/perks/quantile", - "repository": "https://github.com/beorn7/perks", - "vcs": "git", - "revision": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9", - "branch": "master", - "path": "/quantile", - "notests": true - }, - { - "importpath": "gopkg.in/gemnasium/logrus-airbrake-hook.v2", - "repository": "https://gopkg.in/gemnasium/logrus-airbrake-hook.v2", - "vcs": "git", - "revision": 
"e928b033a891c0175fb643d5aa0779e86325eb12", - "branch": "master", - "notests": true - }, - { - "importpath": "github.com/matttproud/golang_protobuf_extensions/pbutil", - "repository": "https://github.com/matttproud/golang_protobuf_extensions", - "vcs": "git", - "revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c", - "branch": "master", - "path": "/pbutil", - "notests": true - }, - { - "importpath": "github.com/prometheus/client_golang/prometheus", - "repository": "https://github.com/prometheus/client_golang", - "vcs": "git", - "revision": "26b897001974f2b4ee6688377873e4d6f61d533c", - "branch": "master", - "path": "prometheus", - "notests": true - }, - { - "importpath": "github.com/prometheus/client_model/go", - "repository": "https://github.com/prometheus/client_model", - "vcs": "git", - "revision": "6f3806018612930941127f2a7c6c453ba2c527d2", - "branch": "master", - "path": "/go", - "notests": true - }, - { - "importpath": "github.com/prometheus/common/expfmt", - "repository": "https://github.com/prometheus/common", - "vcs": "git", - "revision": "3e6a7635bac6573d43f49f97b47eb9bda195dba8", - "branch": "master", - "path": "/expfmt", - "notests": true - }, - { - "importpath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", - "repository": "https://github.com/prometheus/common", - "vcs": "git", - "revision": "3e6a7635bac6573d43f49f97b47eb9bda195dba8", - "branch": "master", - "path": "internal/bitbucket.org/ww/goautoneg", - "notests": true - }, - { - "importpath": "github.com/prometheus/common/model", - "repository": "https://github.com/prometheus/common", - "vcs": "git", - "revision": "3e6a7635bac6573d43f49f97b47eb9bda195dba8", - "branch": "master", - "path": "model", - "notests": true - }, - { - "importpath": "github.com/astaxie/beego/cache", - "repository": "https://github.com/astaxie/beego", - "vcs": "git", - "revision": "a7354d2d084003e4122d6e69f7e5ab594fd117b2", - "branch": "master", - "path": "cache", - "notests": true - }, - { - 
"importpath": "github.com/prometheus/procfs", - "repository": "https://github.com/prometheus/procfs", - "vcs": "git", - "revision": "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2", - "branch": "master", - "notests": true - }, - { - "importpath": "github.com/golang/protobuf/proto", - "repository": "https://github.com/golang/protobuf", - "vcs": "git", - "revision": "6a1fa9404c0aebf36c879bc50152edcc953910d2", - "branch": "master", - "path": "/proto", - "notests": true - }, - { - "importpath": "github.com/golang/protobuf/ptypes/any", - "repository": "https://github.com/golang/protobuf", - "vcs": "git", - "revision": "6a1fa9404c0aebf36c879bc50152edcc953910d2", - "branch": "master", - "path": "ptypes/any", - "notests": true - }, - { - "importpath": "github.com/google/uuid", - "repository": "https://github.com/google/uuid", - "vcs": "git", - "revision": "6a5e28554805e78ea6141142aba763936c4761c0", - "branch": "master", - "notests": true - }, - { - "importpath": "github.com/matryer/try", - "repository": "https://github.com/matryer/try", - "vcs": "git", - "revision": "312d2599e12e89ca89b52a09597394f449235d80", - "branch": "master", - "notests": true - }, - { - "importpath": "github.com/stretchr/testify", - "repository": "https://github.com/stretchr/testify", - "vcs": "git", - "revision": "12b6f73e6084dad08a7c6e575284b177ecafbc71", - "branch": "master", - "notests": true - }, - { - "importpath": "gopkg.in/yaml.v2", - "repository": "https://gopkg.in/yaml.v2", - "vcs": "git", - "revision": "5420a8b6744d3b0345ab293f6fcba19c978f1183", - "branch": "v2", - "notests": true - } - ] -} From 5140058e8b21768c397f6d529f2689aaa6ffbdbe Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Thu, 21 Feb 2019 21:34:12 -0600 Subject: [PATCH 34/90] Update dependency steps Makesure to update dependency packages before test. 
--- HyperMake | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/HyperMake b/HyperMake index 8222f2a..a34630e 100644 --- a/HyperMake +++ b/HyperMake @@ -25,11 +25,6 @@ targets: - go mod download - go mod vendor - gen: - description: generate source code - after: - - 'gen-*' - build: description: build source code after: @@ -39,7 +34,7 @@ targets: description: run unit tests after: - deps - - gen + - check always: true cmds: - ./support/scripts/test.sh @@ -47,6 +42,7 @@ targets: ci: description: run CI tests after: + - deps - check cmds: - ./support/scripts/ci.sh From c634c75ebcb1ff2153c007615b299f5804b72190 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Sat, 16 Mar 2019 08:11:09 -0500 Subject: [PATCH 35/90] Add credential configuration for resources (#14) Add credentials for Kinesis, DynamoDB and Cloudwatch. See the worker_test.go to see how to use it. Signed-off-by: Tao Jiang --- clientlibrary/config/config.go | 10 +++++ clientlibrary/config/kcl-config.go | 17 ++++++++ clientlibrary/metrics/cloudwatch.go | 10 ++++- clientlibrary/worker/worker.go | 28 ++++++++++--- clientlibrary/worker/worker_test.go | 61 ++++++++++++++++++++++++++--- 5 files changed, 114 insertions(+), 12 deletions(-) diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index f1f3090..3200725 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -40,6 +40,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + creds "github.com/aws/aws-sdk-go/aws/credentials" ) const ( @@ -166,6 +167,15 @@ type ( // If this is empty, the default generated endpoint will be used. 
KinesisEndpoint string + // KinesisCredentials is used to access Kinesis + KinesisCredentials *creds.Credentials + + // DynamoDBCredentials is used to access DynamoDB + DynamoDBCredentials *creds.Credentials + + // CloudWatchCredentials is used to access CloudWatch + CloudWatchCredentials *creds.Credentials + // TableName is name of the dynamo db table for managing kinesis stream default to ApplicationName TableName string diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index 4d208cf..1419ad8 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -34,6 +34,7 @@ package config import ( + "github.com/aws/aws-sdk-go/aws/credentials" "time" "github.com/vmware/vmware-go-kcl/clientlibrary/utils" @@ -41,6 +42,19 @@ import ( // NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. func NewKinesisClientLibConfig(applicationName, streamName, regionName, workerID string) *KinesisClientLibConfiguration { + return NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID, + nil, nil, nil) +} + +// NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. +func NewKinesisClientLibConfigWithCredential(applicationName, streamName, regionName, workerID string, + creds *credentials.Credentials) *KinesisClientLibConfiguration { + return NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID, creds, creds, creds) +} + +// NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. 
+func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID string, + kiniesisCreds, dynamodbCreds, cloudwatchCreds *credentials.Credentials) *KinesisClientLibConfiguration { checkIsValueNotEmpty("ApplicationName", applicationName) checkIsValueNotEmpty("StreamName", streamName) checkIsValueNotEmpty("RegionName", regionName) @@ -52,6 +66,9 @@ func NewKinesisClientLibConfig(applicationName, streamName, regionName, workerID // populate the KCL configuration with default values return &KinesisClientLibConfiguration{ ApplicationName: applicationName, + KinesisCredentials: kiniesisCreds, + DynamoDBCredentials: dynamodbCreds, + CloudWatchCredentials: cloudwatchCreds, TableName: applicationName, StreamName: streamName, RegionName: regionName, diff --git a/clientlibrary/metrics/cloudwatch.go b/clientlibrary/metrics/cloudwatch.go index 1a157f4..477f127 100644 --- a/clientlibrary/metrics/cloudwatch.go +++ b/clientlibrary/metrics/cloudwatch.go @@ -28,6 +28,7 @@ package metrics import ( + "github.com/aws/aws-sdk-go/aws/credentials" "sync" "time" @@ -43,6 +44,7 @@ type CloudWatchMonitoringService struct { KinesisStream string WorkerID string Region string + Credentials *credentials.Credentials // control how often to pusblish to CloudWatch MetricsBufferTimeMillis int @@ -66,7 +68,13 @@ type cloudWatchMetrics struct { } func (cw *CloudWatchMonitoringService) Init() error { - s := session.New(&aws.Config{Region: aws.String(cw.Region)}) + cfg := &aws.Config{Region: aws.String(cw.Region)} + cfg.Credentials = cw.Credentials + s, err := session.NewSession(cfg) + if err != nil { + log.Errorf("Error in creating session for cloudwatch. 
%+v", err) + return err + } cw.svc = cloudwatch.New(s) cw.shardMetrics = new(sync.Map) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index c295f86..33551c3 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -113,21 +113,37 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli // create session for Kinesis log.Info("Creating Kinesis session") - s := session.New(&aws.Config{ - Region: aws.String(w.regionName), - Endpoint: &kclConfig.KinesisEndpoint, + + s, err := session.NewSession(&aws.Config{ + Region: aws.String(w.regionName), + Endpoint: &kclConfig.KinesisEndpoint, + Credentials: kclConfig.KinesisCredentials, }) + + if err != nil { + // no need to move forward + log.Fatalf("Failed in getting Kinesis session for creating Worker: %+v", err) + } w.kc = kinesis.New(s) log.Info("Creating DynamoDB session") - s = session.New(&aws.Config{ - Region: aws.String(w.regionName), - Endpoint: &kclConfig.DynamoDBEndpoint, + + s, err = session.NewSession(&aws.Config{ + Region: aws.String(w.regionName), + Endpoint: &kclConfig.DynamoDBEndpoint, + Credentials: kclConfig.DynamoDBCredentials, }) + + if err != nil { + // no need to move forward + log.Fatalf("Failed in getting DynamoDB session for creating Worker: %+v", err) + } + w.dynamo = dynamodb.New(s) w.checkpointer = NewDynamoCheckpoint(w.dynamo, kclConfig) if w.metricsConfig == nil { + // "" means noop monitor service. i.e. not emitting any metrics. 
w.metricsConfig = &metrics.MonitoringConfiguration{MonitoringService: ""} } return w diff --git a/clientlibrary/worker/worker_test.go b/clientlibrary/worker/worker_test.go index 4a9598d..013561f 100644 --- a/clientlibrary/worker/worker_test.go +++ b/clientlibrary/worker/worker_test.go @@ -19,6 +19,9 @@ package worker import ( + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/session" "net/http" "os" "testing" @@ -50,8 +53,55 @@ func TestWorker(t *testing.T) { WithMaxRecords(10). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). - WithFailoverTimeMillis(300000) + WithFailoverTimeMillis(300000). + WithMetricsBufferTimeMillis(10000). + WithMetricsMaxQueueSize(20) + runTest(kclConfig, t) +} + +func TestWorkerStatic(t *testing.T) { + t.Skip("Need to provide actual credentials") + + creds := credentials.NewStaticCredentials("AccessKeyId", "SecretAccessKey", "") + + kclConfig := cfg.NewKinesisClientLibConfigWithCredential("appName", streamName, regionName, workerID, creds). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithMetricsBufferTimeMillis(10000). + WithMetricsMaxQueueSize(20) + + runTest(kclConfig, t) +} + +func TestWorkerAssumeRole(t *testing.T) { + t.Skip("Need to provide actual roleARN") + + // Initial credentials loaded from SDK's default credential chain. Such as + // the environment, shared credentials (~/.aws/credentials), or EC2 Instance + // Role. These credentials will be used to to make the STS Assume Role API. + sess := session.Must(session.NewSession()) + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. 
+ creds := stscreds.NewCredentials(sess, "arn:aws:iam::*:role/kcl-test-publisher") + + kclConfig := cfg.NewKinesisClientLibConfigWithCredential("appName", streamName, regionName, workerID, creds). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithMetricsBufferTimeMillis(10000). + WithMetricsMaxQueueSize(20) + + runTest(kclConfig, t) +} + +func runTest(kclConfig *cfg.KinesisClientLibConfiguration, t *testing.T) { log.SetOutput(os.Stdout) log.SetLevel(log.DebugLevel) @@ -59,7 +109,7 @@ func TestWorker(t *testing.T) { assert.Equal(t, streamName, kclConfig.StreamName) // configure cloudwatch as metrics system - metricsConfig := getMetricsConfig(metricsSystem) + metricsConfig := getMetricsConfig(kclConfig, metricsSystem) worker := NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig) assert.Equal(t, regionName, worker.regionName) @@ -100,15 +150,16 @@ func TestWorker(t *testing.T) { } // configure different metrics system -func getMetricsConfig(service string) *metrics.MonitoringConfiguration { +func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service string) *metrics.MonitoringConfiguration { if service == "cloudwatch" { return &metrics.MonitoringConfiguration{ MonitoringService: "cloudwatch", Region: regionName, CloudWatch: metrics.CloudWatchMonitoringService{ + Credentials: kclConfig.CloudWatchCredentials, // Those value should come from kclConfig - MetricsBufferTimeMillis: 10000, - MetricsMaxQueueSize: 20, + MetricsBufferTimeMillis: kclConfig.MetricsBufferTimeMillis, + MetricsMaxQueueSize: kclConfig.MetricsMaxQueueSize, }, } } From 2ca82c25ca74a04357ce9a99af3b1e95a4281863 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 22 Apr 2019 15:55:39 -0500 Subject: [PATCH 36/90] Add support for providing custom checkpointer (#17) * Add credential configuration for resources Add credentials for Kinesis, DynamoDB and Cloudwatch. 
See the worker_test.go to see how to use it. Signed-off-by: Tao Jiang * Add support for providing custom checkpointer Provide a new constructor for adding checkpointer instead of alway using default dynamodb checkpointer. The next step is to abstract out the Kinesis into a generic stream API and this will be bigger change and will be addressed in different PR. Test: Use the new construtor to inject dynamodb checkpointer and run the existing tests. Signed-off-by: Tao Jiang * Add support for providing custom checkpointer Provide a new constructor for adding checkpointer instead of alway using default dynamodb checkpointer. The next step is to abstract out the Kinesis into a generic stream API and this will be bigger change and will be addressed in different PR. Fix checkfmt error. Test: Use the new construtor to inject dynamodb checkpointer and run the existing tests. Signed-off-by: Tao Jiang --- HyperMake | 2 +- clientlibrary/checkpoint/checkpointer.go | 59 ++++++++++++ .../dynamodb-checkpointer.go} | 40 ++------- clientlibrary/partition/partition.go | 58 ++++++++++++ .../worker/record-processor-checkpointer.go | 14 +-- clientlibrary/worker/shard-consumer.go | 28 +++--- clientlibrary/worker/worker-custom.go | 66 ++++++++++++++ clientlibrary/worker/worker.go | 54 +++-------- clientlibrary/worker/worker_custom_test.go | 89 +++++++++++++++++++ support/toolchain/HyperMake | 2 +- support/toolchain/docker/Dockerfile | 2 +- 11 files changed, 321 insertions(+), 93 deletions(-) create mode 100644 clientlibrary/checkpoint/checkpointer.go rename clientlibrary/{worker/checkpointer.go => checkpoint/dynamodb-checkpointer.go} (90%) create mode 100644 clientlibrary/partition/partition.go create mode 100644 clientlibrary/worker/worker-custom.go create mode 100644 clientlibrary/worker/worker_custom_test.go diff --git a/HyperMake b/HyperMake index a34630e..aa75a30 100644 --- a/HyperMake +++ b/HyperMake @@ -88,5 +88,5 @@ settings: default-targets: - ci docker: - image: 
'vmware/go-kcl-toolchain:latest' + image: 'vmware/go-kcl-toolchain:0.1.2' src-volume: /go/src/github.com/vmware/vmware-go-kcl diff --git a/clientlibrary/checkpoint/checkpointer.go b/clientlibrary/checkpoint/checkpointer.go new file mode 100644 index 0000000..1f48349 --- /dev/null +++ b/clientlibrary/checkpoint/checkpointer.go @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +// The implementation is derived from https://github.com/patrobinson/gokini +// +// Copyright 2018 Patrick robinson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +package checkpoint + +import ( + "errors" + par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" +) + +const ( + LEASE_KEY_KEY = "ShardID" + LEASE_OWNER_KEY = "AssignedTo" + LEASE_TIMEOUT_KEY = "LeaseTimeout" + CHECKPOINT_SEQUENCE_NUMBER_KEY = "Checkpoint" + PARENT_SHARD_ID_KEY = "ParentShardId" + + // We've completely processed all records in this shard. 
+ SHARD_END = "SHARD_END" + + // ErrLeaseNotAquired is returned when we failed to get a lock on the shard + ErrLeaseNotAquired = "Lease is already held by another node" +) + +// Checkpointer handles checkpointing when a record has been processed +type Checkpointer interface { + Init() error + GetLease(*par.ShardStatus, string) error + CheckpointSequence(*par.ShardStatus) error + FetchCheckpoint(*par.ShardStatus) error + RemoveLeaseInfo(string) error +} + +// ErrSequenceIDNotFound is returned by FetchCheckpoint when no SequenceID is found +var ErrSequenceIDNotFound = errors.New("SequenceIDNotFoundForShard") diff --git a/clientlibrary/worker/checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go similarity index 90% rename from clientlibrary/worker/checkpointer.go rename to clientlibrary/checkpoint/dynamodb-checkpointer.go index 9d15fa1..0b00b6b 100644 --- a/clientlibrary/worker/checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -25,7 +25,7 @@ // The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-package worker +package checkpoint import ( "errors" @@ -40,36 +40,14 @@ import ( log "github.com/sirupsen/logrus" "github.com/vmware/vmware-go-kcl/clientlibrary/config" + par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" ) const ( - LEASE_KEY_KEY = "ShardID" - LEASE_OWNER_KEY = "AssignedTo" - LEASE_TIMEOUT_KEY = "LeaseTimeout" - CHECKPOINT_SEQUENCE_NUMBER_KEY = "Checkpoint" - PARENT_SHARD_ID_KEY = "ParentShardId" - - // We've completely processed all records in this shard. - SHARD_END = "SHARD_END" - - // ErrLeaseNotAquired is returned when we failed to get a lock on the shard - ErrLeaseNotAquired = "Lease is already held by another node" // ErrInvalidDynamoDBSchema is returned when there are one or more fields missing from the table ErrInvalidDynamoDBSchema = "The DynamoDB schema is invalid and may need to be re-created" ) -// Checkpointer handles checkpointing when a record has been processed -type Checkpointer interface { - Init() error - GetLease(*shardStatus, string) error - CheckpointSequence(*shardStatus) error - FetchCheckpoint(*shardStatus) error - RemoveLeaseInfo(string) error -} - -// ErrSequenceIDNotFound is returned by FetchCheckpoint when no SequenceID is found -var ErrSequenceIDNotFound = errors.New("SequenceIDNotFoundForShard") - // DynamoCheckpoint implements the Checkpoint interface using DynamoDB as a backend type DynamoCheckpoint struct { TableName string @@ -104,7 +82,7 @@ func (checkpointer *DynamoCheckpoint) Init() error { } // GetLease attempts to gain a lock on the given shard -func (checkpointer *DynamoCheckpoint) GetLease(shard *shardStatus, newAssignTo string) error { +func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssignTo string) error { newLeaseTimeout := time.Now().Add(time.Duration(checkpointer.LeaseDuration) * time.Millisecond).UTC() newLeaseTimeoutString := newLeaseTimeout.Format(time.RFC3339) currentCheckpoint, err := checkpointer.getItem(shard.ID) @@ -177,16 +155,16 @@ func (checkpointer 
*DynamoCheckpoint) GetLease(shard *shardStatus, newAssignTo s return err } - shard.mux.Lock() + shard.Mux.Lock() shard.AssignedTo = newAssignTo shard.LeaseTimeout = newLeaseTimeout - shard.mux.Unlock() + shard.Mux.Unlock() return nil } // CheckpointSequence writes a checkpoint at the designated sequence ID -func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard *shardStatus) error { +func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard *par.ShardStatus) error { leaseTimeout := shard.LeaseTimeout.UTC().Format(time.RFC3339) marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ LEASE_KEY_KEY: { @@ -211,7 +189,7 @@ func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard *shardStatus) err } // FetchCheckpoint retrieves the checkpoint for the given shard -func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *shardStatus) error { +func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *par.ShardStatus) error { checkpoint, err := checkpointer.getItem(shard.ID) if err != nil { return err @@ -222,8 +200,8 @@ func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *shardStatus) error return ErrSequenceIDNotFound } log.Debugf("Retrieved Shard Iterator %s", *sequenceID.S) - shard.mux.Lock() - defer shard.mux.Unlock() + shard.Mux.Lock() + defer shard.Mux.Unlock() shard.Checkpoint = *sequenceID.S if assignedTo, ok := checkpoint[LEASE_OWNER_KEY]; ok { diff --git a/clientlibrary/partition/partition.go b/clientlibrary/partition/partition.go new file mode 100644 index 0000000..c261672 --- /dev/null +++ b/clientlibrary/partition/partition.go @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2018 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/patrobinson/gokini +// +// Copyright 2018 Patrick robinson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +package worker + +import ( + "sync" + "time" +) + +type ShardStatus struct { + ID string + ParentShardId string + Checkpoint string + AssignedTo string + Mux *sync.Mutex + LeaseTimeout time.Time + // Shard Range + StartingSequenceNumber string + // child shard doesn't have end sequence number + EndingSequenceNumber string +} + +func (ss *ShardStatus) GetLeaseOwner() string { + ss.Mux.Lock() + defer ss.Mux.Unlock() + return ss.AssignedTo +} + +func (ss *ShardStatus) SetLeaseOwner(owner string) { + ss.Mux.Lock() + defer ss.Mux.Unlock() + ss.AssignedTo = owner +} diff --git a/clientlibrary/worker/record-processor-checkpointer.go b/clientlibrary/worker/record-processor-checkpointer.go index 0562a3c..ec44eb0 100644 --- a/clientlibrary/worker/record-processor-checkpointer.go +++ b/clientlibrary/worker/record-processor-checkpointer.go @@ -21,7 +21,9 @@ package worker import ( "github.com/aws/aws-sdk-go/aws" + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" + par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" ) type ( @@ -41,12 +43,12 @@ type ( * RecordProcessor instance. Amazon Kinesis Client Library will create one instance per shard assignment. 
*/ RecordProcessorCheckpointer struct { - shard *shardStatus - checkpoint Checkpointer + shard *par.ShardStatus + checkpoint chk.Checkpointer } ) -func NewRecordProcessorCheckpoint(shard *shardStatus, checkpoint Checkpointer) kcl.IRecordProcessorCheckpointer { +func NewRecordProcessorCheckpoint(shard *par.ShardStatus, checkpoint chk.Checkpointer) kcl.IRecordProcessorCheckpointer { return &RecordProcessorCheckpointer{ shard: shard, checkpoint: checkpoint, @@ -62,16 +64,16 @@ func (pc *PreparedCheckpointer) Checkpoint() error { } func (rc *RecordProcessorCheckpointer) Checkpoint(sequenceNumber *string) error { - rc.shard.mux.Lock() + rc.shard.Mux.Lock() // checkpoint the last sequence of a closed shard if sequenceNumber == nil { - rc.shard.Checkpoint = SHARD_END + rc.shard.Checkpoint = chk.SHARD_END } else { rc.shard.Checkpoint = aws.StringValue(sequenceNumber) } - rc.shard.mux.Unlock() + rc.shard.Mux.Unlock() return rc.checkpoint.CheckpointSequence(rc.shard) } diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 9b54425..7899a67 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -38,9 +38,11 @@ import ( "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" "github.com/vmware/vmware-go-kcl/clientlibrary/config" kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" + par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" ) const ( @@ -71,9 +73,9 @@ type ShardConsumerState int // Note: ShardConsumer only deal with one shard. 
type ShardConsumer struct { streamName string - shard *shardStatus + shard *par.ShardStatus kc kinesisiface.KinesisAPI - checkpointer Checkpointer + checkpointer chk.Checkpointer recordProcessor kcl.IRecordProcessor kclConfig *config.KinesisClientLibConfiguration stop *chan struct{} @@ -83,10 +85,10 @@ type ShardConsumer struct { state ShardConsumerState } -func (sc *ShardConsumer) getShardIterator(shard *shardStatus) (*string, error) { +func (sc *ShardConsumer) getShardIterator(shard *par.ShardStatus) (*string, error) { // Get checkpoint of the shard from dynamoDB err := sc.checkpointer.FetchCheckpoint(shard) - if err != nil && err != ErrSequenceIDNotFound { + if err != nil && err != chk.ErrSequenceIDNotFound { return nil, err } @@ -123,14 +125,14 @@ func (sc *ShardConsumer) getShardIterator(shard *shardStatus) (*string, error) { // getRecords continously poll one shard for data record // Precondition: it currently has the lease on the shard. -func (sc *ShardConsumer) getRecords(shard *shardStatus) error { +func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { defer sc.waitGroup.Done() defer sc.releaseLease(shard) // If the shard is child shard, need to wait until the parent finished. if err := sc.waitOnParentShard(shard); err != nil { // If parent shard has been deleted by Kinesis system already, just ignore the error. - if err != ErrSequenceIDNotFound { + if err != chk.ErrSequenceIDNotFound { log.Errorf("Error in waiting for parent shard: %v to finish. 
Error: %+v", shard.ParentShardId, err) return err } @@ -158,7 +160,7 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID) err = sc.checkpointer.GetLease(shard, sc.consumerID) if err != nil { - if err.Error() == ErrLeaseNotAquired { + if err.Error() == chk.ErrLeaseNotAquired { log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", shard.ID, sc.consumerID) return nil } @@ -255,14 +257,14 @@ func (sc *ShardConsumer) getRecords(shard *shardStatus) error { } // Need to wait until the parent shard finished -func (sc *ShardConsumer) waitOnParentShard(shard *shardStatus) error { +func (sc *ShardConsumer) waitOnParentShard(shard *par.ShardStatus) error { if len(shard.ParentShardId) == 0 { return nil } - pshard := &shardStatus{ + pshard := &par.ShardStatus{ ID: shard.ParentShardId, - mux: &sync.Mutex{}, + Mux: &sync.Mutex{}, } for { @@ -271,7 +273,7 @@ func (sc *ShardConsumer) waitOnParentShard(shard *shardStatus) error { } // Parent shard is finished. - if pshard.Checkpoint == SHARD_END { + if pshard.Checkpoint == chk.SHARD_END { return nil } @@ -280,9 +282,9 @@ func (sc *ShardConsumer) waitOnParentShard(shard *shardStatus) error { } // Cleanup the internal lease cache -func (sc *ShardConsumer) releaseLease(shard *shardStatus) { +func (sc *ShardConsumer) releaseLease(shard *par.ShardStatus) { log.Infof("Release lease for shard %s", shard.ID) - shard.setLeaseOwner("") + shard.SetLeaseOwner("") // reporting lease lose metrics sc.mService.LeaseLost(shard.ID) } diff --git a/clientlibrary/worker/worker-custom.go b/clientlibrary/worker/worker-custom.go new file mode 100644 index 0000000..580a416 --- /dev/null +++ b/clientlibrary/worker/worker-custom.go @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2019 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package worker + +import ( + log "github.com/sirupsen/logrus" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/kinesis" + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" + "github.com/vmware/vmware-go-kcl/clientlibrary/config" + kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" +) + +// NewCustomWorker constructs a Worker instance for processing Kinesis stream data by directly inject custom cjheckpointer. 
+func NewCustomWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisClientLibConfiguration, + checkpointer chk.Checkpointer, metricsConfig *metrics.MonitoringConfiguration) *Worker { + w := &Worker{ + streamName: kclConfig.StreamName, + regionName: kclConfig.RegionName, + workerID: kclConfig.WorkerID, + processorFactory: factory, + kclConfig: kclConfig, + checkpointer: checkpointer, + metricsConfig: metricsConfig, + } + + // create session for Kinesis + log.Info("Creating Kinesis session") + + s, err := session.NewSession(&aws.Config{ + Region: aws.String(w.regionName), + Endpoint: &kclConfig.KinesisEndpoint, + Credentials: kclConfig.KinesisCredentials, + }) + + if err != nil { + // no need to move forward + log.Fatalf("Failed in getting Kinesis session for creating Worker: %+v", err) + } + w.kc = kinesis.New(s) + + if w.metricsConfig == nil { + // "" means noop monitor service. i.e. not emitting any metrics. + w.metricsConfig = &metrics.MonitoringConfiguration{MonitoringService: ""} + } + return w +} diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 33551c3..8c3bedd 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -40,40 +40,16 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" "github.com/vmware/vmware-go-kcl/clientlibrary/config" kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" + par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" ) -type shardStatus struct { - ID string - ParentShardId string - Checkpoint string - AssignedTo string - mux *sync.Mutex - LeaseTimeout time.Time - // Shard Range - 
StartingSequenceNumber string - // child shard doesn't have end sequence number - EndingSequenceNumber string -} - -func (ss *shardStatus) getLeaseOwner() string { - ss.mux.Lock() - defer ss.mux.Unlock() - return ss.AssignedTo -} - -func (ss *shardStatus) setLeaseOwner(owner string) { - ss.mux.Lock() - defer ss.mux.Unlock() - ss.AssignedTo = owner -} - /** * Worker is the high level class that Kinesis applications use to start processing data. It initializes and oversees * different components (e.g. syncing shard and lease information, tracking shard assignments, and processing data from @@ -87,14 +63,13 @@ type Worker struct { processorFactory kcl.IRecordProcessorFactory kclConfig *config.KinesisClientLibConfiguration kc kinesisiface.KinesisAPI - dynamo dynamodbiface.DynamoDBAPI - checkpointer Checkpointer + checkpointer chk.Checkpointer stop *chan struct{} waitGroup *sync.WaitGroup sigs *chan os.Signal - shardStatus map[string]*shardStatus + shardStatus map[string]*par.ShardStatus metricsConfig *metrics.MonitoringConfiguration mService metrics.MonitoringService @@ -139,8 +114,7 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli log.Fatalf("Failed in getting DynamoDB session for creating Worker: %+v", err) } - w.dynamo = dynamodb.New(s) - w.checkpointer = NewDynamoCheckpoint(w.dynamo, kclConfig) + w.checkpointer = chk.NewDynamoCheckpoint(dynamodb.New(s), kclConfig) if w.metricsConfig == nil { // "" means noop monitor service. i.e. not emitting any metrics. 
@@ -209,7 +183,7 @@ func (w *Worker) initialize() error { return err } - w.shardStatus = make(map[string]*shardStatus) + w.shardStatus = make(map[string]*par.ShardStatus) sigs := make(chan os.Signal, 1) w.sigs = &sigs @@ -227,7 +201,7 @@ func (w *Worker) initialize() error { } // newShardConsumer to create a shard consumer instance -func (w *Worker) newShardConsumer(shard *shardStatus) *ShardConsumer { +func (w *Worker) newShardConsumer(shard *par.ShardStatus) *ShardConsumer { return &ShardConsumer{ streamName: w.streamName, shard: shard, @@ -258,7 +232,7 @@ func (w *Worker) eventLoop() { // Count the number of leases hold by this worker excluding the processed shard counter := 0 for _, shard := range w.shardStatus { - if shard.getLeaseOwner() == w.workerID && shard.Checkpoint != SHARD_END { + if shard.GetLeaseOwner() == w.workerID && shard.Checkpoint != chk.SHARD_END { counter++ } } @@ -267,14 +241,14 @@ func (w *Worker) eventLoop() { if counter < w.kclConfig.MaxLeasesForWorker { for _, shard := range w.shardStatus { // already owner of the shard - if shard.getLeaseOwner() == w.workerID { + if shard.GetLeaseOwner() == w.workerID { continue } err := w.checkpointer.FetchCheckpoint(shard) if err != nil { // checkpoint may not existed yet is not an error condition. 
- if err != ErrSequenceIDNotFound { + if err != chk.ErrSequenceIDNotFound { log.Errorf(" Error: %+v", err) // move on to next shard continue @@ -282,14 +256,14 @@ func (w *Worker) eventLoop() { } // The shard is closed and we have processed all records - if shard.Checkpoint == SHARD_END { + if shard.Checkpoint == chk.SHARD_END { continue } err = w.checkpointer.GetLease(shard, w.workerID) if err != nil { // cannot get lease on the shard - if err.Error() != ErrLeaseNotAquired { + if err.Error() != chk.ErrLeaseNotAquired { log.Error(err) } continue @@ -351,10 +325,10 @@ func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) err // found new shard if _, ok := w.shardStatus[*s.ShardId]; !ok { log.Infof("Found new shard with id %s", *s.ShardId) - w.shardStatus[*s.ShardId] = &shardStatus{ + w.shardStatus[*s.ShardId] = &par.ShardStatus{ ID: *s.ShardId, ParentShardId: aws.StringValue(s.ParentShardId), - mux: &sync.Mutex{}, + Mux: &sync.Mutex{}, StartingSequenceNumber: aws.StringValue(s.SequenceNumberRange.StartingSequenceNumber), EndingSequenceNumber: aws.StringValue(s.SequenceNumberRange.EndingSequenceNumber), } diff --git a/clientlibrary/worker/worker_custom_test.go b/clientlibrary/worker/worker_custom_test.go new file mode 100644 index 0000000..a02ba33 --- /dev/null +++ b/clientlibrary/worker/worker_custom_test.go @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2018 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package worker + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodb" + "os" + "testing" + "time" + + log "github.com/sirupsen/logrus" + + "github.com/stretchr/testify/assert" + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" + cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" + "github.com/vmware/vmware-go-kcl/clientlibrary/utils" +) + +func TestCustomWorker(t *testing.T) { + kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithMetricsBufferTimeMillis(10000). 
+ WithMetricsMaxQueueSize(20) + + log.SetOutput(os.Stdout) + log.SetLevel(log.DebugLevel) + + assert.Equal(t, regionName, kclConfig.RegionName) + assert.Equal(t, streamName, kclConfig.StreamName) + + // configure cloudwatch as metrics system + metricsConfig := getMetricsConfig(kclConfig, metricsSystem) + + // create dynamodb checkpointer. + s, err := session.NewSession(&aws.Config{ + Region: aws.String(regionName), + Endpoint: &kclConfig.DynamoDBEndpoint, + Credentials: kclConfig.DynamoDBCredentials, + }) + + if err != nil { + // no need to move forward + log.Fatalf("Failed in getting DynamoDB session for creating Worker: %+v", err) + } + + checkpointer := chk.NewDynamoCheckpoint(dynamodb.New(s), kclConfig) + + worker := NewCustomWorker(recordProcessorFactory(t), kclConfig, checkpointer, metricsConfig) + assert.Equal(t, regionName, worker.regionName) + assert.Equal(t, streamName, worker.streamName) + + err = worker.Start() + assert.Nil(t, err) + + // Put some data into stream. + for i := 0; i < 100; i++ { + // Use random string as partition key to ensure even distribution across shards + err := worker.Publish(streamName, utils.RandStringBytesMaskImpr(10), []byte(specstr)) + if err != nil { + t.Errorf("Errorin Publish. 
%+v", err) + } + } + + // wait a few seconds before shutdown processing + time.Sleep(10 * time.Second) + worker.Shutdown() +} diff --git a/support/toolchain/HyperMake b/support/toolchain/HyperMake index e02ea28..1dcd569 100644 --- a/support/toolchain/HyperMake +++ b/support/toolchain/HyperMake @@ -25,4 +25,4 @@ settings: default-targets: - rebuild-toolchain docker: - image: 'vmware/go-kcl-toolchain:0.1.1' + image: 'vmware/go-kcl-toolchain:0.1.2' diff --git a/support/toolchain/docker/Dockerfile b/support/toolchain/docker/Dockerfile index eefbe9f..1e66efe 100644 --- a/support/toolchain/docker/Dockerfile +++ b/support/toolchain/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.11 +FROM golang:1.12 ENV PATH /go/bin:/src/bin:/root/go/bin:/usr/local/go/bin:$PATH ENV GOPATH /go:/src RUN go get -v github.com/alecthomas/gometalinter && \ From 6df520b34395e522c8f58c481384af2583a4f437 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 20 May 2019 08:57:32 -0500 Subject: [PATCH 37/90] Remove signal handling from event loop (#20) Take signle handling out of event loop. Also, make the worker Shutdown idempotent and update tests. 
Signed-off-by: Tao Jiang --- clientlibrary/worker/worker.go | 21 ++++++-------- clientlibrary/worker/worker_test.go | 44 ++++++++++++++++++++++++++--- 2 files changed, 48 insertions(+), 17 deletions(-) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 8c3bedd..33d5a6d 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -29,10 +29,7 @@ package worker import ( "errors" - "os" - "os/signal" "sync" - "syscall" "time" log "github.com/sirupsen/logrus" @@ -67,7 +64,7 @@ type Worker struct { stop *chan struct{} waitGroup *sync.WaitGroup - sigs *chan os.Signal + done bool shardStatus map[string]*par.ShardStatus @@ -84,6 +81,7 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli processorFactory: factory, kclConfig: kclConfig, metricsConfig: metricsConfig, + done: false, } // create session for Kinesis @@ -147,7 +145,12 @@ func (w *Worker) Start() error { func (w *Worker) Shutdown() { log.Info("Worker shutdown in requested.") + if w.done { + return + } + close(*w.stop) + w.done = true w.waitGroup.Wait() w.mService.Shutdown() @@ -185,10 +188,6 @@ func (w *Worker) initialize() error { w.shardStatus = make(map[string]*par.ShardStatus) - sigs := make(chan os.Signal, 1) - w.sigs = &sigs - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - stopChan := make(chan struct{}) w.stop = &stopChan @@ -282,12 +281,8 @@ func (w *Worker) eventLoop() { } select { - case sig := <-*w.sigs: - log.Infof("Received signal %s. 
Exiting", sig) - w.Shutdown() - return case <-*w.stop: - log.Info("Shutting down") + log.Info("Shutting down...") return case <-time.After(time.Duration(w.kclConfig.ShardSyncIntervalMillis) * time.Millisecond): } diff --git a/clientlibrary/worker/worker_test.go b/clientlibrary/worker/worker_test.go index 013561f..57405dd 100644 --- a/clientlibrary/worker/worker_test.go +++ b/clientlibrary/worker/worker_test.go @@ -24,6 +24,8 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "net/http" "os" + "os/signal" + "syscall" "testing" "time" @@ -57,7 +59,20 @@ func TestWorker(t *testing.T) { WithMetricsBufferTimeMillis(10000). WithMetricsMaxQueueSize(20) - runTest(kclConfig, t) + runTest(kclConfig, false, t) +} + +func TestWorkerWithSigInt(t *testing.T) { + kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithMetricsBufferTimeMillis(10000). + WithMetricsMaxQueueSize(20) + + runTest(kclConfig, true, t) } func TestWorkerStatic(t *testing.T) { @@ -74,7 +89,7 @@ func TestWorkerStatic(t *testing.T) { WithMetricsBufferTimeMillis(10000). WithMetricsMaxQueueSize(20) - runTest(kclConfig, t) + runTest(kclConfig, false, t) } func TestWorkerAssumeRole(t *testing.T) { @@ -98,10 +113,10 @@ func TestWorkerAssumeRole(t *testing.T) { WithMetricsBufferTimeMillis(10000). 
WithMetricsMaxQueueSize(20) - runTest(kclConfig, t) + runTest(kclConfig, false, t) } -func runTest(kclConfig *cfg.KinesisClientLibConfiguration, t *testing.T) { +func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *testing.T) { log.SetOutput(os.Stdout) log.SetLevel(log.DebugLevel) @@ -118,7 +133,20 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, t *testing.T) { err := worker.Start() assert.Nil(t, err) + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + // Signal processing. + go func() { + sig := <-sigs + t.Logf("Received signal %s. Exiting", sig) + worker.Shutdown() + // some other processing before exit. + //os.Exit(0) + }() + // Put some data into stream. + t.Log("Putting data into stream.") for i := 0; i < 100; i++ { // Use random string as partition key to ensure even distribution across shards err := worker.Publish(streamName, utils.RandStringBytesMaskImpr(10), []byte(specstr)) @@ -126,6 +154,13 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, t *testing.T) { t.Errorf("Errorin Publish. %+v", err) } } + t.Log("Done putting data into stream.") + + if triggersig { + t.Log("Trigger signal SIGINT") + p, _ := os.FindProcess(os.Getpid()) + p.Signal(os.Interrupt) + } // wait a few seconds before shutdown processing time.Sleep(10 * time.Second) @@ -146,6 +181,7 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, t *testing.T) { } + t.Log("Calling normal shutdown at the end of application.") worker.Shutdown() } From 250bb2e9ffd6105489208bad177dbd6938322806 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Thu, 27 Jun 2019 21:56:44 -0700 Subject: [PATCH 38/90] Use AWS built-in retry logic and refactor tests (#24) Update the unit test and move integration test under test folder. Update retry logic by switching to AWS's default retry. 
Signed-off-by: Tao Jiang --- HyperMake | 4 +- .../checkpoint/dynamodb-checkpointer.go | 114 ++++++------ .../checkpoint/dynamodb-checkpointer_test.go | 170 ++++++++++++++++++ clientlibrary/worker/worker.go | 17 +- go.mod | 32 ++-- go.sum | 101 ++++++++--- support/scripts/ci.sh | 7 +- support/scripts/functions.sh | 9 +- .../worker => test}/worker_custom_test.go | 26 +-- {clientlibrary/worker => test}/worker_test.go | 7 +- 10 files changed, 324 insertions(+), 163 deletions(-) create mode 100644 clientlibrary/checkpoint/dynamodb-checkpointer_test.go rename {clientlibrary/worker => test}/worker_custom_test.go (76%) rename {clientlibrary/worker => test}/worker_test.go (97%) diff --git a/HyperMake b/HyperMake index aa75a30..f444947 100644 --- a/HyperMake +++ b/HyperMake @@ -24,6 +24,7 @@ targets: - export GO111MODULE=on - go mod download - go mod vendor + - go mod tidy build: description: build source code @@ -43,7 +44,6 @@ targets: description: run CI tests after: - deps - - check cmds: - ./support/scripts/ci.sh @@ -86,7 +86,7 @@ targets: settings: default-targets: - - ci + - test docker: image: 'vmware/go-kcl-toolchain:0.1.2' src-volume: /go/src/github.com/vmware/vmware-go-kcl diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 0b00b6b..1e4ba4f 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -29,14 +29,14 @@ package checkpoint import ( "errors" - "math" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/session" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" - "github.com/matryer/try" log "github.com/sirupsen/logrus" "github.com/vmware/vmware-go-kcl/clientlibrary/config" @@ -46,6 +46,9 @@ import ( const ( // ErrInvalidDynamoDBSchema is returned when there are one or more 
fields missing from the table ErrInvalidDynamoDBSchema = "The DynamoDB schema is invalid and may need to be re-created" + + // NumMaxRetries is the max times of doing retry + NumMaxRetries = 5 ) // DynamoCheckpoint implements the Checkpoint interface using DynamoDB as a backend @@ -54,28 +57,53 @@ type DynamoCheckpoint struct { leaseTableReadCapacity int64 leaseTableWriteCapacity int64 - LeaseDuration int - svc dynamodbiface.DynamoDBAPI - kclConfig *config.KinesisClientLibConfiguration - Retries int + LeaseDuration int + svc dynamodbiface.DynamoDBAPI + kclConfig *config.KinesisClientLibConfiguration + Retries int + skipTableCheck bool } -func NewDynamoCheckpoint(dynamo dynamodbiface.DynamoDBAPI, kclConfig *config.KinesisClientLibConfiguration) Checkpointer { +func NewDynamoCheckpoint(kclConfig *config.KinesisClientLibConfiguration) *DynamoCheckpoint { checkpointer := &DynamoCheckpoint{ TableName: kclConfig.TableName, leaseTableReadCapacity: int64(kclConfig.InitialLeaseTableReadCapacity), leaseTableWriteCapacity: int64(kclConfig.InitialLeaseTableWriteCapacity), LeaseDuration: kclConfig.FailoverTimeMillis, - svc: dynamo, kclConfig: kclConfig, - Retries: 5, + Retries: NumMaxRetries, } + + return checkpointer +} + +// WithDynamoDB is used to provide DynamoDB service +func (checkpointer *DynamoCheckpoint) WithDynamoDB(svc dynamodbiface.DynamoDBAPI) *DynamoCheckpoint { + checkpointer.svc = svc return checkpointer } // Init initialises the DynamoDB Checkpoint func (checkpointer *DynamoCheckpoint) Init() error { - if !checkpointer.doesTableExist() { + log.Info("Creating DynamoDB session") + + s, err := session.NewSession(&aws.Config{ + Region: aws.String(checkpointer.kclConfig.RegionName), + Endpoint: &checkpointer.kclConfig.DynamoDBEndpoint, + Credentials: checkpointer.kclConfig.DynamoDBCredentials, + Retryer: client.DefaultRetryer{NumMaxRetries: checkpointer.Retries}, + }) + + if err != nil { + // no need to move forward + log.Fatalf("Failed in getting DynamoDB session 
for creating Worker: %+v", err) + } + + if checkpointer.svc == nil { + checkpointer.svc = dynamodb.New(s) + } + + if !checkpointer.skipTableCheck && !checkpointer.doesTableExist() { return checkpointer.createTable() } return nil @@ -272,68 +300,30 @@ func (checkpointer *DynamoCheckpoint) conditionalUpdate(conditionExpression stri } func (checkpointer *DynamoCheckpoint) putItem(input *dynamodb.PutItemInput) error { - return try.Do(func(attempt int) (bool, error) { - _, err := checkpointer.svc.PutItem(input) - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException || - awsErr.Code() == dynamodb.ErrCodeInternalServerError && - attempt < checkpointer.Retries { - // Backoff time as recommended by https://docs.aws.amazon.com/general/latest/gr/api-retries.html - time.Sleep(time.Duration(math.Exp2(float64(attempt))*100) * time.Millisecond) - return true, err - } - } - return false, err - }) + _, err := checkpointer.svc.PutItem(input) + return err } func (checkpointer *DynamoCheckpoint) getItem(shardID string) (map[string]*dynamodb.AttributeValue, error) { - var item *dynamodb.GetItemOutput - err := try.Do(func(attempt int) (bool, error) { - var err error - item, err = checkpointer.svc.GetItem(&dynamodb.GetItemInput{ - TableName: aws.String(checkpointer.TableName), - Key: map[string]*dynamodb.AttributeValue{ - LEASE_KEY_KEY: { - S: aws.String(shardID), - }, + item, err := checkpointer.svc.GetItem(&dynamodb.GetItemInput{ + TableName: aws.String(checkpointer.TableName), + Key: map[string]*dynamodb.AttributeValue{ + LEASE_KEY_KEY: { + S: aws.String(shardID), }, - }) - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException || - awsErr.Code() == dynamodb.ErrCodeInternalServerError && - attempt < checkpointer.Retries { - // Backoff time as recommended by https://docs.aws.amazon.com/general/latest/gr/api-retries.html - 
time.Sleep(time.Duration(math.Exp2(float64(attempt))*100) * time.Millisecond) - return true, err - } - } - return false, err + }, }) return item.Item, err } func (checkpointer *DynamoCheckpoint) removeItem(shardID string) error { - err := try.Do(func(attempt int) (bool, error) { - var err error - _, err = checkpointer.svc.DeleteItem(&dynamodb.DeleteItemInput{ - TableName: aws.String(checkpointer.TableName), - Key: map[string]*dynamodb.AttributeValue{ - LEASE_KEY_KEY: { - S: aws.String(shardID), - }, + _, err := checkpointer.svc.DeleteItem(&dynamodb.DeleteItemInput{ + TableName: aws.String(checkpointer.TableName), + Key: map[string]*dynamodb.AttributeValue{ + LEASE_KEY_KEY: { + S: aws.String(shardID), }, - }) - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == dynamodb.ErrCodeProvisionedThroughputExceededException || - awsErr.Code() == dynamodb.ErrCodeInternalServerError && - attempt < checkpointer.Retries { - // Backoff time as recommended by https://docs.aws.amazon.com/general/latest/gr/api-retries.html - time.Sleep(time.Duration(math.Exp2(float64(attempt))*100) * time.Millisecond) - return true, err - } - } - return false, err + }, }) return err } diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go new file mode 100644 index 0000000..20cce87 --- /dev/null +++ b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2019 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/patrobinson/gokini +// +// Copyright 2018 Patrick robinson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +package checkpoint + +import ( + "errors" + "sync" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + + cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" + par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" +) + +func TestDoesTableExist(t *testing.T) { + svc := &mockDynamoDB{tableExist: true} + checkpoint := &DynamoCheckpoint{ + TableName: "TableName", + svc: svc, + } + if !checkpoint.doesTableExist() { + t.Error("Table exists but returned false") + } + + svc = &mockDynamoDB{tableExist: false} + checkpoint.svc = svc + if checkpoint.doesTableExist() { + t.Error("Table does not exist but returned true") + } +} + +func TestGetLeaseNotAquired(t *testing.T) { + svc := &mockDynamoDB{tableExist: true} + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithMetricsBufferTimeMillis(10000). 
+ WithMetricsMaxQueueSize(20) + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) + checkpoint.Init() + err := checkpoint.GetLease(&par.ShardStatus{ + ID: "0001", + Checkpoint: "", + Mux: &sync.Mutex{}, + }, "abcd-efgh") + if err != nil { + t.Errorf("Error getting lease %s", err) + } + + err = checkpoint.GetLease(&par.ShardStatus{ + ID: "0001", + Checkpoint: "", + Mux: &sync.Mutex{}, + }, "ijkl-mnop") + if err == nil || err.Error() != ErrLeaseNotAquired { + t.Errorf("Got a lease when it was already held by abcd-efgh: %s", err) + } +} + +func TestGetLeaseAquired(t *testing.T) { + svc := &mockDynamoDB{tableExist: true} + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithMetricsBufferTimeMillis(10000). + WithMetricsMaxQueueSize(20) + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) + checkpoint.Init() + checkpoint.svc = svc + marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ + "ShardID": { + S: aws.String("0001"), + }, + "AssignedTo": { + S: aws.String("abcd-efgh"), + }, + "LeaseTimeout": { + S: aws.String(time.Now().AddDate(0, -1, 0).UTC().Format(time.RFC3339)), + }, + "SequenceID": { + S: aws.String("deadbeef"), + }, + } + input := &dynamodb.PutItemInput{ + TableName: aws.String("TableName"), + Item: marshalledCheckpoint, + } + checkpoint.svc.PutItem(input) + shard := &par.ShardStatus{ + ID: "0001", + Checkpoint: "deadbeef", + Mux: &sync.Mutex{}, + } + err := checkpoint.GetLease(shard, "ijkl-mnop") + + if err != nil { + t.Errorf("Lease not aquired after timeout %s", err) + } + + id, ok := svc.item[CHECKPOINT_SEQUENCE_NUMBER_KEY] + if !ok { + t.Error("Expected checkpoint to be set by GetLease") + } else if *id.S != "deadbeef" { + t.Errorf("Expected checkpoint to be deadbeef. 
Got '%s'", *id.S) + } +} + +type mockDynamoDB struct { + dynamodbiface.DynamoDBAPI + tableExist bool + item map[string]*dynamodb.AttributeValue +} + +func (m *mockDynamoDB) DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error) { + if !m.tableExist { + return &dynamodb.DescribeTableOutput{}, awserr.New(dynamodb.ErrCodeResourceNotFoundException, "doesNotExist", errors.New("")) + } + return &dynamodb.DescribeTableOutput{}, nil +} + +func (m *mockDynamoDB) PutItem(input *dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) { + m.item = input.Item + return nil, nil +} + +func (m *mockDynamoDB) GetItem(input *dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) { + return &dynamodb.GetItemOutput{ + Item: m.item, + }, nil +} + +func (m *mockDynamoDB) CreateTable(input *dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) { + return &dynamodb.CreateTableOutput{}, nil +} diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 33d5a6d..66066d3 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -36,7 +36,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" @@ -99,20 +98,8 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli } w.kc = kinesis.New(s) - log.Info("Creating DynamoDB session") - - s, err = session.NewSession(&aws.Config{ - Region: aws.String(w.regionName), - Endpoint: &kclConfig.DynamoDBEndpoint, - Credentials: kclConfig.DynamoDBCredentials, - }) - - if err != nil { - // no need to move forward - log.Fatalf("Failed in getting DynamoDB session for creating Worker: %+v", err) - } - - w.checkpointer = chk.NewDynamoCheckpoint(dynamodb.New(s), kclConfig) + log.Info("Creating DynamoDB based checkpointer") + w.checkpointer = chk.NewDynamoCheckpoint(kclConfig) 
if w.metricsConfig == nil { // "" means noop monitor service. i.e. not emitting any metrics. diff --git a/go.mod b/go.mod index 3dc4633..c18af03 100644 --- a/go.mod +++ b/go.mod @@ -1,25 +1,15 @@ module github.com/vmware/vmware-go-kcl require ( - github.com/asaskevich/govalidator v0.0.0-20170507183629-38ddb4612a5d // indirect - github.com/astaxie/beego v0.0.0-20170908222938-a7354d2d0840 // indirect - github.com/aws/aws-sdk-go v0.0.0-20171208220907-365b4d343694 - github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-ini/ini v1.42.0 // indirect - github.com/golang/protobuf v0.0.0-20170622202551-6a1fa9404c0a // indirect - github.com/google/uuid v0.0.0-20170306145142-6a5e28554805 - github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect - github.com/matryer/try v0.0.0-20150601225556-312d2599e12e - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v0.0.0-20170707173355-26b897001974 - github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 // indirect - github.com/prometheus/common v0.0.0-20170707053319-3e6a7635bac6 - github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8 // indirect - github.com/sirupsen/logrus v0.0.0-20170713115724-51dc0fc64317 - github.com/stretchr/testify v1.2.1 - golang.org/x/sys v0.0.0-20190221222158-ec7b60b042fd // indirect - gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect - gopkg.in/yaml.v2 v2.2.1 // indirect + github.com/aws/aws-sdk-go v1.19.38 + github.com/google/uuid v1.1.1 + github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect + github.com/prometheus/client_golang v0.9.3 + github.com/prometheus/common v0.4.1 + github.com/prometheus/procfs v0.0.0-20190523193104-a7aeb8df3389 // indirect + github.com/sirupsen/logrus v1.4.2 + github.com/stretchr/testify v1.3.0 + golang.org/x/net 
v0.0.0-20190522155817-f3200d17e092 // indirect + golang.org/x/sys v0.0.0-20190528012530-adf421d2caf4 // indirect + golang.org/x/text v0.3.2 // indirect ) diff --git a/go.sum b/go.sum index de8b73f..9a5146b 100644 --- a/go.sum +++ b/go.sum @@ -1,39 +1,82 @@ -github.com/asaskevich/govalidator v0.0.0-20170507183629-38ddb4612a5d/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/astaxie/beego v0.0.0-20170908222938-a7354d2d0840/go.mod h1:0R4++1tUqERR0WYFWdfkcrsyoVBCG4DgpDGokT3yb+U= -github.com/aws/aws-sdk-go v0.0.0-20171208220907-365b4d343694 h1:TXabFUZYb1oIrmshTCd9k3gLItnCkX8DYNlzC7zT5y4= -github.com/aws/aws-sdk-go v0.0.0-20171208220907-365b4d343694/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/aws/aws-sdk-go v1.19.38 h1:WKjobgPO4Ua1ww2NJJl2/zQNreUZxvqmEzwMlRjjm9g= +github.com/aws/aws-sdk-go v1.19.38/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-ini/ini v1.42.0 h1:TWr1wGj35+UiWHlBA8er89seFXxzwFn11spilrrj+38= -github.com/go-ini/ini v1.42.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/golang/protobuf v0.0.0-20170622202551-6a1fa9404c0a h1:5X905hYB5maQbwC9ltdknryvCPb4v+D0pWDQXaYQWyk= -github.com/golang/protobuf v0.0.0-20170622202551-6a1fa9404c0a/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/uuid v0.0.0-20170306145142-6a5e28554805 h1:skl44gU1qEIcRpwKjb9bhlRwjvr96wLdvpTogCBBJe8= -github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/matryer/try v0.0.0-20150601225556-312d2599e12e 
h1:wEdOROHcWFFOttvgtwOkyRx5AXGm6GKYZV46vUk7RWY= -github.com/matryer/try v0.0.0-20150601225556-312d2599e12e/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.0.0-20170707173355-26b897001974 h1:7geXN+A5WSloMJfKwHhigBJCSPW0DZOlypTpzg7Nu40= -github.com/prometheus/client_golang v0.0.0-20170707173355-26b897001974/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612 h1:13pIdM2tpaDi4OVe24fgoIS7ZTqMt0QI+bwQsX5hq+g= -github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20170707053319-3e6a7635bac6 h1:UEgo247BhzA25ik6y8sBtRVet8xyPH5+UidPXC+E4t0= 
-github.com/prometheus/common v0.0.0-20170707053319-3e6a7635bac6/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8 h1:Kh7M6mzRpQ2de1rixoSQZr4BTINXFm8WDbeN5ttnwyE= -github.com/prometheus/procfs v0.0.0-20170703101242-e645f4e5aaa8/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/sirupsen/logrus v0.0.0-20170713115724-51dc0fc64317 h1:GpeXjjFK3fgyG/1Dd5felinm3v0XRZKO4cUtMcHGL08= -github.com/sirupsen/logrus v0.0.0-20170713115724-51dc0fc64317/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/stretchr/testify v1.2.1 h1:52QO5WkIUcHGIR7EnGagH88x1bUzqGXTC5/1bDTUQ7U= -github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/sys v0.0.0-20190221222158-ec7b60b042fd h1:JYgmSAJhrvxjUInD1uG+wLPAFAG7TmIJLOgZLI210A8= -golang.org/x/sys v0.0.0-20190221222158-ec7b60b042fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190523193104-a7aeb8df3389 h1:F/k2nob1S9M6v5Xkq7KjSTQirOYaYQord0jR4TwyVmY= +github.com/prometheus/procfs v0.0.0-20190523193104-a7aeb8df3389/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190528012530-adf421d2caf4 h1:gd52YanAQJ4UkvuNi/7z63JEyc6ejHh9QwdzbTiEtAY= +golang.org/x/sys v0.0.0-20190528012530-adf421d2caf4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/support/scripts/ci.sh b/support/scripts/ci.sh index 012ea40..420bde0 100755 --- a/support/scripts/ci.sh +++ b/support/scripts/ci.sh @@ -1,2 +1,7 @@ #!/bin/bash -./support/scripts/test.sh + +# Run only the integration tests +# go test -race ./test +echo "Warning: Cannot find a good way to inject AWS 
credential to hmake container" +echo "Don't use hmake ci. Use the following command directly" +echo "go test -race ./test" diff --git a/support/scripts/functions.sh b/support/scripts/functions.sh index b7265ea..489de81 100644 --- a/support/scripts/functions.sh +++ b/support/scripts/functions.sh @@ -7,7 +7,7 @@ export PROJ_ROOT="$HMAKE_PROJECT_DIR" export GOPATH=/go:$PROJ_ROOT local_go_pkgs() { - find . -name '*.go' | \ + find './clientlibrary/' -name '*.go' | \ grep -Fv '/vendor/' | \ grep -Fv '/go/' | \ grep -Fv '/gen/' | \ @@ -18,13 +18,6 @@ local_go_pkgs() { sort -u } -local_test_pkgs() { - find ./src/test -name '*.go' | \ - grep -Fv '_test.go' | \ - sed -r 's|(.+)/[^/]+\.go$|\1|g' | \ - sort -u -} - version_suffix() { local suffix=$(git log -1 --format=%h 2>/dev/null || true) if [ -n "$suffix" ]; then diff --git a/clientlibrary/worker/worker_custom_test.go b/test/worker_custom_test.go similarity index 76% rename from clientlibrary/worker/worker_custom_test.go rename to test/worker_custom_test.go index a02ba33..55111ac 100644 --- a/clientlibrary/worker/worker_custom_test.go +++ b/test/worker_custom_test.go @@ -16,12 +16,9 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -package worker +package test import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/dynamodb" "os" "testing" "time" @@ -32,6 +29,7 @@ import ( chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" "github.com/vmware/vmware-go-kcl/clientlibrary/utils" + wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" ) func TestCustomWorker(t *testing.T) { @@ -53,25 +51,11 @@ func TestCustomWorker(t *testing.T) { // configure cloudwatch as metrics system metricsConfig := getMetricsConfig(kclConfig, metricsSystem) - // create dynamodb checkpointer. 
- s, err := session.NewSession(&aws.Config{ - Region: aws.String(regionName), - Endpoint: &kclConfig.DynamoDBEndpoint, - Credentials: kclConfig.DynamoDBCredentials, - }) + checkpointer := chk.NewDynamoCheckpoint(kclConfig) - if err != nil { - // no need to move forward - log.Fatalf("Failed in getting DynamoDB session for creating Worker: %+v", err) - } + worker := wk.NewCustomWorker(recordProcessorFactory(t), kclConfig, checkpointer, metricsConfig) - checkpointer := chk.NewDynamoCheckpoint(dynamodb.New(s), kclConfig) - - worker := NewCustomWorker(recordProcessorFactory(t), kclConfig, checkpointer, metricsConfig) - assert.Equal(t, regionName, worker.regionName) - assert.Equal(t, streamName, worker.streamName) - - err = worker.Start() + err := worker.Start() assert.Nil(t, err) // Put some data into stream. diff --git a/clientlibrary/worker/worker_test.go b/test/worker_test.go similarity index 97% rename from clientlibrary/worker/worker_test.go rename to test/worker_test.go index 57405dd..343f158 100644 --- a/clientlibrary/worker/worker_test.go +++ b/test/worker_test.go @@ -16,7 +16,7 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ -package worker +package test import ( "github.com/aws/aws-sdk-go/aws/credentials" @@ -38,6 +38,7 @@ import ( kc "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" "github.com/vmware/vmware-go-kcl/clientlibrary/utils" + wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" ) const ( @@ -126,9 +127,7 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t // configure cloudwatch as metrics system metricsConfig := getMetricsConfig(kclConfig, metricsSystem) - worker := NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig) - assert.Equal(t, regionName, worker.regionName) - assert.Equal(t, streamName, worker.streamName) + worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig) err := worker.Start() assert.Nil(t, err) From fa0bbc42fecab9bdbc3395ff126a1847bbf2933f Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 8 Jul 2019 17:20:33 -0500 Subject: [PATCH 39/90] Update worker to let it inject checkpointer and kinesis (#28) * Update worker to let it inject checkpointer and kinesis Add two functions to inject checkpointer and kinesis for custom implementation or adding mock for unit test. This change also remove the worker_custom.go since it is no longer needed. Test: Update the integration tests to cover newly added functions. Signed-off-by: Tao Jiang * Fix typo on the test function Signed-off-by: Tao Jiang --- clientlibrary/worker/worker-custom.go | 66 ---------------- clientlibrary/worker/worker.go | 61 +++++++++----- test/worker_custom_test.go | 109 +++++++++++++++++++++++++- 3 files changed, 149 insertions(+), 87 deletions(-) delete mode 100644 clientlibrary/worker/worker-custom.go diff --git a/clientlibrary/worker/worker-custom.go b/clientlibrary/worker/worker-custom.go deleted file mode 100644 index 580a416..0000000 --- a/clientlibrary/worker/worker-custom.go +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2019 VMware, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and - * associated documentation files (the "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is furnished to do - * so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT - * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -package worker - -import ( - log "github.com/sirupsen/logrus" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/kinesis" - chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" - "github.com/vmware/vmware-go-kcl/clientlibrary/config" - kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" - "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" -) - -// NewCustomWorker constructs a Worker instance for processing Kinesis stream data by directly inject custom cjheckpointer. 
-func NewCustomWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisClientLibConfiguration, - checkpointer chk.Checkpointer, metricsConfig *metrics.MonitoringConfiguration) *Worker { - w := &Worker{ - streamName: kclConfig.StreamName, - regionName: kclConfig.RegionName, - workerID: kclConfig.WorkerID, - processorFactory: factory, - kclConfig: kclConfig, - checkpointer: checkpointer, - metricsConfig: metricsConfig, - } - - // create session for Kinesis - log.Info("Creating Kinesis session") - - s, err := session.NewSession(&aws.Config{ - Region: aws.String(w.regionName), - Endpoint: &kclConfig.KinesisEndpoint, - Credentials: kclConfig.KinesisCredentials, - }) - - if err != nil { - // no need to move forward - log.Fatalf("Failed in getting Kinesis session for creating Worker: %+v", err) - } - w.kc = kinesis.New(s) - - if w.metricsConfig == nil { - // "" means noop monitor service. i.e. not emitting any metrics. - w.metricsConfig = &metrics.MonitoringConfiguration{MonitoringService: ""} - } - return w -} diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 66066d3..5bed3fa 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -83,24 +83,6 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli done: false, } - // create session for Kinesis - log.Info("Creating Kinesis session") - - s, err := session.NewSession(&aws.Config{ - Region: aws.String(w.regionName), - Endpoint: &kclConfig.KinesisEndpoint, - Credentials: kclConfig.KinesisCredentials, - }) - - if err != nil { - // no need to move forward - log.Fatalf("Failed in getting Kinesis session for creating Worker: %+v", err) - } - w.kc = kinesis.New(s) - - log.Info("Creating DynamoDB based checkpointer") - w.checkpointer = chk.NewDynamoCheckpoint(kclConfig) - if w.metricsConfig == nil { // "" means noop monitor service. i.e. not emitting any metrics. 
w.metricsConfig = &metrics.MonitoringConfiguration{MonitoringService: ""} @@ -108,10 +90,23 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli return w } +// WithKinesis is used to provide Kinesis service for either custom implementation or unit testing. +func (w *Worker) WithKinesis(svc kinesisiface.KinesisAPI) *Worker { + w.kc = svc + return w +} + +// WithCheckpointer is used to provide a custom checkpointer service for non-dynamodb implementation +// or unit testing. +func (w *Worker) WithCheckpointer(checker chk.Checkpointer) *Worker { + w.checkpointer = checker + return w +} + // Run starts consuming data from the stream, and pass it to the application record processors. func (w *Worker) Start() error { if err := w.initialize(); err != nil { - log.Errorf("Failed to start Worker: %+v", err) + log.Errorf("Failed to initialize Worker: %+v", err) return err } @@ -161,6 +156,34 @@ func (w *Worker) Publish(streamName, partitionKey string, data []byte) error { func (w *Worker) initialize() error { log.Info("Worker initialization in progress...") + // Create default Kinesis session + if w.kc == nil { + // create session for Kinesis + log.Info("Creating Kinesis session") + + s, err := session.NewSession(&aws.Config{ + Region: aws.String(w.regionName), + Endpoint: &w.kclConfig.KinesisEndpoint, + Credentials: w.kclConfig.KinesisCredentials, + }) + + if err != nil { + // no need to move forward + log.Fatalf("Failed in getting Kinesis session for creating Worker: %+v", err) + } + w.kc = kinesis.New(s) + } else { + log.Info("Use custom Kinesis service.") + } + + // Create default dynamodb based checkpointer implementation + if w.checkpointer == nil { + log.Info("Creating DynamoDB based checkpointer") + w.checkpointer = chk.NewDynamoCheckpoint(w.kclConfig) + } else { + log.Info("Use custom checkpointer implementation.") + } + err := w.metricsConfig.Init(w.kclConfig.ApplicationName, w.streamName, w.workerID) if err != nil { log.Errorf("Failed 
to start monitoring service: %+v", err) diff --git a/test/worker_custom_test.go b/test/worker_custom_test.go index 55111ac..6b3beb2 100644 --- a/test/worker_custom_test.go +++ b/test/worker_custom_test.go @@ -23,6 +23,10 @@ import ( "testing" "time" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/kinesis" + log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -32,7 +36,7 @@ import ( wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" ) -func TestCustomWorker(t *testing.T) { +func TestWorkerInjectCheckpointer(t *testing.T) { kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). @@ -51,9 +55,12 @@ func TestCustomWorker(t *testing.T) { // configure cloudwatch as metrics system metricsConfig := getMetricsConfig(kclConfig, metricsSystem) + // custom checkpointer or a mock checkpointer. checkpointer := chk.NewDynamoCheckpoint(kclConfig) - worker := wk.NewCustomWorker(recordProcessorFactory(t), kclConfig, checkpointer, metricsConfig) + // Inject a custom checkpointer into the worker. + worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig). + WithCheckpointer(checkpointer) err := worker.Start() assert.Nil(t, err) @@ -71,3 +78,101 @@ func TestCustomWorker(t *testing.T) { time.Sleep(10 * time.Second) worker.Shutdown() } + +func TestWorkerInjectKinesis(t *testing.T) { + kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithMetricsBufferTimeMillis(10000). 
+ WithMetricsMaxQueueSize(20) + + log.SetOutput(os.Stdout) + log.SetLevel(log.DebugLevel) + + assert.Equal(t, regionName, kclConfig.RegionName) + assert.Equal(t, streamName, kclConfig.StreamName) + + // configure cloudwatch as metrics system + metricsConfig := getMetricsConfig(kclConfig, metricsSystem) + + // create custom Kinesis + s, err := session.NewSession(&aws.Config{ + Region: aws.String(regionName), + }) + assert.Nil(t, err) + kc := kinesis.New(s) + + // Inject a custom checkpointer into the worker. + worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig). + WithKinesis(kc) + + err = worker.Start() + assert.Nil(t, err) + + // Put some data into stream. + for i := 0; i < 100; i++ { + // Use random string as partition key to ensure even distribution across shards + err := worker.Publish(streamName, utils.RandStringBytesMaskImpr(10), []byte(specstr)) + if err != nil { + t.Errorf("Errorin Publish. %+v", err) + } + } + + // wait a few seconds before shutdown processing + time.Sleep(10 * time.Second) + worker.Shutdown() +} + +func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) { + kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithMetricsBufferTimeMillis(10000). + WithMetricsMaxQueueSize(20) + + log.SetOutput(os.Stdout) + log.SetLevel(log.DebugLevel) + + assert.Equal(t, regionName, kclConfig.RegionName) + assert.Equal(t, streamName, kclConfig.StreamName) + + // configure cloudwatch as metrics system + metricsConfig := getMetricsConfig(kclConfig, metricsSystem) + + // create custom Kinesis + s, err := session.NewSession(&aws.Config{ + Region: aws.String(regionName), + }) + assert.Nil(t, err) + kc := kinesis.New(s) + + // custom checkpointer or a mock checkpointer. 
+ checkpointer := chk.NewDynamoCheckpoint(kclConfig) + + // Inject both custom checkpointer and kinesis into the worker. + worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig). + WithKinesis(kc). + WithCheckpointer(checkpointer) + + err = worker.Start() + assert.Nil(t, err) + + // Put some data into stream. + for i := 0; i < 100; i++ { + // Use random string as partition key to ensure even distribution across shards + err := worker.Publish(streamName, utils.RandStringBytesMaskImpr(10), []byte(specstr)) + if err != nil { + t.Errorf("Errorin Publish. %+v", err) + } + } + + // wait a few seconds before shutdown processing + time.Sleep(10 * time.Second) + worker.Shutdown() +} From 83698849525d5fe96ae7711af2d7224fbe44771d Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 8 Jul 2019 18:04:37 -0500 Subject: [PATCH 40/90] Remove shard info in checkpointer (#29) Currently, only the locally cached shard info is removed when the worker loses the lease. The info inside the checkpointer (DynamoDB) is not removed. This causes the lease to be held until lease expiration, and it might take too long before the shard is ready for another worker to grab. This change releases the lease in the checkpointer immediately. The user needs to ensure appropriate checkpointing before returning from the Shutdown callback.
Signed-off-by: Tao Jiang --- clientlibrary/worker/shard-consumer.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 7899a67..7cd8c00 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -281,9 +281,18 @@ func (sc *ShardConsumer) waitOnParentShard(shard *par.ShardStatus) error { } } -// Cleanup the internal lease cache +// releaseLease releases the lease for the specific shard func (sc *ShardConsumer) releaseLease(shard *par.ShardStatus) { log.Infof("Release lease for shard %s", shard.ID) + + // remove the shard entry in dynamoDB as well + // Note: The worker has been terminated anyway and we don't need to do anything in case of error here. + // The shard information for checkpointer has been removed and it will be recreated during syncShard. + if err := sc.checkpointer.RemoveLeaseInfo(shard.ID); err != nil { + log.Errorf("Failed to remove shard lease info: %s Error: %+v", shard.ID, err) + } + + // remove the shard owner from local status cache shard.SetLeaseOwner("") // reporting lease lose metrics sc.mService.LeaseLost(shard.ID) From ac8d341cb171e96d8d894f4ed0e3443321cec393 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 8 Jul 2019 20:40:57 -0500 Subject: [PATCH 41/90] Revert "Remove shard info in checkpointer (#29)" (#30) This reverts commit 7e382e90d5d9eb30ed38cc1ab452336860f48b57. 
--- clientlibrary/worker/shard-consumer.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 7cd8c00..7899a67 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -281,18 +281,9 @@ func (sc *ShardConsumer) waitOnParentShard(shard *par.ShardStatus) error { } } -// releaseLease releases the lease for the specific shard +// Cleanup the internal lease cache func (sc *ShardConsumer) releaseLease(shard *par.ShardStatus) { log.Infof("Release lease for shard %s", shard.ID) - - // remove the shard entry in dynamoDB as well - // Note: The worker has been terminated anyway and we don't need to do anything in case of error here. - // The shard information for checkpointer has been removed and it will be recreated during syncShard. - if err := sc.checkpointer.RemoveLeaseInfo(shard.ID); err != nil { - log.Errorf("Failed to remove shard lease info: %s Error: %+v", shard.ID, err) - } - - // remove the shard owner from local status cache shard.SetLeaseOwner("") // reporting lease lose metrics sc.mService.LeaseLost(shard.ID) From 46fea317deaabdbad337d783e474cac88fad2dbf Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Tue, 9 Jul 2019 21:24:11 -0500 Subject: [PATCH 42/90] Release shard lease after shutdown (#31) * Release shard lease after shutdown Currently, only the locally cached shard info is removed when the worker loses the lease. The info inside the checkpointer (DynamoDB) is not removed. This causes the lease to be held until lease expiration, and it might take too long before the shard is ready for another worker to grab. This change releases the lease in the checkpointer immediately. The user needs to ensure appropriate checkpointing before returning from the Shutdown callback. Test: updated the unit test and integration test to ensure only the shard owner is wiped out, leaving the checkpoint information intact.
Signed-off-by: Tao Jiang * Add code coverage reporting Add code coverage reporting for unit test. Signed-off-by: Tao Jiang --- clientlibrary/checkpoint/checkpointer.go | 12 ++++ .../checkpoint/dynamodb-checkpointer.go | 49 +++++++++++----- .../checkpoint/dynamodb-checkpointer_test.go | 58 +++++++++++++++++-- clientlibrary/worker/shard-consumer.go | 7 +++ support/scripts/test.sh | 2 +- test/worker_custom_test.go | 16 +++++ test/worker_test.go | 3 + 7 files changed, 126 insertions(+), 21 deletions(-) diff --git a/clientlibrary/checkpoint/checkpointer.go b/clientlibrary/checkpoint/checkpointer.go index 1f48349..b3af0b7 100644 --- a/clientlibrary/checkpoint/checkpointer.go +++ b/clientlibrary/checkpoint/checkpointer.go @@ -48,11 +48,23 @@ const ( // Checkpointer handles checkpointing when a record has been processed type Checkpointer interface { + // Init initialises the Checkpoint Init() error + + // GetLease attempts to gain a lock on the given shard GetLease(*par.ShardStatus, string) error + + // CheckpointSequence writes a checkpoint at the designated sequence ID CheckpointSequence(*par.ShardStatus) error + + // FetchCheckpoint retrieves the checkpoint for the given shard FetchCheckpoint(*par.ShardStatus) error + + // RemoveLeaseInfo to remove lease info for shard entry because the shard no longer exists RemoveLeaseInfo(string) error + + // RemoveLeaseOwner to remove lease owner for the shard entry to make the shard available for reassignment + RemoveLeaseOwner(string) error } // ErrSequenceIDNotFound is returned by FetchCheckpoint when no SequenceID is found diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 1e4ba4f..e80c6c0 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -89,7 +89,7 @@ func (checkpointer *DynamoCheckpoint) Init() error { s, err := session.NewSession(&aws.Config{ Region: 
aws.String(checkpointer.kclConfig.RegionName), - Endpoint: &checkpointer.kclConfig.DynamoDBEndpoint, + Endpoint: aws.String(checkpointer.kclConfig.DynamoDBEndpoint), Credentials: checkpointer.kclConfig.DynamoDBCredentials, Retryer: client.DefaultRetryer{NumMaxRetries: checkpointer.Retries}, }) @@ -133,43 +133,45 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign if err != nil { return err } + if !time.Now().UTC().After(currentLeaseTimeout) && assignedTo != newAssignTo { return errors.New(ErrLeaseNotAquired) } + log.Debugf("Attempting to get a lock for shard: %s, leaseTimeout: %s, assignedTo: %s", shard.ID, currentLeaseTimeout, assignedTo) conditionalExpression = "ShardID = :id AND AssignedTo = :assigned_to AND LeaseTimeout = :lease_timeout" expressionAttributeValues = map[string]*dynamodb.AttributeValue{ ":id": { - S: &shard.ID, + S: aws.String(shard.ID), }, ":assigned_to": { - S: &assignedTo, + S: aws.String(assignedTo), }, ":lease_timeout": { - S: &leaseTimeout, + S: aws.String(leaseTimeout), }, } } marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ LEASE_KEY_KEY: { - S: &shard.ID, + S: aws.String(shard.ID), }, LEASE_OWNER_KEY: { - S: &newAssignTo, + S: aws.String(newAssignTo), }, LEASE_TIMEOUT_KEY: { - S: &newLeaseTimeoutString, + S: aws.String(newLeaseTimeoutString), }, } if len(shard.ParentShardId) > 0 { - marshalledCheckpoint[PARENT_SHARD_ID_KEY] = &dynamodb.AttributeValue{S: &shard.ParentShardId} + marshalledCheckpoint[PARENT_SHARD_ID_KEY] = &dynamodb.AttributeValue{S: aws.String(shard.ParentShardId)} } if shard.Checkpoint != "" { marshalledCheckpoint[CHECKPOINT_SEQUENCE_NUMBER_KEY] = &dynamodb.AttributeValue{ - S: &shard.Checkpoint, + S: aws.String(shard.Checkpoint), } } @@ -196,16 +198,16 @@ func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard *par.ShardStatus) leaseTimeout := shard.LeaseTimeout.UTC().Format(time.RFC3339) marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ LEASE_KEY_KEY: { - S: 
&shard.ID, + S: aws.String(shard.ID), }, CHECKPOINT_SEQUENCE_NUMBER_KEY: { - S: &shard.Checkpoint, + S: aws.String(shard.Checkpoint), }, LEASE_OWNER_KEY: { - S: &shard.AssignedTo, + S: aws.String(shard.AssignedTo), }, LEASE_TIMEOUT_KEY: { - S: &leaseTimeout, + S: aws.String(leaseTimeout), }, } @@ -230,10 +232,10 @@ func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *par.ShardStatus) er log.Debugf("Retrieved Shard Iterator %s", *sequenceID.S) shard.Mux.Lock() defer shard.Mux.Unlock() - shard.Checkpoint = *sequenceID.S + shard.Checkpoint = aws.StringValue(sequenceID.S) if assignedTo, ok := checkpoint[LEASE_OWNER_KEY]; ok { - shard.AssignedTo = *assignedTo.S + shard.AssignedTo = aws.StringValue(assignedTo.S) } return nil } @@ -251,6 +253,23 @@ func (checkpointer *DynamoCheckpoint) RemoveLeaseInfo(shardID string) error { return err } +// RemoveLeaseOwner to remove lease owner for the shard entry +func (checkpointer *DynamoCheckpoint) RemoveLeaseOwner(shardID string) error { + input := &dynamodb.UpdateItemInput{ + TableName: aws.String(checkpointer.TableName), + Key: map[string]*dynamodb.AttributeValue{ + LEASE_KEY_KEY: { + S: aws.String(shardID), + }, + }, + UpdateExpression: aws.String("remove " + LEASE_OWNER_KEY), + } + + _, err := checkpointer.svc.UpdateItem(input) + + return err +} + func (checkpointer *DynamoCheckpoint) createTable() error { input := &dynamodb.CreateTableInput{ AttributeDefinitions: []*dynamodb.AttributeDefinition{ diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go index 20cce87..1b824b7 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go @@ -29,6 +29,7 @@ package checkpoint import ( "errors" + "github.com/stretchr/testify/assert" "sync" "testing" "time" @@ -43,7 +44,7 @@ import ( ) func TestDoesTableExist(t *testing.T) { - svc := &mockDynamoDB{tableExist: true} + svc := 
&mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} checkpoint := &DynamoCheckpoint{ TableName: "TableName", svc: svc, @@ -60,7 +61,7 @@ func TestDoesTableExist(t *testing.T) { } func TestGetLeaseNotAquired(t *testing.T) { - svc := &mockDynamoDB{tableExist: true} + svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). @@ -91,7 +92,7 @@ func TestGetLeaseNotAquired(t *testing.T) { } func TestGetLeaseAquired(t *testing.T) { - svc := &mockDynamoDB{tableExist: true} + svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). @@ -102,7 +103,6 @@ func TestGetLeaseAquired(t *testing.T) { WithMetricsMaxQueueSize(20) checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) checkpoint.Init() - checkpoint.svc = svc marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ "ShardID": { S: aws.String("0001"), @@ -139,6 +139,23 @@ func TestGetLeaseAquired(t *testing.T) { } else if *id.S != "deadbeef" { t.Errorf("Expected checkpoint to be deadbeef. 
Got '%s'", *id.S) } + + // release owner info + err = checkpoint.RemoveLeaseOwner(shard.ID) + assert.Nil(t, err) + + status := &par.ShardStatus{ + ID: shard.ID, + Mux: &sync.Mutex{}, + } + checkpoint.FetchCheckpoint(status) + + // checkpointer and parent shard id should be the same + assert.Equal(t, shard.Checkpoint, status.Checkpoint) + assert.Equal(t, shard.ParentShardId, status.ParentShardId) + + // Only the lease owner has been wiped out + assert.Equal(t, "", status.GetLeaseOwner()) } type mockDynamoDB struct { @@ -155,7 +172,28 @@ func (m *mockDynamoDB) DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.De } func (m *mockDynamoDB) PutItem(input *dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) { - m.item = input.Item + item := input.Item + + if shardID, ok := item[LEASE_KEY_KEY]; ok { + m.item[LEASE_KEY_KEY] = shardID + } + + if owner, ok := item[LEASE_OWNER_KEY]; ok { + m.item[LEASE_OWNER_KEY] = owner + } + + if timeout, ok := item[LEASE_TIMEOUT_KEY]; ok { + m.item[LEASE_TIMEOUT_KEY] = timeout + } + + if checkpoint, ok := item[CHECKPOINT_SEQUENCE_NUMBER_KEY]; ok { + m.item[CHECKPOINT_SEQUENCE_NUMBER_KEY] = checkpoint + } + + if parent, ok := item[PARENT_SHARD_ID_KEY]; ok { + m.item[PARENT_SHARD_ID_KEY] = parent + } + return nil, nil } @@ -165,6 +203,16 @@ func (m *mockDynamoDB) GetItem(input *dynamodb.GetItemInput) (*dynamodb.GetItemO }, nil } +func (m *mockDynamoDB) UpdateItem(input *dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) { + exp := input.UpdateExpression + + if aws.StringValue(exp) == "remove "+LEASE_OWNER_KEY { + delete(m.item, LEASE_OWNER_KEY) + } + + return nil, nil +} + func (m *mockDynamoDB) CreateTable(input *dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) { return &dynamodb.CreateTableOutput{}, nil } diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 7899a67..139c27d 100644 --- a/clientlibrary/worker/shard-consumer.go +++ 
b/clientlibrary/worker/shard-consumer.go @@ -285,6 +285,13 @@ func (sc *ShardConsumer) waitOnParentShard(shard *par.ShardStatus) error { func (sc *ShardConsumer) releaseLease(shard *par.ShardStatus) { log.Infof("Release lease for shard %s", shard.ID) shard.SetLeaseOwner("") + + // Release the lease by wiping out the lease owner for the shard + // Note: we don't need to do anything in case of error here and shard lease will eventuall be expired. + if err := sc.checkpointer.RemoveLeaseOwner(shard.ID); err != nil { + log.Errorf("Failed to release shard lease or shard: %s Error: %+v", shard.ID, err) + } + // reporting lease lose metrics sc.mService.LeaseLost(shard.ID) } diff --git a/support/scripts/test.sh b/support/scripts/test.sh index 78c0986..ee8226e 100755 --- a/support/scripts/test.sh +++ b/support/scripts/test.sh @@ -2,4 +2,4 @@ . support/scripts/functions.sh # Run only the unit tests and not integration tests -go test -race $(local_go_pkgs) +go test -cover -race $(local_go_pkgs) diff --git a/test/worker_custom_test.go b/test/worker_custom_test.go index 6b3beb2..2ac300f 100644 --- a/test/worker_custom_test.go +++ b/test/worker_custom_test.go @@ -20,6 +20,7 @@ package test import ( "os" + "sync" "testing" "time" @@ -32,6 +33,7 @@ import ( "github.com/stretchr/testify/assert" chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" + par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" "github.com/vmware/vmware-go-kcl/clientlibrary/utils" wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" ) @@ -77,6 +79,20 @@ func TestWorkerInjectCheckpointer(t *testing.T) { // wait a few seconds before shutdown processing time.Sleep(10 * time.Second) worker.Shutdown() + + // verify the checkpointer after graceful shutdown + status := &par.ShardStatus{ + ID: shardID, + Mux: &sync.Mutex{}, + } + checkpointer.FetchCheckpoint(status) + + // checkpointer should be the same + assert.NotEmpty(t, 
status.Checkpoint) + + // Only the lease owner has been wiped out + assert.Equal(t, "", status.GetLeaseOwner()) + } func TestWorkerInjectKinesis(t *testing.T) { diff --git a/test/worker_test.go b/test/worker_test.go index 343f158..d950058 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -50,6 +50,8 @@ const ( const specstr = `{"name":"kube-qQyhk","networking":{"containerNetworkCidr":"10.2.0.0/16"},"orgName":"BVT-Org-cLQch","projectName":"project-tDSJd","serviceLevel":"DEVELOPER","size":{"count":1},"version":"1.8.1-4"}` const metricsSystem = "cloudwatch" +var shardID string + func TestWorker(t *testing.T) { kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). @@ -235,6 +237,7 @@ type dumpRecordProcessor struct { func (dd *dumpRecordProcessor) Initialize(input *kc.InitializationInput) { dd.t.Logf("Processing SharId: %v at checkpoint: %v", input.ShardId, aws.StringValue(input.ExtendedSequenceNumber.SequenceNumber)) + shardID = input.ShardId } func (dd *dumpRecordProcessor) ProcessRecords(input *kc.ProcessRecordsInput) { From 4f79203f44d2979a8151d77e55e22d7e497af624 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Thu, 3 Oct 2019 20:45:34 -0500 Subject: [PATCH 43/90] Get rid of unused skipTableCheck (#39) --- clientlibrary/checkpoint/dynamodb-checkpointer.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index e80c6c0..684d0cc 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -61,7 +61,6 @@ type DynamoCheckpoint struct { svc dynamodbiface.DynamoDBAPI kclConfig *config.KinesisClientLibConfiguration Retries int - skipTableCheck bool } func NewDynamoCheckpoint(kclConfig *config.KinesisClientLibConfiguration) *DynamoCheckpoint { @@ -103,7 +102,7 @@ func (checkpointer *DynamoCheckpoint) Init() 
error { checkpointer.svc = dynamodb.New(s) } - if !checkpointer.skipTableCheck && !checkpointer.doesTableExist() { + if !checkpointer.doesTableExist() { return checkpointer.createTable() } return nil From c8a5aa1891bf239b232b7bf0a58ba0112fbaec97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Rainone?= <476650+arl@users.noreply.github.com> Date: Sun, 27 Oct 2019 16:43:21 +0100 Subject: [PATCH 44/90] Fix possible deadlock with getRecords in eventLoop (#42) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A waitgroup should always be incremented before the creation of the goroutine which decrements it (through Done) or there is the potential for deadlock. That was not the case since the wg.Add was performed after the `go getRecords() ` line. Also, since there's only one path leading to the wg.Done in getRecords, I moved wg.Done out of the getRecords function and placed it alongside the goroutine creation, thus totally removing the need to pass the waitgroup pointer to the sc instance, this lead to the removal of the `waitGroup` field from the `ShardConsumer` struct. This has been tested in production and didn't create any problem. 
Signed-off-by: Aurélien Rainone --- clientlibrary/worker/shard-consumer.go | 5 ++--- clientlibrary/worker/worker.go | 9 +++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 139c27d..87b24aa 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -28,11 +28,12 @@ package worker import ( - log "github.com/sirupsen/logrus" "math" "sync" "time" + log "github.com/sirupsen/logrus" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/kinesis" @@ -79,7 +80,6 @@ type ShardConsumer struct { recordProcessor kcl.IRecordProcessor kclConfig *config.KinesisClientLibConfiguration stop *chan struct{} - waitGroup *sync.WaitGroup consumerID string mService metrics.MonitoringService state ShardConsumerState @@ -126,7 +126,6 @@ func (sc *ShardConsumer) getShardIterator(shard *par.ShardStatus) (*string, erro // getRecords continously poll one shard for data record // Precondition: it currently has the lease on the shard. func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { - defer sc.waitGroup.Done() defer sc.releaseLease(shard) // If the shard is child shard, need to wait until the parent finished. 
diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 5bed3fa..2a7bd52 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -201,8 +201,7 @@ func (w *Worker) initialize() error { stopChan := make(chan struct{}) w.stop = &stopChan - wg := sync.WaitGroup{} - w.waitGroup = &wg + w.waitGroup = &sync.WaitGroup{} log.Info("Initialization complete.") @@ -220,7 +219,6 @@ func (w *Worker) newShardConsumer(shard *par.ShardStatus) *ShardConsumer { kclConfig: w.kclConfig, consumerID: w.workerID, stop: w.stop, - waitGroup: w.waitGroup, mService: w.mService, state: WAITING_ON_PARENT_SHARDS, } @@ -283,8 +281,11 @@ func (w *Worker) eventLoop() { log.Infof("Start Shard Consumer for shard: %v", shard.ID) sc := w.newShardConsumer(shard) - go sc.getRecords(shard) w.waitGroup.Add(1) + go func() { + defer w.waitGroup.Done() + sc.getRecords(shard) + }() // exit from for loop and not to grab more shard for now. break } From 0d91fbd443a1d2d0edac77514db2bfbce9a303ed Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 28 Oct 2019 07:08:18 -0500 Subject: [PATCH 45/90] Add generic logger support (#43) * Add generic logger support The current KCL has tight coupling with logrus, and this causes issues for customers who want to use a different logging system such as zap. The issue has been opened via: https://github.com/vmware/vmware-go-kcl/issues/27 This change creates a logger interface that abstracts over logrus and zap. It makes it easy to add support for other logging systems in the future. The work is based on: https://www.mountedthoughts.com/golang-logger-interface/ Some updates are made in order to make the logging system easily injectable and to add more unit tests. Tested against real Kinesis and DynamoDB as well. Signed-off-by: Tao Jiang * Add lumberjack configuration options to have fine grained control Update the file log configuration by adding most of the lumberjack configuration options to avoid hardcoded default values.
Let user to specify the value because log retention and rotation are very important for prod environment. Signed-off-by: Tao Jiang --- .../checkpoint/dynamodb-checkpointer.go | 28 +-- clientlibrary/config/config.go | 4 + clientlibrary/config/config_test.go | 5 + clientlibrary/config/kcl-config.go | 13 +- clientlibrary/metrics/cloudwatch.go | 22 +-- clientlibrary/metrics/interfaces.go | 9 + clientlibrary/metrics/prometheus.go | 19 +- clientlibrary/worker/shard-consumer.go | 7 +- clientlibrary/worker/worker.go | 40 +++-- go.mod | 7 + go.sum | 13 ++ logger/logger.go | 115 ++++++++++++ logger/logger_test.go | 84 +++++++++ logger/logrus.go | 170 ++++++++++++++++++ logger/zap.go | 151 ++++++++++++++++ test/logger_test.go | 87 +++++++++ test/worker_test.go | 54 ++++-- 17 files changed, 768 insertions(+), 60 deletions(-) create mode 100644 logger/logger.go create mode 100644 logger/logger_test.go create mode 100644 logger/logrus.go create mode 100644 logger/zap.go create mode 100644 test/logger_test.go diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 684d0cc..1cce247 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -29,18 +29,18 @@ package checkpoint import ( "errors" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/session" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" - log "github.com/sirupsen/logrus" "github.com/vmware/vmware-go-kcl/clientlibrary/config" par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" + "github.com/vmware/vmware-go-kcl/logger" ) const ( @@ -53,18 +53,20 @@ const ( // DynamoCheckpoint implements the Checkpoint interface using DynamoDB as a backend type 
DynamoCheckpoint struct { + log logger.Logger TableName string leaseTableReadCapacity int64 leaseTableWriteCapacity int64 - LeaseDuration int - svc dynamodbiface.DynamoDBAPI - kclConfig *config.KinesisClientLibConfiguration - Retries int + LeaseDuration int + svc dynamodbiface.DynamoDBAPI + kclConfig *config.KinesisClientLibConfiguration + Retries int } func NewDynamoCheckpoint(kclConfig *config.KinesisClientLibConfiguration) *DynamoCheckpoint { checkpointer := &DynamoCheckpoint{ + log: kclConfig.Logger, TableName: kclConfig.TableName, leaseTableReadCapacity: int64(kclConfig.InitialLeaseTableReadCapacity), leaseTableWriteCapacity: int64(kclConfig.InitialLeaseTableWriteCapacity), @@ -84,7 +86,7 @@ func (checkpointer *DynamoCheckpoint) WithDynamoDB(svc dynamodbiface.DynamoDBAPI // Init initialises the DynamoDB Checkpoint func (checkpointer *DynamoCheckpoint) Init() error { - log.Info("Creating DynamoDB session") + checkpointer.log.Infof("Creating DynamoDB session") s, err := session.NewSession(&aws.Config{ Region: aws.String(checkpointer.kclConfig.RegionName), @@ -95,7 +97,7 @@ func (checkpointer *DynamoCheckpoint) Init() error { if err != nil { // no need to move forward - log.Fatalf("Failed in getting DynamoDB session for creating Worker: %+v", err) + checkpointer.log.Fatalf("Failed in getting DynamoDB session for creating Worker: %+v", err) } if checkpointer.svc == nil { @@ -137,7 +139,7 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign return errors.New(ErrLeaseNotAquired) } - log.Debugf("Attempting to get a lock for shard: %s, leaseTimeout: %s, assignedTo: %s", shard.ID, currentLeaseTimeout, assignedTo) + checkpointer.log.Debugf("Attempting to get a lock for shard: %s, leaseTimeout: %s, assignedTo: %s", shard.ID, currentLeaseTimeout, assignedTo) conditionalExpression = "ShardID = :id AND AssignedTo = :assigned_to AND LeaseTimeout = :lease_timeout" expressionAttributeValues = map[string]*dynamodb.AttributeValue{ ":id": { @@ 
-228,7 +230,7 @@ func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *par.ShardStatus) er if !ok { return ErrSequenceIDNotFound } - log.Debugf("Retrieved Shard Iterator %s", *sequenceID.S) + checkpointer.log.Debugf("Retrieved Shard Iterator %s", *sequenceID.S) shard.Mux.Lock() defer shard.Mux.Unlock() shard.Checkpoint = aws.StringValue(sequenceID.S) @@ -244,9 +246,9 @@ func (checkpointer *DynamoCheckpoint) RemoveLeaseInfo(shardID string) error { err := checkpointer.removeItem(shardID) if err != nil { - log.Errorf("Error in removing lease info for shard: %s, Error: %+v", shardID, err) + checkpointer.log.Errorf("Error in removing lease info for shard: %s, Error: %+v", shardID, err) } else { - log.Infof("Lease info for shard: %s has been removed.", shardID) + checkpointer.log.Infof("Lease info for shard: %s has been removed.", shardID) } return err diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index 3200725..9db8d4f 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -41,6 +41,7 @@ import ( "github.com/aws/aws-sdk-go/aws" creds "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/vmware/vmware-go-kcl/logger" ) const ( @@ -256,6 +257,9 @@ type ( // Worker should skip syncing shards and leases at startup if leases are present // This is useful for optimizing deployments to large fleets working on a stable stream. SkipShardSyncAtWorkerInitializationIfLeasesExist bool + + // Logger used to log message. 
+ Logger logger.Logger } ) diff --git a/clientlibrary/config/config_test.go b/clientlibrary/config/config_test.go index 466d6b0..6f90796 100644 --- a/clientlibrary/config/config_test.go +++ b/clientlibrary/config/config_test.go @@ -19,6 +19,7 @@ package config import ( + "github.com/vmware/vmware-go-kcl/logger" "testing" "github.com/stretchr/testify/assert" @@ -37,4 +38,8 @@ func TestConfig(t *testing.T) { assert.Equal(t, "appName", kclConfig.ApplicationName) assert.Equal(t, 500, kclConfig.FailoverTimeMillis) + + contextLogger := kclConfig.Logger.WithFields(logger.Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with default logger") + contextLogger.Infof("Default logger is awesome") } diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index 1419ad8..f1dc058 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -34,10 +34,12 @@ package config import ( - "github.com/aws/aws-sdk-go/aws/credentials" + "log" "time" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/vmware/vmware-go-kcl/clientlibrary/utils" + "github.com/vmware/vmware-go-kcl/logger" ) // NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. 
@@ -92,6 +94,7 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regio InitialLeaseTableReadCapacity: DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, InitialLeaseTableWriteCapacity: DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, SkipShardSyncAtWorkerInitializationIfLeasesExist: DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST, + Logger: logger.GetDefaultLogger(), } } @@ -201,3 +204,11 @@ func (c *KinesisClientLibConfiguration) WithMetricsMaxQueueSize(metricsMaxQueueS c.MetricsMaxQueueSize = metricsMaxQueueSize return c } + +func (c *KinesisClientLibConfiguration) WithLogger(logger logger.Logger) *KinesisClientLibConfiguration { + if logger == nil { + log.Panic("Logger cannot be null") + } + c.Logger = logger + return c +} diff --git a/clientlibrary/metrics/cloudwatch.go b/clientlibrary/metrics/cloudwatch.go index 477f127..a189656 100644 --- a/clientlibrary/metrics/cloudwatch.go +++ b/clientlibrary/metrics/cloudwatch.go @@ -28,15 +28,16 @@ package metrics import ( - "github.com/aws/aws-sdk-go/aws/credentials" "sync" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface" - log "github.com/sirupsen/logrus" + + "github.com/vmware/vmware-go-kcl/logger" ) type CloudWatchMonitoringService struct { @@ -45,6 +46,7 @@ type CloudWatchMonitoringService struct { WorkerID string Region string Credentials *credentials.Credentials + Logger logger.Logger // control how often to pusblish to CloudWatch MetricsBufferTimeMillis int @@ -72,7 +74,7 @@ func (cw *CloudWatchMonitoringService) Init() error { cfg.Credentials = cw.Credentials s, err := session.NewSession(cfg) if err != nil { - log.Errorf("Error in creating session for cloudwatch. %+v", err) + cw.Logger.Errorf("Error in creating session for cloudwatch. 
%+v", err) return err } cw.svc = cloudwatch.New(s) @@ -94,10 +96,10 @@ func (cw *CloudWatchMonitoringService) Start() error { } func (cw *CloudWatchMonitoringService) Shutdown() { - log.Info("Shutting down cloudwatch metrics system...") + cw.Logger.Infof("Shutting down cloudwatch metrics system...") close(*cw.stop) cw.waitGroup.Wait() - log.Info("Cloudwatch metrics system has been shutdown.") + cw.Logger.Infof("Cloudwatch metrics system has been shutdown.") } // Start daemon to flush metrics periodically @@ -106,14 +108,14 @@ func (cw *CloudWatchMonitoringService) eventloop() { for { if err := cw.flush(); err != nil { - log.Errorf("Error sending metrics to CloudWatch. %+v", err) + cw.Logger.Errorf("Error sending metrics to CloudWatch. %+v", err) } select { case <-*cw.stop: - log.Info("Shutting down monitoring system") + cw.Logger.Infof("Shutting down monitoring system") if err := cw.flush(); err != nil { - log.Errorf("Error sending metrics to CloudWatch. %+v", err) + cw.Logger.Errorf("Error sending metrics to CloudWatch. %+v", err) } return case <-time.After(time.Duration(cw.MetricsBufferTimeMillis) * time.Millisecond): @@ -237,7 +239,7 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat metric.getRecordsTime = []float64{} metric.processRecordsTime = []float64{} } else { - log.Errorf("Error in publishing cloudwatch metrics. Error: %+v", err) + cw.Logger.Errorf("Error in publishing cloudwatch metrics. Error: %+v", err) } metric.Unlock() @@ -245,7 +247,7 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat } func (cw *CloudWatchMonitoringService) flush() error { - log.Debugf("Flushing metrics data. Stream: %s, Worker: %s", cw.KinesisStream, cw.WorkerID) + cw.Logger.Debugf("Flushing metrics data. 
Stream: %s, Worker: %s", cw.KinesisStream, cw.WorkerID) // publish per shard metrics cw.shardMetrics.Range(func(k, v interface{}) bool { shard, metric := k.(string), v.(*cloudWatchMetrics) diff --git a/clientlibrary/metrics/interfaces.go b/clientlibrary/metrics/interfaces.go index 41ef053..c79cb61 100644 --- a/clientlibrary/metrics/interfaces.go +++ b/clientlibrary/metrics/interfaces.go @@ -29,6 +29,7 @@ package metrics import ( "fmt" + "github.com/vmware/vmware-go-kcl/logger" ) // MonitoringConfiguration allows you to configure how record processing metrics are exposed @@ -38,6 +39,7 @@ type MonitoringConfiguration struct { Prometheus PrometheusMonitoringService CloudWatch CloudWatchMonitoringService service MonitoringService + Logger logger.Logger } type MonitoringService interface { @@ -60,18 +62,25 @@ func (m *MonitoringConfiguration) Init(nameSpace, streamName string, workerID st return nil } + // Config with default logger if logger is not specified. + if m.Logger == nil { + m.Logger = logger.GetDefaultLogger() + } + switch m.MonitoringService { case "prometheus": m.Prometheus.Namespace = nameSpace m.Prometheus.KinesisStream = streamName m.Prometheus.WorkerID = workerID m.Prometheus.Region = m.Region + m.Prometheus.Logger = m.Logger m.service = &m.Prometheus case "cloudwatch": m.CloudWatch.Namespace = nameSpace m.CloudWatch.KinesisStream = streamName m.CloudWatch.WorkerID = workerID m.CloudWatch.Region = m.Region + m.CloudWatch.Logger = m.Logger m.service = &m.CloudWatch default: return fmt.Errorf("Invalid monitoring service type %s", m.MonitoringService) diff --git a/clientlibrary/metrics/prometheus.go b/clientlibrary/metrics/prometheus.go index 81c08ce..3dae914 100644 --- a/clientlibrary/metrics/prometheus.go +++ b/clientlibrary/metrics/prometheus.go @@ -32,7 +32,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - log "github.com/sirupsen/logrus" + + 
"github.com/vmware/vmware-go-kcl/logger" ) // PrometheusMonitoringService to start Prometheus as metrics system. @@ -41,10 +42,12 @@ import ( type PrometheusMonitoringService struct { ListenAddress string - Namespace string - KinesisStream string - WorkerID string - Region string + Namespace string + KinesisStream string + WorkerID string + Region string + Logger logger.Logger + processedRecords *prometheus.CounterVec processedBytes *prometheus.CounterVec behindLatestMillis *prometheus.GaugeVec @@ -106,12 +109,12 @@ func (p *PrometheusMonitoringService) Init() error { func (p *PrometheusMonitoringService) Start() error { http.Handle("/metrics", promhttp.Handler()) go func() { - log.Infof("Starting Prometheus listener on %s", p.ListenAddress) + p.Logger.Infof("Starting Prometheus listener on %s", p.ListenAddress) err := http.ListenAndServe(p.ListenAddress, nil) if err != nil { - log.Errorf("Error starting Prometheus metrics endpoint. %+v", err) + p.Logger.Errorf("Error starting Prometheus metrics endpoint. 
%+v", err) } - log.Info("Stopped metrics server") + p.Logger.Infof("Stopped metrics server") }() return nil diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 87b24aa..04bae9f 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -32,8 +32,6 @@ import ( "sync" "time" - log "github.com/sirupsen/logrus" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/kinesis" @@ -86,6 +84,8 @@ type ShardConsumer struct { } func (sc *ShardConsumer) getShardIterator(shard *par.ShardStatus) (*string, error) { + log := sc.kclConfig.Logger + // Get checkpoint of the shard from dynamoDB err := sc.checkpointer.FetchCheckpoint(shard) if err != nil && err != chk.ErrSequenceIDNotFound { @@ -128,6 +128,8 @@ func (sc *ShardConsumer) getShardIterator(shard *par.ShardStatus) (*string, erro func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { defer sc.releaseLease(shard) + log := sc.kclConfig.Logger + // If the shard is child shard, need to wait until the parent finished. if err := sc.waitOnParentShard(shard); err != nil { // If parent shard has been deleted by Kinesis system already, just ignore the error. 
@@ -282,6 +284,7 @@ func (sc *ShardConsumer) waitOnParentShard(shard *par.ShardStatus) error { // Cleanup the internal lease cache func (sc *ShardConsumer) releaseLease(shard *par.ShardStatus) { + log := sc.kclConfig.Logger log.Infof("Release lease for shard %s", shard.ID) shard.SetLeaseOwner("") diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 2a7bd52..fac1a7d 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -32,8 +32,6 @@ import ( "sync" "time" - log "github.com/sirupsen/logrus" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" @@ -105,19 +103,20 @@ func (w *Worker) WithCheckpointer(checker chk.Checkpointer) *Worker { // Run starts consuming data from the stream, and pass it to the application record processors. func (w *Worker) Start() error { + log := w.kclConfig.Logger if err := w.initialize(); err != nil { log.Errorf("Failed to initialize Worker: %+v", err) return err } // Start monitoring service - log.Info("Starting monitoring service.") + log.Infof("Starting monitoring service.") if err := w.mService.Start(); err != nil { log.Errorf("Failed to start monitoring service: %+v", err) return err } - log.Info("Starting worker event loop.") + log.Infof("Starting worker event loop.") // entering event loop go w.eventLoop() return nil @@ -125,7 +124,8 @@ func (w *Worker) Start() error { // Shutdown signals worker to shutdown. Worker will try initiating shutdown of all record processors. func (w *Worker) Shutdown() { - log.Info("Worker shutdown in requested.") + log := w.kclConfig.Logger + log.Infof("Worker shutdown in requested.") if w.done { return @@ -136,11 +136,12 @@ func (w *Worker) Shutdown() { w.waitGroup.Wait() w.mService.Shutdown() - log.Info("Worker loop is complete. Exiting from worker.") + log.Infof("Worker loop is complete. Exiting from worker.") } // Publish to write some data into stream. 
This function is mainly used for testing purpose. func (w *Worker) Publish(streamName, partitionKey string, data []byte) error { + log := w.kclConfig.Logger _, err := w.kc.PutRecord(&kinesis.PutRecordInput{ Data: data, StreamName: aws.String(streamName), @@ -154,12 +155,13 @@ func (w *Worker) Publish(streamName, partitionKey string, data []byte) error { // initialize func (w *Worker) initialize() error { - log.Info("Worker initialization in progress...") + log := w.kclConfig.Logger + log.Infof("Worker initialization in progress...") // Create default Kinesis session if w.kc == nil { // create session for Kinesis - log.Info("Creating Kinesis session") + log.Infof("Creating Kinesis session") s, err := session.NewSession(&aws.Config{ Region: aws.String(w.regionName), @@ -173,15 +175,15 @@ func (w *Worker) initialize() error { } w.kc = kinesis.New(s) } else { - log.Info("Use custom Kinesis service.") + log.Infof("Use custom Kinesis service.") } // Create default dynamodb based checkpointer implementation if w.checkpointer == nil { - log.Info("Creating DynamoDB based checkpointer") + log.Infof("Creating DynamoDB based checkpointer") w.checkpointer = chk.NewDynamoCheckpoint(w.kclConfig) } else { - log.Info("Use custom checkpointer implementation.") + log.Infof("Use custom checkpointer implementation.") } err := w.metricsConfig.Init(w.kclConfig.ApplicationName, w.streamName, w.workerID) @@ -190,7 +192,7 @@ func (w *Worker) initialize() error { } w.mService = w.metricsConfig.GetMonitoringService() - log.Info("Initializing Checkpointer") + log.Infof("Initializing Checkpointer") if err := w.checkpointer.Init(); err != nil { log.Errorf("Failed to start Checkpointer: %+v", err) return err @@ -203,7 +205,7 @@ func (w *Worker) initialize() error { w.waitGroup = &sync.WaitGroup{} - log.Info("Initialization complete.") + log.Infof("Initialization complete.") return nil } @@ -226,6 +228,8 @@ func (w *Worker) newShardConsumer(shard *par.ShardStatus) *ShardConsumer { // eventLoop 
func (w *Worker) eventLoop() { + log := w.kclConfig.Logger + for { err := w.syncShard() if err != nil { @@ -271,7 +275,7 @@ func (w *Worker) eventLoop() { if err != nil { // cannot get lease on the shard if err.Error() != chk.ErrLeaseNotAquired { - log.Error(err) + log.Errorf("Cannot get lease: %+v", err) } continue } @@ -284,7 +288,9 @@ func (w *Worker) eventLoop() { w.waitGroup.Add(1) go func() { defer w.waitGroup.Done() - sc.getRecords(shard) + if err := sc.getRecords(shard); err != nil { + log.Errorf("Error in getRecords: %+v", err) + } }() // exit from for loop and not to grab more shard for now. break @@ -293,7 +299,7 @@ func (w *Worker) eventLoop() { select { case <-*w.stop: - log.Info("Shutting down...") + log.Infof("Shutting down...") return case <-time.After(time.Duration(w.kclConfig.ShardSyncIntervalMillis) * time.Millisecond): } @@ -303,6 +309,7 @@ func (w *Worker) eventLoop() { // List all ACTIVE shard and store them into shardStatus table // If shard has been removed, need to exclude it from cached shard status. func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) error { + log := w.kclConfig.Logger // The default pagination limit is 100. 
args := &kinesis.DescribeStreamInput{ StreamName: aws.String(w.streamName), @@ -355,6 +362,7 @@ func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) err // syncShard to sync the cached shard info with actual shard info from Kinesis func (w *Worker) syncShard() error { + log := w.kclConfig.Logger shardInfo := make(map[string]bool) err := w.getShardIDs("", shardInfo) diff --git a/go.mod b/go.mod index c18af03..9a4ec48 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,7 @@ module github.com/vmware/vmware-go-kcl require ( + github.com/BurntSushi/toml v0.3.1 // indirect github.com/aws/aws-sdk-go v1.19.38 github.com/google/uuid v1.1.1 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect @@ -9,7 +10,13 @@ require ( github.com/prometheus/procfs v0.0.0-20190523193104-a7aeb8df3389 // indirect github.com/sirupsen/logrus v1.4.2 github.com/stretchr/testify v1.3.0 + go.uber.org/atomic v1.4.0 // indirect + go.uber.org/multierr v1.2.0 // indirect + go.uber.org/zap v1.11.0 golang.org/x/net v0.0.0-20190522155817-f3200d17e092 // indirect golang.org/x/sys v0.0.0-20190528012530-adf421d2caf4 // indirect golang.org/x/text v0.3.2 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) + +go 1.13 diff --git a/go.sum b/go.sum index 9a5146b..19dc05c 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -32,6 +34,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -59,6 +62,12 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4= +go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.11.0 h1:gSmpCfs+R47a4yQPAI4xJ0IPDLTRGXskm6UelqNXpqE= +go.uber.org/zap v1.11.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -78,5 +87,9 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/logger/logger.go b/logger/logger.go new file mode 100644 index 0000000..1712899 --- /dev/null +++ b/logger/logger.go @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2019 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/ +// https://github.com/amitrai48/logger + +package logger + +import ( + "github.com/sirupsen/logrus" +) + +// Fields Type to pass when we want to call WithFields for structured logging +type Fields map[string]interface{} + +const ( + //Debug has verbose message + Debug = "debug" + //Info is default log level + Info = "info" + //Warn is for logging messages about possible issues + Warn = "warn" + //Error is for logging errors + Error = "error" + //Fatal is for logging fatal messages. The sytem shutsdown after logging the message. + Fatal = "fatal" +) + +// Logger is the common interface for logging. +type Logger interface { + Debugf(format string, args ...interface{}) + + Infof(format string, args ...interface{}) + + Warnf(format string, args ...interface{}) + + Errorf(format string, args ...interface{}) + + Fatalf(format string, args ...interface{}) + + Panicf(format string, args ...interface{}) + + WithFields(keyValues Fields) Logger +} + +// Configuration stores the config for the logger +// For some loggers there can only be one level across writers, for such the level of Console is picked by default +type Configuration struct { + EnableConsole bool + ConsoleJSONFormat bool + ConsoleLevel string + EnableFile bool + FileJSONFormat bool + FileLevel string + + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. + Filename string + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSizeMB int + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is 7 days. 
+ MaxAgeDays int + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int + + // LocalTime determines if the time used for formatting the timestamps in + // backup files is the computer's local time. The default is to use UTC + // time. + LocalTime bool +} + +// GetDefaultLogger creates a default logger. +func GetDefaultLogger() Logger { + return NewLogrusLogger(logrus.StandardLogger()) +} + +// normalizeConfig to enforce default value in configuration. +func normalizeConfig(config *Configuration) { + if config.MaxSizeMB <= 0 { + config.MaxSizeMB = 100 + } + + if config.MaxAgeDays <= 0 { + config.MaxAgeDays = 7 + } + + if config.MaxBackups < 0 { + config.MaxBackups = 0 + } +} diff --git a/logger/logger_test.go b/logger/logger_test.go new file mode 100644 index 0000000..c55a6a5 --- /dev/null +++ b/logger/logger_test.go @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2019 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/ + +package logger + +import ( + "github.com/stretchr/testify/assert" + + "github.com/sirupsen/logrus" + "go.uber.org/zap" + "testing" +) + +func TestZapLoggerWithConfig(t *testing.T) { + config := Configuration{ + EnableConsole: true, + ConsoleLevel: Debug, + ConsoleJSONFormat: true, + EnableFile: false, + FileLevel: Info, + FileJSONFormat: true, + Filename: "log.log", + } + + log := NewZapLoggerWithConfig(config) + + contextLogger := log.WithFields(Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with zap") + contextLogger.Infof("Zap is awesome") +} + +func TestZapLogger(t *testing.T) { + zapLogger, err := zap.NewProduction() + assert.Nil(t, err) + + log := NewZapLogger(zapLogger.Sugar()) + + contextLogger := log.WithFields(Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with zap") + contextLogger.Infof("Zap is awesome") +} + +func TestLogrusLoggerWithConfig(t *testing.T) { + config := Configuration{ + EnableConsole: true, + ConsoleLevel: Debug, + ConsoleJSONFormat: false, + EnableFile: false, + FileLevel: Info, + FileJSONFormat: true, + } + + log := NewLogrusLoggerWithConfig(config) + + contextLogger := log.WithFields(Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with logrus") + contextLogger.Infof("Logrus is awesome") +} + +func TestLogrusLogger(t *testing.T) { + // adapts to Logger interface + log := NewLogrusLogger(logrus.StandardLogger()) + + contextLogger := log.WithFields(Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with logrus") + contextLogger.Infof("Logrus is awesome") +} diff --git a/logger/logrus.go b/logger/logrus.go new file mode 100644 
index 0000000..464f691 --- /dev/null +++ b/logger/logrus.go @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2019 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/ +// https://github.com/amitrai48/logger + +package logger + +import ( + "io" + "os" + + "github.com/sirupsen/logrus" + lumberjack "gopkg.in/natefinch/lumberjack.v2" +) + +type LogrusLogEntry struct { + entry *logrus.Entry +} + +type LogrusLogger struct { + logger *logrus.Logger +} + +// NewLogrusLogger adapts existing logrus logger to Logger interface. +// The call is responsible for configuring logrus logger appropriately. +func NewLogrusLogger(lLogger *logrus.Logger) Logger { + return &LogrusLogger{ + logger: lLogger, + } +} + +// NewLogrusLoggerWithConfig creates and configs Logger instance backed by +// logrus logger. 
+func NewLogrusLoggerWithConfig(config Configuration) Logger { + logLevel := config.ConsoleLevel + if logLevel == "" { + logLevel = config.FileLevel + } + + level, err := logrus.ParseLevel(logLevel) + if err != nil { + // fallback to InfoLevel + level = logrus.InfoLevel + } + + normalizeConfig(&config) + + stdOutHandler := os.Stdout + fileHandler := &lumberjack.Logger{ + Filename: config.Filename, + MaxSize: config.MaxSizeMB, + Compress: true, + MaxAge: config.MaxAgeDays, + MaxBackups: config.MaxBackups, + LocalTime: config.LocalTime, + } + lLogger := &logrus.Logger{ + Out: stdOutHandler, + Formatter: getFormatter(config.ConsoleJSONFormat), + Hooks: make(logrus.LevelHooks), + Level: level, + } + + if config.EnableConsole && config.EnableFile { + lLogger.SetOutput(io.MultiWriter(stdOutHandler, fileHandler)) + } else { + if config.EnableFile { + lLogger.SetOutput(fileHandler) + lLogger.SetFormatter(getFormatter(config.FileJSONFormat)) + } + } + + return &LogrusLogger{ + logger: lLogger, + } +} + +func (l *LogrusLogger) Debugf(format string, args ...interface{}) { + l.logger.Debugf(format, args...) +} + +func (l *LogrusLogger) Infof(format string, args ...interface{}) { + l.logger.Infof(format, args...) +} + +func (l *LogrusLogger) Warnf(format string, args ...interface{}) { + l.logger.Warnf(format, args...) +} + +func (l *LogrusLogger) Errorf(format string, args ...interface{}) { + l.logger.Errorf(format, args...) +} + +func (l *LogrusLogger) Fatalf(format string, args ...interface{}) { + l.logger.Fatalf(format, args...) +} + +func (l *LogrusLogger) Panicf(format string, args ...interface{}) { + l.logger.Fatalf(format, args...) +} + +func (l *LogrusLogger) WithFields(fields Fields) Logger { + return &LogrusLogEntry{ + entry: l.logger.WithFields(convertToLogrusFields(fields)), + } +} + +func (l *LogrusLogEntry) Debugf(format string, args ...interface{}) { + l.entry.Debugf(format, args...) 
+} + +func (l *LogrusLogEntry) Infof(format string, args ...interface{}) { + l.entry.Infof(format, args...) +} + +func (l *LogrusLogEntry) Warnf(format string, args ...interface{}) { + l.entry.Warnf(format, args...) +} + +func (l *LogrusLogEntry) Errorf(format string, args ...interface{}) { + l.entry.Errorf(format, args...) +} + +func (l *LogrusLogEntry) Fatalf(format string, args ...interface{}) { + l.entry.Fatalf(format, args...) +} + +func (l *LogrusLogEntry) Panicf(format string, args ...interface{}) { + l.entry.Fatalf(format, args...) +} + +func (l *LogrusLogEntry) WithFields(fields Fields) Logger { + return &LogrusLogEntry{ + entry: l.entry.WithFields(convertToLogrusFields(fields)), + } +} + +func getFormatter(isJSON bool) logrus.Formatter { + if isJSON { + return &logrus.JSONFormatter{} + } + return &logrus.TextFormatter{ + FullTimestamp: true, + DisableLevelTruncation: true, + } +} + +func convertToLogrusFields(fields Fields) logrus.Fields { + logrusFields := logrus.Fields{} + for index, val := range fields { + logrusFields[index] = val + } + return logrusFields +} diff --git a/logger/zap.go b/logger/zap.go new file mode 100644 index 0000000..01fdeb7 --- /dev/null +++ b/logger/zap.go @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2019 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/ +// https://github.com/amitrai48/logger + +package logger + +import ( + "os" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + lumberjack "gopkg.in/natefinch/lumberjack.v2" +) + +type ZapLogger struct { + sugaredLogger *zap.SugaredLogger +} + +// NewZapLogger adapts existing sugared zap logger to Logger interface. +// The call is responsible for configuring sugard zap logger appropriately. +// +// Note: Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. +// +// Base zap logger can be convert to SugaredLogger by calling to add a wrapper: +// sugaredLogger := log.Sugar() +// +func NewZapLogger(logger *zap.SugaredLogger) Logger { + return &ZapLogger{ + sugaredLogger: logger, + } +} + +// NewZapLoggerWithConfig creates and configs Logger instance backed by +// zap Sugared logger. 
+func NewZapLoggerWithConfig(config Configuration) Logger { + cores := []zapcore.Core{} + + if config.EnableConsole { + level := getZapLevel(config.ConsoleLevel) + writer := zapcore.Lock(os.Stdout) + core := zapcore.NewCore(getEncoder(config.ConsoleJSONFormat), writer, level) + cores = append(cores, core) + } + + if config.EnableFile { + level := getZapLevel(config.FileLevel) + writer := zapcore.AddSync(&lumberjack.Logger{ + Filename: config.Filename, + MaxSize: config.MaxSizeMB, + Compress: true, + MaxAge: config.MaxAgeDays, + MaxBackups: config.MaxBackups, + LocalTime: config.LocalTime, + }) + core := zapcore.NewCore(getEncoder(config.FileJSONFormat), writer, level) + cores = append(cores, core) + } + + combinedCore := zapcore.NewTee(cores...) + + // AddCallerSkip skips 2 number of callers, this is important else the file that gets + // logged will always be the wrapped file. In our case zap.go + logger := zap.New(combinedCore, + zap.AddCallerSkip(2), + zap.AddCaller(), + ).Sugar() + + return &ZapLogger{ + sugaredLogger: logger, + } +} + +func (l *ZapLogger) Debugf(format string, args ...interface{}) { + l.sugaredLogger.Debugf(format, args...) +} + +func (l *ZapLogger) Infof(format string, args ...interface{}) { + l.sugaredLogger.Infof(format, args...) +} + +func (l *ZapLogger) Warnf(format string, args ...interface{}) { + l.sugaredLogger.Warnf(format, args...) +} + +func (l *ZapLogger) Errorf(format string, args ...interface{}) { + l.sugaredLogger.Errorf(format, args...) +} + +func (l *ZapLogger) Fatalf(format string, args ...interface{}) { + l.sugaredLogger.Fatalf(format, args...) +} + +func (l *ZapLogger) Panicf(format string, args ...interface{}) { + l.sugaredLogger.Fatalf(format, args...) +} + +func (l *ZapLogger) WithFields(fields Fields) Logger { + var f = make([]interface{}, 0) + for k, v := range fields { + f = append(f, k) + f = append(f, v) + } + newLogger := l.sugaredLogger.With(f...) 
+ return &ZapLogger{newLogger} +} + +func getEncoder(isJSON bool) zapcore.Encoder { + encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder + if isJSON { + return zapcore.NewJSONEncoder(encoderConfig) + } + return zapcore.NewConsoleEncoder(encoderConfig) +} + +func getZapLevel(level string) zapcore.Level { + switch level { + case Info: + return zapcore.InfoLevel + case Warn: + return zapcore.WarnLevel + case Debug: + return zapcore.DebugLevel + case Error: + return zapcore.ErrorLevel + case Fatal: + return zapcore.FatalLevel + default: + return zapcore.InfoLevel + } +} diff --git a/test/logger_test.go b/test/logger_test.go new file mode 100644 index 0000000..42e44f0 --- /dev/null +++ b/test/logger_test.go @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2019 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/ + +package test + +import ( + "github.com/stretchr/testify/assert" + + "github.com/sirupsen/logrus" + "go.uber.org/zap" + "testing" + + "github.com/vmware/vmware-go-kcl/logger" +) + +func TestZapLoggerWithConfig(t *testing.T) { + config := logger.Configuration{ + EnableConsole: true, + ConsoleLevel: logger.Debug, + ConsoleJSONFormat: true, + EnableFile: true, + FileLevel: logger.Info, + FileJSONFormat: true, + Filename: "log.log", + } + + log := logger.NewZapLoggerWithConfig(config) + + contextLogger := log.WithFields(logger.Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with zap") + contextLogger.Infof("Zap is awesome") +} + +func TestZapLogger(t *testing.T) { + zapLogger, err := zap.NewProduction() + assert.Nil(t, err) + + log := logger.NewZapLogger(zapLogger.Sugar()) + + contextLogger := log.WithFields(logger.Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with zap") + contextLogger.Infof("Zap is awesome") +} + +func TestLogrusLoggerWithConfig(t *testing.T) { + config := logger.Configuration{ + EnableConsole: true, + ConsoleLevel: logger.Debug, + ConsoleJSONFormat: false, + EnableFile: true, + FileLevel: logger.Info, + FileJSONFormat: true, + Filename: "log.log", + } + + log := logger.NewLogrusLoggerWithConfig(config) + + contextLogger := log.WithFields(logger.Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with logrus") + contextLogger.Infof("Logrus is awesome") +} + +func TestLogrusLogger(t *testing.T) { + // adapts to Logger interface + log := logger.NewLogrusLogger(logrus.StandardLogger()) + + contextLogger := log.WithFields(logger.Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with logrus") + contextLogger.Infof("Logrus is awesome") +} diff --git a/test/worker_test.go b/test/worker_test.go index d950058..22f7ccb 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -19,9 +19,6 @@ package test import ( - 
"github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/session" "net/http" "os" "os/signal" @@ -30,15 +27,17 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/session" "github.com/prometheus/common/expfmt" - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" kc "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" "github.com/vmware/vmware-go-kcl/clientlibrary/utils" wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" + "github.com/vmware/vmware-go-kcl/logger" ) const ( @@ -53,6 +52,22 @@ const metricsSystem = "cloudwatch" var shardID string func TestWorker(t *testing.T) { + // At miminal. use standard logrus logger + // log := logger.NewLogrusLogger(logrus.StandardLogger()) + // + // In order to have precise control over logging. Use logger with config + config := logger.Configuration{ + EnableConsole: true, + ConsoleLevel: logger.Debug, + ConsoleJSONFormat: false, + EnableFile: true, + FileLevel: logger.Info, + FileJSONFormat: true, + Filename: "log.log", + } + // Use logrus logger + log := logger.NewLogrusLoggerWithConfig(config) + kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). @@ -60,12 +75,31 @@ func TestWorker(t *testing.T) { WithShardSyncIntervalMillis(5000). WithFailoverTimeMillis(300000). WithMetricsBufferTimeMillis(10000). - WithMetricsMaxQueueSize(20) + WithMetricsMaxQueueSize(20). + WithLogger(log) runTest(kclConfig, false, t) } func TestWorkerWithSigInt(t *testing.T) { + // At miminal. 
use standard zap logger + //zapLogger, err := zap.NewProduction() + //assert.Nil(t, err) + //log := logger.NewZapLogger(zapLogger.Sugar()) + // + // In order to have precise control over logging. Use logger with config. + config := logger.Configuration{ + EnableConsole: true, + ConsoleLevel: logger.Debug, + ConsoleJSONFormat: true, + EnableFile: true, + FileLevel: logger.Info, + FileJSONFormat: true, + Filename: "log.log", + } + // use zap logger + log := logger.NewZapLoggerWithConfig(config) + kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). @@ -73,7 +107,8 @@ func TestWorkerWithSigInt(t *testing.T) { WithShardSyncIntervalMillis(5000). WithFailoverTimeMillis(300000). WithMetricsBufferTimeMillis(10000). - WithMetricsMaxQueueSize(20) + WithMetricsMaxQueueSize(20). + WithLogger(log) runTest(kclConfig, true, t) } @@ -120,9 +155,6 @@ func TestWorkerAssumeRole(t *testing.T) { } func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *testing.T) { - log.SetOutput(os.Stdout) - log.SetLevel(log.DebugLevel) - assert.Equal(t, regionName, kclConfig.RegionName) assert.Equal(t, streamName, kclConfig.StreamName) @@ -192,6 +224,7 @@ func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service stri return &metrics.MonitoringConfiguration{ MonitoringService: "cloudwatch", Region: regionName, + Logger: kclConfig.Logger, CloudWatch: metrics.CloudWatchMonitoringService{ Credentials: kclConfig.CloudWatchCredentials, // Those value should come from kclConfig @@ -205,6 +238,7 @@ func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service stri return &metrics.MonitoringConfiguration{ MonitoringService: "prometheus", Region: regionName, + Logger: kclConfig.Logger, Prometheus: metrics.PrometheusMonitoringService{ ListenAddress: ":8080", }, From 971d748195422a2579426e7565467b6d2ef467c1 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Fri, 1 
Nov 2019 08:42:04 -0500 Subject: [PATCH 46/90] Fix missing init position with AT_TIMESTAMP (#44) AT_TIMESTAMP starts from the record at or after the specified server-side Timestamp. However, the implementation was missing. The bug was not noticed until recently because most users never use this feature. Signed-off-by: Tao Jiang --- clientlibrary/worker/shard-consumer.go | 23 ++++++++++++++++++----- test/worker_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 5 deletions(-) diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 04bae9f..2b44922 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -95,13 +95,26 @@ func (sc *ShardConsumer) getShardIterator(shard *par.ShardStatus) (*string, erro // If there isn't any checkpoint for the shard, use the configuration value. if shard.Checkpoint == "" { initPos := sc.kclConfig.InitialPositionInStream + shardIteratorType := config.InitalPositionInStreamToShardIteratorType(initPos) log.Debugf("No checkpoint recorded for shard: %v, starting with: %v", shard.ID, - aws.StringValue(config.InitalPositionInStreamToShardIteratorType(initPos))) - shardIterArgs := &kinesis.GetShardIteratorInput{ - ShardId: &shard.ID, - ShardIteratorType: config.InitalPositionInStreamToShardIteratorType(initPos), - StreamName: &sc.streamName, + aws.StringValue(shardIteratorType)) + + var shardIterArgs *kinesis.GetShardIteratorInput + if initPos == config.AT_TIMESTAMP { + shardIterArgs = &kinesis.GetShardIteratorInput{ + ShardId: &shard.ID, + ShardIteratorType: shardIteratorType, + Timestamp: sc.kclConfig.InitialPositionInStreamExtended.Timestamp, + StreamName: &sc.streamName, + } + } else { + shardIterArgs = &kinesis.GetShardIteratorInput{ + ShardId: &shard.ID, + ShardIteratorType: shardIteratorType, + StreamName: &sc.streamName, + } } + iterResp, err := sc.kc.GetShardIterator(shardIterArgs) if err != nil { return nil, err diff --git 
a/test/worker_test.go b/test/worker_test.go index 22f7ccb..e83966a 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -81,6 +81,30 @@ func TestWorker(t *testing.T) { runTest(kclConfig, false, t) } +func TestWorkerWithTimestamp(t *testing.T) { + // In order to have precise control over logging. Use logger with config + config := logger.Configuration{ + EnableConsole: true, + ConsoleLevel: logger.Debug, + ConsoleJSONFormat: false, + } + // Use logrus logger + log := logger.NewLogrusLoggerWithConfig(config) + + ts := time.Now().Add(time.Second * 5) + kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + WithTimestampAtInitialPositionInStream(&ts). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithMetricsBufferTimeMillis(10000). + WithMetricsMaxQueueSize(20). + WithLogger(log) + + runTest(kclConfig, false, t) +} + func TestWorkerWithSigInt(t *testing.T) { // At miminal. use standard zap logger //zapLogger, err := zap.NewProduction() From 8a8f9e6339160d9f08948531cd96df9a8165a0a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Rainone?= <476650+arl@users.noreply.github.com> Date: Mon, 4 Nov 2019 18:22:29 +0100 Subject: [PATCH 47/90] logger: move zap into its own package (#47) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since #27 vmware-go-kcl has supported any logger interface, which is very nice. However due to the fact that `logger/zap.go` directly imports zap, zap became a dependency of whoever uses `vmware-go-kcl`. The problem is that zap also has many dependencies. In order to avoid making KCL users pay a cost for a feature they don't need, the zap stuff has been moved to a `logger/zap` sub-package. 
Fixes #45 Signed-off-by: Aurélien Rainone --- logger/logger_test.go | 33 +-------------------------------- logger/{ => zap}/zap.go | 31 ++++++++++++++++--------------- logger/zap/zap_test.go | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 47 deletions(-) rename logger/{ => zap}/zap.go (88%) create mode 100644 logger/zap/zap_test.go diff --git a/logger/logger_test.go b/logger/logger_test.go index c55a6a5..980c022 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -21,42 +21,11 @@ package logger import ( - "github.com/stretchr/testify/assert" + "testing" "github.com/sirupsen/logrus" - "go.uber.org/zap" - "testing" ) -func TestZapLoggerWithConfig(t *testing.T) { - config := Configuration{ - EnableConsole: true, - ConsoleLevel: Debug, - ConsoleJSONFormat: true, - EnableFile: false, - FileLevel: Info, - FileJSONFormat: true, - Filename: "log.log", - } - - log := NewZapLoggerWithConfig(config) - - contextLogger := log.WithFields(Fields{"key1": "value1"}) - contextLogger.Debugf("Starting with zap") - contextLogger.Infof("Zap is awesome") -} - -func TestZapLogger(t *testing.T) { - zapLogger, err := zap.NewProduction() - assert.Nil(t, err) - - log := NewZapLogger(zapLogger.Sugar()) - - contextLogger := log.WithFields(Fields{"key1": "value1"}) - contextLogger.Debugf("Starting with zap") - contextLogger.Infof("Zap is awesome") -} - func TestLogrusLoggerWithConfig(t *testing.T) { config := Configuration{ EnableConsole: true, diff --git a/logger/zap.go b/logger/zap/zap.go similarity index 88% rename from logger/zap.go rename to logger/zap/zap.go index 01fdeb7..237303e 100644 --- a/logger/zap.go +++ b/logger/zap/zap.go @@ -19,18 +19,19 @@ // Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/ // https://github.com/amitrai48/logger -package logger +package zap import ( "os" - "go.uber.org/zap" + "github.com/vmware/vmware-go-kcl/logger" + uzap "go.uber.org/zap" "go.uber.org/zap/zapcore" 
lumberjack "gopkg.in/natefinch/lumberjack.v2" ) type ZapLogger struct { - sugaredLogger *zap.SugaredLogger + sugaredLogger *uzap.SugaredLogger } // NewZapLogger adapts existing sugared zap logger to Logger interface. @@ -44,7 +45,7 @@ type ZapLogger struct { // Base zap logger can be convert to SugaredLogger by calling to add a wrapper: // sugaredLogger := log.Sugar() // -func NewZapLogger(logger *zap.SugaredLogger) Logger { +func NewZapLogger(logger *uzap.SugaredLogger) logger.Logger { return &ZapLogger{ sugaredLogger: logger, } @@ -52,7 +53,7 @@ func NewZapLogger(logger *zap.SugaredLogger) Logger { // NewZapLoggerWithConfig creates and configs Logger instance backed by // zap Sugared logger. -func NewZapLoggerWithConfig(config Configuration) Logger { +func NewZapLoggerWithConfig(config logger.Configuration) logger.Logger { cores := []zapcore.Core{} if config.EnableConsole { @@ -80,9 +81,9 @@ func NewZapLoggerWithConfig(config Configuration) Logger { // AddCallerSkip skips 2 number of callers, this is important else the file that gets // logged will always be the wrapped file. In our case zap.go - logger := zap.New(combinedCore, - zap.AddCallerSkip(2), - zap.AddCaller(), + logger := uzap.New(combinedCore, + uzap.AddCallerSkip(2), + uzap.AddCaller(), ).Sugar() return &ZapLogger{ @@ -114,7 +115,7 @@ func (l *ZapLogger) Panicf(format string, args ...interface{}) { l.sugaredLogger.Fatalf(format, args...) 
} -func (l *ZapLogger) WithFields(fields Fields) Logger { +func (l *ZapLogger) WithFields(fields logger.Fields) logger.Logger { var f = make([]interface{}, 0) for k, v := range fields { f = append(f, k) @@ -125,7 +126,7 @@ func (l *ZapLogger) WithFields(fields Fields) Logger { } func getEncoder(isJSON bool) zapcore.Encoder { - encoderConfig := zap.NewProductionEncoderConfig() + encoderConfig := uzap.NewProductionEncoderConfig() encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder if isJSON { return zapcore.NewJSONEncoder(encoderConfig) @@ -135,15 +136,15 @@ func getEncoder(isJSON bool) zapcore.Encoder { func getZapLevel(level string) zapcore.Level { switch level { - case Info: + case logger.Info: return zapcore.InfoLevel - case Warn: + case logger.Warn: return zapcore.WarnLevel - case Debug: + case logger.Debug: return zapcore.DebugLevel - case Error: + case logger.Error: return zapcore.ErrorLevel - case Fatal: + case logger.Fatal: return zapcore.FatalLevel default: return zapcore.InfoLevel diff --git a/logger/zap/zap_test.go b/logger/zap/zap_test.go new file mode 100644 index 0000000..820f31b --- /dev/null +++ b/logger/zap/zap_test.go @@ -0,0 +1,39 @@ +package zap_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/vmware/vmware-go-kcl/logger" + "github.com/vmware/vmware-go-kcl/logger/zap" + uzap "go.uber.org/zap" +) + +func TestZapLoggerWithConfig(t *testing.T) { + config := logger.Configuration{ + EnableConsole: true, + ConsoleLevel: logger.Debug, + ConsoleJSONFormat: true, + EnableFile: false, + FileLevel: logger.Info, + FileJSONFormat: true, + Filename: "log.log", + } + + log := zap.NewZapLoggerWithConfig(config) + + contextLogger := log.WithFields(logger.Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with zap") + contextLogger.Infof("Zap is awesome") +} + +func TestZapLogger(t *testing.T) { + zapLogger, err := uzap.NewProduction() + assert.Nil(t, err) + + log := zap.NewZapLogger(zapLogger.Sugar()) + + contextLogger 
:= log.WithFields(logger.Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with zap") + contextLogger.Infof("Zap is awesome") +} From d6369e48c2f04efce41ff89ad4dea6d464e3af0a Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 4 Nov 2019 11:39:50 -0600 Subject: [PATCH 48/90] Fix the broken integration test (#48) The https://github.com/vmware/vmware-go-kcl/pull/47 moved zap into its own package but it also breaks the integration test. This change is to fix the integration test by correcting its package reference. Signed-off-by: Tao Jiang --- test/logger_test.go | 5 +++-- test/worker_test.go | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/test/logger_test.go b/test/logger_test.go index 42e44f0..502b509 100644 --- a/test/logger_test.go +++ b/test/logger_test.go @@ -28,6 +28,7 @@ import ( "testing" "github.com/vmware/vmware-go-kcl/logger" + zaplogger "github.com/vmware/vmware-go-kcl/logger/zap" ) func TestZapLoggerWithConfig(t *testing.T) { @@ -41,7 +42,7 @@ func TestZapLoggerWithConfig(t *testing.T) { Filename: "log.log", } - log := logger.NewZapLoggerWithConfig(config) + log := zaplogger.NewZapLoggerWithConfig(config) contextLogger := log.WithFields(logger.Fields{"key1": "value1"}) contextLogger.Debugf("Starting with zap") @@ -52,7 +53,7 @@ func TestZapLogger(t *testing.T) { zapLogger, err := zap.NewProduction() assert.Nil(t, err) - log := logger.NewZapLogger(zapLogger.Sugar()) + log := zaplogger.NewZapLogger(zapLogger.Sugar()) contextLogger := log.WithFields(logger.Fields{"key1": "value1"}) contextLogger.Debugf("Starting with zap") diff --git a/test/worker_test.go b/test/worker_test.go index e83966a..85fa31c 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -38,6 +38,7 @@ import ( "github.com/vmware/vmware-go-kcl/clientlibrary/utils" wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" "github.com/vmware/vmware-go-kcl/logger" + zaplogger "github.com/vmware/vmware-go-kcl/logger/zap" ) const ( @@ -109,7 +110,7 @@ func 
TestWorkerWithSigInt(t *testing.T) { // At miminal. use standard zap logger //zapLogger, err := zap.NewProduction() //assert.Nil(t, err) - //log := logger.NewZapLogger(zapLogger.Sugar()) + //log := zaplogger.NewZapLogger(zapLogger.Sugar()) // // In order to have precise control over logging. Use logger with config. config := logger.Configuration{ @@ -122,7 +123,7 @@ func TestWorkerWithSigInt(t *testing.T) { Filename: "log.log", } // use zap logger - log := logger.NewZapLoggerWithConfig(config) + log := zaplogger.NewZapLoggerWithConfig(config) kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). From 21980a54e395280bc64ad6c14fd3a5b8f831f955 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Rainone?= <476650+arl@users.noreply.github.com> Date: Wed, 6 Nov 2019 14:53:21 +0100 Subject: [PATCH 49/90] Expose monitoring service (#49) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove MonitoringConfiguration and export no-op service MonitoringConfiguration is not needed anymore as the user directly implements its monitoring service or use one the default constructors. Signed-off-by: Aurélien Rainone * Provide a constructor for CloudWatchMonitoringService Unexport all fields Signed-off-by: Aurélien Rainone * Provide a constructor to PrometheusMonitoringService Unexport fields Signed-off-by: Aurélien Rainone * Remove all CloudWatch specific-stuff from config package Signed-off-by: Aurélien Rainone * NewWorker accepts a metrics.MonitoringService Signed-off-by: Aurélien Rainone * Fix tests Signed-off-by: Aurélien Rainone * Add WithMonitoringService to config Instead of having an additional parameter to NewWorker so that the user can provide its own MonitoringService, WithMonitoringService is added to the configuration. This is much cleaner and remains in-line with the rest of the current API. 
Signed-off-by: Aurélien Rainone * Fix tests after introduction of WithMonitoringService Also, fix tests that should have been fixed in earlier commits. Signed-off-by: Aurélien Rainone * Move Prometheus into its own package Also rename it to prometheus.MonitoringService to not have to repeat Prometheus twice when using. Signed-off-by: Aurélien Rainone * Move CloudWatch metrics into its own package Also rename it to cloudwatch.MonitoringService to not have to repeat Cloudwatch twice when using. Signed-off-by: Aurélien Rainone * Remove references to Cloudwatch in comments Signed-off-by: Aurélien Rainone --- .../checkpoint/dynamodb-checkpointer_test.go | 10 +- clientlibrary/common/errors.go | 2 +- clientlibrary/config/config.go | 22 +-- clientlibrary/config/config_test.go | 6 +- clientlibrary/config/kcl-config.go | 39 ++-- .../metrics/{ => cloudwatch}/cloudwatch.go | 127 ++++++++------ clientlibrary/metrics/interfaces.go | 78 ++------ clientlibrary/metrics/prometheus.go | 155 ---------------- .../metrics/prometheus/prometheus.go | 166 ++++++++++++++++++ clientlibrary/worker/worker.go | 25 ++- test/worker_custom_test.go | 25 +-- test/worker_test.go | 47 ++--- 12 files changed, 313 insertions(+), 389 deletions(-) rename clientlibrary/metrics/{ => cloudwatch}/cloudwatch.go (70%) delete mode 100644 clientlibrary/metrics/prometheus.go create mode 100644 clientlibrary/metrics/prometheus/prometheus.go diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go index 1b824b7..6eaead5 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go @@ -67,9 +67,8 @@ func TestGetLeaseNotAquired(t *testing.T) { WithMaxRecords(10). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). - WithFailoverTimeMillis(300000). - WithMetricsBufferTimeMillis(10000). 
- WithMetricsMaxQueueSize(20) + WithFailoverTimeMillis(300000) + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) checkpoint.Init() err := checkpoint.GetLease(&par.ShardStatus{ @@ -98,9 +97,8 @@ func TestGetLeaseAquired(t *testing.T) { WithMaxRecords(10). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). - WithFailoverTimeMillis(300000). - WithMetricsBufferTimeMillis(10000). - WithMetricsMaxQueueSize(20) + WithFailoverTimeMillis(300000) + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) checkpoint.Init() marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ diff --git a/clientlibrary/common/errors.go b/clientlibrary/common/errors.go index b5db8ea..da32eef 100644 --- a/clientlibrary/common/errors.go +++ b/clientlibrary/common/errors.go @@ -67,7 +67,7 @@ var errorMap = map[ErrorCode]ClientLibraryError{ KinesisClientLibRetryableError: {ErrorCode: KinesisClientLibRetryableError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Retryable exceptions (e.g. transient errors). The request/operation is expected to succeed upon (back off and) retry."}, KinesisClientLibIOError: {ErrorCode: KinesisClientLibIOError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Error in reading/writing information (e.g. shard information from Kinesis may not be current/complete)."}, BlockedOnParentShardError: {ErrorCode: BlockedOnParentShardError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Cannot start processing data for a shard because the data from the parent shard has not been completely processed (yet)."}, - KinesisClientLibDependencyError: {ErrorCode: KinesisClientLibDependencyError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Cannot talk to its dependencies (e.g. 
fetching data from Kinesis, DynamoDB table reads/writes, emitting metrics to CloudWatch)."}, + KinesisClientLibDependencyError: {ErrorCode: KinesisClientLibDependencyError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Cannot talk to its dependencies (e.g. fetching data from Kinesis, DynamoDB table reads/writes)."}, ThrottlingError: {ErrorCode: ThrottlingError, Retryable: true, Status: http.StatusTooManyRequests, Msg: "Requests are throttled by a service (e.g. DynamoDB when storing a checkpoint)."}, // Non-Retryable diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index 9db8d4f..e4fd36e 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -41,6 +41,7 @@ import ( "github.com/aws/aws-sdk-go/aws" creds "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" "github.com/vmware/vmware-go-kcl/logger" ) @@ -88,12 +89,6 @@ const ( // Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures). DEFAULT_TASK_BACKOFF_TIME_MILLIS = 500 - // Buffer metrics for at most this long before publishing to CloudWatch. - DEFAULT_METRICS_BUFFER_TIME_MILLIS = 10000 - - // Buffer at most this many metrics before publishing to CloudWatch. - DEFAULT_METRICS_MAX_QUEUE_SIZE = 10000 - // KCL will validate client provided sequence numbers with a call to Amazon Kinesis before // checkpointing for calls to {@link RecordProcessorCheckpointer#checkpoint(String)} by default. 
DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING = true @@ -174,9 +169,6 @@ type ( // DynamoDBCredentials is used to access DynamoDB DynamoDBCredentials *creds.Credentials - // CloudWatchCredentials is used to access CloudWatch - CloudWatchCredentials *creds.Credentials - // TableName is name of the dynamo db table for managing kinesis stream default to ApplicationName TableName string @@ -192,7 +184,7 @@ type ( // InitialPositionInStreamExtended provides actual AT_TMESTAMP value InitialPositionInStreamExtended InitialPositionInStreamExtended - // credentials to access Kinesis/Dynamo/CloudWatch: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/ + // credentials to access Kinesis/Dynamo: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/ // Note: No need to configure here. Use NewEnvCredentials for testing and EC2RoleProvider for production // FailoverTimeMillis Lease duration (leases not renewed within this period will be claimed by others) @@ -219,18 +211,11 @@ type ( // kinesisClientConfig Client Configuration used by Kinesis client // dynamoDBClientConfig Client Configuration used by DynamoDB client - // cloudWatchClientConfig Client Configuration used by CloudWatch client // Note: we will use default client provided by AWS SDK // TaskBackoffTimeMillis Backoff period when tasks encounter an exception TaskBackoffTimeMillis int - // MetricsBufferTimeMillis Metrics are buffered for at most this long before publishing to CloudWatch - MetricsBufferTimeMillis int - - // MetricsMaxQueueSize Max number of metrics to buffer before publishing to CloudWatch - MetricsMaxQueueSize int - // ValidateSequenceNumberBeforeCheckpointing whether KCL should validate client provided sequence numbers ValidateSequenceNumberBeforeCheckpointing bool @@ -260,6 +245,9 @@ type ( // Logger used to log message. Logger logger.Logger + + // MonitoringService publishes per worker-scoped metrics. 
+ MonitoringService metrics.MonitoringService } ) diff --git a/clientlibrary/config/config_test.go b/clientlibrary/config/config_test.go index 6f90796..80a9395 100644 --- a/clientlibrary/config/config_test.go +++ b/clientlibrary/config/config_test.go @@ -32,12 +32,10 @@ func TestConfig(t *testing.T) { WithInitialPositionInStream(TRIM_HORIZON). WithIdleTimeBetweenReadsInMillis(20). WithCallProcessRecordsEvenForEmptyRecordList(true). - WithTaskBackoffTimeMillis(10). - WithMetricsBufferTimeMillis(500). - WithMetricsMaxQueueSize(200) + WithTaskBackoffTimeMillis(10) assert.Equal(t, "appName", kclConfig.ApplicationName) - assert.Equal(t, 500, kclConfig.FailoverTimeMillis) + assert.Equal(t, 500, kclConfig.TaskBackoffTimeMillis) contextLogger := kclConfig.Logger.WithFields(logger.Fields{"key1": "value1"}) contextLogger.Debugf("Starting with default logger") diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index f1dc058..ec59be9 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -37,26 +37,28 @@ import ( "log" "time" + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/vmware/vmware-go-kcl/clientlibrary/utils" "github.com/vmware/vmware-go-kcl/logger" ) -// NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. +// NewKinesisClientLibConfig creates a default KinesisClientLibConfiguration based on the required fields. func NewKinesisClientLibConfig(applicationName, streamName, regionName, workerID string) *KinesisClientLibConfiguration { return NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID, - nil, nil, nil) + nil, nil) } -// NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. 
+// NewKinesisClientLibConfigWithCredential creates a default KinesisClientLibConfiguration based on the required fields and unique credentials. func NewKinesisClientLibConfigWithCredential(applicationName, streamName, regionName, workerID string, creds *credentials.Credentials) *KinesisClientLibConfiguration { - return NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID, creds, creds, creds) + return NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID, creds, creds) } -// NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields. +// NewKinesisClientLibConfigWithCredentials creates a default KinesisClientLibConfiguration based on the required fields and specific credentials for each service. func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID string, - kiniesisCreds, dynamodbCreds, cloudwatchCreds *credentials.Credentials) *KinesisClientLibConfiguration { + kiniesisCreds, dynamodbCreds *credentials.Credentials) *KinesisClientLibConfiguration { checkIsValueNotEmpty("ApplicationName", applicationName) checkIsValueNotEmpty("StreamName", streamName) checkIsValueNotEmpty("RegionName", regionName) @@ -70,7 +72,6 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regio ApplicationName: applicationName, KinesisCredentials: kiniesisCreds, DynamoDBCredentials: dynamodbCreds, - CloudWatchCredentials: cloudwatchCreds, TableName: applicationName, StreamName: streamName, RegionName: regionName, @@ -85,8 +86,6 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regio ShardSyncIntervalMillis: DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, CleanupTerminatedShardsBeforeExpiry: DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, TaskBackoffTimeMillis: DEFAULT_TASK_BACKOFF_TIME_MILLIS, - MetricsBufferTimeMillis: DEFAULT_METRICS_BUFFER_TIME_MILLIS, - MetricsMaxQueueSize: 
DEFAULT_METRICS_MAX_QUEUE_SIZE, ValidateSequenceNumberBeforeCheckpointing: DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, ShutdownGraceMillis: DEFAULT_SHUTDOWN_GRACE_MILLIS, MaxLeasesForWorker: DEFAULT_MAX_LEASES_FOR_WORKER, @@ -191,20 +190,6 @@ func (c *KinesisClientLibConfiguration) WithTaskBackoffTimeMillis(taskBackoffTim return c } -// WithMetricsBufferTimeMillis configures Metrics are buffered for at most this long before publishing to CloudWatch -func (c *KinesisClientLibConfiguration) WithMetricsBufferTimeMillis(metricsBufferTimeMillis int) *KinesisClientLibConfiguration { - checkIsValuePositive("MetricsBufferTimeMillis", metricsBufferTimeMillis) - c.MetricsBufferTimeMillis = metricsBufferTimeMillis - return c -} - -// WithMetricsMaxQueueSize configures Max number of metrics to buffer before publishing to CloudWatch -func (c *KinesisClientLibConfiguration) WithMetricsMaxQueueSize(metricsMaxQueueSize int) *KinesisClientLibConfiguration { - checkIsValuePositive("MetricsMaxQueueSize", metricsMaxQueueSize) - c.MetricsMaxQueueSize = metricsMaxQueueSize - return c -} - func (c *KinesisClientLibConfiguration) WithLogger(logger logger.Logger) *KinesisClientLibConfiguration { if logger == nil { log.Panic("Logger cannot be null") @@ -212,3 +197,11 @@ func (c *KinesisClientLibConfiguration) WithLogger(logger logger.Logger) *Kinesi c.Logger = logger return c } + +// WithMonitoringService sets the monitoring service to use to publish metrics. +func (c *KinesisClientLibConfiguration) WithMonitoringService(mService metrics.MonitoringService) *KinesisClientLibConfiguration { + // Nil case is handled downward (at worker creation) so no need to do it here. + // Plus the user might want to be explicit about passing a nil monitoring service here. 
+ c.MonitoringService = mService + return c +} diff --git a/clientlibrary/metrics/cloudwatch.go b/clientlibrary/metrics/cloudwatch/cloudwatch.go similarity index 70% rename from clientlibrary/metrics/cloudwatch.go rename to clientlibrary/metrics/cloudwatch/cloudwatch.go index a189656..9572579 100644 --- a/clientlibrary/metrics/cloudwatch.go +++ b/clientlibrary/metrics/cloudwatch/cloudwatch.go @@ -25,7 +25,7 @@ // The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -package metrics +package cloudwatch import ( "sync" @@ -34,23 +34,25 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/cloudwatch" + cwatch "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface" "github.com/vmware/vmware-go-kcl/logger" ) -type CloudWatchMonitoringService struct { - Namespace string - KinesisStream string - WorkerID string - Region string - Credentials *credentials.Credentials - Logger logger.Logger +// Buffer metrics for at most this long before publishing to CloudWatch. 
+const DEFAULT_CLOUDWATCH_METRICS_BUFFER_DURATION = 10 * time.Second - // control how often to pusblish to CloudWatch - MetricsBufferTimeMillis int - MetricsMaxQueueSize int +type MonitoringService struct { + appName string + streamName string + workerID string + region string + credentials *credentials.Credentials + logger logger.Logger + + // control how often to publish to CloudWatch + bufferDuration time.Duration stop *chan struct{} waitGroup *sync.WaitGroup @@ -59,6 +61,8 @@ type CloudWatchMonitoringService struct { } type cloudWatchMetrics struct { + sync.Mutex + processedRecords int64 processedBytes int64 behindLatestMillis []float64 @@ -66,18 +70,33 @@ type cloudWatchMetrics struct { leaseRenewals int64 getRecordsTime []float64 processRecordsTime []float64 - sync.Mutex } -func (cw *CloudWatchMonitoringService) Init() error { - cfg := &aws.Config{Region: aws.String(cw.Region)} - cfg.Credentials = cw.Credentials +// NewMonitoringService returns a Monitoring service publishing metrics to CloudWatch. +func NewMonitoringService(region string, creds *credentials.Credentials) *MonitoringService { + return NewMonitoringServiceWithOptions(region, creds, logger.GetDefaultLogger(), DEFAULT_CLOUDWATCH_METRICS_BUFFER_DURATION) +} + +// NewMonitoringServiceWithOptions returns a Monitoring service publishing metrics to +// CloudWatch with the provided credentials, buffering duration and logger. +func NewMonitoringServiceWithOptions(region string, creds *credentials.Credentials, logger logger.Logger, bufferDur time.Duration) *MonitoringService { + return &MonitoringService{ + region: region, + credentials: creds, + logger: logger, + bufferDuration: bufferDur, + } +} + +func (cw *MonitoringService) Init(appName, streamName, workerID string) error { + cfg := &aws.Config{Region: aws.String(cw.region)} + cfg.Credentials = cw.credentials s, err := session.NewSession(cfg) if err != nil { - cw.Logger.Errorf("Error in creating session for cloudwatch. 
%+v", err) + cw.logger.Errorf("Error in creating session for cloudwatch. %+v", err) return err } - cw.svc = cloudwatch.New(s) + cw.svc = cwatch.New(s) cw.shardMetrics = new(sync.Map) stopChan := make(chan struct{}) @@ -88,71 +107,71 @@ func (cw *CloudWatchMonitoringService) Init() error { return nil } -func (cw *CloudWatchMonitoringService) Start() error { +func (cw *MonitoringService) Start() error { cw.waitGroup.Add(1) // entering eventloop for sending metrics to CloudWatch go cw.eventloop() return nil } -func (cw *CloudWatchMonitoringService) Shutdown() { - cw.Logger.Infof("Shutting down cloudwatch metrics system...") +func (cw *MonitoringService) Shutdown() { + cw.logger.Infof("Shutting down cloudwatch metrics system...") close(*cw.stop) cw.waitGroup.Wait() - cw.Logger.Infof("Cloudwatch metrics system has been shutdown.") + cw.logger.Infof("Cloudwatch metrics system has been shutdown.") } // Start daemon to flush metrics periodically -func (cw *CloudWatchMonitoringService) eventloop() { +func (cw *MonitoringService) eventloop() { defer cw.waitGroup.Done() for { if err := cw.flush(); err != nil { - cw.Logger.Errorf("Error sending metrics to CloudWatch. %+v", err) + cw.logger.Errorf("Error sending metrics to CloudWatch. %+v", err) } select { case <-*cw.stop: - cw.Logger.Infof("Shutting down monitoring system") + cw.logger.Infof("Shutting down monitoring system") if err := cw.flush(); err != nil { - cw.Logger.Errorf("Error sending metrics to CloudWatch. %+v", err) + cw.logger.Errorf("Error sending metrics to CloudWatch. 
%+v", err) } return - case <-time.After(time.Duration(cw.MetricsBufferTimeMillis) * time.Millisecond): + case <-time.After(cw.bufferDuration): } } } -func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWatchMetrics) bool { +func (cw *MonitoringService) flushShard(shard string, metric *cloudWatchMetrics) bool { metric.Lock() - defaultDimensions := []*cloudwatch.Dimension{ + defaultDimensions := []*cwatch.Dimension{ { Name: aws.String("Shard"), Value: &shard, }, { Name: aws.String("KinesisStreamName"), - Value: &cw.KinesisStream, + Value: &cw.streamName, }, } - leaseDimensions := []*cloudwatch.Dimension{ + leaseDimensions := []*cwatch.Dimension{ { Name: aws.String("Shard"), Value: &shard, }, { Name: aws.String("KinesisStreamName"), - Value: &cw.KinesisStream, + Value: &cw.streamName, }, { Name: aws.String("WorkerID"), - Value: &cw.WorkerID, + Value: &cw.workerID, }, } metricTimestamp := time.Now() - data := []*cloudwatch.MetricDatum{ + data := []*cwatch.MetricDatum{ { Dimensions: defaultDimensions, MetricName: aws.String("RecordsProcessed"), @@ -184,12 +203,12 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat } if len(metric.behindLatestMillis) > 0 { - data = append(data, &cloudwatch.MetricDatum{ + data = append(data, &cwatch.MetricDatum{ Dimensions: defaultDimensions, MetricName: aws.String("MillisBehindLatest"), Unit: aws.String("Milliseconds"), Timestamp: &metricTimestamp, - StatisticValues: &cloudwatch.StatisticSet{ + StatisticValues: &cwatch.StatisticSet{ SampleCount: aws.Float64(float64(len(metric.behindLatestMillis))), Sum: sumFloat64(metric.behindLatestMillis), Maximum: maxFloat64(metric.behindLatestMillis), @@ -198,12 +217,12 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat } if len(metric.getRecordsTime) > 0 { - data = append(data, &cloudwatch.MetricDatum{ + data = append(data, &cwatch.MetricDatum{ Dimensions: defaultDimensions, MetricName: 
aws.String("KinesisDataFetcher.getRecords.Time"), Unit: aws.String("Milliseconds"), Timestamp: &metricTimestamp, - StatisticValues: &cloudwatch.StatisticSet{ + StatisticValues: &cwatch.StatisticSet{ SampleCount: aws.Float64(float64(len(metric.getRecordsTime))), Sum: sumFloat64(metric.getRecordsTime), Maximum: maxFloat64(metric.getRecordsTime), @@ -212,12 +231,12 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat } if len(metric.processRecordsTime) > 0 { - data = append(data, &cloudwatch.MetricDatum{ + data = append(data, &cwatch.MetricDatum{ Dimensions: defaultDimensions, MetricName: aws.String("RecordProcessor.processRecords.Time"), Unit: aws.String("Milliseconds"), Timestamp: &metricTimestamp, - StatisticValues: &cloudwatch.StatisticSet{ + StatisticValues: &cwatch.StatisticSet{ SampleCount: aws.Float64(float64(len(metric.processRecordsTime))), Sum: sumFloat64(metric.processRecordsTime), Maximum: maxFloat64(metric.processRecordsTime), @@ -226,8 +245,8 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat } // Publish metrics data to cloud watch - _, err := cw.svc.PutMetricData(&cloudwatch.PutMetricDataInput{ - Namespace: aws.String(cw.Namespace), + _, err := cw.svc.PutMetricData(&cwatch.PutMetricDataInput{ + Namespace: aws.String(cw.appName), MetricData: data, }) @@ -239,15 +258,15 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat metric.getRecordsTime = []float64{} metric.processRecordsTime = []float64{} } else { - cw.Logger.Errorf("Error in publishing cloudwatch metrics. Error: %+v", err) + cw.logger.Errorf("Error in publishing cloudwatch metrics. Error: %+v", err) } metric.Unlock() return true } -func (cw *CloudWatchMonitoringService) flush() error { - cw.Logger.Debugf("Flushing metrics data. Stream: %s, Worker: %s", cw.KinesisStream, cw.WorkerID) +func (cw *MonitoringService) flush() error { + cw.logger.Debugf("Flushing metrics data. 
Stream: %s, Worker: %s", cw.streamName, cw.workerID) // publish per shard metrics cw.shardMetrics.Range(func(k, v interface{}) bool { shard, metric := k.(string), v.(*cloudWatchMetrics) @@ -257,62 +276,62 @@ func (cw *CloudWatchMonitoringService) flush() error { return nil } -func (cw *CloudWatchMonitoringService) IncrRecordsProcessed(shard string, count int) { +func (cw *MonitoringService) IncrRecordsProcessed(shard string, count int) { m := cw.getOrCreatePerShardMetrics(shard) m.Lock() defer m.Unlock() m.processedRecords += int64(count) } -func (cw *CloudWatchMonitoringService) IncrBytesProcessed(shard string, count int64) { +func (cw *MonitoringService) IncrBytesProcessed(shard string, count int64) { m := cw.getOrCreatePerShardMetrics(shard) m.Lock() defer m.Unlock() m.processedBytes += count } -func (cw *CloudWatchMonitoringService) MillisBehindLatest(shard string, millSeconds float64) { +func (cw *MonitoringService) MillisBehindLatest(shard string, millSeconds float64) { m := cw.getOrCreatePerShardMetrics(shard) m.Lock() defer m.Unlock() m.behindLatestMillis = append(m.behindLatestMillis, millSeconds) } -func (cw *CloudWatchMonitoringService) LeaseGained(shard string) { +func (cw *MonitoringService) LeaseGained(shard string) { m := cw.getOrCreatePerShardMetrics(shard) m.Lock() defer m.Unlock() m.leasesHeld++ } -func (cw *CloudWatchMonitoringService) LeaseLost(shard string) { +func (cw *MonitoringService) LeaseLost(shard string) { m := cw.getOrCreatePerShardMetrics(shard) m.Lock() defer m.Unlock() m.leasesHeld-- } -func (cw *CloudWatchMonitoringService) LeaseRenewed(shard string) { +func (cw *MonitoringService) LeaseRenewed(shard string) { m := cw.getOrCreatePerShardMetrics(shard) m.Lock() defer m.Unlock() m.leaseRenewals++ } -func (cw *CloudWatchMonitoringService) RecordGetRecordsTime(shard string, time float64) { +func (cw *MonitoringService) RecordGetRecordsTime(shard string, time float64) { m := cw.getOrCreatePerShardMetrics(shard) m.Lock() defer m.Unlock() 
m.getRecordsTime = append(m.getRecordsTime, time) } -func (cw *CloudWatchMonitoringService) RecordProcessRecordsTime(shard string, time float64) { +func (cw *MonitoringService) RecordProcessRecordsTime(shard string, time float64) { m := cw.getOrCreatePerShardMetrics(shard) m.Lock() defer m.Unlock() m.processRecordsTime = append(m.processRecordsTime, time) } -func (cw *CloudWatchMonitoringService) getOrCreatePerShardMetrics(shard string) *cloudWatchMetrics { +func (cw *MonitoringService) getOrCreatePerShardMetrics(shard string) *cloudWatchMetrics { var i interface{} var ok bool if i, ok = cw.shardMetrics.Load(shard); !ok { diff --git a/clientlibrary/metrics/interfaces.go b/clientlibrary/metrics/interfaces.go index c79cb61..ddd6188 100644 --- a/clientlibrary/metrics/interfaces.go +++ b/clientlibrary/metrics/interfaces.go @@ -27,23 +27,8 @@ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. package metrics -import ( - "fmt" - "github.com/vmware/vmware-go-kcl/logger" -) - -// MonitoringConfiguration allows you to configure how record processing metrics are exposed -type MonitoringConfiguration struct { - MonitoringService string // Type of monitoring to expose. 
Supported types are "prometheus" - Region string - Prometheus PrometheusMonitoringService - CloudWatch CloudWatchMonitoringService - service MonitoringService - Logger logger.Logger -} - type MonitoringService interface { - Init() error + Init(appName, streamName, workerID string) error Start() error IncrRecordsProcessed(string, int) IncrBytesProcessed(string, int64) @@ -56,53 +41,18 @@ type MonitoringService interface { Shutdown() } -func (m *MonitoringConfiguration) Init(nameSpace, streamName string, workerID string) error { - if m.MonitoringService == "" { - m.service = &noopMonitoringService{} - return nil - } +// NoopMonitoringService implements MonitoringService by doing nothing. +type NoopMonitoringService struct{} - // Config with default logger if logger is not specified. - if m.Logger == nil { - m.Logger = logger.GetDefaultLogger() - } +func (NoopMonitoringService) Init(appName, streamName, workerID string) error { return nil } +func (NoopMonitoringService) Start() error { return nil } +func (NoopMonitoringService) Shutdown() {} - switch m.MonitoringService { - case "prometheus": - m.Prometheus.Namespace = nameSpace - m.Prometheus.KinesisStream = streamName - m.Prometheus.WorkerID = workerID - m.Prometheus.Region = m.Region - m.Prometheus.Logger = m.Logger - m.service = &m.Prometheus - case "cloudwatch": - m.CloudWatch.Namespace = nameSpace - m.CloudWatch.KinesisStream = streamName - m.CloudWatch.WorkerID = workerID - m.CloudWatch.Region = m.Region - m.CloudWatch.Logger = m.Logger - m.service = &m.CloudWatch - default: - return fmt.Errorf("Invalid monitoring service type %s", m.MonitoringService) - } - return m.service.Init() -} - -func (m *MonitoringConfiguration) GetMonitoringService() MonitoringService { - return m.service -} - -type noopMonitoringService struct{} - -func (n *noopMonitoringService) Init() error { return nil } -func (n *noopMonitoringService) Start() error { return nil } -func (n *noopMonitoringService) Shutdown() {} - -func (n 
*noopMonitoringService) IncrRecordsProcessed(shard string, count int) {} -func (n *noopMonitoringService) IncrBytesProcessed(shard string, count int64) {} -func (n *noopMonitoringService) MillisBehindLatest(shard string, millSeconds float64) {} -func (n *noopMonitoringService) LeaseGained(shard string) {} -func (n *noopMonitoringService) LeaseLost(shard string) {} -func (n *noopMonitoringService) LeaseRenewed(shard string) {} -func (n *noopMonitoringService) RecordGetRecordsTime(shard string, time float64) {} -func (n *noopMonitoringService) RecordProcessRecordsTime(shard string, time float64) {} +func (NoopMonitoringService) IncrRecordsProcessed(shard string, count int) {} +func (NoopMonitoringService) IncrBytesProcessed(shard string, count int64) {} +func (NoopMonitoringService) MillisBehindLatest(shard string, millSeconds float64) {} +func (NoopMonitoringService) LeaseGained(shard string) {} +func (NoopMonitoringService) LeaseLost(shard string) {} +func (NoopMonitoringService) LeaseRenewed(shard string) {} +func (NoopMonitoringService) RecordGetRecordsTime(shard string, time float64) {} +func (NoopMonitoringService) RecordProcessRecordsTime(shard string, time float64) {} diff --git a/clientlibrary/metrics/prometheus.go b/clientlibrary/metrics/prometheus.go deleted file mode 100644 index 3dae914..0000000 --- a/clientlibrary/metrics/prometheus.go +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright (c) 2018 VMware, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and - * associated documentation files (the "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is furnished to do - * so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT - * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -// The implementation is derived from https://github.com/patrobinson/gokini -// -// Copyright 2018 Patrick robinson -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -package metrics - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - - "github.com/vmware/vmware-go-kcl/logger" -) - -// PrometheusMonitoringService to start Prometheus as metrics system. -// It might be trick if the service onboarding with KCL also uses Prometheus. -// Therefore, we should start cloudwatch metrics by default instead. -type PrometheusMonitoringService struct { - ListenAddress string - - Namespace string - KinesisStream string - WorkerID string - Region string - Logger logger.Logger - - processedRecords *prometheus.CounterVec - processedBytes *prometheus.CounterVec - behindLatestMillis *prometheus.GaugeVec - leasesHeld *prometheus.GaugeVec - leaseRenewals *prometheus.CounterVec - getRecordsTime *prometheus.HistogramVec - processRecordsTime *prometheus.HistogramVec -} - -func (p *PrometheusMonitoringService) Init() error { - p.processedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: p.Namespace + `_processed_bytes`, - Help: "Number of bytes processed", - }, []string{"kinesisStream", "shard"}) - p.processedRecords = prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: p.Namespace + `_processed_records`, - Help: "Number of records processed", - }, []string{"kinesisStream", "shard"}) - p.behindLatestMillis = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: p.Namespace + `_behind_latest_millis`, - Help: "The amount of milliseconds processing is behind", - }, []string{"kinesisStream", "shard"}) - p.leasesHeld = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Name: p.Namespace + `_leases_held`, - Help: "The number of leases held by the worker", - }, []string{"kinesisStream", "shard", 
"workerID"}) - p.leaseRenewals = prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: p.Namespace + `_lease_renewals`, - Help: "The number of successful lease renewals", - }, []string{"kinesisStream", "shard", "workerID"}) - p.getRecordsTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: p.Namespace + `_get_records_duration_milliseconds`, - Help: "The time taken to fetch records and process them", - }, []string{"kinesisStream", "shard"}) - p.processRecordsTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Name: p.Namespace + `_process_records_duration_milliseconds`, - Help: "The time taken to process records", - }, []string{"kinesisStream", "shard"}) - - metrics := []prometheus.Collector{ - p.processedBytes, - p.processedRecords, - p.behindLatestMillis, - p.leasesHeld, - p.leaseRenewals, - p.getRecordsTime, - p.processRecordsTime, - } - for _, metric := range metrics { - err := prometheus.Register(metric) - if err != nil { - return err - } - } - - return nil -} - -func (p *PrometheusMonitoringService) Start() error { - http.Handle("/metrics", promhttp.Handler()) - go func() { - p.Logger.Infof("Starting Prometheus listener on %s", p.ListenAddress) - err := http.ListenAndServe(p.ListenAddress, nil) - if err != nil { - p.Logger.Errorf("Error starting Prometheus metrics endpoint. 
%+v", err) - } - p.Logger.Infof("Stopped metrics server") - }() - - return nil -} - -func (p *PrometheusMonitoringService) Shutdown() {} - -func (p *PrometheusMonitoringService) IncrRecordsProcessed(shard string, count int) { - p.processedRecords.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Add(float64(count)) -} - -func (p *PrometheusMonitoringService) IncrBytesProcessed(shard string, count int64) { - p.processedBytes.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Add(float64(count)) -} - -func (p *PrometheusMonitoringService) MillisBehindLatest(shard string, millSeconds float64) { - p.behindLatestMillis.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Set(millSeconds) -} - -func (p *PrometheusMonitoringService) LeaseGained(shard string) { - p.leasesHeld.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream, "workerID": p.WorkerID}).Inc() -} - -func (p *PrometheusMonitoringService) LeaseLost(shard string) { - p.leasesHeld.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream, "workerID": p.WorkerID}).Dec() -} - -func (p *PrometheusMonitoringService) LeaseRenewed(shard string) { - p.leaseRenewals.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream, "workerID": p.WorkerID}).Inc() -} - -func (p *PrometheusMonitoringService) RecordGetRecordsTime(shard string, time float64) { - p.getRecordsTime.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Observe(time) -} - -func (p *PrometheusMonitoringService) RecordProcessRecordsTime(shard string, time float64) { - p.processRecordsTime.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Observe(time) -} diff --git a/clientlibrary/metrics/prometheus/prometheus.go b/clientlibrary/metrics/prometheus/prometheus.go new file mode 100644 index 0000000..e6277c6 --- /dev/null +++ b/clientlibrary/metrics/prometheus/prometheus.go @@ -0,0 +1,166 @@ +/* + * 
Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/patrobinson/gokini +// +// Copyright 2018 Patrick robinson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +package prometheus +
+import (
+	"net/http"
+
+	prom "github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+
+	"github.com/vmware/vmware-go-kcl/logger"
+)
+
+// MonitoringService publishes kcl metrics to Prometheus.
+// It might be tricky if the service onboarding with KCL already uses Prometheus.
+type MonitoringService struct {
+	listenAddress string
+	namespace     string
+	streamName    string
+	workerID      string
+	region        string
+	logger        logger.Logger
+
+	processedRecords   *prom.CounterVec
+	processedBytes     *prom.CounterVec
+	behindLatestMillis *prom.GaugeVec
+	leasesHeld         *prom.GaugeVec
+	leaseRenewals      *prom.CounterVec
+	getRecordsTime     *prom.HistogramVec
+	processRecordsTime *prom.HistogramVec
+}
+
+// NewMonitoringService returns a Monitoring service publishing metrics to Prometheus. 
+func NewMonitoringService(listenAddress, region string, logger logger.Logger) *MonitoringService { + return &MonitoringService{ + listenAddress: listenAddress, + region: region, + logger: logger, + } +} + +func (p *MonitoringService) Init(appName, streamName, workerID string) error { + p.namespace = appName + p.streamName = streamName + p.workerID = workerID + + p.processedBytes = prom.NewCounterVec(prom.CounterOpts{ + Name: p.namespace + `_processed_bytes`, + Help: "Number of bytes processed", + }, []string{"kinesisStream", "shard"}) + p.processedRecords = prom.NewCounterVec(prom.CounterOpts{ + Name: p.namespace + `_processed_records`, + Help: "Number of records processed", + }, []string{"kinesisStream", "shard"}) + p.behindLatestMillis = prom.NewGaugeVec(prom.GaugeOpts{ + Name: p.namespace + `_behind_latest_millis`, + Help: "The amount of milliseconds processing is behind", + }, []string{"kinesisStream", "shard"}) + p.leasesHeld = prom.NewGaugeVec(prom.GaugeOpts{ + Name: p.namespace + `_leases_held`, + Help: "The number of leases held by the worker", + }, []string{"kinesisStream", "shard", "workerID"}) + p.leaseRenewals = prom.NewCounterVec(prom.CounterOpts{ + Name: p.namespace + `_lease_renewals`, + Help: "The number of successful lease renewals", + }, []string{"kinesisStream", "shard", "workerID"}) + p.getRecordsTime = prom.NewHistogramVec(prom.HistogramOpts{ + Name: p.namespace + `_get_records_duration_milliseconds`, + Help: "The time taken to fetch records and process them", + }, []string{"kinesisStream", "shard"}) + p.processRecordsTime = prom.NewHistogramVec(prom.HistogramOpts{ + Name: p.namespace + `_process_records_duration_milliseconds`, + Help: "The time taken to process records", + }, []string{"kinesisStream", "shard"}) + + metrics := []prom.Collector{ + p.processedBytes, + p.processedRecords, + p.behindLatestMillis, + p.leasesHeld, + p.leaseRenewals, + p.getRecordsTime, + p.processRecordsTime, + } + for _, metric := range metrics { + err := 
prom.Register(metric) + if err != nil { + return err + } + } + + return nil +} + +func (p *MonitoringService) Start() error { + http.Handle("/metrics", promhttp.Handler()) + go func() { + p.logger.Infof("Starting Prometheus listener on %s", p.listenAddress) + err := http.ListenAndServe(p.listenAddress, nil) + if err != nil { + p.logger.Errorf("Error starting Prometheus metrics endpoint. %+v", err) + } + p.logger.Infof("Stopped metrics server") + }() + + return nil +} + +func (p *MonitoringService) Shutdown() {} + +func (p *MonitoringService) IncrRecordsProcessed(shard string, count int) { + p.processedRecords.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Add(float64(count)) +} + +func (p *MonitoringService) IncrBytesProcessed(shard string, count int64) { + p.processedBytes.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Add(float64(count)) +} + +func (p *MonitoringService) MillisBehindLatest(shard string, millSeconds float64) { + p.behindLatestMillis.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Set(millSeconds) +} + +func (p *MonitoringService) LeaseGained(shard string) { + p.leasesHeld.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName, "workerID": p.workerID}).Inc() +} + +func (p *MonitoringService) LeaseLost(shard string) { + p.leasesHeld.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName, "workerID": p.workerID}).Dec() +} + +func (p *MonitoringService) LeaseRenewed(shard string) { + p.leaseRenewals.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName, "workerID": p.workerID}).Inc() +} + +func (p *MonitoringService) RecordGetRecordsTime(shard string, time float64) { + p.getRecordsTime.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Observe(time) +} + +func (p *MonitoringService) RecordProcessRecordsTime(shard string, time float64) { + p.processRecordsTime.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Observe(time) +} diff --git 
a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index fac1a7d..cf339fe 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -58,34 +58,32 @@ type Worker struct { kclConfig *config.KinesisClientLibConfiguration kc kinesisiface.KinesisAPI checkpointer chk.Checkpointer + mService metrics.MonitoringService stop *chan struct{} waitGroup *sync.WaitGroup done bool shardStatus map[string]*par.ShardStatus - - metricsConfig *metrics.MonitoringConfiguration - mService metrics.MonitoringService } // NewWorker constructs a Worker instance for processing Kinesis stream data. -func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisClientLibConfiguration, metricsConfig *metrics.MonitoringConfiguration) *Worker { - w := &Worker{ +func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisClientLibConfiguration) *Worker { + mService := kclConfig.MonitoringService + if mService == nil { + // Replaces nil with noop monitor service (not emitting any metrics). + mService = metrics.NoopMonitoringService{} + } + + return &Worker{ streamName: kclConfig.StreamName, regionName: kclConfig.RegionName, workerID: kclConfig.WorkerID, processorFactory: factory, kclConfig: kclConfig, - metricsConfig: metricsConfig, + mService: mService, done: false, } - - if w.metricsConfig == nil { - // "" means noop monitor service. i.e. not emitting any metrics. - w.metricsConfig = &metrics.MonitoringConfiguration{MonitoringService: ""} - } - return w } // WithKinesis is used to provide Kinesis service for either custom implementation or unit testing. 
@@ -186,11 +184,10 @@ func (w *Worker) initialize() error { log.Infof("Use custom checkpointer implementation.") } - err := w.metricsConfig.Init(w.kclConfig.ApplicationName, w.streamName, w.workerID) + err := w.mService.Init(w.kclConfig.ApplicationName, w.streamName, w.workerID) if err != nil { log.Errorf("Failed to start monitoring service: %+v", err) } - w.mService = w.metricsConfig.GetMonitoringService() log.Infof("Initializing Checkpointer") if err := w.checkpointer.Init(); err != nil { diff --git a/test/worker_custom_test.go b/test/worker_custom_test.go index 2ac300f..46f21be 100644 --- a/test/worker_custom_test.go +++ b/test/worker_custom_test.go @@ -44,10 +44,7 @@ func TestWorkerInjectCheckpointer(t *testing.T) { WithMaxRecords(10). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). - WithFailoverTimeMillis(300000). - WithMetricsBufferTimeMillis(10000). - WithMetricsMaxQueueSize(20) - + WithFailoverTimeMillis(300000) log.SetOutput(os.Stdout) log.SetLevel(log.DebugLevel) @@ -55,13 +52,13 @@ func TestWorkerInjectCheckpointer(t *testing.T) { assert.Equal(t, streamName, kclConfig.StreamName) // configure cloudwatch as metrics system - metricsConfig := getMetricsConfig(kclConfig, metricsSystem) + kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem)) // custom checkpointer or a mock checkpointer. checkpointer := chk.NewDynamoCheckpoint(kclConfig) // Inject a custom checkpointer into the worker. - worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig). + worker := wk.NewWorker(recordProcessorFactory(t), kclConfig). WithCheckpointer(checkpointer) err := worker.Start() @@ -101,9 +98,7 @@ func TestWorkerInjectKinesis(t *testing.T) { WithMaxRecords(10). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). - WithFailoverTimeMillis(300000). - WithMetricsBufferTimeMillis(10000). 
- WithMetricsMaxQueueSize(20) + WithFailoverTimeMillis(300000) log.SetOutput(os.Stdout) log.SetLevel(log.DebugLevel) @@ -112,7 +107,7 @@ func TestWorkerInjectKinesis(t *testing.T) { assert.Equal(t, streamName, kclConfig.StreamName) // configure cloudwatch as metrics system - metricsConfig := getMetricsConfig(kclConfig, metricsSystem) + kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem)) // create custom Kinesis s, err := session.NewSession(&aws.Config{ @@ -122,7 +117,7 @@ func TestWorkerInjectKinesis(t *testing.T) { kc := kinesis.New(s) // Inject a custom checkpointer into the worker. - worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig). + worker := wk.NewWorker(recordProcessorFactory(t), kclConfig). WithKinesis(kc) err = worker.Start() @@ -148,9 +143,7 @@ func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) { WithMaxRecords(10). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). - WithFailoverTimeMillis(300000). - WithMetricsBufferTimeMillis(10000). - WithMetricsMaxQueueSize(20) + WithFailoverTimeMillis(300000) log.SetOutput(os.Stdout) log.SetLevel(log.DebugLevel) @@ -159,7 +152,7 @@ func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) { assert.Equal(t, streamName, kclConfig.StreamName) // configure cloudwatch as metrics system - metricsConfig := getMetricsConfig(kclConfig, metricsSystem) + kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem)) // create custom Kinesis s, err := session.NewSession(&aws.Config{ @@ -172,7 +165,7 @@ func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) { checkpointer := chk.NewDynamoCheckpoint(kclConfig) // Inject both custom checkpointer and kinesis into the worker. - worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig). + worker := wk.NewWorker(recordProcessorFactory(t), kclConfig). WithKinesis(kc). 
WithCheckpointer(checkpointer) diff --git a/test/worker_test.go b/test/worker_test.go index 85fa31c..54699a2 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -75,8 +75,6 @@ func TestWorker(t *testing.T) { WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). WithFailoverTimeMillis(300000). - WithMetricsBufferTimeMillis(10000). - WithMetricsMaxQueueSize(20). WithLogger(log) runTest(kclConfig, false, t) @@ -99,8 +97,6 @@ func TestWorkerWithTimestamp(t *testing.T) { WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). WithFailoverTimeMillis(300000). - WithMetricsBufferTimeMillis(10000). - WithMetricsMaxQueueSize(20). WithLogger(log) runTest(kclConfig, false, t) @@ -131,8 +127,6 @@ func TestWorkerWithSigInt(t *testing.T) { WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). WithFailoverTimeMillis(300000). - WithMetricsBufferTimeMillis(10000). - WithMetricsMaxQueueSize(20). WithLogger(log) runTest(kclConfig, true, t) @@ -148,9 +142,7 @@ func TestWorkerStatic(t *testing.T) { WithMaxRecords(10). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). - WithFailoverTimeMillis(300000). - WithMetricsBufferTimeMillis(10000). - WithMetricsMaxQueueSize(20) + WithFailoverTimeMillis(300000) runTest(kclConfig, false, t) } @@ -172,9 +164,7 @@ func TestWorkerAssumeRole(t *testing.T) { WithMaxRecords(10). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). - WithFailoverTimeMillis(300000). - WithMetricsBufferTimeMillis(10000). 
- WithMetricsMaxQueueSize(20) + WithFailoverTimeMillis(300000) runTest(kclConfig, false, t) } @@ -184,9 +174,9 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t assert.Equal(t, streamName, kclConfig.StreamName) // configure cloudwatch as metrics system - metricsConfig := getMetricsConfig(kclConfig, metricsSystem) + kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem)) - worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig) + worker := wk.NewWorker(recordProcessorFactory(t), kclConfig) err := worker.Start() assert.Nil(t, err) @@ -223,7 +213,7 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t // wait a few seconds before shutdown processing time.Sleep(10 * time.Second) - if metricsConfig != nil && metricsConfig.MonitoringService == "prometheus" { + if metricsSystem == "prometheus" { res, err := http.Get("http://localhost:8080/metrics") if err != nil { t.Fatalf("Error scraping Prometheus endpoint %s", err) @@ -244,30 +234,17 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t } // configure different metrics system -func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service string) *metrics.MonitoringConfiguration { +func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service string) metrics.MonitoringService { + if service == "cloudwatch" { - return &metrics.MonitoringConfiguration{ - MonitoringService: "cloudwatch", - Region: regionName, - Logger: kclConfig.Logger, - CloudWatch: metrics.CloudWatchMonitoringService{ - Credentials: kclConfig.CloudWatchCredentials, - // Those value should come from kclConfig - MetricsBufferTimeMillis: kclConfig.MetricsBufferTimeMillis, - MetricsMaxQueueSize: kclConfig.MetricsMaxQueueSize, - }, - } + return metrics.NewDetailedCloudWatchMonitoringService(kclConfig.RegionName, + kclConfig.KinesisCredentials, + kclConfig.Logger, + 
metrics.DEFAULT_CLOUDWATCH_METRICS_BUFFER_DURATION) } if service == "prometheus" { - return &metrics.MonitoringConfiguration{ - MonitoringService: "prometheus", - Region: regionName, - Logger: kclConfig.Logger, - Prometheus: metrics.PrometheusMonitoringService{ - ListenAddress: ":8080", - }, - } + return metrics.NewPrometheusMonitoringService(":8080", regionName, kclConfig.Logger) } return nil From eb56e3b1d70750cba03f9762b04f206735ffea76 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Wed, 6 Nov 2019 08:35:08 -0600 Subject: [PATCH 50/90] Fix broken tests (#50) Fix some broken unit and integ tests introduced by last commit. Tests: 1. hmake test 2. Run integration test on Goland IDE and make sure all pass. Signed-off-by: Tao Jiang --- clientlibrary/config/config_test.go | 3 ++- clientlibrary/worker/worker.go | 4 ++-- test/worker_test.go | 8 +++++--- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/clientlibrary/config/config_test.go b/clientlibrary/config/config_test.go index 80a9395..a7015d9 100644 --- a/clientlibrary/config/config_test.go +++ b/clientlibrary/config/config_test.go @@ -35,7 +35,8 @@ func TestConfig(t *testing.T) { WithTaskBackoffTimeMillis(10) assert.Equal(t, "appName", kclConfig.ApplicationName) - assert.Equal(t, 500, kclConfig.TaskBackoffTimeMillis) + assert.Equal(t, 500, kclConfig.FailoverTimeMillis) + assert.Equal(t, 10, kclConfig.TaskBackoffTimeMillis) contextLogger := kclConfig.Logger.WithFields(logger.Fields{"key1": "value1"}) contextLogger.Debugf("Starting with default logger") diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index cf339fe..132c123 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -69,8 +69,8 @@ type Worker struct { // NewWorker constructs a Worker instance for processing Kinesis stream data. 
func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisClientLibConfiguration) *Worker { - var mService metrics.MonitoringService - if kclConfig.MonitoringService == nil { + mService := kclConfig.MonitoringService + if mService == nil { // Replaces nil with noop monitor service (not emitting any metrics). mService = metrics.NoopMonitoringService{} } diff --git a/test/worker_test.go b/test/worker_test.go index 54699a2..7cf4f87 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -35,6 +35,8 @@ import ( cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" kc "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/cloudwatch" + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/prometheus" "github.com/vmware/vmware-go-kcl/clientlibrary/utils" wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" "github.com/vmware/vmware-go-kcl/logger" @@ -237,14 +239,14 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service string) metrics.MonitoringService { if service == "cloudwatch" { - return metrics.NewDetailedCloudWatchMonitoringService(kclConfig.RegionName, + return cloudwatch.NewMonitoringServiceWithOptions(kclConfig.RegionName, kclConfig.KinesisCredentials, kclConfig.Logger, - metrics.DEFAULT_CLOUDWATCH_METRICS_BUFFER_DURATION) + cloudwatch.DEFAULT_CLOUDWATCH_METRICS_BUFFER_DURATION) } if service == "prometheus" { - return metrics.NewPrometheusMonitoringService(":8080", regionName, kclConfig.Logger) + return prometheus.NewMonitoringService(":8080", regionName, kclConfig.Logger) } return nil From c9793728a33ffac28319dfe297490112f56ed9b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Rainone?= <476650+arl@users.noreply.github.com> Date: Sat, 9 Nov 2019 20:27:20 +0100 Subject: [PATCH 51/90] Fix 'get records 
time' metric (#53) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The time sent to the `metrics.MonitoringService.RecordGetRecordsTime`' was not the time taken by GetRecords, it was the time taken by `GetRecords` and `ProcessRecords` additioned together. Fixes #51 Signed-off-by: Aurélien Rainone --- clientlibrary/worker/shard-consumer.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 2b44922..06a09eb 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -169,7 +169,6 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { retriedErrors := 0 for { - getRecordsStartTime := time.Now() if time.Now().UTC().After(shard.LeaseTimeout.Add(-5 * time.Second)) { log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID) err = sc.checkpointer.GetLease(shard, sc.consumerID) @@ -185,6 +184,8 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { } } + getRecordsStartTime := time.Now() + log.Debugf("Trying to read %d record from iterator: %v", sc.kclConfig.MaxRecords, aws.StringValue(shardIterator)) getRecordsArgs := &kinesis.GetRecordsInput{ Limit: aws.Int64(int64(sc.kclConfig.MaxRecords)), @@ -207,6 +208,10 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { return err } + // Convert from nanoseconds to milliseconds + getRecordsTime := time.Since(getRecordsStartTime) / 1000000 + sc.mService.RecordGetRecordsTime(shard.ID, float64(getRecordsTime)) + // reset the retry count after success retriedErrors = 0 @@ -240,10 +245,6 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { sc.mService.IncrBytesProcessed(shard.ID, recordBytes) sc.mService.MillisBehindLatest(shard.ID, float64(*getResp.MillisBehindLatest)) - // Convert from nanoseconds to milliseconds - getRecordsTime := 
time.Since(getRecordsStartTime) / 1000000 - sc.mService.RecordGetRecordsTime(shard.ID, float64(getRecordsTime)) - // Idle between each read, the user is responsible for checkpoint the progress // This value is only used when no records are returned; if records are returned, it should immediately // retrieve the next set of records. From 9ca9d901ca8dde26c25cc5365c780b605fef8955 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 11 Nov 2019 14:11:44 -0600 Subject: [PATCH 52/90] Fix error in puslishing cloud watch metrics (#55) Reported at: https://github.com/vmware/vmware-go-kcl/issues/54 The input params are not used to set monitor service in cloudwatch Init function. The empty appName, streamName and workerID cause PutMetricData failed with error string "Error in publishing cloudwatch metrics. Error: InvalidParameter...". Signed-off-by: Tao Jiang --- clientlibrary/metrics/cloudwatch/cloudwatch.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/clientlibrary/metrics/cloudwatch/cloudwatch.go b/clientlibrary/metrics/cloudwatch/cloudwatch.go index 9572579..3fea191 100644 --- a/clientlibrary/metrics/cloudwatch/cloudwatch.go +++ b/clientlibrary/metrics/cloudwatch/cloudwatch.go @@ -89,6 +89,10 @@ func NewMonitoringServiceWithOptions(region string, creds *credentials.Credentia } func (cw *MonitoringService) Init(appName, streamName, workerID string) error { + cw.appName = appName + cw.streamName = streamName + cw.workerID = workerID + cfg := &aws.Config{Region: aws.String(cw.region)} cfg.Credentials = cw.credentials s, err := session.NewSession(cfg) From 6c9e5947514055d504bcfb8739ae6c288311e54c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Rainone?= <476650+arl@users.noreply.github.com> Date: Thu, 14 Nov 2019 00:15:33 +0100 Subject: [PATCH 53/90] Make the lease refresh period configurable (#56) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add LeaseRefreshSpanMillis in configuration For certain use cases of 
KCL the hard-coded value of 5s value, representing the time span before the end of a lease timeout in which the current owner gets to renew its own lease, is not sufficient. When the time taken by ProcessRecords is higher than 5s, the lease gets lost and the shard may end up to another worker. This commit adds a new configuration value, that defaults to 5s, to let the user set this value to its own needs. Signed-off-by: Aurélien Rainone * Slight code simplification Or readability improvement Signed-off-by: Aurélien Rainone --- clientlibrary/checkpoint/dynamodb-checkpointer.go | 2 +- clientlibrary/config/config.go | 8 +++++++- clientlibrary/config/kcl-config.go | 7 +++++++ clientlibrary/worker/shard-consumer.go | 4 ++-- 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 1cce247..1b61f45 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -135,7 +135,7 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign return err } - if !time.Now().UTC().After(currentLeaseTimeout) && assignedTo != newAssignTo { + if time.Now().UTC().Before(currentLeaseTimeout) && assignedTo != newAssignTo { return errors.New(ErrLeaseNotAquired) } diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index e4fd36e..c517645 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -63,6 +63,9 @@ const ( // the number of DynamoDB IOPS required for tracking leases. DEFAULT_FAILOVER_TIME_MILLIS = 10000 + // Period before the end of lease during which a lease is refreshed by the owner. + DEFAULT_LEASE_REFRESH_PERIOD_MILLIS = 5000 + // Max records to fetch from Kinesis in a single GetRecords call. 
DEFAULT_MAX_RECORDS = 10000 @@ -190,7 +193,10 @@ type ( // FailoverTimeMillis Lease duration (leases not renewed within this period will be claimed by others) FailoverTimeMillis int - /// MaxRecords Max records to read per Kinesis getRecords() call + // LeaseRefreshPeriodMillis is the period before the end of lease during which a lease is refreshed by the owner. + LeaseRefreshPeriodMillis int + + // MaxRecords Max records to read per Kinesis getRecords() call MaxRecords int // IdleTimeBetweenReadsInMillis Idle time between calls to fetch data from Kinesis diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index ec59be9..d7960e9 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -79,6 +79,7 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regio InitialPositionInStream: DEFAULT_INITIAL_POSITION_IN_STREAM, InitialPositionInStreamExtended: *newInitialPosition(DEFAULT_INITIAL_POSITION_IN_STREAM), FailoverTimeMillis: DEFAULT_FAILOVER_TIME_MILLIS, + LeaseRefreshPeriodMillis: DEFAULT_LEASE_REFRESH_PERIOD_MILLIS, MaxRecords: DEFAULT_MAX_RECORDS, IdleTimeBetweenReadsInMillis: DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, CallProcessRecordsEvenForEmptyRecordList: DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, @@ -133,6 +134,12 @@ func (c *KinesisClientLibConfiguration) WithFailoverTimeMillis(failoverTimeMilli return c } +func (c *KinesisClientLibConfiguration) WithLeaseRefreshPeriodMillis(leaseRefreshPeriodMillis int) *KinesisClientLibConfiguration { + checkIsValuePositive("LeaseRefreshPeriodMillis", leaseRefreshPeriodMillis) + c.LeaseRefreshPeriodMillis = leaseRefreshPeriodMillis + return c +} + func (c *KinesisClientLibConfiguration) WithShardSyncIntervalMillis(shardSyncIntervalMillis int) *KinesisClientLibConfiguration { checkIsValuePositive("ShardSyncIntervalMillis", shardSyncIntervalMillis) c.ShardSyncIntervalMillis = shardSyncIntervalMillis diff --git 
a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 06a09eb..476ab01 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -169,7 +169,7 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { retriedErrors := 0 for { - if time.Now().UTC().After(shard.LeaseTimeout.Add(-5 * time.Second)) { + if time.Now().UTC().After(shard.LeaseTimeout.Add(-time.Duration(sc.kclConfig.LeaseRefreshPeriodMillis) * time.Millisecond)) { log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID) err = sc.checkpointer.GetLease(shard, sc.consumerID) if err != nil { @@ -266,7 +266,7 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.REQUESTED, Checkpointer: recordCheckpointer} sc.recordProcessor.Shutdown(shutdownInput) return nil - case <-time.After(1 * time.Nanosecond): + default: } } } From a35f4960a8ad1cbf1f0f7d2fbb723dbcc4d25f5f Mon Sep 17 00:00:00 2001 From: dferstay Date: Thu, 14 Nov 2019 19:53:34 -0800 Subject: [PATCH 54/90] Make Worker.Shutdown() synchronous (#58) Previously, a WaitGroup was used to track executing ShardConsumers and prevent Worker.Shutdown() from returning until all ShardConsumers had completed. Unfortunately, it was possible for Shutdown() to race with the eventLoop(), leading to a situation where Worker.Shutdown() returns while a ShardConsumer is still executing. Now, we increment the WaitGroup to keep track the eventLoop() as well as the ShardConsumers. This prevents shutdown from returning until all background go-routines have completed. 
Signed-off-by: Daniel Ferstay --- clientlibrary/worker/worker.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 132c123..f14eec0 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -115,8 +115,12 @@ func (w *Worker) Start() error { } log.Infof("Starting worker event loop.") - // entering event loop - go w.eventLoop() + w.waitGroup.Add(1) + go func() { + defer w.waitGroup.Done() + // entering event loop + w.eventLoop() + }() return nil } From 43a936cab3fd3bf9c9ad7ec0ea9ed41de15c792f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Rainone?= <476650+arl@users.noreply.github.com> Date: Fri, 29 Nov 2019 19:59:35 +0100 Subject: [PATCH 55/90] Issue 61/add shard sync jitter (#62) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add a random number generator to Worker Signed-off-by: Aurélien Rainone * Add random jitter to the worker shard sync sleep Signed-off-by: Aurélien Rainone * Add random jitter in case syncShard fails Fixes #61 Signed-off-by: Aurélien Rainone --- clientlibrary/worker/worker.go | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index f14eec0..a9e461d 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -29,6 +29,7 @@ package worker import ( "errors" + "math/rand" "sync" "time" @@ -64,6 +65,8 @@ type Worker struct { waitGroup *sync.WaitGroup done bool + rng *rand.Rand + shardStatus map[string]*par.ShardStatus } @@ -75,6 +78,9 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli mService = metrics.NoopMonitoringService{} } + // Create a pseudo-random number generator and seed it. 
+ rng := rand.New(rand.NewSource(time.Now().UnixNano())) + return &Worker{ streamName: kclConfig.StreamName, regionName: kclConfig.RegionName, @@ -83,6 +89,7 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli kclConfig: kclConfig, mService: mService, done: false, + rng: rng, } } @@ -235,7 +242,10 @@ func (w *Worker) eventLoop() { err := w.syncShard() if err != nil { log.Errorf("Error getting Kinesis shards: %+v", err) - time.Sleep(time.Duration(w.kclConfig.ShardSyncIntervalMillis) * time.Millisecond) + + // Add [-50%, +50%] random jitter to ShardSyncIntervalMillis in case of error. + shardSyncSleep := w.kclConfig.ShardSyncIntervalMillis/2 + w.rng.Intn(int(w.kclConfig.ShardSyncIntervalMillis)) + time.Sleep(time.Duration(shardSyncSleep) * time.Millisecond) continue } @@ -302,7 +312,13 @@ func (w *Worker) eventLoop() { case <-*w.stop: log.Infof("Shutting down...") return - case <-time.After(time.Duration(w.kclConfig.ShardSyncIntervalMillis) * time.Millisecond): + default: + // Add [-50%, +50%] random jitter to ShardSyncIntervalMillis. When multiple workers + // starts at the same time, this decreases the probability of them calling + // kinesis.DescribeStream at the same time, and hit the hard-limit on aws API calls. + // On average the period remains the same so that doesn't affect behavior. + shardSyncSleep := w.kclConfig.ShardSyncIntervalMillis/2 + w.rng.Intn(int(w.kclConfig.ShardSyncIntervalMillis)) + time.Sleep(time.Duration(shardSyncSleep) * time.Millisecond) } } } From df60778d89c4e3a4dfa21a31adf1c0f8e04acd44 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Fri, 29 Nov 2019 14:27:05 -0600 Subject: [PATCH 56/90] Re-org code for adding jittered delay for syncShard (#63) Minor update for the previous commit by removing duplicated code. No functional change. 
Signed-off-by: Tao Jiang --- clientlibrary/worker/worker.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index a9e461d..13eb8d9 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -239,12 +239,15 @@ func (w *Worker) eventLoop() { log := w.kclConfig.Logger for { + // Add [-50%, +50%] random jitter to ShardSyncIntervalMillis. When multiple workers + // starts at the same time, this decreases the probability of them calling + // kinesis.DescribeStream at the same time, and hit the hard-limit on aws API calls. + // On average the period remains the same so that doesn't affect behavior. + shardSyncSleep := w.kclConfig.ShardSyncIntervalMillis/2 + w.rng.Intn(int(w.kclConfig.ShardSyncIntervalMillis)) + err := w.syncShard() if err != nil { - log.Errorf("Error getting Kinesis shards: %+v", err) - - // Add [-50%, +50%] random jitter to ShardSyncIntervalMillis in case of error. - shardSyncSleep := w.kclConfig.ShardSyncIntervalMillis/2 + w.rng.Intn(int(w.kclConfig.ShardSyncIntervalMillis)) + log.Errorf("Error syncing shards: %+v, Retrying in %d ms...", err, shardSyncSleep) time.Sleep(time.Duration(shardSyncSleep) * time.Millisecond) continue } @@ -313,11 +316,7 @@ func (w *Worker) eventLoop() { log.Infof("Shutting down...") return default: - // Add [-50%, +50%] random jitter to ShardSyncIntervalMillis. When multiple workers - // starts at the same time, this decreases the probability of them calling - // kinesis.DescribeStream at the same time, and hit the hard-limit on aws API calls. - // On average the period remains the same so that doesn't affect behavior. 
- shardSyncSleep := w.kclConfig.ShardSyncIntervalMillis/2 + w.rng.Intn(int(w.kclConfig.ShardSyncIntervalMillis)) + log.Debugf("Trying to sync shards in %d ms...", shardSyncSleep) time.Sleep(time.Duration(shardSyncSleep) * time.Millisecond) } } From f1935bc0ff180fa7e476450583248f07b40056d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Rainone?= <476650+arl@users.noreply.github.com> Date: Mon, 9 Dec 2019 16:21:20 +0100 Subject: [PATCH 57/90] Fix potentially delayed shutdown on shard sync (#64) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ull-request #62 wrongly introduced an increased delay on shutdown. Before #62 the `stop` channel could be triggered while waiting for `syncShard` milliseconds, so the function could return as soon as `stop` was received. However #62 changed this behavior by sleeping in the default case: `stop` couldn't be handled right away anymore. Instead it was handled after a whole new loop, potentially delaying shutdown by minutes. (up to synchard * 1.5 ms). This commit fixes that. 
Signed-off-by: Aurélien Rainone --- clientlibrary/worker/worker.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 13eb8d9..95c1acd 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -315,9 +315,8 @@ func (w *Worker) eventLoop() { case <-*w.stop: log.Infof("Shutting down...") return - default: - log.Debugf("Trying to sync shards in %d ms...", shardSyncSleep) - time.Sleep(time.Duration(shardSyncSleep) * time.Millisecond) + case <-time.After(time.Duration(shardSyncSleep) * time.Millisecond): + log.Debugf("Waited %d ms to sync shards...", shardSyncSleep) } } } From 8f0d7bc8d85d0572f937fb2d1c218063e7ae1e88 Mon Sep 17 00:00:00 2001 From: Kevin Burns Date: Wed, 22 Jan 2020 03:26:22 -0800 Subject: [PATCH 58/90] Reduce log noise from found shards in worker event loop (#66) Signed-off-by: Kev Burns --- clientlibrary/worker/worker.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 95c1acd..e7efa66 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -238,6 +238,7 @@ func (w *Worker) newShardConsumer(shard *par.ShardStatus) *ShardConsumer { func (w *Worker) eventLoop() { log := w.kclConfig.Logger + var foundShards int for { // Add [-50%, +50%] random jitter to ShardSyncIntervalMillis. 
When multiple workers // starts at the same time, this decreases the probability of them calling @@ -252,7 +253,10 @@ func (w *Worker) eventLoop() { continue } - log.Infof("Found %d shards", len(w.shardStatus)) + if foundShards == 0 || foundShards != len(w.shardStatus) { + foundShards = len(w.shardStatus) + log.Infof("Found %d shards", foundShards) + } // Count the number of leases hold by this worker excluding the processed shard counter := 0 From 5dd53bf73139fcadda992dcd747fd8810559bb11 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Thu, 30 Jan 2020 20:06:03 -0600 Subject: [PATCH 59/90] Add nil check before shutdown (#68) Signed-off-by: Tao Jiang --- clientlibrary/worker/worker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index e7efa66..6d054fd 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -136,7 +136,7 @@ func (w *Worker) Shutdown() { log := w.kclConfig.Logger log.Infof("Worker shutdown in requested.") - if w.done { + if w.done || w.stop == nil { return } From 384482169c47ac2e7291fd12361b4626c41945c8 Mon Sep 17 00:00:00 2001 From: wgerges-discovery <55554408+wgerges-discovery@users.noreply.github.com> Date: Tue, 10 Mar 2020 12:00:57 -0400 Subject: [PATCH 60/90] Refactor `getShardIDs` (#70) * Refactor * Use `nextToken` paramter as string. Use `nextToken` paramter as string instead of pointer to match the original code base. * Log the last shard token when failing. * Use aws.StringValue to get the string pointer value. 
Co-authored-by: Wesam Gerges --- clientlibrary/worker/worker.go | 39 ++++++++++++++-------------------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 6d054fd..2fc3ab2 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -28,7 +28,6 @@ package worker import ( - "errors" "math/rand" "sync" "time" @@ -325,32 +324,27 @@ func (w *Worker) eventLoop() { } } -// List all ACTIVE shard and store them into shardStatus table +// List all shards and store them into shardStatus table // If shard has been removed, need to exclude it from cached shard status. -func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) error { +func (w *Worker) getShardIDs(nextToken string, shardInfo map[string]bool) error { log := w.kclConfig.Logger - // The default pagination limit is 100. - args := &kinesis.DescribeStreamInput{ - StreamName: aws.String(w.streamName), + + args := &kinesis.ListShardsInput{} + + // When you have a nextToken, you can't set the streamName + if nextToken != "" { + args.NextToken = aws.String(nextToken) + } else { + args.StreamName = aws.String(w.streamName) } - if startShardID != "" { - args.ExclusiveStartShardId = aws.String(startShardID) - } - - streamDesc, err := w.kc.DescribeStream(args) + listShards, err := w.kc.ListShards(args) if err != nil { - log.Errorf("Error in DescribeStream: %s Error: %+v Request: %s", w.streamName, err, args) + log.Errorf("Error in ListShards: %s Error: %+v Request: %s", w.streamName, err, args) return err } - if *streamDesc.StreamDescription.StreamStatus != "ACTIVE" { - log.Warnf("Stream %s is not active", w.streamName) - return errors.New("stream not active") - } - - var lastShardID string - for _, s := range streamDesc.StreamDescription.Shards { + for _, s := range listShards.Shards { // record avail shardId from fresh reading from Kinesis shardInfo[*s.ShardId] = true @@ -365,13 +359,12 @@ func (w 
*Worker) getShardIDs(startShardID string, shardInfo map[string]bool) err EndingSequenceNumber: aws.StringValue(s.SequenceNumberRange.EndingSequenceNumber), } } - lastShardID = *s.ShardId } - if *streamDesc.StreamDescription.HasMoreShards { - err := w.getShardIDs(lastShardID, shardInfo) + if listShards.NextToken != nil { + err := w.getShardIDs(aws.StringValue(listShards.NextToken), shardInfo) if err != nil { - log.Errorf("Error in getShardIDs: %s Error: %+v", lastShardID, err) + log.Errorf("Error in ListShards: %s Error: %+v Request: %s", w.streamName, err, args) return err } } From 10d6b28edf1236dd56e9d2c3f0d7a81787456d02 Mon Sep 17 00:00:00 2001 From: Mike Pye Date: Fri, 5 Jun 2020 03:25:06 +0100 Subject: [PATCH 61/90] Use FieldLogger interface to represent logrus logger (#75) The FieldLogger interface is satisfied by either *Logger or *Entry. Accepting this interface in place of the concrete *Logger type allows users to inject a logger with some fields already set. For example, the application developer might want all logging from the library to have a `subsystem=kcl` field. Signed-off-by: Mike Pye --- logger/logrus.go | 4 ++-- test/logger_test.go | 12 +++++++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/logger/logrus.go b/logger/logrus.go index 464f691..e4f7a67 100644 --- a/logger/logrus.go +++ b/logger/logrus.go @@ -34,12 +34,12 @@ type LogrusLogEntry struct { } type LogrusLogger struct { - logger *logrus.Logger + logger logrus.FieldLogger } // NewLogrusLogger adapts existing logrus logger to Logger interface. // The call is responsible for configuring logrus logger appropriately. 
-func NewLogrusLogger(lLogger *logrus.Logger) Logger { +func NewLogrusLogger(lLogger logrus.FieldLogger) Logger { return &LogrusLogger{ logger: lLogger, } diff --git a/test/logger_test.go b/test/logger_test.go index 502b509..2d63124 100644 --- a/test/logger_test.go +++ b/test/logger_test.go @@ -79,10 +79,20 @@ func TestLogrusLoggerWithConfig(t *testing.T) { } func TestLogrusLogger(t *testing.T) { - // adapts to Logger interface + // adapts to Logger interface from *logrus.Logger log := logger.NewLogrusLogger(logrus.StandardLogger()) contextLogger := log.WithFields(logger.Fields{"key1": "value1"}) contextLogger.Debugf("Starting with logrus") contextLogger.Infof("Logrus is awesome") } + +func TestLogrusLoggerWithFieldsAtInit(t *testing.T) { + // adapts to Logger interface from *logrus.Entry + fieldLogger := logrus.StandardLogger().WithField("key0", "value0") + log := logger.NewLogrusLogger(fieldLogger) + + contextLogger := log.WithFields(logger.Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with logrus") + contextLogger.Infof("Structured logging is awesome") +} From 499e9cf1bedaea2bca8889e8248aecca39104cf6 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Fri, 18 Dec 2020 16:52:04 -0600 Subject: [PATCH 62/90] Update aws go sdk and tests (#81) Update aws go sdk to the latest. Also, update integration tests by publishing data using both PutRecord and PutRecords. 
Signed-off-by: Tao Jiang --- clientlibrary/worker/worker.go | 14 ----- go.mod | 5 +- go.sum | 21 +++++--- test/record_processor_test.go | 88 ++++++++++++++++++++++++++++++ test/record_publisher_test.go | 99 ++++++++++++++++++++++++++++++++++ test/worker_custom_test.go | 26 ++------- test/worker_test.go | 77 +++----------------------- 7 files changed, 212 insertions(+), 118 deletions(-) create mode 100644 test/record_processor_test.go create mode 100644 test/record_publisher_test.go diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 2fc3ab2..32273c0 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -147,20 +147,6 @@ func (w *Worker) Shutdown() { log.Infof("Worker loop is complete. Exiting from worker.") } -// Publish to write some data into stream. This function is mainly used for testing purpose. -func (w *Worker) Publish(streamName, partitionKey string, data []byte) error { - log := w.kclConfig.Logger - _, err := w.kc.PutRecord(&kinesis.PutRecordInput{ - Data: data, - StreamName: aws.String(streamName), - PartitionKey: aws.String(partitionKey), - }) - if err != nil { - log.Errorf("Error in publishing data to %s/%s. 
Error: %+v", streamName, partitionKey, err) - } - return err -} - // initialize func (w *Worker) initialize() error { log := w.kclConfig.Logger diff --git a/go.mod b/go.mod index 9a4ec48..84de9ab 100644 --- a/go.mod +++ b/go.mod @@ -2,18 +2,17 @@ module github.com/vmware/vmware-go-kcl require ( github.com/BurntSushi/toml v0.3.1 // indirect - github.com/aws/aws-sdk-go v1.19.38 + github.com/aws/aws-sdk-go v1.34.8 github.com/google/uuid v1.1.1 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/prometheus/client_golang v0.9.3 github.com/prometheus/common v0.4.1 github.com/prometheus/procfs v0.0.0-20190523193104-a7aeb8df3389 // indirect github.com/sirupsen/logrus v1.4.2 - github.com/stretchr/testify v1.3.0 + github.com/stretchr/testify v1.5.1 go.uber.org/atomic v1.4.0 // indirect go.uber.org/multierr v1.2.0 // indirect go.uber.org/zap v1.11.0 - golang.org/x/net v0.0.0-20190522155817-f3200d17e092 // indirect golang.org/x/sys v0.0.0-20190528012530-adf421d2caf4 // indirect golang.org/x/text v0.3.2 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 diff --git a/go.sum b/go.sum index 19dc05c..d6c3fa2 100644 --- a/go.sum +++ b/go.sum @@ -3,8 +3,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/aws/aws-sdk-go v1.19.38 h1:WKjobgPO4Ua1ww2NJJl2/zQNreUZxvqmEzwMlRjjm9g= -github.com/aws/aws-sdk-go v1.19.38/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.34.8 h1:GDfVeXG8XQDbpOeAj7415F8qCQZwvY/k/fj+HBqUnBA= +github.com/aws/aws-sdk-go v1.34.8/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -16,6 +16,7 @@ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -23,8 +24,8 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= @@ -36,6 +37,8 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -60,8 +63,8 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4= @@ -71,8 +74,8 @@ go.uber.org/zap v1.11.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -93,3 +96,5 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXL gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/test/record_processor_test.go b/test/record_processor_test.go new file mode 100644 index 0000000..6c93d6c --- /dev/null +++ b/test/record_processor_test.go @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2020 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package test + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" + kc "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" + "testing" +) + +// Record processor factory is used to create RecordProcessor +func recordProcessorFactory(t *testing.T) kc.IRecordProcessorFactory { + return &dumpRecordProcessorFactory{t: t} +} + +// simple record processor and dump everything +type dumpRecordProcessorFactory struct { + t *testing.T +} + +func (d *dumpRecordProcessorFactory) CreateProcessor() kc.IRecordProcessor { + return &dumpRecordProcessor{ + t: d.t, + } +} + +// Create a dump record processor for printing out all data from record. 
+type dumpRecordProcessor struct { + t *testing.T + count int +} + +func (dd *dumpRecordProcessor) Initialize(input *kc.InitializationInput) { + dd.t.Logf("Processing SharId: %v at checkpoint: %v", input.ShardId, aws.StringValue(input.ExtendedSequenceNumber.SequenceNumber)) + shardID = input.ShardId + dd.count = 0 +} + +func (dd *dumpRecordProcessor) ProcessRecords(input *kc.ProcessRecordsInput) { + dd.t.Log("Processing Records...") + + // don't process empty record + if len(input.Records) == 0 { + return + } + + for _, v := range input.Records { + dd.t.Logf("Record = %s", v.Data) + assert.Equal(dd.t, specstr, string(v.Data)) + dd.count++ + } + + // checkpoint it after processing this batch + lastRecordSequenceNubmer := input.Records[len(input.Records)-1].SequenceNumber + dd.t.Logf("Checkpoint progress at: %v, MillisBehindLatest = %v", lastRecordSequenceNubmer, input.MillisBehindLatest) + input.Checkpointer.Checkpoint(lastRecordSequenceNubmer) +} + +func (dd *dumpRecordProcessor) Shutdown(input *kc.ShutdownInput) { + dd.t.Logf("Shutdown Reason: %v", aws.StringValue(kc.ShutdownReasonMessage(input.ShutdownReason))) + dd.t.Logf("Processed Record Count = %d", dd.count) + + // When the value of {@link ShutdownInput#getShutdownReason()} is + // {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you + // checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress. + if input.ShutdownReason == kc.TERMINATE { + input.Checkpointer.Checkpoint(nil) + } + + assert.True(dd.t, dd.count > 0) +} diff --git a/test/record_publisher_test.go b/test/record_publisher_test.go new file mode 100644 index 0000000..1bddbf2 --- /dev/null +++ b/test/record_publisher_test.go @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2020 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package test + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + "github.com/vmware/vmware-go-kcl/clientlibrary/utils" + "testing" +) + +const specstr = `{"name":"kube-qQyhk","networking":{"containerNetworkCidr":"10.2.0.0/16"},"orgName":"BVT-Org-cLQch","projectName":"project-tDSJd","serviceLevel":"DEVELOPER","size":{"count":1},"version":"1.8.1-4"}` + +// NewKinesisClient to create a Kinesis Client. 
+func NewKinesisClient(t *testing.T, regionName, endpoint string, credentials *credentials.Credentials) *kinesis.Kinesis{ + s, err := session.NewSession(&aws.Config{ + Region: aws.String(regionName), + Endpoint: aws.String(endpoint), + Credentials: credentials, + }) + + if err != nil { + // no need to move forward + t.Fatalf("Failed in getting Kinesis session for creating Worker: %+v", err) + } + return kinesis.New(s) +} + +// publishSomeData to put some records into Kinesis stream +func publishSomeData(t *testing.T, kc kinesisiface.KinesisAPI) { + // Put some data into stream. + t.Log("Putting data into stream using PutRecord API...") + for i := 0; i < 50; i++ { + publishRecord(t, kc) + } + t.Log("Done putting data into stream using PutRecord API.") + + // Put some data into stream using PutRecords API + t.Log("Putting data into stream using PutRecords API...") + for i := 0; i < 10; i++ { + publishRecords(t, kc) + } + t.Log("Done putting data into stream using PutRecords API.") +} + +// publishRecord to put a record into Kinesis stream using PutRecord API. +func publishRecord(t *testing.T, kc kinesisiface.KinesisAPI) { + // Use random string as partition key to ensure even distribution across shards + _, err := kc.PutRecord(&kinesis.PutRecordInput{ + Data: []byte(specstr), + StreamName: aws.String(streamName), + PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), + }) + + if err != nil { + t.Errorf("Error in PutRecord. %+v", err) + } +} + +// publishRecord to put a record into Kinesis stream using PutRecords API. 
+func publishRecords(t *testing.T, kc kinesisiface.KinesisAPI) { + // Use random string as partition key to ensure even distribution across shards + records := make([]*kinesis.PutRecordsRequestEntry, 5) + + for i:= 0; i < 5; i++ { + records[i] = &kinesis.PutRecordsRequestEntry{ + Data: []byte(specstr), + PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), + } + } + + _, err := kc.PutRecords(&kinesis.PutRecordsInput{ + Records: records, + StreamName: aws.String(streamName), + }) + + if err != nil { + t.Errorf("Error in PutRecords. %+v", err) + } +} \ No newline at end of file diff --git a/test/worker_custom_test.go b/test/worker_custom_test.go index 46f21be..85330c1 100644 --- a/test/worker_custom_test.go +++ b/test/worker_custom_test.go @@ -34,7 +34,6 @@ import ( chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" - "github.com/vmware/vmware-go-kcl/clientlibrary/utils" wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" ) @@ -65,13 +64,8 @@ func TestWorkerInjectCheckpointer(t *testing.T) { assert.Nil(t, err) // Put some data into stream. - for i := 0; i < 100; i++ { - // Use random string as partition key to ensure even distribution across shards - err := worker.Publish(streamName, utils.RandStringBytesMaskImpr(10), []byte(specstr)) - if err != nil { - t.Errorf("Errorin Publish. %+v", err) - } - } + kc := NewKinesisClient(t, regionName, kclConfig.KinesisEndpoint, kclConfig.KinesisCredentials) + publishSomeData(t, kc) // wait a few seconds before shutdown processing time.Sleep(10 * time.Second) @@ -124,13 +118,7 @@ func TestWorkerInjectKinesis(t *testing.T) { assert.Nil(t, err) // Put some data into stream. 
- for i := 0; i < 100; i++ { - // Use random string as partition key to ensure even distribution across shards - err := worker.Publish(streamName, utils.RandStringBytesMaskImpr(10), []byte(specstr)) - if err != nil { - t.Errorf("Errorin Publish. %+v", err) - } - } + publishSomeData(t, kc) // wait a few seconds before shutdown processing time.Sleep(10 * time.Second) @@ -173,13 +161,7 @@ func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) { assert.Nil(t, err) // Put some data into stream. - for i := 0; i < 100; i++ { - // Use random string as partition key to ensure even distribution across shards - err := worker.Publish(streamName, utils.RandStringBytesMaskImpr(10), []byte(specstr)) - if err != nil { - t.Errorf("Errorin Publish. %+v", err) - } - } + publishSomeData(t, kc) // wait a few seconds before shutdown processing time.Sleep(10 * time.Second) diff --git a/test/worker_test.go b/test/worker_test.go index 7cf4f87..73f6cdf 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -26,18 +26,15 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/session" "github.com/prometheus/common/expfmt" "github.com/stretchr/testify/assert" cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" - kc "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/cloudwatch" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/prometheus" - "github.com/vmware/vmware-go-kcl/clientlibrary/utils" wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" "github.com/vmware/vmware-go-kcl/logger" zaplogger "github.com/vmware/vmware-go-kcl/logger/zap" @@ -49,7 +46,6 @@ const ( workerID = "test-worker" ) -const specstr = 
`{"name":"kube-qQyhk","networking":{"containerNetworkCidr":"10.2.0.0/16"},"orgName":"BVT-Org-cLQch","projectName":"project-tDSJd","serviceLevel":"DEVELOPER","size":{"count":1},"version":"1.8.1-4"}` const metricsSystem = "cloudwatch" var shardID string @@ -137,7 +133,9 @@ func TestWorkerWithSigInt(t *testing.T) { func TestWorkerStatic(t *testing.T) { t.Skip("Need to provide actual credentials") - creds := credentials.NewStaticCredentials("AccessKeyId", "SecretAccessKey", "") + // Fill in the credentials for accessing Kinesis and DynamoDB. + // Note: use empty string as SessionToken for long-term credentials. + creds := credentials.NewStaticCredentials("AccessKeyId", "SecretAccessKey", "SessionToken") kclConfig := cfg.NewKinesisClientLibConfigWithCredential("appName", streamName, regionName, workerID, creds). WithInitialPositionInStream(cfg.LATEST). @@ -196,15 +194,8 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t }() // Put some data into stream. - t.Log("Putting data into stream.") - for i := 0; i < 100; i++ { - // Use random string as partition key to ensure even distribution across shards - err := worker.Publish(streamName, utils.RandStringBytesMaskImpr(10), []byte(specstr)) - if err != nil { - t.Errorf("Errorin Publish. 
%+v", err) - } - } - t.Log("Done putting data into stream.") + kc := NewKinesisClient(t, regionName, kclConfig.KinesisEndpoint, kclConfig.KinesisCredentials) + publishSomeData(t, kc) if triggersig { t.Log("Trigger signal SIGINT") @@ -250,60 +241,4 @@ func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service stri } return nil -} - -// Record processor factory is used to create RecordProcessor -func recordProcessorFactory(t *testing.T) kc.IRecordProcessorFactory { - return &dumpRecordProcessorFactory{t: t} -} - -// simple record processor and dump everything -type dumpRecordProcessorFactory struct { - t *testing.T -} - -func (d *dumpRecordProcessorFactory) CreateProcessor() kc.IRecordProcessor { - return &dumpRecordProcessor{ - t: d.t, - } -} - -// Create a dump record processor for printing out all data from record. -type dumpRecordProcessor struct { - t *testing.T -} - -func (dd *dumpRecordProcessor) Initialize(input *kc.InitializationInput) { - dd.t.Logf("Processing SharId: %v at checkpoint: %v", input.ShardId, aws.StringValue(input.ExtendedSequenceNumber.SequenceNumber)) - shardID = input.ShardId -} - -func (dd *dumpRecordProcessor) ProcessRecords(input *kc.ProcessRecordsInput) { - dd.t.Log("Processing Records...") - - // don't process empty record - if len(input.Records) == 0 { - return - } - - for _, v := range input.Records { - dd.t.Logf("Record = %s", v.Data) - assert.Equal(dd.t, specstr, string(v.Data)) - } - - // checkpoint it after processing this batch - lastRecordSequenceNubmer := input.Records[len(input.Records)-1].SequenceNumber - dd.t.Logf("Checkpoint progress at: %v, MillisBehindLatest = %v", lastRecordSequenceNubmer, input.MillisBehindLatest) - input.Checkpointer.Checkpoint(lastRecordSequenceNubmer) -} - -func (dd *dumpRecordProcessor) Shutdown(input *kc.ShutdownInput) { - dd.t.Logf("Shutdown Reason: %v", aws.StringValue(kc.ShutdownReasonMessage(input.ShutdownReason))) - - // When the value of {@link 
ShutdownInput#getShutdownReason()} is - // {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you - // checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress. - if input.ShutdownReason == kc.TERMINATE { - input.Checkpointer.Checkpoint(nil) - } -} +} \ No newline at end of file From f1982602ff4d075b134d87830e24d68d8a9b92d1 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Fri, 18 Dec 2020 22:11:10 -0600 Subject: [PATCH 63/90] Fix data race during checkpointing (#82) Make sure shard is locked during checkpointing. Signed-off-by: Tao Jiang --- clientlibrary/worker/record-processor-checkpointer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clientlibrary/worker/record-processor-checkpointer.go b/clientlibrary/worker/record-processor-checkpointer.go index ec44eb0..e1034b6 100644 --- a/clientlibrary/worker/record-processor-checkpointer.go +++ b/clientlibrary/worker/record-processor-checkpointer.go @@ -65,6 +65,7 @@ func (pc *PreparedCheckpointer) Checkpoint() error { func (rc *RecordProcessorCheckpointer) Checkpoint(sequenceNumber *string) error { rc.shard.Mux.Lock() + defer rc.shard.Mux.Unlock() // checkpoint the last sequence of a closed shard if sequenceNumber == nil { @@ -73,7 +74,6 @@ func (rc *RecordProcessorCheckpointer) Checkpoint(sequenceNumber *string) error rc.shard.Checkpoint = aws.StringValue(sequenceNumber) } - rc.shard.Mux.Unlock() return rc.checkpoint.CheckpointSequence(rc.shard) } From 6ff3cd1b1587fc57b69f7bfbd5b0463dec929ea7 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Sat, 19 Dec 2020 17:44:40 -0600 Subject: [PATCH 64/90] Fix retry logic for dynamodb (#83) Adding min/max retry and throttle delay for the retryer. Also, increase the max retries to 10 which is inline with dynamodb default retry count. 
Signed-off-by: Tao Jiang --- clientlibrary/checkpoint/dynamodb-checkpointer.go | 10 ++++++++-- test/record_processor_test.go | 2 +- test/record_publisher_test.go | 10 +++++----- test/worker_test.go | 4 ++-- 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 1b61f45..694522b 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -48,7 +48,7 @@ const ( ErrInvalidDynamoDBSchema = "The DynamoDB schema is invalid and may need to be re-created" // NumMaxRetries is the max times of doing retry - NumMaxRetries = 5 + NumMaxRetries = 10 ) // DynamoCheckpoint implements the Checkpoint interface using DynamoDB as a backend @@ -92,7 +92,13 @@ func (checkpointer *DynamoCheckpoint) Init() error { Region: aws.String(checkpointer.kclConfig.RegionName), Endpoint: aws.String(checkpointer.kclConfig.DynamoDBEndpoint), Credentials: checkpointer.kclConfig.DynamoDBCredentials, - Retryer: client.DefaultRetryer{NumMaxRetries: checkpointer.Retries}, + Retryer: client.DefaultRetryer{ + NumMaxRetries: checkpointer.Retries, + MinRetryDelay: client.DefaultRetryerMinRetryDelay, + MinThrottleDelay: client.DefaultRetryerMinThrottleDelay, + MaxRetryDelay: client.DefaultRetryerMaxRetryDelay, + MaxThrottleDelay: client.DefaultRetryerMaxRetryDelay, + }, }) if err != nil { diff --git a/test/record_processor_test.go b/test/record_processor_test.go index 6c93d6c..c20b901 100644 --- a/test/record_processor_test.go +++ b/test/record_processor_test.go @@ -43,7 +43,7 @@ func (d *dumpRecordProcessorFactory) CreateProcessor() kc.IRecordProcessor { // Create a dump record processor for printing out all data from record. 
type dumpRecordProcessor struct { - t *testing.T + t *testing.T count int } diff --git a/test/record_publisher_test.go b/test/record_publisher_test.go index 1bddbf2..cb61f83 100644 --- a/test/record_publisher_test.go +++ b/test/record_publisher_test.go @@ -31,7 +31,7 @@ import ( const specstr = `{"name":"kube-qQyhk","networking":{"containerNetworkCidr":"10.2.0.0/16"},"orgName":"BVT-Org-cLQch","projectName":"project-tDSJd","serviceLevel":"DEVELOPER","size":{"count":1},"version":"1.8.1-4"}` // NewKinesisClient to create a Kinesis Client. -func NewKinesisClient(t *testing.T, regionName, endpoint string, credentials *credentials.Credentials) *kinesis.Kinesis{ +func NewKinesisClient(t *testing.T, regionName, endpoint string, credentials *credentials.Credentials) *kinesis.Kinesis { s, err := session.NewSession(&aws.Config{ Region: aws.String(regionName), Endpoint: aws.String(endpoint), @@ -81,7 +81,7 @@ func publishRecords(t *testing.T, kc kinesisiface.KinesisAPI) { // Use random string as partition key to ensure even distribution across shards records := make([]*kinesis.PutRecordsRequestEntry, 5) - for i:= 0; i < 5; i++ { + for i := 0; i < 5; i++ { records[i] = &kinesis.PutRecordsRequestEntry{ Data: []byte(specstr), PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), @@ -89,11 +89,11 @@ func publishRecords(t *testing.T, kc kinesisiface.KinesisAPI) { } _, err := kc.PutRecords(&kinesis.PutRecordsInput{ - Records: records, - StreamName: aws.String(streamName), + Records: records, + StreamName: aws.String(streamName), }) if err != nil { t.Errorf("Error in PutRecords. %+v", err) } -} \ No newline at end of file +} diff --git a/test/worker_test.go b/test/worker_test.go index 73f6cdf..bbe5e51 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -51,7 +51,7 @@ const metricsSystem = "cloudwatch" var shardID string func TestWorker(t *testing.T) { - // At miminal. use standard logrus logger + // At minimal. 
use standard logrus logger // log := logger.NewLogrusLogger(logrus.StandardLogger()) // // In order to have precise control over logging. Use logger with config @@ -241,4 +241,4 @@ func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service stri } return nil -} \ No newline at end of file +} From 1044485392e97f28691ea72ec7c5ac41e600cc41 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Wed, 23 Dec 2020 09:47:47 -0600 Subject: [PATCH 65/90] Support Kinesis aggregation format (#84) Add support for Kinesis aggregation format to consume record published by KPL. Note: current implementation need to checkpoint the whole batch of the de-aggregated records instead of just portion of them. Add cache entry and exit time. Signed-off-by: Tao Jiang --- clientlibrary/checkpoint/checkpointer.go | 12 +- .../checkpoint/dynamodb-checkpointer.go | 40 ++--- .../checkpoint/dynamodb-checkpointer_test.go | 26 +-- clientlibrary/common/errors.go | 165 ------------------ clientlibrary/config/config.go | 9 - clientlibrary/interfaces/inputs.go | 34 +++- .../worker/record-processor-checkpointer.go | 2 +- clientlibrary/worker/shard-consumer.go | 35 ++-- clientlibrary/worker/worker.go | 6 +- go.mod | 2 + go.sum | 6 + test/record_processor_test.go | 10 +- test/record_publisher_test.go | 64 +++++++ test/worker_test.go | 2 +- 14 files changed, 166 insertions(+), 247 deletions(-) delete mode 100644 clientlibrary/common/errors.go diff --git a/clientlibrary/checkpoint/checkpointer.go b/clientlibrary/checkpoint/checkpointer.go index b3af0b7..4ff6fd2 100644 --- a/clientlibrary/checkpoint/checkpointer.go +++ b/clientlibrary/checkpoint/checkpointer.go @@ -33,14 +33,14 @@ import ( ) const ( - LEASE_KEY_KEY = "ShardID" - LEASE_OWNER_KEY = "AssignedTo" - LEASE_TIMEOUT_KEY = "LeaseTimeout" - CHECKPOINT_SEQUENCE_NUMBER_KEY = "Checkpoint" - PARENT_SHARD_ID_KEY = "ParentShardId" + LeaseKeyKey = "ShardID" + LeaseOwnerKey = "AssignedTo" + LeaseTimeoutKey = "LeaseTimeout" + SequenceNumberKey = 
"Checkpoint" + ParentShardIdKey = "ParentShardId" // We've completely processed all records in this shard. - SHARD_END = "SHARD_END" + ShardEnd = "SHARD_END" // ErrLeaseNotAquired is returned when we failed to get a lock on the shard ErrLeaseNotAquired = "Lease is already held by another node" diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 694522b..1145454 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -125,8 +125,8 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign return err } - assignedVar, assignedToOk := currentCheckpoint[LEASE_OWNER_KEY] - leaseVar, leaseTimeoutOk := currentCheckpoint[LEASE_TIMEOUT_KEY] + assignedVar, assignedToOk := currentCheckpoint[LeaseOwnerKey] + leaseVar, leaseTimeoutOk := currentCheckpoint[LeaseTimeoutKey] var conditionalExpression string var expressionAttributeValues map[string]*dynamodb.AttributeValue @@ -161,23 +161,23 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign } marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ - LEASE_KEY_KEY: { + LeaseKeyKey: { S: aws.String(shard.ID), }, - LEASE_OWNER_KEY: { + LeaseOwnerKey: { S: aws.String(newAssignTo), }, - LEASE_TIMEOUT_KEY: { + LeaseTimeoutKey: { S: aws.String(newLeaseTimeoutString), }, } if len(shard.ParentShardId) > 0 { - marshalledCheckpoint[PARENT_SHARD_ID_KEY] = &dynamodb.AttributeValue{S: aws.String(shard.ParentShardId)} + marshalledCheckpoint[ParentShardIdKey] = &dynamodb.AttributeValue{S: aws.String(shard.ParentShardId)} } if shard.Checkpoint != "" { - marshalledCheckpoint[CHECKPOINT_SEQUENCE_NUMBER_KEY] = &dynamodb.AttributeValue{ + marshalledCheckpoint[SequenceNumberKey] = &dynamodb.AttributeValue{ S: aws.String(shard.Checkpoint), } } @@ -204,22 +204,22 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign func 
(checkpointer *DynamoCheckpoint) CheckpointSequence(shard *par.ShardStatus) error { leaseTimeout := shard.LeaseTimeout.UTC().Format(time.RFC3339) marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ - LEASE_KEY_KEY: { + LeaseKeyKey: { S: aws.String(shard.ID), }, - CHECKPOINT_SEQUENCE_NUMBER_KEY: { + SequenceNumberKey: { S: aws.String(shard.Checkpoint), }, - LEASE_OWNER_KEY: { + LeaseOwnerKey: { S: aws.String(shard.AssignedTo), }, - LEASE_TIMEOUT_KEY: { + LeaseTimeoutKey: { S: aws.String(leaseTimeout), }, } if len(shard.ParentShardId) > 0 { - marshalledCheckpoint[PARENT_SHARD_ID_KEY] = &dynamodb.AttributeValue{S: &shard.ParentShardId} + marshalledCheckpoint[ParentShardIdKey] = &dynamodb.AttributeValue{S: &shard.ParentShardId} } return checkpointer.saveItem(marshalledCheckpoint) @@ -232,7 +232,7 @@ func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *par.ShardStatus) er return err } - sequenceID, ok := checkpoint[CHECKPOINT_SEQUENCE_NUMBER_KEY] + sequenceID, ok := checkpoint[SequenceNumberKey] if !ok { return ErrSequenceIDNotFound } @@ -241,7 +241,7 @@ func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *par.ShardStatus) er defer shard.Mux.Unlock() shard.Checkpoint = aws.StringValue(sequenceID.S) - if assignedTo, ok := checkpoint[LEASE_OWNER_KEY]; ok { + if assignedTo, ok := checkpoint[LeaseOwnerKey]; ok { shard.AssignedTo = aws.StringValue(assignedTo.S) } return nil @@ -265,11 +265,11 @@ func (checkpointer *DynamoCheckpoint) RemoveLeaseOwner(shardID string) error { input := &dynamodb.UpdateItemInput{ TableName: aws.String(checkpointer.TableName), Key: map[string]*dynamodb.AttributeValue{ - LEASE_KEY_KEY: { + LeaseKeyKey: { S: aws.String(shardID), }, }, - UpdateExpression: aws.String("remove " + LEASE_OWNER_KEY), + UpdateExpression: aws.String("remove " + LeaseOwnerKey), } _, err := checkpointer.svc.UpdateItem(input) @@ -281,13 +281,13 @@ func (checkpointer *DynamoCheckpoint) createTable() error { input := &dynamodb.CreateTableInput{ 
AttributeDefinitions: []*dynamodb.AttributeDefinition{ { - AttributeName: aws.String(LEASE_KEY_KEY), + AttributeName: aws.String(LeaseKeyKey), AttributeType: aws.String("S"), }, }, KeySchema: []*dynamodb.KeySchemaElement{ { - AttributeName: aws.String(LEASE_KEY_KEY), + AttributeName: aws.String(LeaseKeyKey), KeyType: aws.String("HASH"), }, }, @@ -334,7 +334,7 @@ func (checkpointer *DynamoCheckpoint) getItem(shardID string) (map[string]*dynam item, err := checkpointer.svc.GetItem(&dynamodb.GetItemInput{ TableName: aws.String(checkpointer.TableName), Key: map[string]*dynamodb.AttributeValue{ - LEASE_KEY_KEY: { + LeaseKeyKey: { S: aws.String(shardID), }, }, @@ -346,7 +346,7 @@ func (checkpointer *DynamoCheckpoint) removeItem(shardID string) error { _, err := checkpointer.svc.DeleteItem(&dynamodb.DeleteItemInput{ TableName: aws.String(checkpointer.TableName), Key: map[string]*dynamodb.AttributeValue{ - LEASE_KEY_KEY: { + LeaseKeyKey: { S: aws.String(shardID), }, }, diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go index 6eaead5..ec3894b 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go @@ -131,7 +131,7 @@ func TestGetLeaseAquired(t *testing.T) { t.Errorf("Lease not aquired after timeout %s", err) } - id, ok := svc.item[CHECKPOINT_SEQUENCE_NUMBER_KEY] + id, ok := svc.item[SequenceNumberKey] if !ok { t.Error("Expected checkpoint to be set by GetLease") } else if *id.S != "deadbeef" { @@ -172,24 +172,24 @@ func (m *mockDynamoDB) DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.De func (m *mockDynamoDB) PutItem(input *dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) { item := input.Item - if shardID, ok := item[LEASE_KEY_KEY]; ok { - m.item[LEASE_KEY_KEY] = shardID + if shardID, ok := item[LeaseKeyKey]; ok { + m.item[LeaseKeyKey] = shardID } - if owner, ok := item[LEASE_OWNER_KEY]; ok { - 
m.item[LEASE_OWNER_KEY] = owner + if owner, ok := item[LeaseOwnerKey]; ok { + m.item[LeaseOwnerKey] = owner } - if timeout, ok := item[LEASE_TIMEOUT_KEY]; ok { - m.item[LEASE_TIMEOUT_KEY] = timeout + if timeout, ok := item[LeaseTimeoutKey]; ok { + m.item[LeaseTimeoutKey] = timeout } - if checkpoint, ok := item[CHECKPOINT_SEQUENCE_NUMBER_KEY]; ok { - m.item[CHECKPOINT_SEQUENCE_NUMBER_KEY] = checkpoint + if checkpoint, ok := item[SequenceNumberKey]; ok { + m.item[SequenceNumberKey] = checkpoint } - if parent, ok := item[PARENT_SHARD_ID_KEY]; ok { - m.item[PARENT_SHARD_ID_KEY] = parent + if parent, ok := item[ParentShardIdKey]; ok { + m.item[ParentShardIdKey] = parent } return nil, nil @@ -204,8 +204,8 @@ func (m *mockDynamoDB) GetItem(input *dynamodb.GetItemInput) (*dynamodb.GetItemO func (m *mockDynamoDB) UpdateItem(input *dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) { exp := input.UpdateExpression - if aws.StringValue(exp) == "remove "+LEASE_OWNER_KEY { - delete(m.item, LEASE_OWNER_KEY) + if aws.StringValue(exp) == "remove "+LeaseOwnerKey { + delete(m.item, LeaseOwnerKey) } return nil, nil diff --git a/clientlibrary/common/errors.go b/clientlibrary/common/errors.go deleted file mode 100644 index da32eef..0000000 --- a/clientlibrary/common/errors.go +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright (c) 2018 VMware, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and - * associated documentation files (the "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is furnished to do - * so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all copies or substantial - * portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT - * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -package common - -import ( - "fmt" - "net/http" -) - -// ErrorCode is unified definition of numerical error codes -type ErrorCode int32 - -// pre-defined error codes -const ( - // System Wide 41000 - 42000 - KinesisClientLibError ErrorCode = 41000 - - // KinesisClientLibrary Retryable Errors 41001 - 41100 - KinesisClientLibRetryableError ErrorCode = 41001 - - KinesisClientLibIOError ErrorCode = 41002 - BlockedOnParentShardError ErrorCode = 41003 - KinesisClientLibDependencyError ErrorCode = 41004 - ThrottlingError ErrorCode = 41005 - - // KinesisClientLibrary NonRetryable Errors 41100 - 41200 - KinesisClientLibNonRetryableException ErrorCode = 41100 - - InvalidStateError ErrorCode = 41101 - ShutdownError ErrorCode = 41102 - - // Kinesis Lease Errors 41200 - 41300 - LeasingError ErrorCode = 41200 - - LeasingInvalidStateError ErrorCode = 41201 - LeasingDependencyError ErrorCode = 41202 - LeasingProvisionedThroughputError ErrorCode = 41203 - - // Misc Errors 41300 - 41400 - // NotImplemented - KinesisClientLibNotImplemented ErrorCode = 41301 - - // Error indicates passing illegal or inappropriate argument - IllegalArgumentError ErrorCode = 41302 -) - -var errorMap = map[ErrorCode]ClientLibraryError{ - KinesisClientLibError: {ErrorCode: KinesisClientLibError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Top level error of Kinesis Client Library"}, - - // Retryable - KinesisClientLibRetryableError: {ErrorCode: KinesisClientLibRetryableError, Retryable: true, Status: 
http.StatusServiceUnavailable, Msg: "Retryable exceptions (e.g. transient errors). The request/operation is expected to succeed upon (back off and) retry."}, - KinesisClientLibIOError: {ErrorCode: KinesisClientLibIOError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Error in reading/writing information (e.g. shard information from Kinesis may not be current/complete)."}, - BlockedOnParentShardError: {ErrorCode: BlockedOnParentShardError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Cannot start processing data for a shard because the data from the parent shard has not been completely processed (yet)."}, - KinesisClientLibDependencyError: {ErrorCode: KinesisClientLibDependencyError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Cannot talk to its dependencies (e.g. fetching data from Kinesis, DynamoDB table reads/writes)."}, - ThrottlingError: {ErrorCode: ThrottlingError, Retryable: true, Status: http.StatusTooManyRequests, Msg: "Requests are throttled by a service (e.g. DynamoDB when storing a checkpoint)."}, - - // Non-Retryable - KinesisClientLibNonRetryableException: {ErrorCode: KinesisClientLibNonRetryableException, Retryable: false, Status: http.StatusServiceUnavailable, Msg: "Non-retryable exceptions. Simply retrying the same request/operation is not expected to succeed."}, - InvalidStateError: {ErrorCode: InvalidStateError, Retryable: false, Status: http.StatusServiceUnavailable, Msg: "Kinesis Library has issues with internal state (e.g. DynamoDB table is not found)."}, - ShutdownError: {ErrorCode: ShutdownError, Retryable: false, Status: http.StatusServiceUnavailable, Msg: "The RecordProcessor instance has been shutdown (e.g. 
and attempts a checkpiont)."}, - - // Leasing - LeasingError: {ErrorCode: LeasingError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Top-level error type for the leasing code."}, - LeasingInvalidStateError: {ErrorCode: LeasingInvalidStateError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Error in a lease operation has failed because DynamoDB is an invalid state"}, - LeasingDependencyError: {ErrorCode: LeasingDependencyError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Error in a lease operation has failed because a dependency of the leasing system has failed."}, - LeasingProvisionedThroughputError: {ErrorCode: LeasingProvisionedThroughputError, Retryable: false, Status: http.StatusServiceUnavailable, Msg: "Error in a lease operation has failed due to lack of provisioned throughput for a DynamoDB table."}, - - // IllegalArgumentError - IllegalArgumentError: {ErrorCode: IllegalArgumentError, Retryable: false, Status: http.StatusBadRequest, Msg: "Error indicates that a method has been passed an illegal or inappropriate argument."}, - - // Not Implemented - KinesisClientLibNotImplemented: {ErrorCode: KinesisClientLibNotImplemented, Retryable: false, Status: http.StatusNotImplemented, Msg: "Not Implemented"}, -} - -// Message returns the message of the error code -func (c ErrorCode) Message() string { - return errorMap[c].Msg -} - -// MakeErr makes an error with default message -func (c ErrorCode) MakeErr() *ClientLibraryError { - e := errorMap[c] - return &e -} - -// MakeError makes an error with message and data -func (c ErrorCode) MakeError(detail string) error { - e := errorMap[c] - return e.WithDetail(detail) -} - -// ClientLibraryError is unified error -type ClientLibraryError struct { - // ErrorCode is the numerical error code. - ErrorCode `json:"code"` - // Retryable is a bool flag to indicate the whether the error is retryable or not. - Retryable bool `json:"tryable"` - // Status is the HTTP status code. 
- Status int `json:"status"` - // Msg provides a terse description of the error. Its value is defined in errorMap. - Msg string `json:"msg"` - // Detail provides a detailed description of the error. Its value is set using WithDetail. - Detail string `json:"detail"` -} - -// Error implements error -func (e *ClientLibraryError) Error() string { - var prefix string - if e.Retryable { - prefix = "Retryable" - } else { - prefix = "NonRetryable" - } - msg := fmt.Sprintf("%v Error [%d]: %s", prefix, int32(e.ErrorCode), e.Msg) - if e.Detail != "" { - msg = fmt.Sprintf("%s, detail: %s", msg, e.Detail) - } - return msg -} - -// WithMsg overwrites the default error message -func (e *ClientLibraryError) WithMsg(format string, v ...interface{}) *ClientLibraryError { - e.Msg = fmt.Sprintf(format, v...) - return e -} - -// WithDetail adds a detailed message to error -func (e *ClientLibraryError) WithDetail(format string, v ...interface{}) *ClientLibraryError { - if len(e.Detail) == 0 { - e.Detail = fmt.Sprintf(format, v...) - } else { - e.Detail += ", " + fmt.Sprintf(format, v...) - } - return e -} - -// WithCause adds CauseBy to error -func (e *ClientLibraryError) WithCause(err error) *ClientLibraryError { - if err != nil { - // Store error message in Detail, so the info can be preserved - // when CascadeError is marshaled to json. - if len(e.Detail) == 0 { - e.Detail = err.Error() - } else { - e.Detail += ", cause: " + err.Error() - } - } - return e -} diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index c517645..91d73b8 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -121,15 +121,6 @@ const ( // The amount of milliseconds to wait before graceful shutdown forcefully terminates. DEFAULT_SHUTDOWN_GRACE_MILLIS = 5000 - - // The size of the thread pool to create for the lease renewer to use. - DEFAULT_MAX_LEASE_RENEWAL_THREADS = 20 - - // The sleep time between two listShards calls from the proxy when throttled. 
- DEFAULT_LIST_SHARDS_BACKOFF_TIME_IN_MILLIS = 1500 - - // The number of times the Proxy will retry listShards call when throttled. - DEFAULT_MAX_LIST_SHARDS_RETRY_ATTEMPTS = 50 ) type ( diff --git a/clientlibrary/interfaces/inputs.go b/clientlibrary/interfaces/inputs.go index 84bf03f..eb12387 100644 --- a/clientlibrary/interfaces/inputs.go +++ b/clientlibrary/interfaces/inputs.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 VMware, Inc. + * Copyright (c) 2020 VMware, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and * associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -48,6 +48,7 @@ const ( * instead depend on a different interface for backward compatibility. */ REQUESTED ShutdownReason = iota + 1 + /** * Terminate processing for this RecordProcessor (resharding use case). * Indicates that the shard is closed and all records from the shard have been delivered to the application. @@ -55,6 +56,7 @@ const ( * from this shard and processing of child shards can be started. */ TERMINATE + /** * Processing will be moved to a different record processor (fail over, load balancing use cases). * Applications SHOULD NOT checkpoint their progress (as another record processor may have already started @@ -76,22 +78,36 @@ type ( ShutdownReason int InitializationInput struct { - ShardId string - ExtendedSequenceNumber *ExtendedSequenceNumber - PendingCheckpointSequenceNumber *ExtendedSequenceNumber + // The shardId that the record processor is being initialized for. + ShardId string + + // The last extended sequence number that was successfully checkpointed by the previous record processor. + ExtendedSequenceNumber *ExtendedSequenceNumber } ProcessRecordsInput struct { - CacheEntryTime *time.Time - CacheExitTime *time.Time - Records []*ks.Record - Checkpointer IRecordProcessorCheckpointer + // The time that this batch of records was received by the KCL. 
+ CacheEntryTime *time.Time + + // The time that this batch of records was prepared to be provided to the RecordProcessor. + CacheExitTime *time.Time + + // The records received from Kinesis. These records may have been de-aggregated if they were published by the KPL. + Records []*ks.Record + + // A checkpointer that the RecordProcessor can use to checkpoint its progress. + Checkpointer IRecordProcessorCheckpointer + + // How far behind this batch of records was when received from Kinesis. MillisBehindLatest int64 } ShutdownInput struct { + // ShutdownReason shows why RecordProcessor is going to be shutdown. ShutdownReason ShutdownReason - Checkpointer IRecordProcessorCheckpointer + + // Checkpointer is used to record the current progress. + Checkpointer IRecordProcessorCheckpointer } ) diff --git a/clientlibrary/worker/record-processor-checkpointer.go b/clientlibrary/worker/record-processor-checkpointer.go index e1034b6..b6baee6 100644 --- a/clientlibrary/worker/record-processor-checkpointer.go +++ b/clientlibrary/worker/record-processor-checkpointer.go @@ -69,7 +69,7 @@ func (rc *RecordProcessorCheckpointer) Checkpoint(sequenceNumber *string) error // checkpoint the last sequence of a closed shard if sequenceNumber == nil { - rc.shard.Checkpoint = chk.SHARD_END + rc.shard.Checkpoint = chk.ShardEnd } else { rc.shard.Checkpoint = aws.StringValue(sequenceNumber) } diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 476ab01..4cb963f 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -36,6 +36,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + deagg "github.com/awslabs/kinesis-aggregation/go/deaggregator" chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" "github.com/vmware/vmware-go-kcl/clientlibrary/config" @@ -47,19 +48,7 @@ import ( const ( // This is the 
initial state of a shard consumer. This causes the consumer to remain blocked until the all // parent shards have been completed. - WAITING_ON_PARENT_SHARDS ShardConsumerState = iota + 1 - - // This state is responsible for initializing the record processor with the shard information. - INITIALIZING - - // - PROCESSING - - SHUTDOWN_REQUESTED - - SHUTTING_DOWN - - SHUTDOWN_COMPLETE + WaitingOnParentShards ShardConsumerState = iota + 1 // ErrCodeKMSThrottlingException is defined in the API Reference https://docs.aws.amazon.com/sdk-for-go/api/service/kinesis/#Kinesis.GetRecords // But it's not a constant? @@ -215,9 +204,21 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { // reset the retry count after success retriedErrors = 0 + log.Debugf("Received %d original records.", len(getResp.Records)) + + // De-aggregate the records if they were published by the KPL. + dars := make([]*kinesis.Record, 0) + dars, err = deagg.DeaggregateRecords(getResp.Records) + + if err != nil { + // The error is caused by bad KPL publisher and just skip the bad records + // instead of being stuck here. 
+ log.Errorf("Error in de-aggregating KPL records: %+v", err) + } + // IRecordProcessorCheckpointer input := &kcl.ProcessRecordsInput{ - Records: getResp.Records, + Records: dars, MillisBehindLatest: aws.Int64Value(getResp.MillisBehindLatest), Checkpointer: recordCheckpointer, } @@ -226,7 +227,7 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { recordBytes := int64(0) log.Debugf("Received %d records, MillisBehindLatest: %v", recordLength, input.MillisBehindLatest) - for _, r := range getResp.Records { + for _, r := range dars { recordBytes += int64(len(r.Data)) } @@ -234,6 +235,8 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { processRecordsStartTime := time.Now() // Delivery the events to the record processor + input.CacheEntryTime = &getRecordsStartTime + input.CacheExitTime = &processRecordsStartTime sc.recordProcessor.ProcessRecords(input) // Convert from nanoseconds to milliseconds @@ -288,7 +291,7 @@ func (sc *ShardConsumer) waitOnParentShard(shard *par.ShardStatus) error { } // Parent shard is finished. 
- if pshard.Checkpoint == chk.SHARD_END { + if pshard.Checkpoint == chk.ShardEnd { return nil } diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 32273c0..4b42847 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -215,7 +215,7 @@ func (w *Worker) newShardConsumer(shard *par.ShardStatus) *ShardConsumer { consumerID: w.workerID, stop: w.stop, mService: w.mService, - state: WAITING_ON_PARENT_SHARDS, + state: WaitingOnParentShards, } } @@ -246,7 +246,7 @@ func (w *Worker) eventLoop() { // Count the number of leases hold by this worker excluding the processed shard counter := 0 for _, shard := range w.shardStatus { - if shard.GetLeaseOwner() == w.workerID && shard.Checkpoint != chk.SHARD_END { + if shard.GetLeaseOwner() == w.workerID && shard.Checkpoint != chk.ShardEnd { counter++ } } @@ -270,7 +270,7 @@ func (w *Worker) eventLoop() { } // The shard is closed and we have processed all records - if shard.Checkpoint == chk.SHARD_END { + if shard.Checkpoint == chk.ShardEnd { continue } diff --git a/go.mod b/go.mod index 84de9ab..c8fa96f 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,8 @@ module github.com/vmware/vmware-go-kcl require ( github.com/BurntSushi/toml v0.3.1 // indirect github.com/aws/aws-sdk-go v1.34.8 + github.com/awslabs/kinesis-aggregation/go v0.0.0-20201211133042-142dfe1d7a6d + github.com/golang/protobuf v1.3.1 github.com/google/uuid v1.1.1 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/prometheus/client_golang v0.9.3 diff --git a/go.sum b/go.sum index d6c3fa2..60c4332 100644 --- a/go.sum +++ b/go.sum @@ -3,8 +3,11 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units 
v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.34.8 h1:GDfVeXG8XQDbpOeAj7415F8qCQZwvY/k/fj+HBqUnBA= github.com/aws/aws-sdk-go v1.34.8/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20201211133042-142dfe1d7a6d h1:kGtsYh3+yYsCafn/pp/j/SMbc2bOiWJBxxkzCnAQWF4= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20201211133042-142dfe1d7a6d/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -18,12 +21,14 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= 
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -63,6 +68,7 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= diff --git a/test/record_processor_test.go b/test/record_processor_test.go index c20b901..f392efb 100644 --- a/test/record_processor_test.go +++ b/test/record_processor_test.go @@ -67,10 +67,12 @@ func (dd *dumpRecordProcessor) ProcessRecords(input *kc.ProcessRecordsInput) { dd.count++ } - // checkpoint it after processing this batch - lastRecordSequenceNubmer := input.Records[len(input.Records)-1].SequenceNumber - dd.t.Logf("Checkpoint progress at: %v, MillisBehindLatest = %v", lastRecordSequenceNubmer, input.MillisBehindLatest) - input.Checkpointer.Checkpoint(lastRecordSequenceNubmer) + // checkpoint it after processing this batch. + // Especially, for processing de-aggregated KPL records, checkpointing has to happen at the end of batch + // because de-aggregated records share the same sequence number. 
+ lastRecordSequenceNumber := input.Records[len(input.Records)-1].SequenceNumber + dd.t.Logf("Checkpoint progress at: %v, MillisBehindLatest = %v", lastRecordSequenceNumber, input.MillisBehindLatest) + input.Checkpointer.Checkpoint(lastRecordSequenceNumber) } func (dd *dumpRecordProcessor) Shutdown(input *kc.ShutdownInput) { diff --git a/test/record_publisher_test.go b/test/record_publisher_test.go index cb61f83..f948fc1 100644 --- a/test/record_publisher_test.go +++ b/test/record_publisher_test.go @@ -19,12 +19,17 @@ package test import ( + "crypto/md5" + "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + rec "github.com/awslabs/kinesis-aggregation/go/records" + "github.com/golang/protobuf/proto" "github.com/vmware/vmware-go-kcl/clientlibrary/utils" + "testing" ) @@ -60,6 +65,13 @@ func publishSomeData(t *testing.T, kc kinesisiface.KinesisAPI) { publishRecords(t, kc) } t.Log("Done putting data into stream using PutRecords API.") + + // Put some data into stream using KPL Aggregate Record format + t.Log("Putting data into stream using KPL Aggregate Record ...") + for i := 0; i < 10; i++ { + publishAggregateRecord(t, kc) + } + t.Log("Done putting data into stream using KPL Aggregate Record.") } // publishRecord to put a record into Kinesis stream using PutRecord API. @@ -97,3 +109,55 @@ func publishRecords(t *testing.T, kc kinesisiface.KinesisAPI) { t.Errorf("Error in PutRecords. %+v", err) } } + +// publishRecord to put a record into Kinesis stream using PutRecord API. 
+func publishAggregateRecord(t *testing.T, kc kinesisiface.KinesisAPI) { + data := generateAggregateRecord(5, specstr) + // Use random string as partition key to ensure even distribution across shards + _, err := kc.PutRecord(&kinesis.PutRecordInput{ + Data: data, + StreamName: aws.String(streamName), + PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), + }) + + if err != nil { + t.Errorf("Error in PutRecord. %+v", err) + } +} + +// generateAggregateRecord generates an aggregate record in the correct AWS-specified format used by KPL. +// https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md +// copy from: https://github.com/awslabs/kinesis-aggregation/blob/master/go/deaggregator/deaggregator_test.go +func generateAggregateRecord(numRecords int, content string) []byte { + aggr := &rec.AggregatedRecord{} + // Start with the magic header + aggRecord := []byte("\xf3\x89\x9a\xc2") + partKeyTable := make([]string, 0) + + // Create proto record with numRecords length + for i := 0; i < numRecords; i++ { + var partKey uint64 + var hashKey uint64 + partKey = uint64(i) + hashKey = uint64(i) * uint64(10) + r := &rec.Record{ + PartitionKeyIndex: &partKey, + ExplicitHashKeyIndex: &hashKey, + Data: []byte(content), + Tags: make([]*rec.Tag, 0), + } + + aggr.Records = append(aggr.Records, r) + partKeyVal := fmt.Sprint(i) + partKeyTable = append(partKeyTable, partKeyVal) + } + + aggr.PartitionKeyTable = partKeyTable + // Marshal to protobuf record, create md5 sum from proto record + // and append both to aggRecord with magic header + data, _ := proto.Marshal(aggr) + md5Hash := md5.Sum(data) + aggRecord = append(aggRecord, data...) + aggRecord = append(aggRecord, md5Hash[:]...) 
+ return aggRecord +} diff --git a/test/worker_test.go b/test/worker_test.go index bbe5e51..e79f115 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -69,7 +69,7 @@ func TestWorker(t *testing.T) { kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). - WithMaxRecords(10). + WithMaxRecords(8). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). WithFailoverTimeMillis(300000). From adb264717bf0aa6f22a6adba2a0f0dfc435b5eaf Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Wed, 23 Dec 2020 13:22:01 -0600 Subject: [PATCH 66/90] Fix naming convention (#85) Minor fix on constant naming convention. Signed-off-by: Tao Jiang --- clientlibrary/checkpoint/checkpointer.go | 4 +-- .../checkpoint/dynamodb-checkpointer.go | 4 +-- .../checkpoint/dynamodb-checkpointer_test.go | 2 +- clientlibrary/config/config.go | 34 +++++++++--------- clientlibrary/config/kcl-config.go | 36 +++++++++---------- clientlibrary/worker/shard-consumer.go | 4 +-- clientlibrary/worker/worker.go | 2 +- test/record_processor_test.go | 4 ++- 8 files changed, 46 insertions(+), 44 deletions(-) diff --git a/clientlibrary/checkpoint/checkpointer.go b/clientlibrary/checkpoint/checkpointer.go index 4ff6fd2..2345533 100644 --- a/clientlibrary/checkpoint/checkpointer.go +++ b/clientlibrary/checkpoint/checkpointer.go @@ -42,8 +42,8 @@ const ( // We've completely processed all records in this shard. 
ShardEnd = "SHARD_END" - // ErrLeaseNotAquired is returned when we failed to get a lock on the shard - ErrLeaseNotAquired = "Lease is already held by another node" + // ErrLeaseNotAcquired is returned when we failed to get a lock on the shard + ErrLeaseNotAcquired = "lease is already held by another node" ) // Checkpointer handles checkpointing when a record has been processed diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 1145454..fd6751e 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -142,7 +142,7 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign } if time.Now().UTC().Before(currentLeaseTimeout) && assignedTo != newAssignTo { - return errors.New(ErrLeaseNotAquired) + return errors.New(ErrLeaseNotAcquired) } checkpointer.log.Debugf("Attempting to get a lock for shard: %s, leaseTimeout: %s, assignedTo: %s", shard.ID, currentLeaseTimeout, assignedTo) @@ -186,7 +186,7 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == dynamodb.ErrCodeConditionalCheckFailedException { - return errors.New(ErrLeaseNotAquired) + return errors.New(ErrLeaseNotAcquired) } } return err diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go index ec3894b..55ec973 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go @@ -85,7 +85,7 @@ func TestGetLeaseNotAquired(t *testing.T) { Checkpoint: "", Mux: &sync.Mutex{}, }, "ijkl-mnop") - if err == nil || err.Error() != ErrLeaseNotAquired { + if err == nil || err.Error() != ErrLeaseNotAcquired { t.Errorf("Got a lease when it was already held by abcd-efgh: %s", err) } } diff --git a/clientlibrary/config/config.go 
b/clientlibrary/config/config.go index 91d73b8..5337269 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -55,72 +55,72 @@ const ( // The location in the shard from which the KinesisClientLibrary will start fetching records from // when the application starts for the first time and there is no checkpoint for the shard. - DEFAULT_INITIAL_POSITION_IN_STREAM = LATEST + DefaultInitialPositionInStream = LATEST // Fail over time in milliseconds. A worker which does not renew it's lease within this time interval // will be regarded as having problems and it's shards will be assigned to other workers. // For applications that have a large number of shards, this may be set to a higher number to reduce // the number of DynamoDB IOPS required for tracking leases. - DEFAULT_FAILOVER_TIME_MILLIS = 10000 + DefaultFailoverTimeMillis = 10000 // Period before the end of lease during which a lease is refreshed by the owner. - DEFAULT_LEASE_REFRESH_PERIOD_MILLIS = 5000 + DefaultLeaseRefreshPeriodMillis = 5000 // Max records to fetch from Kinesis in a single GetRecords call. - DEFAULT_MAX_RECORDS = 10000 + DefaultMaxRecords = 10000 // The default value for how long the {@link ShardConsumer} should sleep if no records are returned // from the call to - DEFAULT_IDLETIME_BETWEEN_READS_MILLIS = 1000 + DefaultIdletimeBetweenReadsMillis = 1000 // Don't call processRecords() on the record processor for empty record lists. - DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST = false + DefaultDontCallProcessRecordsForEmptyRecordList = false // Interval in milliseconds between polling to check for parent shard completion. // Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on // completion of parent shards). - DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS = 10000 + DefaultParentShardPollIntervalMillis = 10000 // Shard sync interval in milliseconds - e.g. wait for this long between shard sync tasks. 
- DEFAULT_SHARD_SYNC_INTERVAL_MILLIS = 60000 + DefaultShardSyncIntervalMillis = 60000 // Cleanup leases upon shards completion (don't wait until they expire in Kinesis). // Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by // default we try to delete the ones we don't need any longer. - DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION = true + DefaultCleanupLeasesUponShardsCompletion = true // Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures). - DEFAULT_TASK_BACKOFF_TIME_MILLIS = 500 + DefaultTaskBackoffTimeMillis = 500 // KCL will validate client provided sequence numbers with a call to Amazon Kinesis before // checkpointing for calls to {@link RecordProcessorCheckpointer#checkpoint(String)} by default. - DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING = true + DefaultValidateSequenceNumberBeforeCheckpointing = true // The max number of leases (shards) this worker should process. // This can be useful to avoid overloading (and thrashing) a worker when a host has resource constraints // or during deployment. // NOTE: Setting this to a low value can cause data loss if workers are not able to pick up all shards in the // stream due to the max limit. - DEFAULT_MAX_LEASES_FOR_WORKER = math.MaxInt16 + DefaultMaxLeasesForWorker = math.MaxInt16 // Max leases to steal from another worker at one time (for load balancing). // Setting this to a higher number can allow for faster load convergence (e.g. during deployments, cold starts), // but can cause higher churn in the system. - DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME = 1 + DefaultMaxLeasesToStealAtOneTime = 1 // The Amazon DynamoDB table used for tracking leases will be provisioned with this read capacity. - DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY = 10 + DefaultInitialLeaseTableReadCapacity = 10 // The Amazon DynamoDB table used for tracking leases will be provisioned with this write capacity. 
- DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY = 10 + DefaultInitialLeaseTableWriteCapacity = 10 // The Worker will skip shard sync during initialization if there are one or more leases in the lease table. This // assumes that the shards and leases are in-sync. This enables customers to choose faster startup times (e.g. // during incremental deployments of an application). - DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST = false + DefaultSkipShardSyncAtStartupIfLeasesExist = false // The amount of milliseconds to wait before graceful shutdown forcefully terminates. - DEFAULT_SHUTDOWN_GRACE_MILLIS = 5000 + DefaultShutdownGraceMillis = 5000 ) type ( diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index d7960e9..e42d864 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -76,24 +76,24 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regio StreamName: streamName, RegionName: regionName, WorkerID: workerID, - InitialPositionInStream: DEFAULT_INITIAL_POSITION_IN_STREAM, - InitialPositionInStreamExtended: *newInitialPosition(DEFAULT_INITIAL_POSITION_IN_STREAM), - FailoverTimeMillis: DEFAULT_FAILOVER_TIME_MILLIS, - LeaseRefreshPeriodMillis: DEFAULT_LEASE_REFRESH_PERIOD_MILLIS, - MaxRecords: DEFAULT_MAX_RECORDS, - IdleTimeBetweenReadsInMillis: DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, - CallProcessRecordsEvenForEmptyRecordList: DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, - ParentShardPollIntervalMillis: DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS, - ShardSyncIntervalMillis: DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, - CleanupTerminatedShardsBeforeExpiry: DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, - TaskBackoffTimeMillis: DEFAULT_TASK_BACKOFF_TIME_MILLIS, - ValidateSequenceNumberBeforeCheckpointing: DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, - ShutdownGraceMillis: DEFAULT_SHUTDOWN_GRACE_MILLIS, - MaxLeasesForWorker: DEFAULT_MAX_LEASES_FOR_WORKER, - 
MaxLeasesToStealAtOneTime: DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME, - InitialLeaseTableReadCapacity: DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, - InitialLeaseTableWriteCapacity: DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, - SkipShardSyncAtWorkerInitializationIfLeasesExist: DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST, + InitialPositionInStream: DefaultInitialPositionInStream, + InitialPositionInStreamExtended: *newInitialPosition(DefaultInitialPositionInStream), + FailoverTimeMillis: DefaultFailoverTimeMillis, + LeaseRefreshPeriodMillis: DefaultLeaseRefreshPeriodMillis, + MaxRecords: DefaultMaxRecords, + IdleTimeBetweenReadsInMillis: DefaultIdletimeBetweenReadsMillis, + CallProcessRecordsEvenForEmptyRecordList: DefaultDontCallProcessRecordsForEmptyRecordList, + ParentShardPollIntervalMillis: DefaultParentShardPollIntervalMillis, + ShardSyncIntervalMillis: DefaultShardSyncIntervalMillis, + CleanupTerminatedShardsBeforeExpiry: DefaultCleanupLeasesUponShardsCompletion, + TaskBackoffTimeMillis: DefaultTaskBackoffTimeMillis, + ValidateSequenceNumberBeforeCheckpointing: DefaultValidateSequenceNumberBeforeCheckpointing, + ShutdownGraceMillis: DefaultShutdownGraceMillis, + MaxLeasesForWorker: DefaultMaxLeasesForWorker, + MaxLeasesToStealAtOneTime: DefaultMaxLeasesToStealAtOneTime, + InitialLeaseTableReadCapacity: DefaultInitialLeaseTableReadCapacity, + InitialLeaseTableWriteCapacity: DefaultInitialLeaseTableWriteCapacity, + SkipShardSyncAtWorkerInitializationIfLeasesExist: DefaultSkipShardSyncAtStartupIfLeasesExist, Logger: logger.GetDefaultLogger(), } } diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 4cb963f..6d3a330 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -162,7 +162,7 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID) err = 
sc.checkpointer.GetLease(shard, sc.consumerID) if err != nil { - if err.Error() == chk.ErrLeaseNotAquired { + if err.Error() == chk.ErrLeaseNotAcquired { log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", shard.ID, sc.consumerID) return nil } @@ -225,7 +225,7 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { recordLength := len(input.Records) recordBytes := int64(0) - log.Debugf("Received %d records, MillisBehindLatest: %v", recordLength, input.MillisBehindLatest) + log.Debugf("Received %d de-aggregated records, MillisBehindLatest: %v", recordLength, input.MillisBehindLatest) for _, r := range dars { recordBytes += int64(len(r.Data)) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 4b42847..907bf5d 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -277,7 +277,7 @@ func (w *Worker) eventLoop() { err = w.checkpointer.GetLease(shard, w.workerID) if err != nil { // cannot get lease on the shard - if err.Error() != chk.ErrLeaseNotAquired { + if err.Error() != chk.ErrLeaseNotAcquired { log.Errorf("Cannot get lease: %+v", err) } continue diff --git a/test/record_processor_test.go b/test/record_processor_test.go index f392efb..31a8556 100644 --- a/test/record_processor_test.go +++ b/test/record_processor_test.go @@ -71,7 +71,9 @@ func (dd *dumpRecordProcessor) ProcessRecords(input *kc.ProcessRecordsInput) { // Especially, for processing de-aggregated KPL records, checkpointing has to happen at the end of batch // because de-aggregated records share the same sequence number. lastRecordSequenceNumber := input.Records[len(input.Records)-1].SequenceNumber - dd.t.Logf("Checkpoint progress at: %v, MillisBehindLatest = %v", lastRecordSequenceNumber, input.MillisBehindLatest) + // Calculate the time taken from polling records and delivering to record processor for a batch. 
+ diff := input.CacheExitTime.Sub(*input.CacheEntryTime) + dd.t.Logf("Checkpoint progress at: %v, MillisBehindLatest = %v, KCLProcessTime = %v", lastRecordSequenceNumber, input.MillisBehindLatest, diff) input.Checkpointer.Checkpoint(lastRecordSequenceNumber) } From 909d1774a350aa2fae343474f44491a7d48cc31d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aur=C3=A9lien=20Rainone?= <476650+arl@users.noreply.github.com> Date: Tue, 26 Jan 2021 05:30:10 +0100 Subject: [PATCH 67/90] Add context to ErrLeaseNotAcquired (#87) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * clientlibrary/checkpoint: convert ErrLeaseAcquired to struct Signed-off-by: Aurélien Rainone * clientlibrary/checkpoint: add context to ErrLeaseNotAcquired Signed-off-by: Aurélien Rainone * Use errors.As to check for ErrLeaseNotAcquired error Signed-off-by: Aurélien Rainone --- clientlibrary/checkpoint/checkpointer.go | 13 ++++++++++--- clientlibrary/checkpoint/dynamodb-checkpointer.go | 5 ++--- .../checkpoint/dynamodb-checkpointer_test.go | 5 +++-- clientlibrary/worker/shard-consumer.go | 3 ++- clientlibrary/worker/worker.go | 3 ++- 5 files changed, 19 insertions(+), 10 deletions(-) diff --git a/clientlibrary/checkpoint/checkpointer.go b/clientlibrary/checkpoint/checkpointer.go index 2345533..fe91359 100644 --- a/clientlibrary/checkpoint/checkpointer.go +++ b/clientlibrary/checkpoint/checkpointer.go @@ -29,6 +29,8 @@ package checkpoint import ( "errors" + "fmt" + par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" ) @@ -41,11 +43,16 @@ const ( // We've completely processed all records in this shard. 
ShardEnd = "SHARD_END" - - // ErrLeaseNotAcquired is returned when we failed to get a lock on the shard - ErrLeaseNotAcquired = "lease is already held by another node" ) +type ErrLeaseNotAcquired struct { + cause string +} + +func (e ErrLeaseNotAcquired) Error() string { + return fmt.Sprintf("lease not acquired: %s", e.cause) +} + // Checkpointer handles checkpointing when a record has been processed type Checkpointer interface { // Init initialises the Checkpoint diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index fd6751e..3d70f36 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -28,7 +28,6 @@ package checkpoint import ( - "errors" "time" "github.com/aws/aws-sdk-go/aws" @@ -142,7 +141,7 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign } if time.Now().UTC().Before(currentLeaseTimeout) && assignedTo != newAssignTo { - return errors.New(ErrLeaseNotAcquired) + return ErrLeaseNotAcquired{"current lease timeout not yet expired"} } checkpointer.log.Debugf("Attempting to get a lock for shard: %s, leaseTimeout: %s, assignedTo: %s", shard.ID, currentLeaseTimeout, assignedTo) @@ -186,7 +185,7 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign if err != nil { if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == dynamodb.ErrCodeConditionalCheckFailedException { - return errors.New(ErrLeaseNotAcquired) + return ErrLeaseNotAcquired{dynamodb.ErrCodeConditionalCheckFailedException} } } return err diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go index 55ec973..fe8a383 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go @@ -29,11 +29,12 @@ package checkpoint import ( "errors" - "github.com/stretchr/testify/assert" 
"sync" "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/dynamodb" @@ -85,7 +86,7 @@ func TestGetLeaseNotAquired(t *testing.T) { Checkpoint: "", Mux: &sync.Mutex{}, }, "ijkl-mnop") - if err == nil || err.Error() != ErrLeaseNotAcquired { + if err == nil || !errors.As(err, &ErrLeaseNotAcquired{}) { t.Errorf("Got a lease when it was already held by abcd-efgh: %s", err) } } diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go index 6d3a330..2f87f40 100644 --- a/clientlibrary/worker/shard-consumer.go +++ b/clientlibrary/worker/shard-consumer.go @@ -28,6 +28,7 @@ package worker import ( + "errors" "math" "sync" "time" @@ -162,7 +163,7 @@ func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID) err = sc.checkpointer.GetLease(shard, sc.consumerID) if err != nil { - if err.Error() == chk.ErrLeaseNotAcquired { + if errors.As(err, &chk.ErrLeaseNotAcquired{}) { log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", shard.ID, sc.consumerID) return nil } diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 907bf5d..5b04453 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -28,6 +28,7 @@ package worker import ( + "errors" "math/rand" "sync" "time" @@ -277,7 +278,7 @@ func (w *Worker) eventLoop() { err = w.checkpointer.GetLease(shard, w.workerID) if err != nil { // cannot get lease on the shard - if err.Error() != chk.ErrLeaseNotAcquired { + if !errors.As(err, &chk.ErrLeaseNotAcquired{}) { log.Errorf("Cannot get lease: %+v", err) } continue From ddcc2d0f95837cc1320c011c6972e4bbef476afa Mon Sep 17 00:00:00 2001 From: Ilia Cimpoes Date: Tue, 27 Apr 2021 18:51:26 +0300 Subject: [PATCH 68/90] Support enhanced fan-out feature (#90) * Implement enhanced fan-out 
consumer Signed-off-by: Ilia Cimpoes * Add test cases Signed-off-by: Ilia Cimpoes * Small adjustments in fan-out consumer Signed-off-by: Ilia Cimpoes --- .../checkpoint/dynamodb-checkpointer.go | 22 +- .../checkpoint/dynamodb-checkpointer_test.go | 11 +- clientlibrary/config/config.go | 22 +- clientlibrary/config/config_test.go | 28 +- clientlibrary/config/kcl-config.go | 24 +- clientlibrary/partition/partition.go | 18 +- clientlibrary/utils/awserr.go | 31 ++ clientlibrary/worker/common-shard-consumer.go | 169 ++++++++++ .../worker/fan-out-shard-consumer.go | 168 ++++++++++ .../worker/polling-shard-consumer.go | 171 ++++++++++ .../worker/record-processor-checkpointer.go | 7 +- clientlibrary/worker/shard-consumer.go | 317 ------------------ clientlibrary/worker/worker-fan-out.go | 88 +++++ clientlibrary/worker/worker.go | 69 ++-- test/worker_custom_test.go | 5 +- test/worker_test.go | 80 ++++- 16 files changed, 846 insertions(+), 384 deletions(-) create mode 100644 clientlibrary/utils/awserr.go create mode 100644 clientlibrary/worker/common-shard-consumer.go create mode 100644 clientlibrary/worker/fan-out-shard-consumer.go create mode 100644 clientlibrary/worker/polling-shard-consumer.go delete mode 100644 clientlibrary/worker/shard-consumer.go create mode 100644 clientlibrary/worker/worker-fan-out.go diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 3d70f36..dd8dd55 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -31,7 +31,6 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" @@ -39,6 +38,7 @@ import ( "github.com/vmware/vmware-go-kcl/clientlibrary/config" par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" + 
"github.com/vmware/vmware-go-kcl/clientlibrary/utils" "github.com/vmware/vmware-go-kcl/logger" ) @@ -144,7 +144,7 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign return ErrLeaseNotAcquired{"current lease timeout not yet expired"} } - checkpointer.log.Debugf("Attempting to get a lock for shard: %s, leaseTimeout: %s, assignedTo: %s", shard.ID, currentLeaseTimeout, assignedTo) + checkpointer.log.Debugf("Attempting to get a lock for shard: %s, leaseTimeout: %s, assignedTo: %s, newAssignedTo: %s", shard.ID, currentLeaseTimeout, assignedTo, newAssignTo) conditionalExpression = "ShardID = :id AND AssignedTo = :assigned_to AND LeaseTimeout = :lease_timeout" expressionAttributeValues = map[string]*dynamodb.AttributeValue{ ":id": { @@ -175,18 +175,16 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign marshalledCheckpoint[ParentShardIdKey] = &dynamodb.AttributeValue{S: aws.String(shard.ParentShardId)} } - if shard.Checkpoint != "" { + if shard.GetCheckpoint() != "" { marshalledCheckpoint[SequenceNumberKey] = &dynamodb.AttributeValue{ - S: aws.String(shard.Checkpoint), + S: aws.String(shard.GetCheckpoint()), } } err = checkpointer.conditionalUpdate(conditionalExpression, expressionAttributeValues, marshalledCheckpoint) if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == dynamodb.ErrCodeConditionalCheckFailedException { - return ErrLeaseNotAcquired{dynamodb.ErrCodeConditionalCheckFailedException} - } + if utils.AWSErrCode(err) == dynamodb.ErrCodeConditionalCheckFailedException { + return ErrLeaseNotAcquired{dynamodb.ErrCodeConditionalCheckFailedException} } return err } @@ -207,7 +205,7 @@ func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard *par.ShardStatus) S: aws.String(shard.ID), }, SequenceNumberKey: { - S: aws.String(shard.Checkpoint), + S: aws.String(shard.GetCheckpoint()), }, LeaseOwnerKey: { S: aws.String(shard.AssignedTo), @@ -236,12 +234,10 @@ func 
(checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *par.ShardStatus) er return ErrSequenceIDNotFound } checkpointer.log.Debugf("Retrieved Shard Iterator %s", *sequenceID.S) - shard.Mux.Lock() - defer shard.Mux.Unlock() - shard.Checkpoint = aws.StringValue(sequenceID.S) + shard.SetCheckpoint(aws.StringValue(sequenceID.S)) if assignedTo, ok := checkpoint[LeaseOwnerKey]; ok { - shard.AssignedTo = aws.StringValue(assignedTo.S) + shard.SetLeaseOwner(aws.StringValue(assignedTo.S)) } return nil } diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go index fe8a383..2217b0e 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go @@ -33,12 +33,11 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + "github.com/stretchr/testify/assert" cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" @@ -75,7 +74,7 @@ func TestGetLeaseNotAquired(t *testing.T) { err := checkpoint.GetLease(&par.ShardStatus{ ID: "0001", Checkpoint: "", - Mux: &sync.Mutex{}, + Mux: &sync.RWMutex{}, }, "abcd-efgh") if err != nil { t.Errorf("Error getting lease %s", err) @@ -84,7 +83,7 @@ func TestGetLeaseNotAquired(t *testing.T) { err = checkpoint.GetLease(&par.ShardStatus{ ID: "0001", Checkpoint: "", - Mux: &sync.Mutex{}, + Mux: &sync.RWMutex{}, }, "ijkl-mnop") if err == nil || !errors.As(err, &ErrLeaseNotAcquired{}) { t.Errorf("Got a lease when it was already held by abcd-efgh: %s", err) @@ -124,7 +123,7 @@ func TestGetLeaseAquired(t *testing.T) { shard := &par.ShardStatus{ ID: "0001", Checkpoint: "deadbeef", - Mux: &sync.Mutex{}, + Mux: &sync.RWMutex{}, } err := checkpoint.GetLease(shard, "ijkl-mnop") @@ 
-145,7 +144,7 @@ func TestGetLeaseAquired(t *testing.T) { status := &par.ShardStatus{ ID: shard.ID, - Mux: &sync.Mutex{}, + Mux: &sync.RWMutex{}, } checkpoint.FetchCheckpoint(status) diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index 5337269..2170d44 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -41,6 +41,7 @@ import ( "github.com/aws/aws-sdk-go/aws" creds "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" "github.com/vmware/vmware-go-kcl/logger" ) @@ -169,13 +170,24 @@ type ( // StreamName is the name of Kinesis stream StreamName string + // EnableEnhancedFanOutConsumer enables enhanced fan-out consumer + // See: https://docs.aws.amazon.com/streams/latest/dev/enhanced-consumers.html + // Either consumer name or consumer ARN must be specified when Enhanced Fan-Out is enabled. + EnableEnhancedFanOutConsumer bool + + // EnhancedFanOutConsumerName is the name of the enhanced fan-out consumer to create. + EnhancedFanOutConsumerName string + + // EnhancedFanOutConsumerARN is the ARN of an already created enhanced fan-out consumer, if this is set no automatic consumer creation will be attempted + EnhancedFanOutConsumerARN string + // WorkerID used to distinguish different workers/processes of a Kinesis application WorkerID string // InitialPositionInStream specifies the Position in the stream where a new application should start from InitialPositionInStream InitialPositionInStream - // InitialPositionInStreamExtended provides actual AT_TMESTAMP value + // InitialPositionInStreamExtended provides actual AT_TIMESTAMP value InitialPositionInStreamExtended InitialPositionInStreamExtended // credentials to access Kinesis/Dynamo: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/ @@ -262,18 +274,18 @@ func empty(s string) bool { return len(strings.TrimSpace(s)) == 0 } -// checkIsValuePositive make sure the value is possitive. 
+// checkIsValueNotEmpty makes sure the value is not empty. func checkIsValueNotEmpty(key string, value string) { if empty(value) { // There is no point to continue for incorrect configuration. Fail fast! - log.Panicf("Non-empty value exepected for %v, actual: %v", key, value) + log.Panicf("Non-empty value expected for %v, actual: %v", key, value) } } -// checkIsValuePositive make sure the value is possitive. +// checkIsValuePositive makes sure the value is possitive. func checkIsValuePositive(key string, value int) { if value <= 0 { // There is no point to continue for incorrect configuration. Fail fast! - log.Panicf("Positive value exepected for %v, actual: %v", key, value) + log.Panicf("Positive value expected for %v, actual: %v", key, value) } } diff --git a/clientlibrary/config/config_test.go b/clientlibrary/config/config_test.go index a7015d9..576042c 100644 --- a/clientlibrary/config/config_test.go +++ b/clientlibrary/config/config_test.go @@ -19,10 +19,11 @@ package config import ( - "github.com/vmware/vmware-go-kcl/logger" "testing" "github.com/stretchr/testify/assert" + + "github.com/vmware/vmware-go-kcl/logger" ) func TestConfig(t *testing.T) { @@ -32,13 +33,36 @@ func TestConfig(t *testing.T) { WithInitialPositionInStream(TRIM_HORIZON). WithIdleTimeBetweenReadsInMillis(20). WithCallProcessRecordsEvenForEmptyRecordList(true). - WithTaskBackoffTimeMillis(10) + WithTaskBackoffTimeMillis(10). 
+ WithEnhancedFanOutConsumer("fan-out-consumer") assert.Equal(t, "appName", kclConfig.ApplicationName) assert.Equal(t, 500, kclConfig.FailoverTimeMillis) assert.Equal(t, 10, kclConfig.TaskBackoffTimeMillis) + assert.True(t, kclConfig.EnableEnhancedFanOutConsumer) + assert.Equal(t, "fan-out-consumer", kclConfig.EnhancedFanOutConsumerName) contextLogger := kclConfig.Logger.WithFields(logger.Fields{"key1": "value1"}) contextLogger.Debugf("Starting with default logger") contextLogger.Infof("Default logger is awesome") } + +func TestEmptyEnhancedFanOutConsumerName(t *testing.T) { + assert.PanicsWithValue(t, "Non-empty value expected for EnhancedFanOutConsumerName, actual: ", func() { + NewKinesisClientLibConfig("app", "stream", "us-west-2", "worker").WithEnhancedFanOutConsumer("") + }) +} + +func TestConfigWithEnhancedFanOutConsumerARN(t *testing.T) { + kclConfig := NewKinesisClientLibConfig("app", "stream", "us-west-2", "worker"). + WithEnhancedFanOutConsumerARN("consumer:arn") + + assert.True(t, kclConfig.EnableEnhancedFanOutConsumer) + assert.Equal(t, "consumer:arn", kclConfig.EnhancedFanOutConsumerARN) +} + +func TestEmptyEnhancedFanOutConsumerARN(t *testing.T) { + assert.PanicsWithValue(t, "Non-empty value expected for EnhancedFanOutConsumerARN, actual: ", func() { + NewKinesisClientLibConfig("app", "stream", "us-west-2", "worker").WithEnhancedFanOutConsumerARN("") + }) +} diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index e42d864..810e4c9 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -37,9 +37,9 @@ import ( "log" "time" - "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" - "github.com/aws/aws-sdk-go/aws/credentials" + + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" "github.com/vmware/vmware-go-kcl/clientlibrary/utils" "github.com/vmware/vmware-go-kcl/logger" ) @@ -212,3 +212,23 @@ func (c *KinesisClientLibConfiguration) WithMonitoringService(mService metrics.M 
c.MonitoringService = mService return c } + +// WithEnhancedFanOutConsumer enables enhanced fan-out consumer with the specified name +// For more info see: https://docs.aws.amazon.com/streams/latest/dev/enhanced-consumers.html +// Note: You can register up to twenty consumers per stream to use enhanced fan-out. +func (c *KinesisClientLibConfiguration) WithEnhancedFanOutConsumer(consumerName string) *KinesisClientLibConfiguration { + checkIsValueNotEmpty("EnhancedFanOutConsumerName", consumerName) + c.EnhancedFanOutConsumerName = consumerName + c.EnableEnhancedFanOutConsumer = true + return c +} + +// WithEnhancedFanOutConsumerARN enables enhanced fan-out consumer with the specified consumer ARN +// For more info see: https://docs.aws.amazon.com/streams/latest/dev/enhanced-consumers.html +// Note: You can register up to twenty consumers per stream to use enhanced fan-out. +func (c *KinesisClientLibConfiguration) WithEnhancedFanOutConsumerARN(consumerARN string) *KinesisClientLibConfiguration { + checkIsValueNotEmpty("EnhancedFanOutConsumerARN", consumerARN) + c.EnhancedFanOutConsumerARN = consumerARN + c.EnableEnhancedFanOutConsumer = true + return c +} diff --git a/clientlibrary/partition/partition.go b/clientlibrary/partition/partition.go index c261672..955bf08 100644 --- a/clientlibrary/partition/partition.go +++ b/clientlibrary/partition/partition.go @@ -37,7 +37,7 @@ type ShardStatus struct { ParentShardId string Checkpoint string AssignedTo string - Mux *sync.Mutex + Mux *sync.RWMutex LeaseTimeout time.Time // Shard Range StartingSequenceNumber string @@ -46,8 +46,8 @@ type ShardStatus struct { } func (ss *ShardStatus) GetLeaseOwner() string { - ss.Mux.Lock() - defer ss.Mux.Unlock() + ss.Mux.RLock() + defer ss.Mux.RUnlock() return ss.AssignedTo } @@ -56,3 +56,15 @@ func (ss *ShardStatus) SetLeaseOwner(owner string) { defer ss.Mux.Unlock() ss.AssignedTo = owner } + +func (ss *ShardStatus) GetCheckpoint() string { + ss.Mux.RLock() + defer ss.Mux.RUnlock() + 
return ss.Checkpoint +} + +func (ss *ShardStatus) SetCheckpoint(c string) { + ss.Mux.Lock() + defer ss.Mux.Unlock() + ss.Checkpoint = c +} diff --git a/clientlibrary/utils/awserr.go b/clientlibrary/utils/awserr.go new file mode 100644 index 0000000..6e692b8 --- /dev/null +++ b/clientlibrary/utils/awserr.go @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2021 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package utils + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +func AWSErrCode(err error) string { + awsErr, _ := err.(awserr.Error) + if awsErr != nil { + return awsErr.Code() + } + return "" +} diff --git a/clientlibrary/worker/common-shard-consumer.go b/clientlibrary/worker/common-shard-consumer.go new file mode 100644 index 0000000..416ac13 --- /dev/null +++ b/clientlibrary/worker/common-shard-consumer.go @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2021 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +package worker + +import ( + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + deagg "github.com/awslabs/kinesis-aggregation/go/deaggregator" + + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" + "github.com/vmware/vmware-go-kcl/clientlibrary/config" + kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" + par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" +) + +type shardConsumer interface { + getRecords() error +} + +// commonShardConsumer implements common functionality for regular and enhanced fan-out consumers +type commonShardConsumer struct { + shard *par.ShardStatus + kc kinesisiface.KinesisAPI + checkpointer chk.Checkpointer + recordProcessor kcl.IRecordProcessor + kclConfig *config.KinesisClientLibConfiguration + mService metrics.MonitoringService +} + +// Cleanup the internal lease cache +func (sc *commonShardConsumer) releaseLease() { + log := sc.kclConfig.Logger + log.Infof("Release lease for shard %s", sc.shard.ID) + sc.shard.SetLeaseOwner("") + + // Release the lease by wiping out the lease owner for the shard + // Note: we don't need to do anything in case of error here and shard lease will eventually be expired. + if err := sc.checkpointer.RemoveLeaseOwner(sc.shard.ID); err != nil { + log.Errorf("Failed to release shard lease or shard: %s Error: %+v", sc.shard.ID, err) + } + + // reporting lease lose metrics + sc.mService.LeaseLost(sc.shard.ID) +} + +// getStartingPosition gets kinesis stating position. +// First try to fetch checkpoint. 
If checkpoint is not found use InitialPositionInStream +func (sc *commonShardConsumer) getStartingPosition() (*kinesis.StartingPosition, error) { + err := sc.checkpointer.FetchCheckpoint(sc.shard) + if err != nil && err != chk.ErrSequenceIDNotFound { + return nil, err + } + + checkpoint := sc.shard.GetCheckpoint() + if checkpoint != "" { + sc.kclConfig.Logger.Debugf("Start shard: %v at checkpoint: %v", sc.shard.ID, checkpoint) + return &kinesis.StartingPosition{ + Type: aws.String("AFTER_SEQUENCE_NUMBER"), + SequenceNumber: &checkpoint, + }, nil + } + + shardIteratorType := config.InitalPositionInStreamToShardIteratorType(sc.kclConfig.InitialPositionInStream) + sc.kclConfig.Logger.Debugf("No checkpoint recorded for shard: %v, starting with: %v", sc.shard.ID, aws.StringValue(shardIteratorType)) + + if sc.kclConfig.InitialPositionInStream == config.AT_TIMESTAMP { + return &kinesis.StartingPosition{ + Type: shardIteratorType, + Timestamp: sc.kclConfig.InitialPositionInStreamExtended.Timestamp, + }, nil + } + + return &kinesis.StartingPosition{ + Type: shardIteratorType, + }, nil +} + +// Need to wait until the parent shard finished +func (sc *commonShardConsumer) waitOnParentShard() error { + if len(sc.shard.ParentShardId) == 0 { + return nil + } + + pshard := &par.ShardStatus{ + ID: sc.shard.ParentShardId, + Mux: &sync.RWMutex{}, + } + + for { + if err := sc.checkpointer.FetchCheckpoint(pshard); err != nil { + return err + } + + // Parent shard is finished. 
+ if pshard.GetCheckpoint() == chk.ShardEnd { + return nil + } + + time.Sleep(time.Duration(sc.kclConfig.ParentShardPollIntervalMillis) * time.Millisecond) + } +} + +func (sc *commonShardConsumer) processRecords(getRecordsStartTime time.Time, records []*kinesis.Record, millisBehindLatest *int64, recordCheckpointer kcl.IRecordProcessorCheckpointer) { + log := sc.kclConfig.Logger + + getRecordsTime := time.Since(getRecordsStartTime).Milliseconds() + sc.mService.RecordGetRecordsTime(sc.shard.ID, float64(getRecordsTime)) + + log.Debugf("Received %d original records.", len(records)) + + // De-aggregate the records if they were published by the KPL. + dars, err := deagg.DeaggregateRecords(records) + if err != nil { + // The error is caused by bad KPL publisher and just skip the bad records + // instead of being stuck here. + log.Errorf("Error in de-aggregating KPL records: %+v", err) + } + + input := &kcl.ProcessRecordsInput{ + Records: dars, + MillisBehindLatest: aws.Int64Value(millisBehindLatest), + Checkpointer: recordCheckpointer, + } + + recordLength := len(input.Records) + recordBytes := int64(0) + log.Debugf("Received %d de-aggregated records, MillisBehindLatest: %v", recordLength, input.MillisBehindLatest) + + for _, r := range input.Records { + recordBytes += int64(len(r.Data)) + } + + if recordLength > 0 || sc.kclConfig.CallProcessRecordsEvenForEmptyRecordList { + processRecordsStartTime := time.Now() + + // Delivery the events to the record processor + input.CacheEntryTime = &getRecordsStartTime + input.CacheExitTime = &processRecordsStartTime + sc.recordProcessor.ProcessRecords(input) + + processedRecordsTiming := time.Since(processRecordsStartTime).Milliseconds() + sc.mService.RecordProcessRecordsTime(sc.shard.ID, float64(processedRecordsTiming)) + } + + sc.mService.IncrRecordsProcessed(sc.shard.ID, recordLength) + sc.mService.IncrBytesProcessed(sc.shard.ID, recordBytes) + sc.mService.MillisBehindLatest(sc.shard.ID, float64(*millisBehindLatest)) +} diff 
--git a/clientlibrary/worker/fan-out-shard-consumer.go b/clientlibrary/worker/fan-out-shard-consumer.go new file mode 100644 index 0000000..ba4484c --- /dev/null +++ b/clientlibrary/worker/fan-out-shard-consumer.go @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2021 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package worker + +import ( + "errors" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kinesis" + + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" + kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" +) + +// FanOutShardConsumer is responsible for consuming data records of a (specified) shard. +// Note: FanOutShardConsumer only deal with one shard. 
+// For more info see: https://docs.aws.amazon.com/streams/latest/dev/enhanced-consumers.html +type FanOutShardConsumer struct { + commonShardConsumer + consumerARN string + consumerID string + stop *chan struct{} +} + +// getRecords subscribes to a shard and reads events from it. +// Precondition: it currently has the lease on the shard. +func (sc *FanOutShardConsumer) getRecords() error { + defer sc.releaseLease() + + log := sc.kclConfig.Logger + + // If the shard is child shard, need to wait until the parent finished. + if err := sc.waitOnParentShard(); err != nil { + // If parent shard has been deleted by Kinesis system already, just ignore the error. + if err != chk.ErrSequenceIDNotFound { + log.Errorf("Error in waiting for parent shard: %v to finish. Error: %+v", sc.shard.ParentShardId, err) + return err + } + } + + shardSub, err := sc.subscribeToShard() + if err != nil { + log.Errorf("Unable to subscribe to shard %s: %v", sc.shard.ID, err) + return err + } + defer func() { + if shardSub == nil || shardSub.EventStream == nil { + log.Debugf("Nothing to close, EventStream is nil") + return + } + err = shardSub.EventStream.Close() + if err != nil { + log.Errorf("Unable to close event stream for %s: %v", sc.shard.ID, err) + } + }() + + input := &kcl.InitializationInput{ + ShardId: sc.shard.ID, + ExtendedSequenceNumber: &kcl.ExtendedSequenceNumber{SequenceNumber: aws.String(sc.shard.GetCheckpoint())}, + } + sc.recordProcessor.Initialize(input) + recordCheckpointer := NewRecordProcessorCheckpoint(sc.shard, sc.checkpointer) + + var continuationSequenceNumber *string + refreshLeaseTimer := time.After(time.Until(sc.shard.LeaseTimeout.Add(-time.Duration(sc.kclConfig.LeaseRefreshPeriodMillis) * time.Millisecond))) + for { + getRecordsStartTime := time.Now() + select { + case <-*sc.stop: + shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.REQUESTED, Checkpointer: recordCheckpointer} + sc.recordProcessor.Shutdown(shutdownInput) + return nil + case 
<-refreshLeaseTimer: + log.Debugf("Refreshing lease on shard: %s for worker: %s", sc.shard.ID, sc.consumerID) + err = sc.checkpointer.GetLease(sc.shard, sc.consumerID) + if err != nil { + if errors.As(err, &chk.ErrLeaseNotAcquired{}) { + log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", sc.shard.ID, sc.consumerID) + return nil + } + log.Errorf("Error in refreshing lease on shard: %s for worker: %s. Error: %+v", sc.shard.ID, sc.consumerID, err) + return err + } + refreshLeaseTimer = time.After(time.Until(sc.shard.LeaseTimeout.Add(-time.Duration(sc.kclConfig.LeaseRefreshPeriodMillis) * time.Millisecond))) + case event, ok := <-shardSub.EventStream.Events(): + if !ok { + // need to resubscribe to shard + log.Debugf("Event stream ended, refreshing subscription on shard: %s for worker: %s", sc.shard.ID, sc.consumerID) + if continuationSequenceNumber == nil || *continuationSequenceNumber == "" { + log.Debugf("No continuation sequence number") + return nil + } + shardSub, err = sc.resubscribe(shardSub, continuationSequenceNumber) + if err != nil { + return err + } + continue + } + subEvent, ok := event.(*kinesis.SubscribeToShardEvent) + if !ok { + log.Errorf("Received unexpected event type: %T", event) + continue + } + continuationSequenceNumber = subEvent.ContinuationSequenceNumber + sc.processRecords(getRecordsStartTime, subEvent.Records, subEvent.MillisBehindLatest, recordCheckpointer) + + // The shard has been closed, so no new records can be read from it + if continuationSequenceNumber == nil { + log.Infof("Shard %s closed", sc.shard.ID) + shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.TERMINATE, Checkpointer: recordCheckpointer} + sc.recordProcessor.Shutdown(shutdownInput) + return nil + } + } + } +} + +func (sc *FanOutShardConsumer) subscribeToShard() (*kinesis.SubscribeToShardOutput, error) { + startPosition, err := sc.getStartingPosition() + if err != nil { + return nil, err + } + + return 
sc.kc.SubscribeToShard(&kinesis.SubscribeToShardInput{ + ConsumerARN: &sc.consumerARN, + ShardId: &sc.shard.ID, + StartingPosition: startPosition, + }) +} + +func (sc *FanOutShardConsumer) resubscribe(shardSub *kinesis.SubscribeToShardOutput, continuationSequence *string) (*kinesis.SubscribeToShardOutput, error) { + err := shardSub.EventStream.Close() + if err != nil { + sc.kclConfig.Logger.Errorf("Unable to close event stream for %s: %v", sc.shard.ID, err) + return nil, err + } + startPosition := &kinesis.StartingPosition{ + Type: aws.String("AFTER_SEQUENCE_NUMBER"), + SequenceNumber: continuationSequence, + } + shardSub, err = sc.kc.SubscribeToShard(&kinesis.SubscribeToShardInput{ + ConsumerARN: &sc.consumerARN, + ShardId: &sc.shard.ID, + StartingPosition: startPosition, + }) + if err != nil { + sc.kclConfig.Logger.Errorf("Unable to resubscribe to shard %s: %v", sc.shard.ID, err) + return nil, err + } + return shardSub, nil +} diff --git a/clientlibrary/worker/polling-shard-consumer.go b/clientlibrary/worker/polling-shard-consumer.go new file mode 100644 index 0000000..27e5c80 --- /dev/null +++ b/clientlibrary/worker/polling-shard-consumer.go @@ -0,0 +1,171 @@ +/* + * Copyright (c) 2018 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +// The implementation is derived from https://github.com/patrobinson/gokini +// +// Copyright 2018 Patrick robinson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+package worker + +import ( + "errors" + "math" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/kinesis" + + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" + kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" + "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" + "github.com/vmware/vmware-go-kcl/clientlibrary/utils" +) + +// PollingShardConsumer is responsible for polling data records from a (specified) shard. +// Note: PollingShardConsumer only deal with one shard. +type PollingShardConsumer struct { + commonShardConsumer + streamName string + stop *chan struct{} + consumerID string + mService metrics.MonitoringService +} + +func (sc *PollingShardConsumer) getShardIterator() (*string, error) { + startPosition, err := sc.getStartingPosition() + if err != nil { + return nil, err + } + shardIterArgs := &kinesis.GetShardIteratorInput{ + ShardId: &sc.shard.ID, + ShardIteratorType: startPosition.Type, + StartingSequenceNumber: startPosition.SequenceNumber, + Timestamp: startPosition.Timestamp, + StreamName: &sc.streamName, + } + iterResp, err := sc.kc.GetShardIterator(shardIterArgs) + if err != nil { + return nil, err + } + return iterResp.ShardIterator, nil +} + +// getRecords continously poll one shard for data record +// Precondition: it currently has the lease on the shard. +func (sc *PollingShardConsumer) getRecords() error { + defer sc.releaseLease() + + log := sc.kclConfig.Logger + + // If the shard is child shard, need to wait until the parent finished. + if err := sc.waitOnParentShard(); err != nil { + // If parent shard has been deleted by Kinesis system already, just ignore the error. + if err != chk.ErrSequenceIDNotFound { + log.Errorf("Error in waiting for parent shard: %v to finish. 
Error: %+v", sc.shard.ParentShardId, err) + return err + } + } + + shardIterator, err := sc.getShardIterator() + if err != nil { + log.Errorf("Unable to get shard iterator for %s: %v", sc.shard.ID, err) + return err + } + + // Start processing events and notify record processor on shard and starting checkpoint + input := &kcl.InitializationInput{ + ShardId: sc.shard.ID, + ExtendedSequenceNumber: &kcl.ExtendedSequenceNumber{SequenceNumber: aws.String(sc.shard.GetCheckpoint())}, + } + sc.recordProcessor.Initialize(input) + + recordCheckpointer := NewRecordProcessorCheckpoint(sc.shard, sc.checkpointer) + retriedErrors := 0 + + for { + if time.Now().UTC().After(sc.shard.LeaseTimeout.Add(-time.Duration(sc.kclConfig.LeaseRefreshPeriodMillis) * time.Millisecond)) { + log.Debugf("Refreshing lease on shard: %s for worker: %s", sc.shard.ID, sc.consumerID) + err = sc.checkpointer.GetLease(sc.shard, sc.consumerID) + if err != nil { + if errors.As(err, &chk.ErrLeaseNotAcquired{}) { + log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", sc.shard.ID, sc.consumerID) + return nil + } + // log and return error + log.Errorf("Error in refreshing lease on shard: %s for worker: %s. 
Error: %+v", + sc.shard.ID, sc.consumerID, err) + return err + } + } + + getRecordsStartTime := time.Now() + + log.Debugf("Trying to read %d record from iterator: %v", sc.kclConfig.MaxRecords, aws.StringValue(shardIterator)) + getRecordsArgs := &kinesis.GetRecordsInput{ + Limit: aws.Int64(int64(sc.kclConfig.MaxRecords)), + ShardIterator: shardIterator, + } + // Get records from stream and retry as needed + getResp, err := sc.kc.GetRecords(getRecordsArgs) + if err != nil { + if utils.AWSErrCode(err) == kinesis.ErrCodeProvisionedThroughputExceededException || utils.AWSErrCode(err) == kinesis.ErrCodeKMSThrottlingException { + log.Errorf("Error getting records from shard %v: %+v", sc.shard.ID, err) + retriedErrors++ + // exponential backoff + // https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff + time.Sleep(time.Duration(math.Exp2(float64(retriedErrors))*100) * time.Millisecond) + continue + } + log.Errorf("Error getting records from Kinesis that cannot be retried: %+v Request: %s", err, getRecordsArgs) + return err + } + // reset the retry count after success + retriedErrors = 0 + + sc.processRecords(getRecordsStartTime, getResp.Records, getResp.MillisBehindLatest, recordCheckpointer) + + // The shard has been closed, so no new records can be read from it + if getResp.NextShardIterator == nil { + log.Infof("Shard %s closed", sc.shard.ID) + shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.TERMINATE, Checkpointer: recordCheckpointer} + sc.recordProcessor.Shutdown(shutdownInput) + return nil + } + shardIterator = getResp.NextShardIterator + + // Idle between each read, the user is responsible for checkpoint the progress + // This value is only used when no records are returned; if records are returned, it should immediately + // retrieve the next set of records. 
+ if len(getResp.Records) == 0 && aws.Int64Value(getResp.MillisBehindLatest) < int64(sc.kclConfig.IdleTimeBetweenReadsInMillis) { + time.Sleep(time.Duration(sc.kclConfig.IdleTimeBetweenReadsInMillis) * time.Millisecond) + } + + select { + case <-*sc.stop: + shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.REQUESTED, Checkpointer: recordCheckpointer} + sc.recordProcessor.Shutdown(shutdownInput) + return nil + default: + } + } +} diff --git a/clientlibrary/worker/record-processor-checkpointer.go b/clientlibrary/worker/record-processor-checkpointer.go index b6baee6..e47bbba 100644 --- a/clientlibrary/worker/record-processor-checkpointer.go +++ b/clientlibrary/worker/record-processor-checkpointer.go @@ -64,14 +64,11 @@ func (pc *PreparedCheckpointer) Checkpoint() error { } func (rc *RecordProcessorCheckpointer) Checkpoint(sequenceNumber *string) error { - rc.shard.Mux.Lock() - defer rc.shard.Mux.Unlock() - // checkpoint the last sequence of a closed shard if sequenceNumber == nil { - rc.shard.Checkpoint = chk.ShardEnd + rc.shard.SetCheckpoint(chk.ShardEnd) } else { - rc.shard.Checkpoint = aws.StringValue(sequenceNumber) + rc.shard.SetCheckpoint(aws.StringValue(sequenceNumber)) } return rc.checkpoint.CheckpointSequence(rc.shard) diff --git a/clientlibrary/worker/shard-consumer.go b/clientlibrary/worker/shard-consumer.go deleted file mode 100644 index 2f87f40..0000000 --- a/clientlibrary/worker/shard-consumer.go +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Copyright (c) 2018 VMware, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and - * associated documentation files (the "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is furnished to do - * so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT - * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -// The implementation is derived from https://github.com/patrobinson/gokini -// -// Copyright 2018 Patrick robinson -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -package worker - -import ( - "errors" - "math" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" - deagg "github.com/awslabs/kinesis-aggregation/go/deaggregator" - - chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" - "github.com/vmware/vmware-go-kcl/clientlibrary/config" - kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" - "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" - par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" -) - -const ( - // This is the initial state of a shard consumer. This causes the consumer to remain blocked until the all - // parent shards have been completed. - WaitingOnParentShards ShardConsumerState = iota + 1 - - // ErrCodeKMSThrottlingException is defined in the API Reference https://docs.aws.amazon.com/sdk-for-go/api/service/kinesis/#Kinesis.GetRecords - // But it's not a constant? - ErrCodeKMSThrottlingException = "KMSThrottlingException" -) - -type ShardConsumerState int - -// ShardConsumer is responsible for consuming data records of a (specified) shard. -// Note: ShardConsumer only deal with one shard. 
-type ShardConsumer struct { - streamName string - shard *par.ShardStatus - kc kinesisiface.KinesisAPI - checkpointer chk.Checkpointer - recordProcessor kcl.IRecordProcessor - kclConfig *config.KinesisClientLibConfiguration - stop *chan struct{} - consumerID string - mService metrics.MonitoringService - state ShardConsumerState -} - -func (sc *ShardConsumer) getShardIterator(shard *par.ShardStatus) (*string, error) { - log := sc.kclConfig.Logger - - // Get checkpoint of the shard from dynamoDB - err := sc.checkpointer.FetchCheckpoint(shard) - if err != nil && err != chk.ErrSequenceIDNotFound { - return nil, err - } - - // If there isn't any checkpoint for the shard, use the configuration value. - if shard.Checkpoint == "" { - initPos := sc.kclConfig.InitialPositionInStream - shardIteratorType := config.InitalPositionInStreamToShardIteratorType(initPos) - log.Debugf("No checkpoint recorded for shard: %v, starting with: %v", shard.ID, - aws.StringValue(shardIteratorType)) - - var shardIterArgs *kinesis.GetShardIteratorInput - if initPos == config.AT_TIMESTAMP { - shardIterArgs = &kinesis.GetShardIteratorInput{ - ShardId: &shard.ID, - ShardIteratorType: shardIteratorType, - Timestamp: sc.kclConfig.InitialPositionInStreamExtended.Timestamp, - StreamName: &sc.streamName, - } - } else { - shardIterArgs = &kinesis.GetShardIteratorInput{ - ShardId: &shard.ID, - ShardIteratorType: shardIteratorType, - StreamName: &sc.streamName, - } - } - - iterResp, err := sc.kc.GetShardIterator(shardIterArgs) - if err != nil { - return nil, err - } - return iterResp.ShardIterator, nil - } - - log.Debugf("Start shard: %v at checkpoint: %v", shard.ID, shard.Checkpoint) - shardIterArgs := &kinesis.GetShardIteratorInput{ - ShardId: &shard.ID, - ShardIteratorType: aws.String("AFTER_SEQUENCE_NUMBER"), - StartingSequenceNumber: &shard.Checkpoint, - StreamName: &sc.streamName, - } - iterResp, err := sc.kc.GetShardIterator(shardIterArgs) - if err != nil { - return nil, err - } - return 
iterResp.ShardIterator, nil -} - -// getRecords continously poll one shard for data record -// Precondition: it currently has the lease on the shard. -func (sc *ShardConsumer) getRecords(shard *par.ShardStatus) error { - defer sc.releaseLease(shard) - - log := sc.kclConfig.Logger - - // If the shard is child shard, need to wait until the parent finished. - if err := sc.waitOnParentShard(shard); err != nil { - // If parent shard has been deleted by Kinesis system already, just ignore the error. - if err != chk.ErrSequenceIDNotFound { - log.Errorf("Error in waiting for parent shard: %v to finish. Error: %+v", shard.ParentShardId, err) - return err - } - } - - shardIterator, err := sc.getShardIterator(shard) - if err != nil { - log.Errorf("Unable to get shard iterator for %s: %v", shard.ID, err) - return err - } - - // Start processing events and notify record processor on shard and starting checkpoint - input := &kcl.InitializationInput{ - ShardId: shard.ID, - ExtendedSequenceNumber: &kcl.ExtendedSequenceNumber{SequenceNumber: aws.String(shard.Checkpoint)}, - } - sc.recordProcessor.Initialize(input) - - recordCheckpointer := NewRecordProcessorCheckpoint(shard, sc.checkpointer) - retriedErrors := 0 - - for { - if time.Now().UTC().After(shard.LeaseTimeout.Add(-time.Duration(sc.kclConfig.LeaseRefreshPeriodMillis) * time.Millisecond)) { - log.Debugf("Refreshing lease on shard: %s for worker: %s", shard.ID, sc.consumerID) - err = sc.checkpointer.GetLease(shard, sc.consumerID) - if err != nil { - if errors.As(err, &chk.ErrLeaseNotAcquired{}) { - log.Warnf("Failed in acquiring lease on shard: %s for worker: %s", shard.ID, sc.consumerID) - return nil - } - // log and return error - log.Errorf("Error in refreshing lease on shard: %s for worker: %s. 
Error: %+v", - shard.ID, sc.consumerID, err) - return err - } - } - - getRecordsStartTime := time.Now() - - log.Debugf("Trying to read %d record from iterator: %v", sc.kclConfig.MaxRecords, aws.StringValue(shardIterator)) - getRecordsArgs := &kinesis.GetRecordsInput{ - Limit: aws.Int64(int64(sc.kclConfig.MaxRecords)), - ShardIterator: shardIterator, - } - // Get records from stream and retry as needed - getResp, err := sc.kc.GetRecords(getRecordsArgs) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == kinesis.ErrCodeProvisionedThroughputExceededException || awsErr.Code() == ErrCodeKMSThrottlingException { - log.Errorf("Error getting records from shard %v: %+v", shard.ID, err) - retriedErrors++ - // exponential backoff - // https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff - time.Sleep(time.Duration(math.Exp2(float64(retriedErrors))*100) * time.Millisecond) - continue - } - } - log.Errorf("Error getting records from Kinesis that cannot be retried: %+v Request: %s", err, getRecordsArgs) - return err - } - - // Convert from nanoseconds to milliseconds - getRecordsTime := time.Since(getRecordsStartTime) / 1000000 - sc.mService.RecordGetRecordsTime(shard.ID, float64(getRecordsTime)) - - // reset the retry count after success - retriedErrors = 0 - - log.Debugf("Received %d original records.", len(getResp.Records)) - - // De-aggregate the records if they were published by the KPL. - dars := make([]*kinesis.Record, 0) - dars, err = deagg.DeaggregateRecords(getResp.Records) - - if err != nil { - // The error is caused by bad KPL publisher and just skip the bad records - // instead of being stuck here. 
- log.Errorf("Error in de-aggregating KPL records: %+v", err) - } - - // IRecordProcessorCheckpointer - input := &kcl.ProcessRecordsInput{ - Records: dars, - MillisBehindLatest: aws.Int64Value(getResp.MillisBehindLatest), - Checkpointer: recordCheckpointer, - } - - recordLength := len(input.Records) - recordBytes := int64(0) - log.Debugf("Received %d de-aggregated records, MillisBehindLatest: %v", recordLength, input.MillisBehindLatest) - - for _, r := range dars { - recordBytes += int64(len(r.Data)) - } - - if recordLength > 0 || sc.kclConfig.CallProcessRecordsEvenForEmptyRecordList { - processRecordsStartTime := time.Now() - - // Delivery the events to the record processor - input.CacheEntryTime = &getRecordsStartTime - input.CacheExitTime = &processRecordsStartTime - sc.recordProcessor.ProcessRecords(input) - - // Convert from nanoseconds to milliseconds - processedRecordsTiming := time.Since(processRecordsStartTime) / 1000000 - sc.mService.RecordProcessRecordsTime(shard.ID, float64(processedRecordsTiming)) - } - - sc.mService.IncrRecordsProcessed(shard.ID, recordLength) - sc.mService.IncrBytesProcessed(shard.ID, recordBytes) - sc.mService.MillisBehindLatest(shard.ID, float64(*getResp.MillisBehindLatest)) - - // Idle between each read, the user is responsible for checkpoint the progress - // This value is only used when no records are returned; if records are returned, it should immediately - // retrieve the next set of records. 
- if recordLength == 0 && aws.Int64Value(getResp.MillisBehindLatest) < int64(sc.kclConfig.IdleTimeBetweenReadsInMillis) { - time.Sleep(time.Duration(sc.kclConfig.IdleTimeBetweenReadsInMillis) * time.Millisecond) - } - - // The shard has been closed, so no new records can be read from it - if getResp.NextShardIterator == nil { - log.Infof("Shard %s closed", shard.ID) - shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.TERMINATE, Checkpointer: recordCheckpointer} - sc.recordProcessor.Shutdown(shutdownInput) - return nil - } - shardIterator = getResp.NextShardIterator - - select { - case <-*sc.stop: - shutdownInput := &kcl.ShutdownInput{ShutdownReason: kcl.REQUESTED, Checkpointer: recordCheckpointer} - sc.recordProcessor.Shutdown(shutdownInput) - return nil - default: - } - } -} - -// Need to wait until the parent shard finished -func (sc *ShardConsumer) waitOnParentShard(shard *par.ShardStatus) error { - if len(shard.ParentShardId) == 0 { - return nil - } - - pshard := &par.ShardStatus{ - ID: shard.ParentShardId, - Mux: &sync.Mutex{}, - } - - for { - if err := sc.checkpointer.FetchCheckpoint(pshard); err != nil { - return err - } - - // Parent shard is finished. - if pshard.Checkpoint == chk.ShardEnd { - return nil - } - - time.Sleep(time.Duration(sc.kclConfig.ParentShardPollIntervalMillis) * time.Millisecond) - } -} - -// Cleanup the internal lease cache -func (sc *ShardConsumer) releaseLease(shard *par.ShardStatus) { - log := sc.kclConfig.Logger - log.Infof("Release lease for shard %s", shard.ID) - shard.SetLeaseOwner("") - - // Release the lease by wiping out the lease owner for the shard - // Note: we don't need to do anything in case of error here and shard lease will eventuall be expired. 
- if err := sc.checkpointer.RemoveLeaseOwner(shard.ID); err != nil { - log.Errorf("Failed to release shard lease or shard: %s Error: %+v", shard.ID, err) - } - - // reporting lease lose metrics - sc.mService.LeaseLost(shard.ID) -} diff --git a/clientlibrary/worker/worker-fan-out.go b/clientlibrary/worker/worker-fan-out.go new file mode 100644 index 0000000..0725671 --- /dev/null +++ b/clientlibrary/worker/worker-fan-out.go @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2021 VMware, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +package worker + +import ( + "fmt" + "math" + "time" + + "github.com/aws/aws-sdk-go/service/kinesis" + + "github.com/vmware/vmware-go-kcl/clientlibrary/utils" +) + +// fetchConsumerARNWithRetry tries to fetch consumer ARN. 
Retries 10 times with exponential backoff in case of an error +func (w *Worker) fetchConsumerARNWithRetry() (string, error) { + for retry := 0; ; retry++ { + consumerARN, err := w.fetchConsumerARN() + if err == nil { + return consumerARN, nil + } + if retry < 10 { + sleepDuration := time.Duration(math.Exp2(float64(retry))*100) * time.Millisecond + w.kclConfig.Logger.Errorf("Could not get consumer ARN: %v, retrying after: %s", err, sleepDuration) + time.Sleep(sleepDuration) + continue + } + return consumerARN, err + } +} + +// fetchConsumerARN gets enhanced fan-out consumerARN. +// Registers enhanced fan-out consumer if the consumer is not found +func (w *Worker) fetchConsumerARN() (string, error) { + log := w.kclConfig.Logger + log.Debugf("Fetching stream consumer ARN") + streamDescription, err := w.kc.DescribeStream(&kinesis.DescribeStreamInput{ + StreamName: &w.kclConfig.StreamName, + }) + if err != nil { + log.Errorf("Could not describe stream: %v", err) + return "", err + } + streamConsumerDescription, err := w.kc.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{ + ConsumerName: &w.kclConfig.EnhancedFanOutConsumerName, + StreamARN: streamDescription.StreamDescription.StreamARN, + }) + if err == nil { + log.Infof("Enhanced fan-out consumer found, consumer status: %s", *streamConsumerDescription.ConsumerDescription.ConsumerStatus) + if *streamConsumerDescription.ConsumerDescription.ConsumerStatus != kinesis.ConsumerStatusActive { + return "", fmt.Errorf("consumer is not in active status yet, current status: %s", *streamConsumerDescription.ConsumerDescription.ConsumerStatus) + } + return *streamConsumerDescription.ConsumerDescription.ConsumerARN, nil + } + if utils.AWSErrCode(err) == kinesis.ErrCodeResourceNotFoundException { + log.Infof("Enhanced fan-out consumer not found, registering new consumer with name: %s", w.kclConfig.EnhancedFanOutConsumerName) + out, err := w.kc.RegisterStreamConsumer(&kinesis.RegisterStreamConsumerInput{ + ConsumerName: 
&w.kclConfig.EnhancedFanOutConsumerName, + StreamARN: streamDescription.StreamDescription.StreamARN, + }) + if err != nil { + log.Errorf("Could not register enhanced fan-out consumer: %v", err) + return "", err + } + if *out.Consumer.ConsumerStatus != kinesis.ConsumerStatusActive { + return "", fmt.Errorf("consumer is not in active status yet, current status: %s", *out.Consumer.ConsumerStatus) + } + return *out.Consumer.ConsumerARN, nil + } + log.Errorf("Could not describe stream consumer: %v", err) + return "", err +} diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 5b04453..4d4bc06 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -51,9 +51,10 @@ import ( * the shards). */ type Worker struct { - streamName string - regionName string - workerID string + streamName string + regionName string + workerID string + consumerARN string processorFactory kcl.IRecordProcessorFactory kclConfig *config.KinesisClientLibConfiguration @@ -181,6 +182,24 @@ func (w *Worker) initialize() error { log.Infof("Use custom checkpointer implementation.") } + if w.kclConfig.EnableEnhancedFanOutConsumer { + log.Debugf("Enhanced fan-out is enabled") + switch { + case w.kclConfig.EnhancedFanOutConsumerARN != "": + w.consumerARN = w.kclConfig.EnhancedFanOutConsumerARN + case w.kclConfig.EnhancedFanOutConsumerName != "": + var err error + w.consumerARN, err = w.fetchConsumerARNWithRetry() + if err != nil { + log.Errorf("Failed to fetch consumer ARN for: %s, %v", w.kclConfig.EnhancedFanOutConsumerName, err) + return err + } + default: + log.Errorf("Consumer Name or ARN were not specified with enhanced fan-out enabled") + return errors.New("Consumer Name or ARN must be specified when enhanced fan-out is enabled") + } + } + err := w.mService.Init(w.kclConfig.ApplicationName, w.streamName, w.workerID) if err != nil { log.Errorf("Failed to start monitoring service: %+v", err) @@ -204,19 +223,32 @@ func (w *Worker) initialize() error 
{ return nil } -// newShardConsumer to create a shard consumer instance -func (w *Worker) newShardConsumer(shard *par.ShardStatus) *ShardConsumer { - return &ShardConsumer{ - streamName: w.streamName, +// newShardConsumer creates shard consumer for the specified shard +func (w *Worker) newShardConsumer(shard *par.ShardStatus) shardConsumer { + common := commonShardConsumer{ shard: shard, kc: w.kc, checkpointer: w.checkpointer, recordProcessor: w.processorFactory.CreateProcessor(), kclConfig: w.kclConfig, - consumerID: w.workerID, - stop: w.stop, mService: w.mService, - state: WaitingOnParentShards, + } + if w.kclConfig.EnableEnhancedFanOutConsumer { + w.kclConfig.Logger.Infof("Start enhanced fan-out shard consumer for shard: %v", shard.ID) + return &FanOutShardConsumer{ + commonShardConsumer: common, + consumerARN: w.consumerARN, + consumerID: w.workerID, + stop: w.stop, + } + } + w.kclConfig.Logger.Infof("Start polling shard consumer for shard: %v", shard.ID) + return &PollingShardConsumer{ + commonShardConsumer: common, + streamName: w.streamName, + consumerID: w.workerID, + stop: w.stop, + mService: w.mService, } } @@ -230,7 +262,7 @@ func (w *Worker) eventLoop() { // starts at the same time, this decreases the probability of them calling // kinesis.DescribeStream at the same time, and hit the hard-limit on aws API calls. // On average the period remains the same so that doesn't affect behavior. 
- shardSyncSleep := w.kclConfig.ShardSyncIntervalMillis/2 + w.rng.Intn(int(w.kclConfig.ShardSyncIntervalMillis)) + shardSyncSleep := w.kclConfig.ShardSyncIntervalMillis/2 + w.rng.Intn(w.kclConfig.ShardSyncIntervalMillis) err := w.syncShard() if err != nil { @@ -247,7 +279,7 @@ func (w *Worker) eventLoop() { // Count the number of leases hold by this worker excluding the processed shard counter := 0 for _, shard := range w.shardStatus { - if shard.GetLeaseOwner() == w.workerID && shard.Checkpoint != chk.ShardEnd { + if shard.GetLeaseOwner() == w.workerID && shard.GetCheckpoint() != chk.ShardEnd { counter++ } } @@ -271,7 +303,7 @@ func (w *Worker) eventLoop() { } // The shard is closed and we have processed all records - if shard.Checkpoint == chk.ShardEnd { + if shard.GetCheckpoint() == chk.ShardEnd { continue } @@ -286,16 +318,13 @@ func (w *Worker) eventLoop() { // log metrics on got lease w.mService.LeaseGained(shard.ID) - - log.Infof("Start Shard Consumer for shard: %v", shard.ID) - sc := w.newShardConsumer(shard) w.waitGroup.Add(1) - go func() { + go func(shard *par.ShardStatus) { defer w.waitGroup.Done() - if err := sc.getRecords(shard); err != nil { + if err := w.newShardConsumer(shard).getRecords(); err != nil { log.Errorf("Error in getRecords: %+v", err) } - }() + }(shard) // exit from for loop and not to grab more shard for now. 
break } @@ -341,7 +370,7 @@ func (w *Worker) getShardIDs(nextToken string, shardInfo map[string]bool) error w.shardStatus[*s.ShardId] = &par.ShardStatus{ ID: *s.ShardId, ParentShardId: aws.StringValue(s.ParentShardId), - Mux: &sync.Mutex{}, + Mux: &sync.RWMutex{}, StartingSequenceNumber: aws.StringValue(s.SequenceNumberRange.StartingSequenceNumber), EndingSequenceNumber: aws.StringValue(s.SequenceNumberRange.EndingSequenceNumber), } diff --git a/test/worker_custom_test.go b/test/worker_custom_test.go index 85330c1..ef48491 100644 --- a/test/worker_custom_test.go +++ b/test/worker_custom_test.go @@ -27,10 +27,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/kinesis" - log "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" @@ -74,7 +73,7 @@ func TestWorkerInjectCheckpointer(t *testing.T) { // verify the checkpointer after graceful shutdown status := &par.ShardStatus{ ID: shardID, - Mux: &sync.Mutex{}, + Mux: &sync.RWMutex{}, } checkpointer.FetchCheckpoint(status) diff --git a/test/worker_test.go b/test/worker_test.go index e79f115..2e2e784 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -31,6 +31,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/prometheus/common/expfmt" "github.com/stretchr/testify/assert" + cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/cloudwatch" @@ -41,9 +42,11 @@ import ( ) const ( - streamName = "kcl-test" - regionName = "us-west-2" - workerID = "test-worker" + appName = "appName" + streamName = "kcl-test" + regionName = "us-west-2" + workerID = "test-worker" + consumerName = "enhanced-fan-out-consumer" ) const 
metricsSystem = "cloudwatch" @@ -67,7 +70,7 @@ func TestWorker(t *testing.T) { // Use logrus logger log := logger.NewLogrusLoggerWithConfig(config) - kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(8). WithMaxLeasesForWorker(1). @@ -89,7 +92,7 @@ func TestWorkerWithTimestamp(t *testing.T) { log := logger.NewLogrusLoggerWithConfig(config) ts := time.Now().Add(time.Second * 5) - kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID). WithTimestampAtInitialPositionInStream(&ts). WithMaxRecords(10). WithMaxLeasesForWorker(1). @@ -119,7 +122,7 @@ func TestWorkerWithSigInt(t *testing.T) { // use zap logger log := zaplogger.NewZapLoggerWithConfig(config) - kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). WithMaxLeasesForWorker(1). @@ -137,7 +140,7 @@ func TestWorkerStatic(t *testing.T) { // Note: use empty string as SessionToken for long-term credentials. creds := credentials.NewStaticCredentials("AccessKeyId", "SecretAccessKey", "SessionToken") - kclConfig := cfg.NewKinesisClientLibConfigWithCredential("appName", streamName, regionName, workerID, creds). + kclConfig := cfg.NewKinesisClientLibConfigWithCredential(appName, streamName, regionName, workerID, creds). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). WithMaxLeasesForWorker(1). @@ -159,7 +162,7 @@ func TestWorkerAssumeRole(t *testing.T) { // referenced by the "myRoleARN" ARN. 
creds := stscreds.NewCredentials(sess, "arn:aws:iam::*:role/kcl-test-publisher") - kclConfig := cfg.NewKinesisClientLibConfigWithCredential("appName", streamName, regionName, workerID, creds). + kclConfig := cfg.NewKinesisClientLibConfigWithCredential(appName, streamName, regionName, workerID, creds). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). WithMaxLeasesForWorker(1). @@ -169,6 +172,67 @@ func TestWorkerAssumeRole(t *testing.T) { runTest(kclConfig, false, t) } +func TestEnhancedFanOutConsumer(t *testing.T) { + // At miminal, use standard logrus logger + // log := logger.NewLogrusLogger(logrus.StandardLogger()) + // + // In order to have precise control over logging. Use logger with config + config := logger.Configuration{ + EnableConsole: true, + ConsoleLevel: logger.Debug, + ConsoleJSONFormat: false, + EnableFile: true, + FileLevel: logger.Info, + FileJSONFormat: true, + Filename: "log.log", + } + // Use logrus logger + log := logger.NewLogrusLoggerWithConfig(config) + + kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID). + WithInitialPositionInStream(cfg.LATEST). + WithEnhancedFanOutConsumer(consumerName). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithLogger(log) + + runTest(kclConfig, false, t) +} + +func TestEnhancedFanOutConsumerARN(t *testing.T) { + t.Skip("Need to provide actual consumerARN") + + consumerARN := "arn:aws:kinesis:*:stream/kcl-test/consumer/fanout-poc-consumer-test:*" + // At miminal, use standard logrus logger + // log := logger.NewLogrusLogger(logrus.StandardLogger()) + // + // In order to have precise control over logging. 
Use logger with config + config := logger.Configuration{ + EnableConsole: true, + ConsoleLevel: logger.Debug, + ConsoleJSONFormat: false, + EnableFile: true, + FileLevel: logger.Info, + FileJSONFormat: true, + Filename: "log.log", + } + // Use logrus logger + log := logger.NewLogrusLoggerWithConfig(config) + + kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID). + WithInitialPositionInStream(cfg.LATEST). + WithEnhancedFanOutConsumerARN(consumerARN). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithLogger(log) + + runTest(kclConfig, false, t) +} + func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *testing.T) { assert.Equal(t, regionName, kclConfig.RegionName) assert.Equal(t, streamName, kclConfig.StreamName) From 4a642bfa2f5a8ae31608830ac394901dbca500a7 Mon Sep 17 00:00:00 2001 From: Ilia Cimpoes Date: Thu, 29 Apr 2021 05:19:12 +0300 Subject: [PATCH 69/90] Use application name as default enhanced fan-out consumer name (#91) * Use ApplicationName as default for EnhancedFanOutConsumerName Signed-off-by: Ilia Cimpoes * Add tests Signed-off-by: Ilia Cimpoes --- clientlibrary/config/config.go | 2 +- clientlibrary/config/config_test.go | 12 +++++++++-- clientlibrary/config/kcl-config.go | 13 ++++++++++-- clientlibrary/worker/worker.go | 9 ++------- test/worker_test.go | 31 ++++++++++++++++++++++++++++- 5 files changed, 54 insertions(+), 13 deletions(-) diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index 2170d44..f8102eb 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -175,7 +175,7 @@ type ( // Either consumer name or consumer ARN must be specified when Enhanced Fan-Out is enabled. EnableEnhancedFanOutConsumer bool - // EnhancedFanOutConsumerName is the name of the enhanced fan-out consumer to create. 
+ // EnhancedFanOutConsumerName is the name of the enhanced fan-out consumer to create. If this isn't set the ApplicationName will be used. EnhancedFanOutConsumerName string // EnhancedFanOutConsumerARN is the ARN of an already created enhanced fan-out consumer, if this is set no automatic consumer creation will be attempted diff --git a/clientlibrary/config/config_test.go b/clientlibrary/config/config_test.go index 576042c..c02dfab 100644 --- a/clientlibrary/config/config_test.go +++ b/clientlibrary/config/config_test.go @@ -34,7 +34,7 @@ func TestConfig(t *testing.T) { WithIdleTimeBetweenReadsInMillis(20). WithCallProcessRecordsEvenForEmptyRecordList(true). WithTaskBackoffTimeMillis(10). - WithEnhancedFanOutConsumer("fan-out-consumer") + WithEnhancedFanOutConsumerName("fan-out-consumer") assert.Equal(t, "appName", kclConfig.ApplicationName) assert.Equal(t, 500, kclConfig.FailoverTimeMillis) @@ -47,9 +47,17 @@ func TestConfig(t *testing.T) { contextLogger.Infof("Default logger is awesome") } +func TestConfigDefaultEnhancedFanOutConsumerName(t *testing.T) { + kclConfig := NewKinesisClientLibConfig("appName", "StreamName", "us-west-2", "workerId") + + assert.Equal(t, "appName", kclConfig.ApplicationName) + assert.False(t, kclConfig.EnableEnhancedFanOutConsumer) + assert.Equal(t, "appName", kclConfig.EnhancedFanOutConsumerName) +} + func TestEmptyEnhancedFanOutConsumerName(t *testing.T) { assert.PanicsWithValue(t, "Non-empty value expected for EnhancedFanOutConsumerName, actual: ", func() { - NewKinesisClientLibConfig("app", "stream", "us-west-2", "worker").WithEnhancedFanOutConsumer("") + NewKinesisClientLibConfig("app", "stream", "us-west-2", "worker").WithEnhancedFanOutConsumerName("") }) } diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index 810e4c9..91f39b7 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -73,6 +73,7 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, 
streamName, regio KinesisCredentials: kiniesisCreds, DynamoDBCredentials: dynamodbCreds, TableName: applicationName, + EnhancedFanOutConsumerName: applicationName, StreamName: streamName, RegionName: regionName, WorkerID: workerID, @@ -213,10 +214,18 @@ func (c *KinesisClientLibConfiguration) WithMonitoringService(mService metrics.M return c } -// WithEnhancedFanOutConsumer enables enhanced fan-out consumer with the specified name +// WithEnhancedFanOutConsumer sets EnableEnhancedFanOutConsumer. If enhanced fan-out is enabled and ConsumerName is not specified ApplicationName is used as ConsumerName. // For more info see: https://docs.aws.amazon.com/streams/latest/dev/enhanced-consumers.html // Note: You can register up to twenty consumers per stream to use enhanced fan-out. -func (c *KinesisClientLibConfiguration) WithEnhancedFanOutConsumer(consumerName string) *KinesisClientLibConfiguration { +func (c *KinesisClientLibConfiguration) WithEnhancedFanOutConsumer(enable bool) *KinesisClientLibConfiguration { + c.EnableEnhancedFanOutConsumer = enable + return c +} + +// WithEnhancedFanOutConsumerName enables enhanced fan-out consumer with the specified name +// For more info see: https://docs.aws.amazon.com/streams/latest/dev/enhanced-consumers.html +// Note: You can register up to twenty consumers per stream to use enhanced fan-out. 
+func (c *KinesisClientLibConfiguration) WithEnhancedFanOutConsumerName(consumerName string) *KinesisClientLibConfiguration { checkIsValueNotEmpty("EnhancedFanOutConsumerName", consumerName) c.EnhancedFanOutConsumerName = consumerName c.EnableEnhancedFanOutConsumer = true diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 4d4bc06..0ab4d17 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -184,19 +184,14 @@ func (w *Worker) initialize() error { if w.kclConfig.EnableEnhancedFanOutConsumer { log.Debugf("Enhanced fan-out is enabled") - switch { - case w.kclConfig.EnhancedFanOutConsumerARN != "": - w.consumerARN = w.kclConfig.EnhancedFanOutConsumerARN - case w.kclConfig.EnhancedFanOutConsumerName != "": + w.consumerARN = w.kclConfig.EnhancedFanOutConsumerARN + if w.consumerARN == "" { var err error w.consumerARN, err = w.fetchConsumerARNWithRetry() if err != nil { log.Errorf("Failed to fetch consumer ARN for: %s, %v", w.kclConfig.EnhancedFanOutConsumerName, err) return err } - default: - log.Errorf("Consumer Name or ARN were not specified with enhanced fan-out enabled") - return errors.New("Consumer Name or ARN must be specified when enhanced fan-out is enabled") } } diff --git a/test/worker_test.go b/test/worker_test.go index 2e2e784..b9f9a32 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -191,7 +191,36 @@ func TestEnhancedFanOutConsumer(t *testing.T) { kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). - WithEnhancedFanOutConsumer(consumerName). + WithEnhancedFanOutConsumerName(consumerName). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). 
+ WithLogger(log) + + runTest(kclConfig, false, t) +} + +func TestEnhancedFanOutConsumerDefaultConsumerName(t *testing.T) { + // At miminal, use standard logrus logger + // log := logger.NewLogrusLogger(logrus.StandardLogger()) + // + // In order to have precise control over logging. Use logger with config + config := logger.Configuration{ + EnableConsole: true, + ConsoleLevel: logger.Debug, + ConsoleJSONFormat: false, + EnableFile: true, + FileLevel: logger.Info, + FileJSONFormat: true, + Filename: "log.log", + } + // Use logrus logger + log := logger.NewLogrusLoggerWithConfig(config) + + kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID). + WithInitialPositionInStream(cfg.LATEST). + WithEnhancedFanOutConsumer(true). WithMaxRecords(10). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). From 7de4607b711a9af5096719b1c5cacc8ff6420cf1 Mon Sep 17 00:00:00 2001 From: Connor McKelvey Date: Tue, 1 Jun 2021 17:18:26 -0600 Subject: [PATCH 70/90] Add support for lease stealing (#78) Fixes #4 Signed-off-by: Connor McKelvey Signed-off-by: Ali Hobbs Co-authored-by: Ali Hobbs Co-authored-by: Ali Hobbs --- HyperMake | 4 +- clientlibrary/checkpoint/checkpointer.go | 14 + .../checkpoint/dynamodb-checkpointer.go | 192 +++++++++- .../checkpoint/dynamodb-checkpointer_test.go | 355 +++++++++++++++++- clientlibrary/config/config.go | 24 ++ clientlibrary/config/config_test.go | 26 ++ clientlibrary/config/kcl-config.go | 21 +- clientlibrary/partition/partition.go | 24 ++ .../worker/polling-shard-consumer.go | 2 +- clientlibrary/worker/worker.go | 115 +++++- support/toolchain/docker/Dockerfile | 2 +- test/lease_stealing_util_test.go | 230 ++++++++++++ test/logger_test.go | 3 +- test/record_processor_test.go | 3 +- test/record_publisher_test.go | 88 ++++- test/worker_custom_test.go | 38 +- test/worker_lease_stealing_test.go | 127 +++++++ test/worker_test.go | 15 +- 18 files changed, 1233 insertions(+), 50 deletions(-) create mode 100644 
test/lease_stealing_util_test.go create mode 100644 test/worker_lease_stealing_test.go diff --git a/HyperMake b/HyperMake index f444947..7ca3d06 100644 --- a/HyperMake +++ b/HyperMake @@ -8,8 +8,8 @@ targets: rebuild-toolchain: description: build toolchain image watches: - - support/docker/toolchain - build: support/docker/toolchain + - support/toolchain/docker + build: support/toolchain/docker toolchain: description: placeholder for additional toolchain dependencies diff --git a/clientlibrary/checkpoint/checkpointer.go b/clientlibrary/checkpoint/checkpointer.go index fe91359..4d4ceaa 100644 --- a/clientlibrary/checkpoint/checkpointer.go +++ b/clientlibrary/checkpoint/checkpointer.go @@ -40,9 +40,13 @@ const ( LeaseTimeoutKey = "LeaseTimeout" SequenceNumberKey = "Checkpoint" ParentShardIdKey = "ParentShardId" + ClaimRequestKey = "ClaimRequest" // We've completely processed all records in this shard. ShardEnd = "SHARD_END" + + // ErrShardClaimed is returned when shard is claimed + ErrShardClaimed = "Shard is already claimed by another node" ) type ErrLeaseNotAcquired struct { @@ -72,7 +76,17 @@ type Checkpointer interface { // RemoveLeaseOwner to remove lease owner for the shard entry to make the shard available for reassignment RemoveLeaseOwner(string) error + + // New Lease Stealing Methods + // ListActiveWorkers returns active workers and their shards + ListActiveWorkers(map[string]*par.ShardStatus) (map[string][]*par.ShardStatus, error) + + // ClaimShard claims a shard for stealing + ClaimShard(*par.ShardStatus, string) error } // ErrSequenceIDNotFound is returned by FetchCheckpoint when no SequenceID is found var ErrSequenceIDNotFound = errors.New("SequenceIDNotFoundForShard") + +// ErrShardNotAssigned is returned by ListActiveWorkers when no AssignedTo is found +var ErrShardNotAssigned = errors.New("AssignedToNotFoundForShard") diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 
dd8dd55..8df5e37 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -28,6 +28,8 @@ package checkpoint import ( + "errors" + "fmt" "time" "github.com/aws/aws-sdk-go/aws" @@ -61,6 +63,7 @@ type DynamoCheckpoint struct { svc dynamodbiface.DynamoDBAPI kclConfig *config.KinesisClientLibConfiguration Retries int + lastLeaseSync time.Time } func NewDynamoCheckpoint(kclConfig *config.KinesisClientLibConfiguration) *DynamoCheckpoint { @@ -124,8 +127,22 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign return err } + isClaimRequestExpired := shard.IsClaimRequestExpired(checkpointer.kclConfig) + + var claimRequest string + if checkpointer.kclConfig.EnableLeaseStealing { + if currentCheckpointClaimRequest, ok := currentCheckpoint[ClaimRequestKey]; ok && currentCheckpointClaimRequest.S != nil { + claimRequest = *currentCheckpointClaimRequest.S + if newAssignTo != claimRequest && !isClaimRequestExpired { + checkpointer.log.Debugf("another worker: %s has a claim on this shard. 
Not going to renew the lease", claimRequest) + return errors.New(ErrShardClaimed) + } + } + } + assignedVar, assignedToOk := currentCheckpoint[LeaseOwnerKey] leaseVar, leaseTimeoutOk := currentCheckpoint[LeaseTimeoutKey] + var conditionalExpression string var expressionAttributeValues map[string]*dynamodb.AttributeValue @@ -140,8 +157,14 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign return err } - if time.Now().UTC().Before(currentLeaseTimeout) && assignedTo != newAssignTo { - return ErrLeaseNotAcquired{"current lease timeout not yet expired"} + if checkpointer.kclConfig.EnableLeaseStealing { + if time.Now().UTC().Before(currentLeaseTimeout) && assignedTo != newAssignTo && !isClaimRequestExpired { + return ErrLeaseNotAcquired{"current lease timeout not yet expired"} + } + } else { + if time.Now().UTC().Before(currentLeaseTimeout) && assignedTo != newAssignTo { + return ErrLeaseNotAcquired{"current lease timeout not yet expired"} + } } checkpointer.log.Debugf("Attempting to get a lock for shard: %s, leaseTimeout: %s, assignedTo: %s, newAssignedTo: %s", shard.ID, currentLeaseTimeout, assignedTo, newAssignTo) @@ -175,9 +198,21 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign marshalledCheckpoint[ParentShardIdKey] = &dynamodb.AttributeValue{S: aws.String(shard.ParentShardId)} } - if shard.GetCheckpoint() != "" { + if checkpoint := shard.GetCheckpoint(); checkpoint != "" { marshalledCheckpoint[SequenceNumberKey] = &dynamodb.AttributeValue{ - S: aws.String(shard.GetCheckpoint()), + S: aws.String(checkpoint), + } + } + + if checkpointer.kclConfig.EnableLeaseStealing { + if claimRequest != "" && claimRequest == newAssignTo && !isClaimRequestExpired { + if expressionAttributeValues == nil { + expressionAttributeValues = make(map[string]*dynamodb.AttributeValue) + } + conditionalExpression = conditionalExpression + " AND ClaimRequest = :claim_request" + expressionAttributeValues[":claim_request"] = 
&dynamodb.AttributeValue{ + S: &claimRequest, + } } } @@ -199,7 +234,7 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign // CheckpointSequence writes a checkpoint at the designated sequence ID func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard *par.ShardStatus) error { - leaseTimeout := shard.LeaseTimeout.UTC().Format(time.RFC3339) + leaseTimeout := shard.GetLeaseTimeout().UTC().Format(time.RFC3339) marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ LeaseKeyKey: { S: aws.String(shard.ID), @@ -208,7 +243,7 @@ func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard *par.ShardStatus) S: aws.String(shard.GetCheckpoint()), }, LeaseOwnerKey: { - S: aws.String(shard.AssignedTo), + S: aws.String(shard.GetLeaseOwner()), }, LeaseTimeoutKey: { S: aws.String(leaseTimeout), @@ -239,6 +274,16 @@ func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *par.ShardStatus) er if assignedTo, ok := checkpoint[LeaseOwnerKey]; ok { shard.SetLeaseOwner(aws.StringValue(assignedTo.S)) } + + // Use up-to-date leaseTimeout to avoid ConditionalCheckFailedException when claiming + if leaseTimeout, ok := checkpoint[LeaseTimeoutKey]; ok && leaseTimeout.S != nil { + currentLeaseTimeout, err := time.Parse(time.RFC3339, aws.StringValue(leaseTimeout.S)) + if err != nil { + return err + } + shard.LeaseTimeout = currentLeaseTimeout + } + return nil } @@ -265,6 +310,12 @@ func (checkpointer *DynamoCheckpoint) RemoveLeaseOwner(shardID string) error { }, }, UpdateExpression: aws.String("remove " + LeaseOwnerKey), + ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ + ":assigned_to": { + S: aws.String(checkpointer.kclConfig.WorkerID), + }, + }, + ConditionExpression: aws.String("AssignedTo = :assigned_to"), } _, err := checkpointer.svc.UpdateItem(input) @@ -272,6 +323,135 @@ func (checkpointer *DynamoCheckpoint) RemoveLeaseOwner(shardID string) error { return err } +// ListActiveWorkers returns a map of workers and their shards 
+func (checkpointer *DynamoCheckpoint) ListActiveWorkers(shardStatus map[string]*par.ShardStatus) (map[string][]*par.ShardStatus, error) { + err := checkpointer.syncLeases(shardStatus) + if err != nil { + return nil, err + } + + workers := map[string][]*par.ShardStatus{} + for _, shard := range shardStatus { + if shard.GetCheckpoint() == ShardEnd { + continue + } + + leaseOwner := shard.GetLeaseOwner() + if leaseOwner == "" { + checkpointer.log.Debugf("Shard Not Assigned Error. ShardID: %s, WorkerID: %s", shard.ID, checkpointer.kclConfig.WorkerID) + return nil, ErrShardNotAssigned + } + if w, ok := workers[leaseOwner]; ok { + workers[leaseOwner] = append(w, shard) + } else { + workers[leaseOwner] = []*par.ShardStatus{shard} + } + } + return workers, nil +} + +// ClaimShard places a claim request on a shard to signal a steal attempt +func (checkpointer *DynamoCheckpoint) ClaimShard(shard *par.ShardStatus, claimID string) error { + err := checkpointer.FetchCheckpoint(shard) + if err != nil && err != ErrSequenceIDNotFound { + return err + } + leaseTimeoutString := shard.GetLeaseTimeout().Format(time.RFC3339) + + conditionalExpression := `ShardID = :id AND LeaseTimeout = :lease_timeout AND attribute_not_exists(ClaimRequest)` + expressionAttributeValues := map[string]*dynamodb.AttributeValue{ + ":id": { + S: aws.String(shard.ID), + }, + ":lease_timeout": { + S: aws.String(leaseTimeoutString), + }, + } + + marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ + LeaseKeyKey: { + S: &shard.ID, + }, + LeaseTimeoutKey: { + S: &leaseTimeoutString, + }, + SequenceNumberKey: { + S: &shard.Checkpoint, + }, + ClaimRequestKey: { + S: &claimID, + }, + } + + if leaseOwner := shard.GetLeaseOwner(); leaseOwner == "" { + conditionalExpression += " AND attribute_not_exists(AssignedTo)" + } else { + marshalledCheckpoint[LeaseOwnerKey] = &dynamodb.AttributeValue{S: &leaseOwner} + conditionalExpression += "AND AssignedTo = :assigned_to" + expressionAttributeValues[":assigned_to"] = 
&dynamodb.AttributeValue{S: &leaseOwner} + } + + if checkpoint := shard.GetCheckpoint(); checkpoint == "" { + conditionalExpression += " AND attribute_not_exists(Checkpoint)" + } else if checkpoint == ShardEnd { + conditionalExpression += " AND Checkpoint <> :checkpoint" + expressionAttributeValues[":checkpoint"] = &dynamodb.AttributeValue{S: aws.String(ShardEnd)} + } else { + conditionalExpression += " AND Checkpoint = :checkpoint" + expressionAttributeValues[":checkpoint"] = &dynamodb.AttributeValue{S: &checkpoint} + } + + if shard.ParentShardId == "" { + conditionalExpression += " AND attribute_not_exists(ParentShardId)" + } else { + marshalledCheckpoint[ParentShardIdKey] = &dynamodb.AttributeValue{S: aws.String(shard.ParentShardId)} + conditionalExpression += " AND ParentShardId = :parent_shard" + expressionAttributeValues[":parent_shard"] = &dynamodb.AttributeValue{S: &shard.ParentShardId} + } + + return checkpointer.conditionalUpdate(conditionalExpression, expressionAttributeValues, marshalledCheckpoint) +} + +func (checkpointer *DynamoCheckpoint) syncLeases(shardStatus map[string]*par.ShardStatus) error { + log := checkpointer.kclConfig.Logger + + if (checkpointer.lastLeaseSync.Add(time.Duration(checkpointer.kclConfig.LeaseSyncingTimeIntervalMillis) * time.Millisecond)).After(time.Now()) { + return nil + } + + checkpointer.lastLeaseSync = time.Now() + input := &dynamodb.ScanInput{ + ProjectionExpression: aws.String(fmt.Sprintf("%s,%s,%s", LeaseKeyKey, LeaseOwnerKey, SequenceNumberKey)), + Select: aws.String("SPECIFIC_ATTRIBUTES"), + TableName: aws.String(checkpointer.kclConfig.TableName), + } + + err := checkpointer.svc.ScanPages(input, + func(pages *dynamodb.ScanOutput, lastPage bool) bool { + results := pages.Items + for _, result := range results { + shardId, foundShardId := result[LeaseKeyKey] + assignedTo, foundAssignedTo := result[LeaseOwnerKey] + checkpoint, foundCheckpoint := result[SequenceNumberKey] + if !foundShardId || !foundAssignedTo || 
!foundCheckpoint { + continue + } + if shard, ok := shardStatus[aws.StringValue(shardId.S)]; ok { + shard.SetLeaseOwner(aws.StringValue(assignedTo.S)) + shard.SetCheckpoint(aws.StringValue(checkpoint.S)) + } + } + return !lastPage + }) + + if err != nil { + log.Debugf("Error performing SyncLeases. Error: %+v ", err) + return err + } + log.Debugf("Lease sync completed. Next lease sync will occur in %s", time.Duration(checkpointer.kclConfig.LeaseSyncingTimeIntervalMillis)*time.Millisecond) + return nil +} + func (checkpointer *DynamoCheckpoint) createTable() error { input := &dynamodb.CreateTableInput{ AttributeDefinitions: []*dynamodb.AttributeDefinition{ diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go index 2217b0e..38da0b3 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go @@ -85,6 +85,7 @@ func TestGetLeaseNotAquired(t *testing.T) { Checkpoint: "", Mux: &sync.RWMutex{}, }, "ijkl-mnop") + if err == nil || !errors.As(err, &ErrLeaseNotAcquired{}) { t.Errorf("Got a lease when it was already held by abcd-efgh: %s", err) } @@ -102,16 +103,16 @@ func TestGetLeaseAquired(t *testing.T) { checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) checkpoint.Init() marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ - "ShardID": { + LeaseKeyKey: { S: aws.String("0001"), }, - "AssignedTo": { + LeaseOwnerKey: { S: aws.String("abcd-efgh"), }, - "LeaseTimeout": { + LeaseTimeoutKey: { S: aws.String(time.Now().AddDate(0, -1, 0).UTC().Format(time.RFC3339)), }, - "SequenceID": { + SequenceNumberKey: { S: aws.String("deadbeef"), }, } @@ -156,10 +157,221 @@ func TestGetLeaseAquired(t *testing.T) { assert.Equal(t, "", status.GetLeaseOwner()) } +func TestGetLeaseShardClaimed(t *testing.T) { + leaseTimeout := time.Now().Add(-100 * time.Second).UTC() + svc := &mockDynamoDB{ + tableExist: true, + item: 
map[string]*dynamodb.AttributeValue{ + ClaimRequestKey: {S: aws.String("ijkl-mnop")}, + LeaseTimeoutKey: {S: aws.String(leaseTimeout.Format(time.RFC3339))}, + }, + } + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithLeaseStealing(true) + + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) + checkpoint.Init() + err := checkpoint.GetLease(&par.ShardStatus{ + ID: "0001", + Checkpoint: "", + LeaseTimeout: leaseTimeout, + Mux: &sync.RWMutex{}, + }, "abcd-efgh") + if err == nil || err.Error() != ErrShardClaimed { + t.Errorf("Got a lease when it was already claimed by by ijkl-mnop: %s", err) + } + + err = checkpoint.GetLease(&par.ShardStatus{ + ID: "0001", + Checkpoint: "", + LeaseTimeout: leaseTimeout, + Mux: &sync.RWMutex{}, + }, "ijkl-mnop") + if err != nil { + t.Errorf("Error getting lease %s", err) + } +} + +func TestGetLeaseClaimRequestExpiredOwner(t *testing.T) { + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithLeaseStealing(true) + + // Not expired + leaseTimeout := time.Now(). + Add(-time.Duration(kclConfig.LeaseStealingClaimTimeoutMillis) * time.Millisecond). + Add(1 * time.Second). 
+ UTC() + + svc := &mockDynamoDB{ + tableExist: true, + item: map[string]*dynamodb.AttributeValue{ + LeaseOwnerKey: {S: aws.String("abcd-efgh")}, + ClaimRequestKey: {S: aws.String("ijkl-mnop")}, + LeaseTimeoutKey: {S: aws.String(leaseTimeout.Format(time.RFC3339))}, + }, + } + + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) + checkpoint.Init() + err := checkpoint.GetLease(&par.ShardStatus{ + ID: "0001", + Checkpoint: "", + LeaseTimeout: leaseTimeout, + Mux: &sync.RWMutex{}, + }, "abcd-efgh") + if err == nil || err.Error() != ErrShardClaimed { + t.Errorf("Got a lease when it was already claimed by ijkl-mnop: %s", err) + } +} + +func TestGetLeaseClaimRequestExpiredClaimer(t *testing.T) { + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithLeaseStealing(true) + + // Not expired + leaseTimeout := time.Now(). + Add(-time.Duration(kclConfig.LeaseStealingClaimTimeoutMillis) * time.Millisecond). + Add(121 * time.Second). 
+ UTC() + + svc := &mockDynamoDB{ + tableExist: true, + item: map[string]*dynamodb.AttributeValue{ + LeaseOwnerKey: {S: aws.String("abcd-efgh")}, + ClaimRequestKey: {S: aws.String("ijkl-mnop")}, + LeaseTimeoutKey: {S: aws.String(leaseTimeout.Format(time.RFC3339))}, + }, + } + + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) + checkpoint.Init() + err := checkpoint.GetLease(&par.ShardStatus{ + ID: "0001", + Checkpoint: "", + LeaseTimeout: leaseTimeout, + Mux: &sync.RWMutex{}, + }, "ijkl-mnop") + if err == nil || !errors.As(err, &ErrLeaseNotAcquired{}) { + t.Errorf("Got a lease when it was already claimed by ijkl-mnop: %s", err) + } +} + +func TestFetchCheckpointWithStealing(t *testing.T) { + future := time.Now().AddDate(0, 1, 0) + + svc := &mockDynamoDB{ + tableExist: true, + item: map[string]*dynamodb.AttributeValue{ + SequenceNumberKey: {S: aws.String("deadbeef")}, + LeaseOwnerKey: {S: aws.String("abcd-efgh")}, + LeaseTimeoutKey: { + S: aws.String(future.Format(time.RFC3339)), + }, + }, + } + + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithLeaseStealing(true) + + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) + checkpoint.Init() + + status := &par.ShardStatus{ + ID: "0001", + Checkpoint: "", + LeaseTimeout: time.Now(), + Mux: &sync.RWMutex{}, + } + + checkpoint.FetchCheckpoint(status) + + leaseTimeout, _ := time.Parse(time.RFC3339, *svc.item[LeaseTimeoutKey].S) + assert.Equal(t, leaseTimeout, status.LeaseTimeout) +} + +func TestGetLeaseConditional(t *testing.T) { + svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). 
+ WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithLeaseStealing(true) + + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) + checkpoint.Init() + marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ + LeaseKeyKey: { + S: aws.String("0001"), + }, + LeaseOwnerKey: { + S: aws.String("abcd-efgh"), + }, + LeaseTimeoutKey: { + S: aws.String(time.Now().Add(-1 * time.Second).UTC().Format(time.RFC3339)), + }, + SequenceNumberKey: { + S: aws.String("deadbeef"), + }, + ClaimRequestKey: { + S: aws.String("ijkl-mnop"), + }, + } + input := &dynamodb.PutItemInput{ + TableName: aws.String("TableName"), + Item: marshalledCheckpoint, + } + checkpoint.svc.PutItem(input) + shard := &par.ShardStatus{ + ID: "0001", + Checkpoint: "deadbeef", + ClaimRequest: "ijkl-mnop", + Mux: &sync.RWMutex{}, + } + err := checkpoint.FetchCheckpoint(shard) + if err != nil { + t.Errorf("Could not fetch checkpoint %s", err) + } + + err = checkpoint.GetLease(shard, "ijkl-mnop") + if err != nil { + t.Errorf("Lease not aquired after timeout %s", err) + } + assert.Equal(t, *svc.expressionAttributeValues[":claim_request"].S, "ijkl-mnop") + assert.Contains(t, svc.conditionalExpression, " AND ClaimRequest = :claim_request") +} + type mockDynamoDB struct { dynamodbiface.DynamoDBAPI - tableExist bool - item map[string]*dynamodb.AttributeValue + tableExist bool + item map[string]*dynamodb.AttributeValue + conditionalExpression string + expressionAttributeValues map[string]*dynamodb.AttributeValue +} + +func (m *mockDynamoDB) ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error { + return nil } func (m *mockDynamoDB) DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error) { @@ -192,6 +404,16 @@ func (m *mockDynamoDB) PutItem(input *dynamodb.PutItemInput) (*dynamodb.PutItemO m.item[ParentShardIdKey] = parent } + if claimRequest, ok := item[ClaimRequestKey]; ok { + m.item[ClaimRequestKey] = claimRequest + } + + if 
input.ConditionExpression != nil { + m.conditionalExpression = *input.ConditionExpression + } + + m.expressionAttributeValues = input.ExpressionAttributeValues + return nil, nil } @@ -214,3 +436,124 @@ func (m *mockDynamoDB) UpdateItem(input *dynamodb.UpdateItemInput) (*dynamodb.Up func (m *mockDynamoDB) CreateTable(input *dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) { return &dynamodb.CreateTableOutput{}, nil } + +func TestListActiveWorkers(t *testing.T) { + svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithLeaseStealing(true) + + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) + err := checkpoint.Init() + if err != nil { + t.Errorf("Checkpoint initialization failed: %+v", err) + } + + shardStatus := map[string]*par.ShardStatus{ + "0000": {ID: "0000", AssignedTo: "worker_1", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0001": {ID: "0001", AssignedTo: "worker_2", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0002": {ID: "0002", AssignedTo: "worker_4", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0003": {ID: "0003", AssignedTo: "worker_0", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0004": {ID: "0004", AssignedTo: "worker_1", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0005": {ID: "0005", AssignedTo: "worker_3", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0006": {ID: "0006", AssignedTo: "worker_3", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0007": {ID: "0007", AssignedTo: "worker_0", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0008": {ID: "0008", AssignedTo: "worker_4", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0009": {ID: "0009", AssignedTo: "worker_2", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0010": {ID: "0010", AssignedTo: "worker_0", Checkpoint: ShardEnd, Mux: &sync.RWMutex{}}, + } + + workers, err := checkpoint.ListActiveWorkers(shardStatus) + if err != nil { + t.Error(err) + } + + for workerID, shards := range workers { + 
assert.Equal(t, 2, len(shards)) + for _, shard := range shards { + assert.Equal(t, workerID, shard.AssignedTo) + } + } +} + +func TestListActiveWorkersErrShardNotAssigned(t *testing.T) { + svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithLeaseStealing(true) + + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) + err := checkpoint.Init() + if err != nil { + t.Errorf("Checkpoint initialization failed: %+v", err) + } + + shardStatus := map[string]*par.ShardStatus{ + "0000": {ID: "0000", Mux: &sync.RWMutex{}}, + } + + _, err = checkpoint.ListActiveWorkers(shardStatus) + if err != ErrShardNotAssigned { + t.Error("Expected ErrShardNotAssigned when shard is missing AssignedTo value") + } +} + +func TestClaimShard(t *testing.T) { + svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). 
+ WithLeaseStealing(true) + + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) + checkpoint.Init() + + marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ + "ShardID": { + S: aws.String("0001"), + }, + "AssignedTo": { + S: aws.String("abcd-efgh"), + }, + "LeaseTimeout": { + S: aws.String(time.Now().AddDate(0, -1, 0).UTC().Format(time.RFC3339)), + }, + "Checkpoint": { + S: aws.String("deadbeef"), + }, + } + input := &dynamodb.PutItemInput{ + TableName: aws.String("TableName"), + Item: marshalledCheckpoint, + } + checkpoint.svc.PutItem(input) + shard := &par.ShardStatus{ + ID: "0001", + Checkpoint: "deadbeef", + Mux: &sync.RWMutex{}, + } + + err := checkpoint.ClaimShard(shard, "ijkl-mnop") + if err != nil { + t.Errorf("Shard not claimed %s", err) + } + + claimRequest, ok := svc.item[ClaimRequestKey] + if !ok { + t.Error("Expected claimRequest to be set by ClaimShard") + } else if *claimRequest.S != "ijkl-mnop" { + t.Errorf("Expected checkpoint to be ijkl-mnop. Got '%s'", *claimRequest.S) + } + + status := &par.ShardStatus{ + ID: shard.ID, + Mux: &sync.RWMutex{}, + } + checkpoint.FetchCheckpoint(status) + + // asiggnedTo, checkpointer, and parent shard id should be the same + assert.Equal(t, shard.AssignedTo, status.AssignedTo) + assert.Equal(t, shard.Checkpoint, status.Checkpoint) + assert.Equal(t, shard.ParentShardId, status.ParentShardId) +} diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index f8102eb..9f3f002 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -122,6 +122,18 @@ const ( // The amount of milliseconds to wait before graceful shutdown forcefully terminates. DefaultShutdownGraceMillis = 5000 + + // Lease stealing defaults to false for backwards compatibility. + DefaultEnableLeaseStealing = false + + // Interval between rebalance tasks defaults to 5 seconds. 
+ DefaultLeaseStealingIntervalMillis = 5000 + + // Number of milliseconds to wait before another worker can aquire a claimed shard + DefaultLeaseStealingClaimTimeoutMillis = 120000 + + // Number of milliseconds to wait before syncing with lease table (dynamodDB) + DefaultLeaseSyncingIntervalMillis = 60000 ) type ( @@ -257,6 +269,18 @@ type ( // MonitoringService publishes per worker-scoped metrics. MonitoringService metrics.MonitoringService + + // EnableLeaseStealing turns on lease stealing + EnableLeaseStealing bool + + // LeaseStealingIntervalMillis The number of milliseconds between rebalance tasks + LeaseStealingIntervalMillis int + + // LeaseStealingClaimTimeoutMillis The number of milliseconds to wait before another worker can aquire a claimed shard + LeaseStealingClaimTimeoutMillis int + + // LeaseSyncingTimeInterval The number of milliseconds to wait before syncing with lease table (dynamoDB) + LeaseSyncingTimeIntervalMillis int } ) diff --git a/clientlibrary/config/config_test.go b/clientlibrary/config/config_test.go index c02dfab..1785e91 100644 --- a/clientlibrary/config/config_test.go +++ b/clientlibrary/config/config_test.go @@ -39,9 +39,35 @@ func TestConfig(t *testing.T) { assert.Equal(t, "appName", kclConfig.ApplicationName) assert.Equal(t, 500, kclConfig.FailoverTimeMillis) assert.Equal(t, 10, kclConfig.TaskBackoffTimeMillis) + assert.True(t, kclConfig.EnableEnhancedFanOutConsumer) assert.Equal(t, "fan-out-consumer", kclConfig.EnhancedFanOutConsumerName) + assert.Equal(t, false, kclConfig.EnableLeaseStealing) + assert.Equal(t, 5000, kclConfig.LeaseStealingIntervalMillis) + + contextLogger := kclConfig.Logger.WithFields(logger.Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with default logger") + contextLogger.Infof("Default logger is awesome") +} + +func TestConfigLeaseStealing(t *testing.T) { + kclConfig := NewKinesisClientLibConfig("appName", "StreamName", "us-west-2", "workerId"). + WithFailoverTimeMillis(500). 
+ WithMaxRecords(100). + WithInitialPositionInStream(TRIM_HORIZON). + WithIdleTimeBetweenReadsInMillis(20). + WithCallProcessRecordsEvenForEmptyRecordList(true). + WithTaskBackoffTimeMillis(10). + WithLeaseStealing(true). + WithLeaseStealingIntervalMillis(10000) + + assert.Equal(t, "appName", kclConfig.ApplicationName) + assert.Equal(t, 500, kclConfig.FailoverTimeMillis) + assert.Equal(t, 10, kclConfig.TaskBackoffTimeMillis) + assert.Equal(t, true, kclConfig.EnableLeaseStealing) + assert.Equal(t, 10000, kclConfig.LeaseStealingIntervalMillis) + contextLogger := kclConfig.Logger.WithFields(logger.Fields{"key1": "value1"}) contextLogger.Debugf("Starting with default logger") contextLogger.Infof("Default logger is awesome") diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index 91f39b7..a831e88 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -95,7 +95,11 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regio InitialLeaseTableReadCapacity: DefaultInitialLeaseTableReadCapacity, InitialLeaseTableWriteCapacity: DefaultInitialLeaseTableWriteCapacity, SkipShardSyncAtWorkerInitializationIfLeasesExist: DefaultSkipShardSyncAtStartupIfLeasesExist, - Logger: logger.GetDefaultLogger(), + EnableLeaseStealing: DefaultEnableLeaseStealing, + LeaseStealingIntervalMillis: DefaultLeaseStealingIntervalMillis, + LeaseStealingClaimTimeoutMillis: DefaultLeaseStealingClaimTimeoutMillis, + LeaseSyncingTimeIntervalMillis: DefaultLeaseSyncingIntervalMillis, + Logger: logger.GetDefaultLogger(), } } @@ -241,3 +245,18 @@ func (c *KinesisClientLibConfiguration) WithEnhancedFanOutConsumerARN(consumerAR c.EnableEnhancedFanOutConsumer = true return c } + +func (c *KinesisClientLibConfiguration) WithLeaseStealing(enableLeaseStealing bool) *KinesisClientLibConfiguration { + c.EnableLeaseStealing = enableLeaseStealing + return c +} + +func (c *KinesisClientLibConfiguration) 
WithLeaseStealingIntervalMillis(leaseStealingIntervalMillis int) *KinesisClientLibConfiguration { + c.LeaseStealingIntervalMillis = leaseStealingIntervalMillis + return c +} + +func (c *KinesisClientLibConfiguration) WithLeaseSyncingIntervalMillis(leaseSyncingIntervalMillis int) *KinesisClientLibConfiguration { + c.LeaseSyncingTimeIntervalMillis = leaseSyncingIntervalMillis + return c +} diff --git a/clientlibrary/partition/partition.go b/clientlibrary/partition/partition.go index 955bf08..b3f287f 100644 --- a/clientlibrary/partition/partition.go +++ b/clientlibrary/partition/partition.go @@ -30,6 +30,8 @@ package worker import ( "sync" "time" + + "github.com/vmware/vmware-go-kcl/clientlibrary/config" ) type ShardStatus struct { @@ -43,6 +45,7 @@ type ShardStatus struct { StartingSequenceNumber string // child shard doesn't have end sequence number EndingSequenceNumber string + ClaimRequest string } func (ss *ShardStatus) GetLeaseOwner() string { @@ -68,3 +71,24 @@ func (ss *ShardStatus) SetCheckpoint(c string) { defer ss.Mux.Unlock() ss.Checkpoint = c } + +func (ss *ShardStatus) GetLeaseTimeout() time.Time { + ss.Mux.Lock() + defer ss.Mux.Unlock() + return ss.LeaseTimeout +} + +func (ss *ShardStatus) SetLeaseTimeout(timeout time.Time) { + ss.Mux.Lock() + defer ss.Mux.Unlock() + ss.LeaseTimeout = timeout +} + +func (ss *ShardStatus) IsClaimRequestExpired(kclConfig *config.KinesisClientLibConfiguration) bool { + if leaseTimeout := ss.GetLeaseTimeout(); leaseTimeout.IsZero() { + return false + } else { + return leaseTimeout. 
+ Before(time.Now().UTC().Add(time.Duration(-kclConfig.LeaseStealingClaimTimeoutMillis) * time.Millisecond)) + } +} diff --git a/clientlibrary/worker/polling-shard-consumer.go b/clientlibrary/worker/polling-shard-consumer.go index 27e5c80..90371b0 100644 --- a/clientlibrary/worker/polling-shard-consumer.go +++ b/clientlibrary/worker/polling-shard-consumer.go @@ -103,7 +103,7 @@ func (sc *PollingShardConsumer) getRecords() error { retriedErrors := 0 for { - if time.Now().UTC().After(sc.shard.LeaseTimeout.Add(-time.Duration(sc.kclConfig.LeaseRefreshPeriodMillis) * time.Millisecond)) { + if time.Now().UTC().After(sc.shard.GetLeaseTimeout().Add(-time.Duration(sc.kclConfig.LeaseRefreshPeriodMillis) * time.Millisecond)) { log.Debugf("Refreshing lease on shard: %s for worker: %s", sc.shard.ID, sc.consumerID) err = sc.checkpointer.GetLease(sc.shard, sc.consumerID) if err != nil { diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 0ab4d17..fb2dd4a 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -68,7 +68,8 @@ type Worker struct { rng *rand.Rand - shardStatus map[string]*par.ShardStatus + shardStatus map[string]*par.ShardStatus + shardStealInProgress bool } // NewWorker constructs a Worker instance for processing Kinesis stream data. 
@@ -271,7 +272,7 @@ func (w *Worker) eventLoop() { log.Infof("Found %d shards", foundShards) } - // Count the number of leases hold by this worker excluding the processed shard + // Count the number of leases held by this worker excluding the processed shard counter := 0 for _, shard := range w.shardStatus { if shard.GetLeaseOwner() == w.workerID && shard.GetCheckpoint() != chk.ShardEnd { @@ -302,6 +303,20 @@ func (w *Worker) eventLoop() { continue } + var stealShard bool + if w.kclConfig.EnableLeaseStealing && shard.ClaimRequest != "" { + upcomingStealingInterval := time.Now().UTC().Add(time.Duration(w.kclConfig.LeaseStealingIntervalMillis) * time.Millisecond) + if shard.GetLeaseTimeout().Before(upcomingStealingInterval) && !shard.IsClaimRequestExpired(w.kclConfig) { + if shard.ClaimRequest == w.workerID { + stealShard = true + log.Debugf("Stealing shard: %s", shard.ID) + } else { + log.Debugf("Shard being stolen: %s", shard.ID) + continue + } + } + } + err = w.checkpointer.GetLease(shard, w.workerID) if err != nil { // cannot get lease on the shard @@ -311,6 +326,11 @@ func (w *Worker) eventLoop() { continue } + if stealShard { + log.Debugf("Successfully stole shard: %+v", shard.ID) + w.shardStealInProgress = false + } + // log metrics on got lease w.mService.LeaseGained(shard.ID) w.waitGroup.Add(1) @@ -325,6 +345,13 @@ func (w *Worker) eventLoop() { } } + if w.kclConfig.EnableLeaseStealing { + err = w.rebalance() + if err != nil { + log.Warnf("Error in rebalance: %+v", err) + } + } + select { case <-*w.stop: log.Infof("Shutting down...") @@ -335,6 +362,90 @@ func (w *Worker) eventLoop() { } } +func (w *Worker) rebalance() error { + log := w.kclConfig.Logger + + workers, err := w.checkpointer.ListActiveWorkers(w.shardStatus) + if err != nil { + log.Debugf("Error listing workers. workerID: %s. 
Error: %+v ", w.workerID, err) + return err + } + + // Only attempt to steal one shard at at time, to allow for linear convergence + if w.shardStealInProgress { + shardInfo := make(map[string]bool) + err := w.getShardIDs("", shardInfo) + if err != nil { + return err + } + for _, shard := range w.shardStatus { + if shard.ClaimRequest != "" && shard.ClaimRequest == w.workerID { + log.Debugf("Steal in progress. workerID: %s", w.workerID) + return nil + } + // Our shard steal was stomped on by a Checkpoint. + // We could deal with that, but instead just try again + w.shardStealInProgress = false + } + } + + var numShards int + for _, shards := range workers { + numShards += len(shards) + } + + numWorkers := len(workers) + + // 1:1 shards to workers is optimal, so we cannot possibly rebalance + if numWorkers >= numShards { + log.Debugf("Optimal shard allocation, not stealing any shards. workerID: %s, %v > %v. ", w.workerID, numWorkers, numShards) + return nil + } + + currentShards, ok := workers[w.workerID] + var numCurrentShards int + if !ok { + numCurrentShards = 0 + numWorkers++ + } else { + numCurrentShards = len(currentShards) + } + + optimalShards := numShards / numWorkers + + // We have more than or equal optimal shards, so no rebalancing can take place + if numCurrentShards >= optimalShards || numCurrentShards == w.kclConfig.MaxLeasesForWorker { + log.Debugf("We have enough shards, not attempting to steal any. workerID: %s", w.workerID) + return nil + } + maxShards := int(optimalShards) + var workerSteal string + for worker, shards := range workers { + if worker != w.workerID && len(shards) > maxShards { + workerSteal = worker + maxShards = len(shards) + } + } + // Not all shards are allocated so fallback to default shard allocation mechanisms + if workerSteal == "" { + log.Infof("Not all shards are allocated, not stealing any. 
workerID: %s", w.workerID) + return nil + } + + // Steal a random shard from the worker with the most shards + w.shardStealInProgress = true + randIndex := rand.Intn(len(workers[workerSteal])) + shardToSteal := workers[workerSteal][randIndex] + log.Debugf("Stealing shard %s from %s", shardToSteal, workerSteal) + + err = w.checkpointer.ClaimShard(w.shardStatus[shardToSteal.ID], w.workerID) + if err != nil { + w.shardStealInProgress = false + return err + } + return nil +} + // List all shards and store them into shardStatus table // If shard has been removed, need to exclude it from cached shard status. func (w *Worker) getShardIDs(nextToken string, shardInfo map[string]bool) error { diff --git a/support/toolchain/docker/Dockerfile b/support/toolchain/docker/Dockerfile index 1e66efe..47a4528 100644 --- a/support/toolchain/docker/Dockerfile +++ b/support/toolchain/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.12 +FROM golang:1.13 ENV PATH /go/bin:/src/bin:/root/go/bin:/usr/local/go/bin:$PATH ENV GOPATH /go:/src RUN go get -v github.com/alecthomas/gometalinter && \ diff --git a/test/lease_stealing_util_test.go b/test/lease_stealing_util_test.go new file mode 100644 index 0000000..21b8ab3 --- /dev/null +++ b/test/lease_stealing_util_test.go @@ -0,0 +1,230 @@ +package test + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + "github.com/stretchr/testify/assert" + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" + cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" + wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" +) + +type LeaseStealingTest struct { + t *testing.T + config *TestClusterConfig + cluster *TestCluster + kc kinesisiface.KinesisAPI + dc dynamodbiface.DynamoDBAPI + + backOffSeconds int + maxRetries int +} + +func 
NewLeaseStealingTest(t *testing.T, config *TestClusterConfig, workerFactory TestWorkerFactory) *LeaseStealingTest { + cluster := NewTestCluster(t, config, workerFactory) + clientConfig := cluster.workerFactory.CreateKCLConfig("test-client", config) + return &LeaseStealingTest{ + t: t, + config: config, + cluster: cluster, + kc: NewKinesisClient(t, config.regionName, clientConfig.KinesisEndpoint, clientConfig.KinesisCredentials), + dc: NewDynamoDBClient(t, config.regionName, clientConfig.DynamoDBEndpoint, clientConfig.KinesisCredentials), + backOffSeconds: 5, + maxRetries: 60, + } +} + +func (lst *LeaseStealingTest) WithBackoffSeconds(backoff int) *LeaseStealingTest { + lst.backOffSeconds = backoff + return lst +} + +func (lst *LeaseStealingTest) WithMaxRetries(retries int) *LeaseStealingTest { + lst.maxRetries = retries + return lst +} + +func (lst *LeaseStealingTest) publishSomeData() (stop func()) { + done := make(chan int) + wg := &sync.WaitGroup{} + + wg.Add(1) + go func() { + ticker := time.NewTicker(500 * time.Millisecond) + defer wg.Done() + defer ticker.Stop() + for { + select { + case <-done: + return + case <-ticker.C: + lst.t.Log("Coninuously publishing records") + publishSomeData(lst.t, lst.kc) + } + } + }() + + return func() { + close(done) + wg.Wait() + } +} + +func (lst *LeaseStealingTest) getShardCountByWorker() map[string]int { + input := &dynamodb.ScanInput{ + TableName: aws.String(lst.config.appName), + } + + shardsByWorker := map[string]map[string]bool{} + err := lst.dc.ScanPages(input, func(out *dynamodb.ScanOutput, lastPage bool) bool { + for _, result := range out.Items { + if shardID, ok := result[chk.LeaseKeyKey]; !ok { + continue + } else if assignedTo, ok := result[chk.LeaseOwnerKey]; !ok { + continue + } else { + if _, ok := shardsByWorker[*assignedTo.S]; !ok { + shardsByWorker[*assignedTo.S] = map[string]bool{} + } + shardsByWorker[*assignedTo.S][*shardID.S] = true + } + } + return !lastPage + }) + assert.Nil(lst.t, err) + + 
shardCountByWorker := map[string]int{} + for worker, shards := range shardsByWorker { + shardCountByWorker[worker] = len(shards) + } + return shardCountByWorker +} + +type LeaseStealingAssertions struct { + expectedLeasesForIntialWorker int + expectedLeasesPerWorker int +} + +func (lst *LeaseStealingTest) Run(assertions LeaseStealingAssertions) { + // Publish records onto stream thoughtout the entire duration of the test + stop := lst.publishSomeData() + defer stop() + + // Start worker 1 + worker1, _ := lst.cluster.SpawnWorker() + + // Wait until the above worker has all leases + var worker1ShardCount int + for i := 0; i < lst.maxRetries; i++ { + time.Sleep(time.Duration(lst.backOffSeconds) * time.Second) + + shardCountByWorker := lst.getShardCountByWorker() + if shardCount, ok := shardCountByWorker[worker1]; ok && shardCount == assertions.expectedLeasesForIntialWorker { + worker1ShardCount = shardCount + break + } + } + + // Assert correct number of leases + assert.Equal(lst.t, assertions.expectedLeasesForIntialWorker, worker1ShardCount) + + // Spawn Remaining Wokers + for i := 0; i < lst.config.numWorkers-1; i++ { + lst.cluster.SpawnWorker() + } + + // Wait For Rebalance + var shardCountByWorker map[string]int + for i := 0; i < lst.maxRetries; i++ { + time.Sleep(time.Duration(lst.backOffSeconds) * time.Second) + + shardCountByWorker = lst.getShardCountByWorker() + + correctCount := true + for _, count := range shardCountByWorker { + if count != assertions.expectedLeasesPerWorker { + correctCount = false + } + } + + if correctCount { + break + } + } + + // Assert Rebalanced + assert.Greater(lst.t, len(shardCountByWorker), 0) + for _, count := range shardCountByWorker { + assert.Equal(lst.t, assertions.expectedLeasesPerWorker, count) + } + + // Shutdown Workers + time.Sleep(10 * time.Second) + lst.cluster.Shutdown() +} + +type TestWorkerFactory interface { + CreateWorker(workerID string, kclConfig *cfg.KinesisClientLibConfiguration) *wk.Worker + 
CreateKCLConfig(workerID string, config *TestClusterConfig) *cfg.KinesisClientLibConfiguration +} + +type TestClusterConfig struct { + numShards int + numWorkers int + + appName string + streamName string + regionName string + workerIDTemplate string +} + +type TestCluster struct { + t *testing.T + config *TestClusterConfig + workerFactory TestWorkerFactory + workerIDs []string + workers map[string]*wk.Worker +} + +func NewTestCluster(t *testing.T, config *TestClusterConfig, workerFactory TestWorkerFactory) *TestCluster { + return &TestCluster{ + t: t, + config: config, + workerFactory: workerFactory, + workerIDs: make([]string, 0), + workers: make(map[string]*wk.Worker), + } +} + +func (tc *TestCluster) addWorker(workerID string, config *cfg.KinesisClientLibConfiguration) *wk.Worker { + worker := tc.workerFactory.CreateWorker(workerID, config) + tc.workerIDs = append(tc.workerIDs, workerID) + tc.workers[workerID] = worker + return worker +} + +func (tc *TestCluster) SpawnWorker() (string, *wk.Worker) { + id := len(tc.workers) + workerID := fmt.Sprintf(tc.config.workerIDTemplate, id) + + config := tc.workerFactory.CreateKCLConfig(workerID, tc.config) + worker := tc.addWorker(workerID, config) + + err := worker.Start() + assert.Nil(tc.t, err) + return workerID, worker +} + +func (tc *TestCluster) Shutdown() { + for workerID, worker := range tc.workers { + tc.t.Logf("Shutting down worker: %v", workerID) + worker.Shutdown() + } +} diff --git a/test/logger_test.go b/test/logger_test.go index 2d63124..f5db877 100644 --- a/test/logger_test.go +++ b/test/logger_test.go @@ -23,9 +23,10 @@ package test import ( "github.com/stretchr/testify/assert" + "testing" + "github.com/sirupsen/logrus" "go.uber.org/zap" - "testing" "github.com/vmware/vmware-go-kcl/logger" zaplogger "github.com/vmware/vmware-go-kcl/logger/zap" diff --git a/test/record_processor_test.go b/test/record_processor_test.go index 31a8556..4f36266 100644 --- a/test/record_processor_test.go +++ 
b/test/record_processor_test.go @@ -19,10 +19,11 @@ package test import ( + "testing" + "github.com/aws/aws-sdk-go/aws" "github.com/stretchr/testify/assert" kc "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" - "testing" ) // Record processor factory is used to create RecordProcessor diff --git a/test/record_publisher_test.go b/test/record_publisher_test.go index f948fc1..baaac57 100644 --- a/test/record_publisher_test.go +++ b/test/record_publisher_test.go @@ -21,9 +21,13 @@ package test import ( "crypto/md5" "fmt" + "sync" + "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" rec "github.com/awslabs/kinesis-aggregation/go/records" @@ -50,12 +54,79 @@ func NewKinesisClient(t *testing.T, regionName, endpoint string, credentials *cr return kinesis.New(s) } +// NewDynamoDBClient to create a Kinesis Client. +func NewDynamoDBClient(t *testing.T, regionName, endpoint string, credentials *credentials.Credentials) *dynamodb.DynamoDB { + s, err := session.NewSession(&aws.Config{ + Region: aws.String(regionName), + Endpoint: aws.String(endpoint), + Credentials: credentials, + }) + + if err != nil { + // no need to move forward + t.Fatalf("Failed in getting DynamoDB session for creating Worker: %+v", err) + } + return dynamodb.New(s) +} + +func continuouslyPublishSomeData(t *testing.T, kc kinesisiface.KinesisAPI) func() { + shards := []*kinesis.Shard{} + var nextToken *string + for { + out, err := kc.ListShards(&kinesis.ListShardsInput{ + StreamName: aws.String(streamName), + NextToken: nextToken, + }) + if err != nil { + t.Errorf("Error in ListShards. %+v", err) + } + + shards = append(shards, out.Shards...) 
+ if out.NextToken == nil { + break + } + nextToken = out.NextToken + } + + done := make(chan int) + wg := &sync.WaitGroup{} + + wg.Add(1) + go func() { + defer wg.Done() + ticker := time.NewTicker(500 * time.Millisecond) + for { + select { + case <-done: + return + case <-ticker.C: + publishToAllShards(t, kc, shards) + publishSomeData(t, kc) + } + } + }() + + return func() { + close(done) + wg.Wait() + } +} + +func publishToAllShards(t *testing.T, kc kinesisiface.KinesisAPI, shards []*kinesis.Shard) { + // Put records to all shards + for i := 0; i < 10; i++ { + for _, shard := range shards { + publishRecord(t, kc, shard.HashKeyRange.StartingHashKey) + } + } +} + // publishSomeData to put some records into Kinesis stream func publishSomeData(t *testing.T, kc kinesisiface.KinesisAPI) { // Put some data into stream. t.Log("Putting data into stream using PutRecord API...") for i := 0; i < 50; i++ { - publishRecord(t, kc) + publishRecord(t, kc, nil) } t.Log("Done putting data into stream using PutRecord API.") @@ -75,13 +146,17 @@ func publishSomeData(t *testing.T, kc kinesisiface.KinesisAPI) { } // publishRecord to put a record into Kinesis stream using PutRecord API. -func publishRecord(t *testing.T, kc kinesisiface.KinesisAPI) { - // Use random string as partition key to ensure even distribution across shards - _, err := kc.PutRecord(&kinesis.PutRecordInput{ +func publishRecord(t *testing.T, kc kinesisiface.KinesisAPI, hashKey *string) { + input := &kinesis.PutRecordInput{ Data: []byte(specstr), StreamName: aws.String(streamName), PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), - }) + } + if hashKey != nil { + input.ExplicitHashKey = hashKey + } + // Use random string as partition key to ensure even distribution across shards + _, err := kc.PutRecord(input) if err != nil { t.Errorf("Error in PutRecord. 
%+v", err) @@ -94,10 +169,11 @@ func publishRecords(t *testing.T, kc kinesisiface.KinesisAPI) { records := make([]*kinesis.PutRecordsRequestEntry, 5) for i := 0; i < 5; i++ { - records[i] = &kinesis.PutRecordsRequestEntry{ + record := &kinesis.PutRecordsRequestEntry{ Data: []byte(specstr), PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), } + records[i] = record } _, err := kc.PutRecords(&kinesis.PutRecordsInput{ diff --git a/test/worker_custom_test.go b/test/worker_custom_test.go index ef48491..19a6fb7 100644 --- a/test/worker_custom_test.go +++ b/test/worker_custom_test.go @@ -37,7 +37,7 @@ import ( ) func TestWorkerInjectCheckpointer(t *testing.T) { - kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). WithMaxLeasesForWorker(1). @@ -52,6 +52,12 @@ func TestWorkerInjectCheckpointer(t *testing.T) { // configure cloudwatch as metrics system kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem)) + // Put some data into stream. + kc := NewKinesisClient(t, regionName, kclConfig.KinesisEndpoint, kclConfig.KinesisCredentials) + // publishSomeData(t, kc) + stop := continuouslyPublishSomeData(t, kc) + defer stop() + // custom checkpointer or a mock checkpointer. checkpointer := chk.NewDynamoCheckpoint(kclConfig) @@ -62,12 +68,8 @@ func TestWorkerInjectCheckpointer(t *testing.T) { err := worker.Start() assert.Nil(t, err) - // Put some data into stream. 
- kc := NewKinesisClient(t, regionName, kclConfig.KinesisEndpoint, kclConfig.KinesisCredentials) - publishSomeData(t, kc) - // wait a few seconds before shutdown processing - time.Sleep(10 * time.Second) + time.Sleep(30 * time.Second) worker.Shutdown() // verify the checkpointer after graceful shutdown @@ -86,7 +88,7 @@ func TestWorkerInjectCheckpointer(t *testing.T) { } func TestWorkerInjectKinesis(t *testing.T) { - kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). WithMaxLeasesForWorker(1). @@ -109,6 +111,11 @@ func TestWorkerInjectKinesis(t *testing.T) { assert.Nil(t, err) kc := kinesis.New(s) + // Put some data into stream. + // publishSomeData(t, kc) + stop := continuouslyPublishSomeData(t, kc) + defer stop() + // Inject a custom checkpointer into the worker. worker := wk.NewWorker(recordProcessorFactory(t), kclConfig). WithKinesis(kc) @@ -116,16 +123,13 @@ func TestWorkerInjectKinesis(t *testing.T) { err = worker.Start() assert.Nil(t, err) - // Put some data into stream. - publishSomeData(t, kc) - // wait a few seconds before shutdown processing - time.Sleep(10 * time.Second) + time.Sleep(30 * time.Second) worker.Shutdown() } func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) { - kclConfig := cfg.NewKinesisClientLibConfig("appName", streamName, regionName, workerID). + kclConfig := cfg.NewKinesisClientLibConfig(appName, streamName, regionName, workerID). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). WithMaxLeasesForWorker(1). @@ -148,6 +152,11 @@ func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) { assert.Nil(t, err) kc := kinesis.New(s) + // Put some data into stream. + // publishSomeData(t, kc) + stop := continuouslyPublishSomeData(t, kc) + defer stop() + // custom checkpointer or a mock checkpointer. 
checkpointer := chk.NewDynamoCheckpoint(kclConfig) @@ -159,10 +168,7 @@ func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) { err = worker.Start() assert.Nil(t, err) - // Put some data into stream. - publishSomeData(t, kc) - // wait a few seconds before shutdown processing - time.Sleep(10 * time.Second) + time.Sleep(30 * time.Second) worker.Shutdown() } diff --git a/test/worker_lease_stealing_test.go b/test/worker_lease_stealing_test.go new file mode 100644 index 0000000..c35974c --- /dev/null +++ b/test/worker_lease_stealing_test.go @@ -0,0 +1,127 @@ +package test + +import ( + "testing" + + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" + cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" + wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" + "github.com/vmware/vmware-go-kcl/logger" +) + +func TestLeaseStealing(t *testing.T) { + config := &TestClusterConfig{ + numShards: 4, + numWorkers: 2, + appName: appName, + streamName: streamName, + regionName: regionName, + workerIDTemplate: workerID + "-%v", + } + test := NewLeaseStealingTest(t, config, newLeaseStealingWorkerFactory(t)) + test.Run(LeaseStealingAssertions{ + expectedLeasesForIntialWorker: config.numShards, + expectedLeasesPerWorker: config.numShards / config.numWorkers, + }) +} + +type leaseStealingWorkerFactory struct { + t *testing.T +} + +func newLeaseStealingWorkerFactory(t *testing.T) *leaseStealingWorkerFactory { + return &leaseStealingWorkerFactory{t} +} + +func (wf *leaseStealingWorkerFactory) CreateKCLConfig(workerID string, config *TestClusterConfig) *cfg.KinesisClientLibConfiguration { + log := logger.NewLogrusLoggerWithConfig(logger.Configuration{ + EnableConsole: true, + ConsoleLevel: logger.Error, + ConsoleJSONFormat: false, + EnableFile: true, + FileLevel: logger.Info, + FileJSONFormat: true, + Filename: "log.log", + }) + + log.WithFields(logger.Fields{"worker": workerID}) + + return cfg.NewKinesisClientLibConfig(config.appName, config.streamName, 
config.regionName, workerID). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(10000). + WithLeaseStealing(true). + WithLogger(log) +} + +func (wf *leaseStealingWorkerFactory) CreateWorker(workerID string, kclConfig *cfg.KinesisClientLibConfiguration) *wk.Worker { + worker := wk.NewWorker(recordProcessorFactory(wf.t), kclConfig) + return worker +} + +func TestLeaseStealingInjectCheckpointer(t *testing.T) { + config := &TestClusterConfig{ + numShards: 4, + numWorkers: 2, + appName: appName, + streamName: streamName, + regionName: regionName, + workerIDTemplate: workerID + "-%v", + } + test := NewLeaseStealingTest(t, config, newleaseStealingWorkerFactoryCustomChk(t)) + test.Run(LeaseStealingAssertions{ + expectedLeasesForIntialWorker: config.numShards, + expectedLeasesPerWorker: config.numShards / config.numWorkers, + }) +} + +type leaseStealingWorkerFactoryCustom struct { + *leaseStealingWorkerFactory +} + +func newleaseStealingWorkerFactoryCustomChk(t *testing.T) *leaseStealingWorkerFactoryCustom { + return &leaseStealingWorkerFactoryCustom{ + newLeaseStealingWorkerFactory(t), + } +} + +func (wfc *leaseStealingWorkerFactoryCustom) CreateWorker(workerID string, kclConfig *cfg.KinesisClientLibConfiguration) *wk.Worker { + worker := wfc.leaseStealingWorkerFactory.CreateWorker(workerID, kclConfig) + checkpointer := chk.NewDynamoCheckpoint(kclConfig) + return worker.WithCheckpointer(checkpointer) +} + +func TestLeaseStealingWithMaxLeasesForWorker(t *testing.T) { + config := &TestClusterConfig{ + numShards: 4, + numWorkers: 2, + appName: appName, + streamName: streamName, + regionName: regionName, + workerIDTemplate: workerID + "-%v", + } + test := NewLeaseStealingTest(t, config, newleaseStealingWorkerFactoryMaxLeases(t, config.numShards-1)) + test.Run(LeaseStealingAssertions{ + expectedLeasesForIntialWorker: config.numShards - 1, + expectedLeasesPerWorker: 2, + }) +} + +type 
leaseStealingWorkerFactoryMaxLeases struct { + maxLeases int + *leaseStealingWorkerFactory +} + +func newleaseStealingWorkerFactoryMaxLeases(t *testing.T, maxLeases int) *leaseStealingWorkerFactoryMaxLeases { + return &leaseStealingWorkerFactoryMaxLeases{ + maxLeases, + newLeaseStealingWorkerFactory(t), + } +} + +func (wfm *leaseStealingWorkerFactoryMaxLeases) CreateKCLConfig(workerID string, config *TestClusterConfig) *cfg.KinesisClientLibConfiguration { + kclConfig := wfm.leaseStealingWorkerFactory.CreateKCLConfig(workerID, config) + kclConfig.WithMaxLeasesForWorker(wfm.maxLeases) + return kclConfig +} diff --git a/test/worker_test.go b/test/worker_test.go index b9f9a32..a445a59 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -60,7 +60,7 @@ func TestWorker(t *testing.T) { // In order to have precise control over logging. Use logger with config config := logger.Configuration{ EnableConsole: true, - ConsoleLevel: logger.Debug, + ConsoleLevel: logger.Error, ConsoleJSONFormat: false, EnableFile: true, FileLevel: logger.Info, @@ -269,8 +269,13 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t // configure cloudwatch as metrics system kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem)) - worker := wk.NewWorker(recordProcessorFactory(t), kclConfig) + // Put some data into stream. + kc := NewKinesisClient(t, regionName, kclConfig.KinesisEndpoint, kclConfig.KinesisCredentials) + // publishSomeData(t, kc) + stop := continuouslyPublishSomeData(t, kc) + defer stop() + worker := wk.NewWorker(recordProcessorFactory(t), kclConfig) err := worker.Start() assert.Nil(t, err) @@ -286,10 +291,6 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t //os.Exit(0) }() - // Put some data into stream. 
- kc := NewKinesisClient(t, regionName, kclConfig.KinesisEndpoint, kclConfig.KinesisCredentials) - publishSomeData(t, kc) - if triggersig { t.Log("Trigger signal SIGINT") p, _ := os.FindProcess(os.Getpid()) @@ -297,7 +298,7 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t } // wait a few seconds before shutdown processing - time.Sleep(10 * time.Second) + time.Sleep(30 * time.Second) if metricsSystem == "prometheus" { res, err := http.Get("http://localhost:8080/metrics") From 0094ef5a69b5c00a65892444983872cd475978e7 Mon Sep 17 00:00:00 2001 From: Luca Rinaldi Date: Fri, 23 Jul 2021 14:03:31 +0200 Subject: [PATCH 71/90] improve log event (#93) * improve log event Signed-off-by: lucarin91 * use %+v in template string Signed-off-by: lucarin91 --- clientlibrary/worker/worker.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index fb2dd4a..339a7ba 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -292,7 +292,7 @@ func (w *Worker) eventLoop() { if err != nil { // checkpoint may not existed yet is not an error condition. 
if err != chk.ErrSequenceIDNotFound { - log.Errorf(" Error: %+v", err) + log.Warnf("Couldn't fetch checkpoint: %+v", err) // move on to next shard continue } From 5de70c05427f70669deaf077b8d1a872d34eee96 Mon Sep 17 00:00:00 2001 From: Tao Jiang Date: Mon, 23 Aug 2021 21:34:46 -0500 Subject: [PATCH 72/90] Rebuild and publish toolchain (#95) Signed-off-by: Tao Jiang --- HyperMake | 2 +- support/toolchain/HyperMake | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/HyperMake b/HyperMake index 7ca3d06..9337eb9 100644 --- a/HyperMake +++ b/HyperMake @@ -88,5 +88,5 @@ settings: default-targets: - test docker: - image: 'vmware/go-kcl-toolchain:0.1.2' + image: 'vmware/go-kcl-toolchain:0.1.3' src-volume: /go/src/github.com/vmware/vmware-go-kcl diff --git a/support/toolchain/HyperMake b/support/toolchain/HyperMake index 1dcd569..0408c88 100644 --- a/support/toolchain/HyperMake +++ b/support/toolchain/HyperMake @@ -25,4 +25,4 @@ settings: default-targets: - rebuild-toolchain docker: - image: 'vmware/go-kcl-toolchain:0.1.2' + image: 'vmware/go-kcl-toolchain:0.1.3' From 18546df7816401fc82c69ec9c973e4885c76fdd2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1t=C3=A9=20Lang?= Date: Fri, 15 Oct 2021 15:21:53 +0300 Subject: [PATCH 73/90] Added RS Zerolog logging adapter (#96) * Added RS Zerolog logging adapter Signed-off-by: Mate Lang * Fixed pull request review comments Signed-off-by: Mate Lang --- go.mod | 3 +- go.sum | 35 ++++++-- logger/zerolog/zerolog.go | 152 +++++++++++++++++++++++++++++++++ logger/zerolog/zerolog_test.go | 32 +++++++ 4 files changed, 212 insertions(+), 10 deletions(-) create mode 100644 logger/zerolog/zerolog.go create mode 100644 logger/zerolog/zerolog_test.go diff --git a/go.mod b/go.mod index c8fa96f..d6923eb 100644 --- a/go.mod +++ b/go.mod @@ -10,13 +10,12 @@ require ( github.com/prometheus/client_golang v0.9.3 github.com/prometheus/common v0.4.1 github.com/prometheus/procfs v0.0.0-20190523193104-a7aeb8df3389 // indirect + 
github.com/rs/zerolog v1.25.0 github.com/sirupsen/logrus v1.4.2 github.com/stretchr/testify v1.5.1 go.uber.org/atomic v1.4.0 // indirect go.uber.org/multierr v1.2.0 // indirect go.uber.org/zap v1.11.0 - golang.org/x/sys v0.0.0-20190528012530-adf421d2caf4 // indirect - golang.org/x/text v0.3.2 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) diff --git a/go.sum b/go.sum index 60c4332..992fe57 100644 --- a/go.sum +++ b/go.sum @@ -12,6 +12,7 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -21,7 +22,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= @@ -40,7 +41,6 @@ 
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -61,6 +61,9 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/procfs v0.0.0-20190523193104-a7aeb8df3389 h1:F/k2nob1S9M6v5Xkq7KjSTQirOYaYQord0jR4TwyVmY= github.com/prometheus/procfs v0.0.0-20190523193104-a7aeb8df3389/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.25.0 h1:Rj7XygbUHKUlDPcVdoLyR91fJBsduXj5fRxyqIQj/II= +github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -71,6 +74,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4= @@ -79,28 +83,43 @@ go.uber.org/zap v1.11.0 h1:gSmpCfs+R47a4yQPAI4xJ0IPDLTRGXskm6UelqNXpqE= go.uber.org/zap v1.11.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190528012530-adf421d2caf4 h1:gd52YanAQJ4UkvuNi/7z63JEyc6ejHh9QwdzbTiEtAY= -golang.org/x/sys v0.0.0-20190528012530-adf421d2caf4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/logger/zerolog/zerolog.go b/logger/zerolog/zerolog.go new file mode 100644 index 0000000..412540f --- /dev/null +++ b/logger/zerolog/zerolog.go @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2019 VMware, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and + * associated documentation files (the "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is furnished to do + * so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT + * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +// Note: The implementation comes from https://www.mountedthoughts.com/golang-logger-interface/ +// https://github.com/amitrai48/logger + +// Package zerolog implements the KCL logger using RS Zerolog logger +package zerolog + +import ( + "github.com/rs/zerolog" + "github.com/vmware/vmware-go-kcl/logger" + "gopkg.in/natefinch/lumberjack.v2" + "os" +) + +type zeroLogger struct { + log zerolog.Logger +} + +// NewZerologLogger creates a new logger.Logger backed by RS Zerolog using a default config +func NewZerologLogger() logger.Logger { + return NewZerologLoggerWithConfig(logger.Configuration{ + EnableConsole: true, + ConsoleJSONFormat: true, + ConsoleLevel: logger.Info, + EnableFile: false, + FileJSONFormat: false, + FileLevel: logger.Info, + Filename: "", + MaxSizeMB: 0, + MaxAgeDays: 0, + MaxBackups: 0, + LocalTime: true, + }) +} + +// NewZerologLoggerWithConfig creates a new logger.Logger backed by RS Zerolog using the provided config +func NewZerologLoggerWithConfig(config logger.Configuration) logger.Logger { + var consoleHandler *zerolog.ConsoleWriter + var fileHandler *lumberjack.Logger + var finalLogger zerolog.Logger + + normalizeConfig(&config) + + if config.EnableConsole { + consoleHandler = &zerolog.ConsoleWriter{Out: os.Stdout} + } + + if config.EnableFile { + fileHandler = &lumberjack.Logger{ + Filename: config.Filename, + MaxSize: config.MaxSizeMB, + Compress: true, + MaxAge: config.MaxAgeDays, + MaxBackups: config.MaxBackups, + LocalTime: config.LocalTime, + } + } + + if config.EnableConsole && config.EnableFile { + multi := zerolog.MultiLevelWriter(consoleHandler, fileHandler) + finalLogger = zerolog.New(multi).Level(getZeroLogLevel(config.ConsoleLevel)).With().Timestamp().Logger() + } else if config.EnableFile { + finalLogger = zerolog.New(fileHandler).Level(getZeroLogLevel(config.FileLevel)).With().Timestamp().Logger() + } else { + finalLogger = 
zerolog.New(consoleHandler).Level(getZeroLogLevel(config.ConsoleLevel)).With().Timestamp().Logger() + } + + return &zeroLogger{log: finalLogger} +} + +func (z *zeroLogger) Debugf(format string, args ...interface{}) { + z.log.Debug().Msgf(format, args...) +} + +func (z *zeroLogger) Infof(format string, args ...interface{}) { + z.log.Info().Msgf(format, args...) +} + +func (z *zeroLogger) Warnf(format string, args ...interface{}) { + z.log.Warn().Msgf(format, args...) +} + +func (z *zeroLogger) Errorf(format string, args ...interface{}) { + z.log.Error().Msgf(format, args...) +} + +func (z *zeroLogger) Fatalf(format string, args ...interface{}) { + z.log.Fatal().Msgf(format, args...) +} + +func (z *zeroLogger) Panicf(format string, args ...interface{}) { + z.log.Panic().Msgf(format, args...) +} + +func (z *zeroLogger) WithFields(keyValues logger.Fields) logger.Logger { + newLogger := z.log.With() + for k, v := range keyValues { + newLogger.Interface(k, v) + } + + return &zeroLogger{ + log: newLogger.Logger(), + } +} + +func getZeroLogLevel(level string) zerolog.Level { + switch level { + case logger.Info: + return zerolog.InfoLevel + case logger.Warn: + return zerolog.WarnLevel + case logger.Debug: + return zerolog.DebugLevel + case logger.Error: + return zerolog.ErrorLevel + case logger.Fatal: + return zerolog.FatalLevel + default: + return zerolog.InfoLevel + } +} + +func normalizeConfig(config *logger.Configuration) { + if config.MaxSizeMB <= 0 { + config.MaxSizeMB = 100 + } + + if config.MaxAgeDays <= 0 { + config.MaxAgeDays = 7 + } + + if config.MaxBackups < 0 { + config.MaxBackups = 0 + } +} diff --git a/logger/zerolog/zerolog_test.go b/logger/zerolog/zerolog_test.go new file mode 100644 index 0000000..7d35aea --- /dev/null +++ b/logger/zerolog/zerolog_test.go @@ -0,0 +1,32 @@ +package zerolog + +import ( + "github.com/vmware/vmware-go-kcl/logger" + "testing" +) + +func TestZeroLogLoggerWithConfig(t *testing.T) { + config := logger.Configuration{ + 
EnableConsole: true, + ConsoleLevel: logger.Debug, + ConsoleJSONFormat: true, + EnableFile: true, + FileLevel: logger.Info, + FileJSONFormat: false, + Filename: "/tmp/kcl-zerolog-log.log", + } + + log := NewZerologLoggerWithConfig(config) + + contextLogger := log.WithFields(logger.Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with rs zerolog") + contextLogger.Infof("Rs zerolog is awesome") +} + +func TestZeroLogLogger(t *testing.T) { + log := NewZerologLogger() + + contextLogger := log.WithFields(logger.Fields{"key1": "value1"}) + contextLogger.Debugf("Starting with zerolog") + contextLogger.Infof("Zerolog is awesome") +} From 7af9290557a9cdf3782eb64d40844783ab90a034 Mon Sep 17 00:00:00 2001 From: Fabiano Arruda Date: Fri, 22 Oct 2021 03:28:41 +0200 Subject: [PATCH 74/90] Upgrade golang 1.17 (#98) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * upgrade to golang 1.17 Signed-off-by: Fabiano Graças # Conflicts: # go.mod # go.sum * improve after shell lint Signed-off-by: Fabiano Graças * improve after upgrade docker image (used by the build system) Signed-off-by: Fabiano Graças * remove not needed variable Signed-off-by: Fabiano Graças * apply fixes after security scan (hmake test) Signed-off-by: Fabiano Graças * add missing package after merge with latest master branch code. 
Signed-off-by: Fabiano Graças * improve docker layer Signed-off-by: Fabiano Graças * upgrade packages Signed-off-by: Fabiano Graças Co-authored-by: Fabiano Graças --- .gitignore | 2 + HyperMake | 3 +- clientlibrary/utils/random.go | 15 +- clientlibrary/utils/random_test.go | 12 +- clientlibrary/worker/worker.go | 40 +-- go.mod | 42 ++- go.sum | 483 +++++++++++++++++++++++++--- support/scripts/check.sh | 26 +- support/toolchain/HyperMake | 2 +- support/toolchain/docker/Dockerfile | 14 +- 10 files changed, 524 insertions(+), 115 deletions(-) diff --git a/.gitignore b/.gitignore index 79c027d..a3b9045 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,5 @@ .vscode *_mock_test.go filenames + +.DS_Store \ No newline at end of file diff --git a/HyperMake b/HyperMake index 9337eb9..cffd91b 100644 --- a/HyperMake +++ b/HyperMake @@ -21,7 +21,6 @@ targets: watches: - go.mod cmds: - - export GO111MODULE=on - go mod download - go mod vendor - go mod tidy @@ -88,5 +87,5 @@ settings: default-targets: - test docker: - image: 'vmware/go-kcl-toolchain:0.1.3' + image: 'vmware/go-kcl-toolchain:0.1.4' src-volume: /go/src/github.com/vmware/vmware-go-kcl diff --git a/clientlibrary/utils/random.go b/clientlibrary/utils/random.go index ef9dbc4..7c45f91 100644 --- a/clientlibrary/utils/random.go +++ b/clientlibrary/utils/random.go @@ -16,10 +16,13 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package utils package utils import ( - "math/rand" + "crypto/rand" + "math/big" "time" ) @@ -32,11 +35,13 @@ const ( func RandStringBytesMaskImpr(n int) string { b := make([]byte, n) - rand.Seed(time.Now().UTC().UnixNano()) - // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters! 
- for i, cache, remain := n-1, rand.Int63(), letterIdxMax; i >= 0; { + seed := time.Now().UTC().UnixNano() + rnd, _ := rand.Int(rand.Reader, big.NewInt(seed)) + // A rand.Int64() generates 64 random bits, enough for letterIdxMax letters! + for i, cache, remain := n-1, rnd.Int64(), letterIdxMax; i >= 0; { if remain == 0 { - cache, remain = rand.Int63(), letterIdxMax + rnd, _ = rand.Int(rand.Reader, big.NewInt(seed)) + cache, remain = rnd.Int64(), letterIdxMax } if idx := int(cache & letterIdxMask); idx < len(letterBytes) { b[i] = letterBytes[idx] diff --git a/clientlibrary/utils/random_test.go b/clientlibrary/utils/random_test.go index c63b21b..a613ca8 100644 --- a/clientlibrary/utils/random_test.go +++ b/clientlibrary/utils/random_test.go @@ -20,7 +20,6 @@ package utils import ( "fmt" - "math/rand" "testing" "time" ) @@ -32,17 +31,18 @@ func TestRandom(t *testing.T) { if s1 == s2 { t.Fatalf("failed in generating random string. s1: %s, s2: %s", s1, s2) } + fmt.Println(s1) + fmt.Println(s2) } } func TestRandomNum(t *testing.T) { - rand.Seed(time.Now().UTC().UnixNano()) - for i := 0; i < 10; i++ { - s1 := rand.Int63() - s2 := rand.Int63() + seed := time.Now().UTC().Second() + s1 := RandStringBytesMaskImpr(seed) + s2 := RandStringBytesMaskImpr(seed) if s1 == s2 { - t.Fatalf("failed in generating random string. s1: %d, s2: %d", s1, s2) + t.Fatalf("failed in generating random string. s1: %s, s2: %s", s1, s2) } fmt.Println(s1) fmt.Println(s2) diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index 339a7ba..c5b39f4 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +// Package worker // The implementation is derived from https://github.com/patrobinson/gokini // // Copyright 2018 Patrick robinson @@ -28,8 +30,9 @@ package worker import ( + "crypto/rand" "errors" - "math/rand" + "math/big" "sync" "time" @@ -45,11 +48,9 @@ import ( par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" ) -/** - * Worker is the high level class that Kinesis applications use to start processing data. It initializes and oversees - * different components (e.g. syncing shard and lease information, tracking shard assignments, and processing data from - * the shards). - */ +//Worker is the high level class that Kinesis applications use to start processing data. It initializes and oversees +//different components (e.g. syncing shard and lease information, tracking shard assignments, and processing data from +//the shards). type Worker struct { streamName string regionName string @@ -66,7 +67,7 @@ type Worker struct { waitGroup *sync.WaitGroup done bool - rng *rand.Rand + randomSeed int64 shardStatus map[string]*par.ShardStatus shardStealInProgress bool @@ -80,9 +81,6 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli mService = metrics.NoopMonitoringService{} } - // Create a pseudo-random number generator and seed it. - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - return &Worker{ streamName: kclConfig.StreamName, regionName: kclConfig.RegionName, @@ -91,7 +89,7 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli kclConfig: kclConfig, mService: mService, done: false, - rng: rng, + randomSeed: time.Now().UTC().UnixNano(), } } @@ -108,7 +106,7 @@ func (w *Worker) WithCheckpointer(checker chk.Checkpointer) *Worker { return w } -// Run starts consuming data from the stream, and pass it to the application record processors. +// Start Run starts consuming data from the stream, and pass it to the application record processors. 
func (w *Worker) Start() error { log := w.kclConfig.Logger if err := w.initialize(); err != nil { @@ -133,7 +131,7 @@ func (w *Worker) Start() error { return nil } -// Shutdown signals worker to shutdown. Worker will try initiating shutdown of all record processors. +// Shutdown signals worker to shut down. Worker will try initiating shutdown of all record processors. func (w *Worker) Shutdown() { log := w.kclConfig.Logger log.Infof("Worker shutdown in requested.") @@ -258,7 +256,8 @@ func (w *Worker) eventLoop() { // starts at the same time, this decreases the probability of them calling // kinesis.DescribeStream at the same time, and hit the hard-limit on aws API calls. // On average the period remains the same so that doesn't affect behavior. - shardSyncSleep := w.kclConfig.ShardSyncIntervalMillis/2 + w.rng.Intn(w.kclConfig.ShardSyncIntervalMillis) + rnd, _ := rand.Int(rand.Reader, big.NewInt(int64(w.kclConfig.ShardSyncIntervalMillis))) + shardSyncSleep := w.kclConfig.ShardSyncIntervalMillis/2 + int(rnd.Int64()) err := w.syncShard() if err != nil { @@ -290,7 +289,7 @@ func (w *Worker) eventLoop() { err := w.checkpointer.FetchCheckpoint(shard) if err != nil { - // checkpoint may not existed yet is not an error condition. + // checkpoint may not exist yet is not an error condition. if err != chk.ErrSequenceIDNotFound { log.Warnf("Couldn't fetch checkpoint: %+v", err) // move on to next shard @@ -371,7 +370,7 @@ func (w *Worker) rebalance() error { return err } - // Only attempt to steal one shard at at time, to allow for linear convergence + // Only attempt to steal one shard at time, to allow for linear convergence if w.shardStealInProgress { shardInfo := make(map[string]bool) err := w.getShardIDs("", shardInfo) @@ -418,12 +417,12 @@ func (w *Worker) rebalance() error { log.Debugf("We have enough shards, not attempting to steal any. 
workerID: %s", w.workerID) return nil } - maxShards := int(optimalShards) + var workerSteal string for worker, shards := range workers { - if worker != w.workerID && len(shards) > maxShards { + if worker != w.workerID && len(shards) > optimalShards { workerSteal = worker - maxShards = len(shards) + optimalShards = len(shards) } } // Not all shards are allocated so fallback to default shard allocation mechanisms @@ -434,7 +433,8 @@ func (w *Worker) rebalance() error { // Steal a random shard from the worker with the most shards w.shardStealInProgress = true - randIndex := rand.Intn(len(workers[workerSteal])) + rnd, _ := rand.Int(rand.Reader, big.NewInt(int64(len(workers[workerSteal])))) + randIndex := int(rnd.Int64()) shardToSteal := workers[workerSteal][randIndex] log.Debugf("Stealing shard %s from %s", shardToSteal, workerSteal) diff --git a/go.mod b/go.mod index d6923eb..264834c 100644 --- a/go.mod +++ b/go.mod @@ -1,22 +1,34 @@ module github.com/vmware/vmware-go-kcl +go 1.17 + require ( - github.com/BurntSushi/toml v0.3.1 // indirect - github.com/aws/aws-sdk-go v1.34.8 - github.com/awslabs/kinesis-aggregation/go v0.0.0-20201211133042-142dfe1d7a6d - github.com/golang/protobuf v1.3.1 - github.com/google/uuid v1.1.1 - github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect - github.com/prometheus/client_golang v0.9.3 - github.com/prometheus/common v0.4.1 - github.com/prometheus/procfs v0.0.0-20190523193104-a7aeb8df3389 // indirect + github.com/aws/aws-sdk-go v1.41.7 + github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f + github.com/golang/protobuf v1.5.2 + github.com/google/uuid v1.3.0 + github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/common v0.32.1 github.com/rs/zerolog v1.25.0 - github.com/sirupsen/logrus v1.4.2 - github.com/stretchr/testify v1.5.1 - go.uber.org/atomic v1.4.0 // indirect - go.uber.org/multierr v1.2.0 // indirect - go.uber.org/zap v1.11.0 + github.com/sirupsen/logrus v1.8.1 + 
github.com/stretchr/testify v1.7.0 + go.uber.org/zap v1.19.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) -go 1.13 +require ( + github.com/BurntSushi/toml v0.4.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.7.0 // indirect + golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac // indirect + google.golang.org/protobuf v1.27.1 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect +) diff --git a/go.sum b/go.sum index 992fe57..8fc9eba 100644 --- a/go.sum +++ b/go.sum @@ -1,125 +1,520 @@ -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go 
v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.34.8 h1:GDfVeXG8XQDbpOeAj7415F8qCQZwvY/k/fj+HBqUnBA= -github.com/aws/aws-sdk-go v1.34.8/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/awslabs/kinesis-aggregation/go v0.0.0-20201211133042-142dfe1d7a6d h1:kGtsYh3+yYsCafn/pp/j/SMbc2bOiWJBxxkzCnAQWF4= -github.com/awslabs/kinesis-aggregation/go v0.0.0-20201211133042-142dfe1d7a6d/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= +github.com/aws/aws-sdk-go v1.41.7 h1:vlpR8Cky3ZxUVNINgeRZS6N0p6zmFvu/ZqRRwrTI25U= +github.com/aws/aws-sdk-go v1.41.7/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= +github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod 
h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian 
v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= -github.com/jmespath/go-jmespath v0.3.0/go.mod 
h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8= 
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190523193104-a7aeb8df3389 h1:F/k2nob1S9M6v5Xkq7KjSTQirOYaYQord0jR4TwyVmY= -github.com/prometheus/procfs v0.0.0-20190523193104-a7aeb8df3389/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.25.0 h1:Rj7XygbUHKUlDPcVdoLyR91fJBsduXj5fRxyqIQj/II= github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.2.0 h1:6I+W7f5VwC5SV9dNrZ3qXrDB9mD0dyGOi/ZJmYw03T4= -go.uber.org/multierr v1.2.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.11.0 h1:gSmpCfs+R47a4yQPAI4xJ0IPDLTRGXskm6UelqNXpqE= -go.uber.org/zap v1.11.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp 
v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile 
v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api 
v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto 
v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/support/scripts/check.sh b/support/scripts/check.sh index 6126dc0..eb87f6b 100755 --- a/support/scripts/check.sh +++ b/support/scripts/check.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash . 
support/scripts/functions.sh @@ -12,25 +12,23 @@ checkfmt() { } lint() { - gometalinter \ - --exclude=_mock.go \ - --disable=gotype \ + golangci-lint run \ + --skip-files=_mock.go \ --disable=golint \ - --vendor \ - --skip=test \ + --skip-dirs=test \ --fast \ - --deadline=600s \ - --severity=golint:error \ - --errors \ + --timeout=600s \ + --verbose \ $(local_go_pkgs) } scanast() { set +e + gosec version gosec ./... > security.log 2>&1 set -e - local issues=$(grep -E "Severity: MEDIUM" security.log | wc -l) + local issues="$(grep -E 'Severity: MEDIUM' security.log | wc -l)" if [ -n $issues ] && [ $issues -gt 0 ]; then echo "" echo "Medium Severity Issues:" @@ -38,8 +36,8 @@ scanast() { echo $issues "medium severity issues found." fi - local issues=$(grep -E "Severity: HIGH" security.log | grep -v "vendor") - local issues_count=$(grep -E "Severity: HIGH" security.log | grep -v "vendor" | wc -l) + local issues="$(grep -E 'Severity: HIGH' security.log | grep -v vendor)" + local issues_count="$(grep -E 'Severity: HIGH' security.log | grep -v vendor | wc -l)" if [ -n $issues_count ] && [ $issues_count -gt 0 ]; then echo "" echo "High Severity Issues:" @@ -50,8 +48,8 @@ scanast() { exit 1 fi - local issues=$(grep -E "Errors unhandled" security.log | grep -v "vendor" | grep -v "/src/go/src") - local issues_count=$(grep -E "Errors unhandled" security.log | grep -v "vendor" | grep -v "/src/go/src" | wc -l) + local issues="$(grep -E 'Errors unhandled' security.log | grep -v vendor | grep -v /src/go/src)" + local issues_count="$(grep -E 'Errors unhandled' security.log | grep -v vendor | grep -v /src/go/src | wc -l)" if [ -n $issues_count ] && [ $issues_count -gt 0 ]; then echo "" echo "Unhandled errors:" diff --git a/support/toolchain/HyperMake b/support/toolchain/HyperMake index 0408c88..c294e5a 100644 --- a/support/toolchain/HyperMake +++ b/support/toolchain/HyperMake @@ -25,4 +25,4 @@ settings: default-targets: - rebuild-toolchain docker: - image: 
'vmware/go-kcl-toolchain:0.1.3' + image: 'vmware/go-kcl-toolchain:0.1.4' diff --git a/support/toolchain/docker/Dockerfile b/support/toolchain/docker/Dockerfile index 47a4528..47a5d12 100644 --- a/support/toolchain/docker/Dockerfile +++ b/support/toolchain/docker/Dockerfile @@ -1,10 +1,8 @@ -FROM golang:1.13 +FROM golang:1.17 ENV PATH /go/bin:/src/bin:/root/go/bin:/usr/local/go/bin:$PATH ENV GOPATH /go:/src -RUN go get -v github.com/alecthomas/gometalinter && \ - go get -v golang.org/x/tools/cmd/... && \ - go get -v github.com/FiloSottile/gvt && \ - go get github.com/securego/gosec/cmd/gosec/... && \ - go get github.com/derekparker/delve/cmd/dlv && \ - gometalinter --install && \ - chmod -R a+rw /go \ No newline at end of file +RUN go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.42.1 && \ + go install golang.org/x/tools/cmd/...@latest && \ + go install github.com/go-delve/delve/cmd/dlv@latest && \ + curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s v2.8.1 && \ + chmod -R a+rw /go From 6372087bc3019d55af38136b707db12329c97ffe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Mon, 8 Nov 2021 14:38:23 +0100 Subject: [PATCH 75/90] removed due the new error handling https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md#error-handling --- clientlibrary/utils/awserr.go | 31 ------------------------------- 1 file changed, 31 deletions(-) delete mode 100644 clientlibrary/utils/awserr.go diff --git a/clientlibrary/utils/awserr.go b/clientlibrary/utils/awserr.go deleted file mode 100644 index 6e692b8..0000000 --- a/clientlibrary/utils/awserr.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2021 VMware, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and - * associated documentation files (the "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is furnished to do - * so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all copies or substantial - * portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT - * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ -package utils - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -func AWSErrCode(err error) string { - awsErr, _ := err.(awserr.Error) - if awsErr != nil { - return awsErr.Code() - } - return "" -} From 0c204685a9d051409f4774b8557c4a2ea2549b9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Mon, 8 Nov 2021 15:00:48 +0100 Subject: [PATCH 76/90] improve comments --- clientlibrary/checkpoint/checkpointer.go | 9 ++-- .../checkpoint/dynamodb-checkpointer.go | 2 + clientlibrary/config/config.go | 52 ++++++++++--------- clientlibrary/config/initial-stream-pos.go | 2 + clientlibrary/config/kcl-config.go | 39 +++++++------- clientlibrary/interfaces/inputs.go | 10 ++-- .../record-processor-checkpointer.go | 12 +++-- clientlibrary/interfaces/record-processor.go | 14 +++-- clientlibrary/interfaces/sequence-number.go | 2 + .../metrics/cloudwatch/cloudwatch.go | 4 +- clientlibrary/metrics/interfaces.go | 2 + .../metrics/prometheus/prometheus.go | 2 + clientlibrary/partition/partition.go | 4 +- clientlibrary/utils/random_test.go | 2 + clientlibrary/utils/uuid.go | 3 ++ .../worker/record-processor-checkpointer.go | 11 ++-- test/record_processor_test.go | 3 +- 17 files changed, 107 insertions(+), 66 deletions(-) diff --git a/clientlibrary/checkpoint/checkpointer.go b/clientlibrary/checkpoint/checkpointer.go index 4d4ceaa..fff1a51 100644 --- a/clientlibrary/checkpoint/checkpointer.go +++ b/clientlibrary/checkpoint/checkpointer.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package checkpoint // The implementation is derived from https://github.com/patrobinson/gokini // // Copyright 2018 Patrick robinson @@ -42,11 +44,11 @@ const ( ParentShardIdKey = "ParentShardId" ClaimRequestKey = "ClaimRequest" - // We've completely processed all records in this shard. 
+ // ShardEnd We've completely processed all records in this shard. ShardEnd = "SHARD_END" // ErrShardClaimed is returned when shard is claimed - ErrShardClaimed = "Shard is already claimed by another node" + ErrShardClaimed = "shard is already claimed by another node" ) type ErrLeaseNotAcquired struct { @@ -77,8 +79,7 @@ type Checkpointer interface { // RemoveLeaseOwner to remove lease owner for the shard entry to make the shard available for reassignment RemoveLeaseOwner(string) error - // New Lease Stealing Methods - // ListActiveWorkers returns active workers and their shards + // ListActiveWorkers returns active workers and their shards (New Lease Stealing Methods) ListActiveWorkers(map[string]*par.ShardStatus) (map[string][]*par.ShardStatus, error) // ClaimShard claims a shard for stealing diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 8df5e37..0bf8086 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package checkpoint // The implementation is derived from https://github.com/patrobinson/gokini // // Copyright 2018 Patrick robinson diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index 9f3f002..f6e061d 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package config // The implementation is derived from https://github.com/awslabs/amazon-kinesis-client /* * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
@@ -54,85 +56,85 @@ const ( // AT_TIMESTAMP start from the record at or after the specified server-side Timestamp. AT_TIMESTAMP - // The location in the shard from which the KinesisClientLibrary will start fetching records from + // DefaultInitialPositionInStream The location in the shard from which the KinesisClientLibrary will start fetching records from // when the application starts for the first time and there is no checkpoint for the shard. DefaultInitialPositionInStream = LATEST - // Fail over time in milliseconds. A worker which does not renew it's lease within this time interval + // DefaultFailoverTimeMillis Fail over time in milliseconds. A worker which does not renew it's lease within this time interval // will be regarded as having problems and it's shards will be assigned to other workers. // For applications that have a large number of shards, this may be set to a higher number to reduce // the number of DynamoDB IOPS required for tracking leases. DefaultFailoverTimeMillis = 10000 - // Period before the end of lease during which a lease is refreshed by the owner. + // DefaultLeaseRefreshPeriodMillis Period before the end of lease during which a lease is refreshed by the owner. DefaultLeaseRefreshPeriodMillis = 5000 - // Max records to fetch from Kinesis in a single GetRecords call. + // DefaultMaxRecords Max records to fetch from Kinesis in a single GetRecords call. DefaultMaxRecords = 10000 - // The default value for how long the {@link ShardConsumer} should sleep if no records are returned - // from the call to - DefaultIdletimeBetweenReadsMillis = 1000 + // DefaultIdleTimeBetweenReadsMillis The default value for how long the {@link ShardConsumer} + // should sleep if no records are returned from the call to + DefaultIdleTimeBetweenReadsMillis = 1000 - // Don't call processRecords() on the record processor for empty record lists. 
+ // DefaultDontCallProcessRecordsForEmptyRecordList Don't call processRecords() on the record processor for empty record lists. DefaultDontCallProcessRecordsForEmptyRecordList = false - // Interval in milliseconds between polling to check for parent shard completion. + // DefaultParentShardPollIntervalMillis Interval in milliseconds between polling to check for parent shard completion. // Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on // completion of parent shards). DefaultParentShardPollIntervalMillis = 10000 - // Shard sync interval in milliseconds - e.g. wait for this long between shard sync tasks. + // DefaultShardSyncIntervalMillis Shard sync interval in milliseconds - e.g. wait for this long between shard sync tasks. DefaultShardSyncIntervalMillis = 60000 - // Cleanup leases upon shards completion (don't wait until they expire in Kinesis). + // DefaultCleanupLeasesUponShardsCompletion Cleanup leases upon shards completion (don't wait until they expire in Kinesis). // Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by // default we try to delete the ones we don't need any longer. DefaultCleanupLeasesUponShardsCompletion = true - // Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures). + // DefaultTaskBackoffTimeMillis Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures). DefaultTaskBackoffTimeMillis = 500 - // KCL will validate client provided sequence numbers with a call to Amazon Kinesis before + // DefaultValidateSequenceNumberBeforeCheckpointing KCL will validate client provided sequence numbers with a call to Amazon Kinesis before // checkpointing for calls to {@link RecordProcessorCheckpointer#checkpoint(String)} by default. DefaultValidateSequenceNumberBeforeCheckpointing = true - // The max number of leases (shards) this worker should process. 
+ // DefaultMaxLeasesForWorker The max number of leases (shards) this worker should process. // This can be useful to avoid overloading (and thrashing) a worker when a host has resource constraints // or during deployment. // NOTE: Setting this to a low value can cause data loss if workers are not able to pick up all shards in the // stream due to the max limit. DefaultMaxLeasesForWorker = math.MaxInt16 - // Max leases to steal from another worker at one time (for load balancing). + // DefaultMaxLeasesToStealAtOneTime Max leases to steal from another worker at one time (for load balancing). // Setting this to a higher number can allow for faster load convergence (e.g. during deployments, cold starts), // but can cause higher churn in the system. DefaultMaxLeasesToStealAtOneTime = 1 - // The Amazon DynamoDB table used for tracking leases will be provisioned with this read capacity. + // DefaultInitialLeaseTableReadCapacity The Amazon DynamoDB table used for tracking leases will be provisioned with this read capacity. DefaultInitialLeaseTableReadCapacity = 10 - // The Amazon DynamoDB table used for tracking leases will be provisioned with this write capacity. + // DefaultInitialLeaseTableWriteCapacity The Amazon DynamoDB table used for tracking leases will be provisioned with this write capacity. DefaultInitialLeaseTableWriteCapacity = 10 - // The Worker will skip shard sync during initialization if there are one or more leases in the lease table. This + // DefaultSkipShardSyncAtStartupIfLeasesExist The Worker will skip shard sync during initialization if there are one or more leases in the lease table. This // assumes that the shards and leases are in-sync. This enables customers to choose faster startup times (e.g. // during incremental deployments of an application). DefaultSkipShardSyncAtStartupIfLeasesExist = false - // The amount of milliseconds to wait before graceful shutdown forcefully terminates. 
+ // DefaultShutdownGraceMillis The amount of milliseconds to wait before graceful shutdown forcefully terminates. DefaultShutdownGraceMillis = 5000 - // Lease stealing defaults to false for backwards compatibility. + // DefaultEnableLeaseStealing Lease stealing defaults to false for backwards compatibility. DefaultEnableLeaseStealing = false - // Interval between rebalance tasks defaults to 5 seconds. + // DefaultLeaseStealingIntervalMillis Interval between rebalance tasks defaults to 5 seconds. DefaultLeaseStealingIntervalMillis = 5000 - // Number of milliseconds to wait before another worker can aquire a claimed shard + // DefaultLeaseStealingClaimTimeoutMillis Number of milliseconds to wait before another worker can aquire a claimed shard DefaultLeaseStealingClaimTimeoutMillis = 120000 - // Number of milliseconds to wait before syncing with lease table (dynamodDB) + // DefaultLeaseSyncingIntervalMillis Number of milliseconds to wait before syncing with lease table (dynamodDB) DefaultLeaseSyncingIntervalMillis = 60000 ) @@ -141,7 +143,7 @@ type ( // This is used during initial application bootstrap (when a checkpoint doesn't exist for a shard or its parents) InitialPositionInStream int - // Class that houses the entities needed to specify the Position in the stream from where a new application should + // InitialPositionInStreamExtended Class that houses the entities needed to specify the Position in the stream from where a new application should // start. InitialPositionInStreamExtended struct { Position InitialPositionInStream @@ -156,7 +158,7 @@ type ( Timestamp *time.Time `type:"Timestamp" timestampFormat:"unix"` } - // Configuration for the Kinesis Client Library. + // KinesisClientLibConfiguration Configuration for the Kinesis Client Library. // Note: There is no need to configure credential provider. Credential can be get from InstanceProfile. KinesisClientLibConfiguration struct { // ApplicationName is name of application. 
Kinesis allows multiple applications to consume the same stream. diff --git a/clientlibrary/config/initial-stream-pos.go b/clientlibrary/config/initial-stream-pos.go index 2169812..1cb0abd 100644 --- a/clientlibrary/config/initial-stream-pos.go +++ b/clientlibrary/config/initial-stream-pos.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package config // The implementation is derived from https://github.com/awslabs/amazon-kinesis-client /* * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index a831e88..6751f6a 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package config // The implementation is derived from https://github.com/awslabs/amazon-kinesis-client /* * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. @@ -165,25 +167,24 @@ func (c *KinesisClientLibConfiguration) WithMaxLeasesForWorker(n int) *KinesisCl return c } -/** - * Controls how long the KCL will sleep if no records are returned from Kinesis - * - *

- * This value is only used when no records are returned; if records are returned, the {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ProcessTask} will - * immediately retrieve the next set of records after the call to - * {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#processRecords(ProcessRecordsInput)} - * has returned. Setting this value to high may result in the KCL being unable to catch up. If you are changing this - * value it's recommended that you enable {@link #withCallProcessRecordsEvenForEmptyRecordList(boolean)}, and - * monitor how far behind the records retrieved are by inspecting - * {@link com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput#getMillisBehindLatest()}, and the - * CloudWatch - * Metric: GetRecords.MillisBehindLatest - *

- * - * @param IdleTimeBetweenReadsInMillis - * how long to sleep between GetRecords calls when no records are returned. - * @return KinesisClientLibConfiguration +/* WithIdleTimeBetweenReadsInMillis + Controls how long the KCL will sleep if no records are returned from Kinesis + +

+ This value is only used when no records are returned; if records are returned, the {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ProcessTask} will + immediately retrieve the next set of records after the call to + {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#processRecords(ProcessRecordsInput)} + has returned. Setting this value to high may result in the KCL being unable to catch up. If you are changing this + value it's recommended that you enable {@link #withCallProcessRecordsEvenForEmptyRecordList(boolean)}, and + monitor how far behind the records retrieved are by inspecting + {@link com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput#getMillisBehindLatest()}, and the + CloudWatch + Metric: GetRecords.MillisBehindLatest +

+ + @param IdleTimeBetweenReadsInMillis: how long to sleep between GetRecords calls when no records are returned. + @return KinesisClientLibConfiguration */ func (c *KinesisClientLibConfiguration) WithIdleTimeBetweenReadsInMillis(idleTimeBetweenReadsInMillis int) *KinesisClientLibConfiguration { checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis) diff --git a/clientlibrary/interfaces/inputs.go b/clientlibrary/interfaces/inputs.go index eb12387..385a3d2 100644 --- a/clientlibrary/interfaces/inputs.go +++ b/clientlibrary/interfaces/inputs.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package interfaces // The implementation is derived from https://github.com/awslabs/amazon-kinesis-client /* * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. @@ -57,7 +59,7 @@ const ( */ TERMINATE - /** + /* * Processing will be moved to a different record processor (fail over, load balancing use cases). * Applications SHOULD NOT checkpoint their progress (as another record processor may have already started * processing data). @@ -67,12 +69,12 @@ const ( // Containers for the parameters to the IRecordProcessor type ( - /** + /* * Reason the RecordProcessor is being shutdown. * Used to distinguish between a fail-over vs. a termination (shard is closed and all records have been delivered). - * In case of a fail over, applications should NOT checkpoint as part of shutdown, + * In case of a fail-over, applications should NOT checkpoint as part of shutdown, * since another record processor may have already started processing records for that shard. 
- * In case of termination (resharding use case), applications SHOULD checkpoint their progress to indicate + * In case of termination (resharding use case), applications SHOULD keep checkpointing their progress to indicate * that they have successfully processed all the records (processing of child shards can then begin). */ ShutdownReason int diff --git a/clientlibrary/interfaces/record-processor-checkpointer.go b/clientlibrary/interfaces/record-processor-checkpointer.go index b4133d3..cdb1f53 100644 --- a/clientlibrary/interfaces/record-processor-checkpointer.go +++ b/clientlibrary/interfaces/record-processor-checkpointer.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package interfaces // The implementation is derived from https://github.com/awslabs/amazon-kinesis-client /* * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. @@ -37,7 +39,8 @@ type ( IPreparedCheckpointer interface { GetPendingCheckpoint() *ExtendedSequenceNumber - /** + // Checkpoint + /* * This method will record a pending checkpoint. * * @error ThrottlingError Can't store checkpoint. Can be caused by checkpointing too frequently. @@ -56,13 +59,15 @@ type ( Checkpoint() error } - /** + // IRecordProcessorCheckpointer + /* * Used by RecordProcessors when they want to checkpoint their progress. * The Kinesis Client Library will pass an object implementing this interface to RecordProcessors, so they can * checkpoint their progress. */ IRecordProcessorCheckpointer interface { - /** + // Checkpoint + /* * This method will checkpoint the progress at the provided sequenceNumber. This method is analogous to * {@link #checkpoint()} but provides the ability to specify the sequence number at which to * checkpoint. 
@@ -85,6 +90,7 @@ type ( */ Checkpoint(sequenceNumber *string) error + // PrepareCheckpoint /** * This method will record a pending checkpoint at the provided sequenceNumber. * diff --git a/clientlibrary/interfaces/record-processor.go b/clientlibrary/interfaces/record-processor.go index 766f79a..1c41d56 100644 --- a/clientlibrary/interfaces/record-processor.go +++ b/clientlibrary/interfaces/record-processor.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package interfaces // The implementation is derived from https://github.com/awslabs/amazon-kinesis-client /* * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. @@ -38,7 +40,8 @@ type ( // The main task of using KCL is to provide implementation on IRecordProcessor interface. // Note: This is exactly the same interface as Amazon KCL IRecordProcessor v2 IRecordProcessor interface { - /** + // Initialize + /* * Invoked by the Amazon Kinesis Client Library before data records are delivered to the RecordProcessor instance * (via processRecords). * @@ -46,7 +49,8 @@ type ( */ Initialize(initializationInput *InitializationInput) - /** + // ProcessRecords + /* * Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the * application. * Upon fail over, the new instance will get records with sequence number > checkpoint position @@ -57,7 +61,8 @@ type ( */ ProcessRecords(processRecordsInput *ProcessRecordsInput) - /** + // Shutdown + /* * Invoked by the Amazon Kinesis Client Library to indicate it will no longer send data records to this * RecordProcessor instance. * @@ -77,7 +82,8 @@ type ( // for processing shard. Client can choose either creating one processor per shard or sharing them. 
IRecordProcessorFactory interface { - /** + // CreateProcessor + /* * Returns a record processor to be used for processing data records for a (assigned) shard. * * @return Returns a processor object. diff --git a/clientlibrary/interfaces/sequence-number.go b/clientlibrary/interfaces/sequence-number.go index db91b9b..8cec8a9 100644 --- a/clientlibrary/interfaces/sequence-number.go +++ b/clientlibrary/interfaces/sequence-number.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package interfaces // The implementation is derived from https://github.com/awslabs/amazon-kinesis-client /* * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. diff --git a/clientlibrary/metrics/cloudwatch/cloudwatch.go b/clientlibrary/metrics/cloudwatch/cloudwatch.go index 3fea191..2a1731d 100644 --- a/clientlibrary/metrics/cloudwatch/cloudwatch.go +++ b/clientlibrary/metrics/cloudwatch/cloudwatch.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +// Package cloudwatch // The implementation is derived from https://github.com/patrobinson/gokini // // Copyright 2018 Patrick robinson @@ -125,7 +127,7 @@ func (cw *MonitoringService) Shutdown() { cw.logger.Infof("Cloudwatch metrics system has been shutdown.") } -// Start daemon to flush metrics periodically +// eventloop start daemon to flush metrics periodically func (cw *MonitoringService) eventloop() { defer cw.waitGroup.Done() diff --git a/clientlibrary/metrics/interfaces.go b/clientlibrary/metrics/interfaces.go index ddd6188..6e06108 100644 --- a/clientlibrary/metrics/interfaces.go +++ b/clientlibrary/metrics/interfaces.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package metrics // The implementation is derived from https://github.com/patrobinson/gokini // // Copyright 2018 Patrick robinson diff --git a/clientlibrary/metrics/prometheus/prometheus.go b/clientlibrary/metrics/prometheus/prometheus.go index e6277c6..b2b4915 100644 --- a/clientlibrary/metrics/prometheus/prometheus.go +++ b/clientlibrary/metrics/prometheus/prometheus.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package prometheus // The implementation is derived from https://github.com/patrobinson/gokini // // Copyright 2018 Patrick robinson diff --git a/clientlibrary/partition/partition.go b/clientlibrary/partition/partition.go index b3f287f..5524416 100644 --- a/clientlibrary/partition/partition.go +++ b/clientlibrary/partition/partition.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +// Package partition // The implementation is derived from https://github.com/patrobinson/gokini // // Copyright 2018 Patrick robinson @@ -25,7 +27,7 @@ // The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -package worker +package partition import ( "sync" diff --git a/clientlibrary/utils/random_test.go b/clientlibrary/utils/random_test.go index a613ca8..1f51f74 100644 --- a/clientlibrary/utils/random_test.go +++ b/clientlibrary/utils/random_test.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package utils package utils import ( diff --git a/clientlibrary/utils/uuid.go b/clientlibrary/utils/uuid.go index e36d8bb..857347c 100644 --- a/clientlibrary/utils/uuid.go +++ b/clientlibrary/utils/uuid.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +// Package utils package utils import ( @@ -28,5 +30,6 @@ func MustNewUUID() string { if err != nil { panic(err) } + return id.String() } diff --git a/clientlibrary/worker/record-processor-checkpointer.go b/clientlibrary/worker/record-processor-checkpointer.go index e47bbba..cf96ea9 100644 --- a/clientlibrary/worker/record-processor-checkpointer.go +++ b/clientlibrary/worker/record-processor-checkpointer.go @@ -16,11 +16,11 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package worker package worker import ( - "github.com/aws/aws-sdk-go/aws" - chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" @@ -28,7 +28,9 @@ import ( type ( - /* Objects of this class are prepared to checkpoint at a specific sequence number. They use an + // PreparedCheckpointer + /* + * Objects of this class are prepared to checkpoint at a specific sequence number. They use an * IRecordProcessorCheckpointer to do the actual checkpointing, so their checkpoint is subject to the same 'didn't go * backwards' validation as a normal checkpoint. */ @@ -37,7 +39,8 @@ type ( checkpointer kcl.IRecordProcessorCheckpointer } - /** + //RecordProcessorCheckpointer + /* * This class is used to enable RecordProcessors to checkpoint their progress. * The Amazon Kinesis Client Library will instantiate an object and provide a reference to the application * RecordProcessor instance. Amazon Kinesis Client Library will create one instance per shard assignment. 
diff --git a/test/record_processor_test.go b/test/record_processor_test.go index 4f36266..2e37368 100644 --- a/test/record_processor_test.go +++ b/test/record_processor_test.go @@ -16,12 +16,13 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package test package test import ( "testing" - "github.com/aws/aws-sdk-go/aws" "github.com/stretchr/testify/assert" kc "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" ) From b5373056903e1dd770608a8b4cc8297de48d50b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Mon, 8 Nov 2021 15:17:48 +0100 Subject: [PATCH 77/90] add deaggregator v2 code straightly to the repo since it's seems they will not merge the PR soon. --- internal/deaggregator/deaggregator.go | 96 +++++++++ internal/deaggregator/deaggregator_test.go | 202 +++++++++++++++++++ internal/records/records.pb.go | 215 +++++++++++++++++++++ 3 files changed, 513 insertions(+) create mode 100644 internal/deaggregator/deaggregator.go create mode 100644 internal/deaggregator/deaggregator_test.go create mode 100644 internal/records/records.pb.go diff --git a/internal/deaggregator/deaggregator.go b/internal/deaggregator/deaggregator.go new file mode 100644 index 0000000..6aa8905 --- /dev/null +++ b/internal/deaggregator/deaggregator.go @@ -0,0 +1,96 @@ +// Package deaggregator +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +package deaggregator + +import ( + "crypto/md5" + "fmt" + + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" + "github.com/golang/protobuf/proto" + + rec "github.com/vmware/vmware-go-kcl/internal/records" +) + +// KplMagicHeader Magic File Header for a KPL Aggregated Record +var KplMagicHeader = fmt.Sprintf("%q", []byte("\xf3\x89\x9a\xc2")) + +const ( + KplMagicLen = 4 // Length of magic header for KPL Aggregate Record checking. 
+ DigestSize = 16 // MD5 Message size for protobuf. +) + +// DeaggregateRecords takes an array of Kinesis records and expands any Protobuf +// records within that array, returning an array of all records +func DeaggregateRecords(records []types.Record) ([]types.Record, error) { + var isAggregated bool + allRecords := make([]types.Record, 0) + + for _, record := range records { + isAggregated = true + + var dataMagic string + var decodedDataNoMagic []byte + // Check if record is long enough to have magic file header + if len(record.Data) >= KplMagicLen { + dataMagic = fmt.Sprintf("%q", record.Data[:KplMagicLen]) + decodedDataNoMagic = record.Data[KplMagicLen:] + } else { + isAggregated = false + } + + // Check if record has KPL Aggregate Record Magic Header and data length + // is correct size + if KplMagicHeader != dataMagic || len(decodedDataNoMagic) <= DigestSize { + isAggregated = false + } + + if isAggregated { + messageDigest := fmt.Sprintf("%x", decodedDataNoMagic[len(decodedDataNoMagic)-DigestSize:]) + messageData := decodedDataNoMagic[:len(decodedDataNoMagic)-DigestSize] + + calculatedDigest := fmt.Sprintf("%x", md5.Sum(messageData)) + + // Check protobuf MD5 hash matches MD5 sum of record + if messageDigest != calculatedDigest { + isAggregated = false + } else { + aggRecord := &rec.AggregatedRecord{} + err := proto.Unmarshal(messageData, aggRecord) + + if err != nil { + return nil, err + } + + partitionKeys := aggRecord.PartitionKeyTable + + for _, aggrec := range aggRecord.Records { + newRecord := createUserRecord(partitionKeys, aggrec, record) + allRecords = append(allRecords, newRecord) + } + } + } + + if !isAggregated { + allRecords = append(allRecords, record) + } + } + + return allRecords, nil +} + +// createUserRecord takes in the partitionKeys of the aggregated record, the individual +// deaggregated record, and the original aggregated record builds a kinesis.Record and +// returns it +func createUserRecord(partitionKeys []string, aggRec 
*rec.Record, record types.Record) types.Record { + partitionKey := partitionKeys[*aggRec.PartitionKeyIndex] + + return types.Record{ + ApproximateArrivalTimestamp: record.ApproximateArrivalTimestamp, + Data: aggRec.Data, + EncryptionType: record.EncryptionType, + PartitionKey: &partitionKey, + SequenceNumber: record.SequenceNumber, + } +} \ No newline at end of file diff --git a/internal/deaggregator/deaggregator_test.go b/internal/deaggregator/deaggregator_test.go new file mode 100644 index 0000000..5bc8be6 --- /dev/null +++ b/internal/deaggregator/deaggregator_test.go @@ -0,0 +1,202 @@ +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 +package deaggregator_test + +import ( + "crypto/md5" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + + deagg "github.com/vmware/vmware-go-kcl/internal/deaggregator" + rec "github.com/vmware/vmware-go-kcl/internal/records" +) + +// Generate an aggregate record in the correct AWS-specified format +// https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md +func generateAggregateRecord(numRecords int) []byte { + + aggr := &rec.AggregatedRecord{} + // Start with the magic header + aggRecord := []byte("\xf3\x89\x9a\xc2") + partKeyTable := make([]string, 0) + + // Create proto record with numRecords length + for i := 0; i < numRecords; i++ { + var partKey uint64 + var hashKey uint64 + partKey = uint64(i) + hashKey = uint64(i) * uint64(10) + r := &rec.Record{ + PartitionKeyIndex: &partKey, + ExplicitHashKeyIndex: &hashKey, + Data: []byte("Some test data string"), + Tags: make([]*rec.Tag, 0), + } + + aggr.Records = append(aggr.Records, r) + partKeyVal := "test" + fmt.Sprint(i) + partKeyTable = append(partKeyTable, partKeyVal) + } + + aggr.PartitionKeyTable = partKeyTable + // Marshal to protobuf record, create md5 sum from 
proto record + // and append both to aggRecord with magic header + data, _ := proto.Marshal(aggr) + md5Hash := md5.Sum(data) + aggRecord = append(aggRecord, data...) + aggRecord = append(aggRecord, md5Hash[:]...) + return aggRecord +} + +// Generate a generic kinesis.Record using whatever []byte +// is passed in as the data (can be normal []byte or proto record) +func generateKinesisRecord(data []byte) types.Record { + currentTime := time.Now() + encryptionType := types.EncryptionTypeNone + partitionKey := "1234" + sequenceNumber := "21269319989900637946712965403778482371" + return types.Record { + ApproximateArrivalTimestamp: ¤tTime, + Data: data, + EncryptionType: encryptionType, + PartitionKey: &partitionKey, + SequenceNumber: &sequenceNumber, + } +} + +// This tests to make sure that the data is at least larger than the length +// of the magic header to do some array slicing with index out of bounds +func TestSmallLengthReturnsCorrectNumberOfDeaggregatedRecords(t *testing.T) { + var err error + var kr types.Record + + krs := make([]types.Record, 0, 1) + + smallByte := []byte("No") + kr = generateKinesisRecord(smallByte) + krs = append(krs, kr) + dars, err := deagg.DeaggregateRecords(krs) + if err != nil { + panic(err) + } + + // Small byte test, since this is not a deaggregated record, should return 1 + // record in the array. + assert.Equal(t, 1, len(dars), "Small Byte test should return length of 1.") +} + +// This function tests to make sure that the data starts with the correct magic header +// according to KPL aggregate documentation. 
+func TestNonMatchingMagicHeaderReturnsSingleRecord(t *testing.T) { + var err error + var kr types.Record + + krs := make([]types.Record, 0, 1) + + min := 1 + max := 10 + n := rand.Intn(max-min) + min + aggData := generateAggregateRecord(n) + mismatchAggData := aggData[1:] + kr = generateKinesisRecord(mismatchAggData) + + krs = append(krs, kr) + + dars, err := deagg.DeaggregateRecords(krs) + if err != nil { + panic(err) + } + + // A byte record with a magic header that does not match 0xF3 0x89 0x9A 0xC2 + // should return a single record. + assert.Equal(t, 1, len(dars), "Mismatch magic header test should return length of 1.") +} + +// This function tests that the DeaggregateRecords function returns the correct number of +// deaggregated records from a single aggregated record. +func TestVariableLengthRecordsReturnsCorrectNumberOfDeaggregatedRecords(t *testing.T) { + var err error + var kr types.Record + + krs := make([]types.Record, 0, 1) + + min := 1 + max := 10 + n := rand.Intn(max-min) + min + aggData := generateAggregateRecord(n) + kr = generateKinesisRecord(aggData) + krs = append(krs, kr) + + dars, err := deagg.DeaggregateRecords(krs) + if err != nil { + panic(err) + } + + // Variable Length Aggregate Record test has aggregaterd records and should return + // n length. + assertMsg := fmt.Sprintf("Variable Length Aggregate Record should return length %v.", len(dars)) + assert.Equal(t, n, len(dars), assertMsg) +} + +// This function tests the length of the message after magic file header. If length is less than +// the digest size (16 bytes), it is not an aggregated record. 
+func TestRecordAfterMagicHeaderWithLengthLessThanDigestSizeReturnsSingleRecord(t *testing.T) { + var err error + var kr types.Record + + krs := make([]types.Record, 0, 1) + + min := 1 + max := 10 + n := rand.Intn(max-min) + min + aggData := generateAggregateRecord(n) + // Change size of proto message to 15 + reducedAggData := aggData[:19] + kr = generateKinesisRecord(reducedAggData) + + krs = append(krs, kr) + + dars, err := deagg.DeaggregateRecords(krs) + if err != nil { + panic(err) + } + + // A byte record with length less than 16 after the magic header should return + // a single record from DeaggregateRecords + assert.Equal(t, 1, len(dars), "Digest size test should return length of 1.") +} + +// This function tests the MD5 Sum at the end of the record by comparing MD5 sum +// at end of proto record with MD5 Sum of Proto message. If they do not match, +// it is not an aggregated record. +func TestRecordWithMismatchMd5SumReturnsSingleRecord(t *testing.T) { + var err error + var kr types.Record + + krs := make([]types.Record, 0, 1) + + min := 1 + max := 10 + n := rand.Intn(max-min) + min + aggData := generateAggregateRecord(n) + // Remove last byte from array to mismatch the MD5 sums + mismatchAggData := aggData[:len(aggData)-1] + kr = generateKinesisRecord(mismatchAggData) + + krs = append(krs, kr) + + dars, err := deagg.DeaggregateRecords(krs) + if err != nil { + panic(err) + } + + // A byte record with an MD5 sum that does not match with the md5.Sum(record) + // will be marked as a non-aggregate record and return a single record + assert.Equal(t, 1, len(dars), "Mismatch md5 sum test should return length of 1.") +} \ No newline at end of file diff --git a/internal/records/records.pb.go b/internal/records/records.pb.go new file mode 100644 index 0000000..689a1c8 --- /dev/null +++ b/internal/records/records.pb.go @@ -0,0 +1,215 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: records.proto + +package records + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type AggregatedRecord struct { + PartitionKeyTable []string `protobuf:"bytes,1,rep,name=partition_key_table,json=partitionKeyTable" json:"partition_key_table,omitempty"` + ExplicitHashKeyTable []string `protobuf:"bytes,2,rep,name=explicit_hash_key_table,json=explicitHashKeyTable" json:"explicit_hash_key_table,omitempty"` + Records []*Record `protobuf:"bytes,3,rep,name=records" json:"records,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AggregatedRecord) Reset() { *m = AggregatedRecord{} } +func (m *AggregatedRecord) String() string { return proto.CompactTextString(m) } +func (*AggregatedRecord) ProtoMessage() {} +func (*AggregatedRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_6ae0159314830e16, []int{0} +} + +func (m *AggregatedRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AggregatedRecord.Unmarshal(m, b) +} +func (m *AggregatedRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AggregatedRecord.Marshal(b, m, deterministic) +} +func (m *AggregatedRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregatedRecord.Merge(m, src) +} +func (m *AggregatedRecord) XXX_Size() int { + return xxx_messageInfo_AggregatedRecord.Size(m) +} +func (m *AggregatedRecord) XXX_DiscardUnknown() { + 
xxx_messageInfo_AggregatedRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregatedRecord proto.InternalMessageInfo + +func (m *AggregatedRecord) GetPartitionKeyTable() []string { + if m != nil { + return m.PartitionKeyTable + } + return nil +} + +func (m *AggregatedRecord) GetExplicitHashKeyTable() []string { + if m != nil { + return m.ExplicitHashKeyTable + } + return nil +} + +func (m *AggregatedRecord) GetRecords() []*Record { + if m != nil { + return m.Records + } + return nil +} + +type Tag struct { + Key *string `protobuf:"bytes,1,req,name=key" json:"key,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { + return fileDescriptor_6ae0159314830e16, []int{1} +} + +func (m *Tag) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Tag.Unmarshal(m, b) +} +func (m *Tag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Tag.Marshal(b, m, deterministic) +} +func (m *Tag) XXX_Merge(src proto.Message) { + xxx_messageInfo_Tag.Merge(m, src) +} +func (m *Tag) XXX_Size() int { + return xxx_messageInfo_Tag.Size(m) +} +func (m *Tag) XXX_DiscardUnknown() { + xxx_messageInfo_Tag.DiscardUnknown(m) +} + +var xxx_messageInfo_Tag proto.InternalMessageInfo + +func (m *Tag) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Tag) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Record struct { + PartitionKeyIndex *uint64 `protobuf:"varint,1,req,name=partition_key_index,json=partitionKeyIndex" json:"partition_key_index,omitempty"` + ExplicitHashKeyIndex *uint64 
`protobuf:"varint,2,opt,name=explicit_hash_key_index,json=explicitHashKeyIndex" json:"explicit_hash_key_index,omitempty"` + Data []byte `protobuf:"bytes,3,req,name=data" json:"data,omitempty"` + Tags []*Tag `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Record) Reset() { *m = Record{} } +func (m *Record) String() string { return proto.CompactTextString(m) } +func (*Record) ProtoMessage() {} +func (*Record) Descriptor() ([]byte, []int) { + return fileDescriptor_6ae0159314830e16, []int{2} +} + +func (m *Record) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Record.Unmarshal(m, b) +} +func (m *Record) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Record.Marshal(b, m, deterministic) +} +func (m *Record) XXX_Merge(src proto.Message) { + xxx_messageInfo_Record.Merge(m, src) +} +func (m *Record) XXX_Size() int { + return xxx_messageInfo_Record.Size(m) +} +func (m *Record) XXX_DiscardUnknown() { + xxx_messageInfo_Record.DiscardUnknown(m) +} + +var xxx_messageInfo_Record proto.InternalMessageInfo + +func (m *Record) GetPartitionKeyIndex() uint64 { + if m != nil && m.PartitionKeyIndex != nil { + return *m.PartitionKeyIndex + } + return 0 +} + +func (m *Record) GetExplicitHashKeyIndex() uint64 { + if m != nil && m.ExplicitHashKeyIndex != nil { + return *m.ExplicitHashKeyIndex + } + return 0 +} + +func (m *Record) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *Record) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil +} + +func init() { + proto.RegisterType((*AggregatedRecord)(nil), "AggregatedRecord") + proto.RegisterType((*Tag)(nil), "Tag") + proto.RegisterType((*Record)(nil), "Record") +} + +func init() { proto.RegisterFile("records.proto", fileDescriptor_6ae0159314830e16) } + +var fileDescriptor_6ae0159314830e16 = []byte{ + // 245 bytes 
of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x8f, 0xc1, 0x4a, 0xc4, 0x30, + 0x10, 0x86, 0xc9, 0x26, 0xba, 0x74, 0x54, 0x58, 0xe3, 0x82, 0x39, 0xd6, 0x9e, 0x72, 0xb1, 0x07, + 0xc1, 0x07, 0xf0, 0xa6, 0x78, 0x0b, 0xbd, 0x97, 0x71, 0x3b, 0xa4, 0x61, 0xcb, 0xb6, 0xa4, 0x51, + 0xb6, 0xef, 0xa2, 0xef, 0x2a, 0x49, 0xdd, 0x45, 0x51, 0x6f, 0x93, 0xf9, 0xf9, 0x32, 0xff, 0x07, + 0x17, 0x9e, 0x36, 0xbd, 0x6f, 0xc6, 0x72, 0xf0, 0x7d, 0xe8, 0x8b, 0x77, 0x06, 0xab, 0x07, 0x6b, + 0x3d, 0x59, 0x0c, 0xd4, 0x98, 0x94, 0xc9, 0x12, 0xae, 0x06, 0xf4, 0xc1, 0x05, 0xd7, 0xef, 0xea, + 0x2d, 0x4d, 0x75, 0xc0, 0x97, 0x8e, 0x14, 0xcb, 0xb9, 0xce, 0xcc, 0xe5, 0x31, 0x7a, 0xa6, 0xa9, + 0x8a, 0x81, 0xbc, 0x87, 0x6b, 0xda, 0x0f, 0x9d, 0xdb, 0xb8, 0x50, 0xb7, 0x38, 0xb6, 0xdf, 0x98, + 0x45, 0x62, 0xd6, 0x87, 0xf8, 0x11, 0xc7, 0xf6, 0x88, 0xdd, 0xc0, 0xf2, 0xab, 0x8c, 0xe2, 0x39, + 0xd7, 0x67, 0x77, 0xcb, 0x72, 0x2e, 0x60, 0x0e, 0xfb, 0xe2, 0x16, 0x78, 0x85, 0x56, 0xae, 0x80, + 0x6f, 0x69, 0x52, 0x2c, 0x5f, 0xe8, 0xcc, 0xc4, 0x51, 0xae, 0xe1, 0xe4, 0x0d, 0xbb, 0xd7, 0x78, + 0x80, 0xe9, 0xcc, 0xcc, 0x8f, 0xe2, 0x83, 0xc1, 0xe9, 0x7f, 0x0e, 0x6e, 0xd7, 0xd0, 0x3e, 0x7d, + 0x21, 0x7e, 0x3a, 0x3c, 0xc5, 0xe0, 0x6f, 0x87, 0x99, 0x89, 0x27, 0xc4, 0x2f, 0x87, 0x19, 0x93, + 0x20, 0x1a, 0x0c, 0xa8, 0x78, 0xbe, 0xd0, 0xe7, 0x26, 0xcd, 0x52, 0x81, 0x08, 0x68, 0x47, 0x25, + 0x92, 0x94, 0x28, 0x2b, 0xb4, 0x26, 0x6d, 0x3e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x87, 0x3e, 0x63, + 0x69, 0x7d, 0x01, 0x00, 0x00, +} \ No newline at end of file From 97c6633ea006cf0782f21dcccf14ac45c94f314b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Mon, 8 Nov 2021 16:27:29 +0100 Subject: [PATCH 78/90] migrate to aws-sdk-go-v2 --- .../checkpoint/dynamodb-checkpointer.go | 300 +++++++------ .../checkpoint/dynamodb-checkpointer_test.go | 408 +++++++++--------- clientlibrary/config/config.go | 8 +- clientlibrary/config/kcl-config.go | 56 +-- 
clientlibrary/interfaces/inputs.go | 12 +- .../metrics/cloudwatch/cloudwatch.go | 71 ++- clientlibrary/worker/common-shard-consumer.go | 39 +- .../worker/fan-out-shard-consumer.go | 30 +- .../worker/polling-shard-consumer.go | 30 +- .../worker/record-processor-checkpointer.go | 7 +- clientlibrary/worker/worker-fan-out.go | 38 +- clientlibrary/worker/worker.go | 56 ++- go.mod | 27 +- go.sum | 203 +++++++++ test/lease_stealing_util_test.go | 52 +-- test/record_processor_test.go | 10 +- test/record_publisher_test.go | 114 +++-- test/worker_custom_test.go | 30 +- test/worker_lease_stealing_test.go | 18 +- test/worker_test.go | 57 ++- 20 files changed, 960 insertions(+), 606 deletions(-) diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 0bf8086..4465f5a 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -30,26 +30,24 @@ package checkpoint import ( + "context" "errors" "fmt" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awsConfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" "github.com/vmware/vmware-go-kcl/clientlibrary/config" par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" - "github.com/vmware/vmware-go-kcl/clientlibrary/utils" "github.com/vmware/vmware-go-kcl/logger" ) const ( - // ErrInvalidDynamoDBSchema is returned when there are one or more fields missing from the table - ErrInvalidDynamoDBSchema = "The DynamoDB schema is invalid and may need to be re-created" - // NumMaxRetries is the max times of doing 
retry NumMaxRetries = 10 ) @@ -62,8 +60,8 @@ type DynamoCheckpoint struct { leaseTableWriteCapacity int64 LeaseDuration int - svc dynamodbiface.DynamoDBAPI - kclConfig *config.KinesisClientLibConfiguration + svc *dynamodb.Client + kclConfig *config.KinesisClientLibConfiguration Retries int lastLeaseSync time.Time } @@ -83,7 +81,7 @@ func NewDynamoCheckpoint(kclConfig *config.KinesisClientLibConfiguration) *Dynam } // WithDynamoDB is used to provide DynamoDB service -func (checkpointer *DynamoCheckpoint) WithDynamoDB(svc dynamodbiface.DynamoDBAPI) *DynamoCheckpoint { +func (checkpointer *DynamoCheckpoint) WithDynamoDB(svc *dynamodb.Client) *DynamoCheckpoint { checkpointer.svc = svc return checkpointer } @@ -92,31 +90,40 @@ func (checkpointer *DynamoCheckpoint) WithDynamoDB(svc dynamodbiface.DynamoDBAPI func (checkpointer *DynamoCheckpoint) Init() error { checkpointer.log.Infof("Creating DynamoDB session") - s, err := session.NewSession(&aws.Config{ - Region: aws.String(checkpointer.kclConfig.RegionName), - Endpoint: aws.String(checkpointer.kclConfig.DynamoDBEndpoint), - Credentials: checkpointer.kclConfig.DynamoDBCredentials, - Retryer: client.DefaultRetryer{ - NumMaxRetries: checkpointer.Retries, - MinRetryDelay: client.DefaultRetryerMinRetryDelay, - MinThrottleDelay: client.DefaultRetryerMinThrottleDelay, - MaxRetryDelay: client.DefaultRetryerMaxRetryDelay, - MaxThrottleDelay: client.DefaultRetryerMaxRetryDelay, - }, - }) - - if err != nil { - // no need to move forward - checkpointer.log.Fatalf("Failed in getting DynamoDB session for creating Worker: %+v", err) - } - if checkpointer.svc == nil { - checkpointer.svc = dynamodb.New(s) + resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { + return aws.Endpoint{ + PartitionID: "aws", + URL: checkpointer.kclConfig.DynamoDBEndpoint, + SigningRegion: checkpointer.kclConfig.RegionName, + }, nil + }) + + cfg, err := awsConfig.LoadDefaultConfig( + context.TODO(), + 
awsConfig.WithRegion(checkpointer.kclConfig.RegionName), + awsConfig.WithCredentialsProvider( + credentials.NewStaticCredentialsProvider( + checkpointer.kclConfig.DynamoDBCredentials.Value.AccessKeyID, + checkpointer.kclConfig.DynamoDBCredentials.Value.SecretAccessKey, + checkpointer.kclConfig.DynamoDBCredentials.Value.SessionToken)), + awsConfig.WithEndpointResolver(resolver), + awsConfig.WithRetryer(func() aws.Retryer { + return retry.AddWithMaxBackoffDelay(retry.NewStandard(), retry.DefaultMaxBackoff) + }), + ) + + if err != nil { + checkpointer.log.Fatalf("unable to load SDK config, %v", err) + } + + checkpointer.svc = dynamodb.NewFromConfig(cfg) } if !checkpointer.doesTableExist() { return checkpointer.createTable() } + return nil } @@ -133,8 +140,12 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign var claimRequest string if checkpointer.kclConfig.EnableLeaseStealing { - if currentCheckpointClaimRequest, ok := currentCheckpoint[ClaimRequestKey]; ok && currentCheckpointClaimRequest.S != nil { - claimRequest = *currentCheckpointClaimRequest.S + if currentCheckpointClaimRequest, ok := currentCheckpoint[ClaimRequestKey]; ok { + fmt.Printf("aaaaaa %v", currentCheckpointClaimRequest) + } + if currentCheckpointClaimRequest, ok := currentCheckpoint[ClaimRequestKey]; ok && + currentCheckpointClaimRequest.(*types.AttributeValueMemberS).Value != "" { + claimRequest = currentCheckpointClaimRequest.(*types.AttributeValueMemberS).Value if newAssignTo != claimRequest && !isClaimRequestExpired { checkpointer.log.Debugf("another worker: %s has a claim on this shard. 
Not going to renew the lease", claimRequest) return errors.New(ErrShardClaimed) @@ -146,13 +157,13 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign leaseVar, leaseTimeoutOk := currentCheckpoint[LeaseTimeoutKey] var conditionalExpression string - var expressionAttributeValues map[string]*dynamodb.AttributeValue + var expressionAttributeValues map[string]types.AttributeValue if !leaseTimeoutOk || !assignedToOk { conditionalExpression = "attribute_not_exists(AssignedTo)" } else { - assignedTo := *assignedVar.S - leaseTimeout := *leaseVar.S + assignedTo := assignedVar.(*types.AttributeValueMemberS).Value + leaseTimeout := leaseVar.(*types.AttributeValueMemberS).Value currentLeaseTimeout, err := time.Parse(time.RFC3339, leaseTimeout) if err != nil { @@ -171,57 +182,60 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign checkpointer.log.Debugf("Attempting to get a lock for shard: %s, leaseTimeout: %s, assignedTo: %s, newAssignedTo: %s", shard.ID, currentLeaseTimeout, assignedTo, newAssignTo) conditionalExpression = "ShardID = :id AND AssignedTo = :assigned_to AND LeaseTimeout = :lease_timeout" - expressionAttributeValues = map[string]*dynamodb.AttributeValue{ - ":id": { - S: aws.String(shard.ID), + expressionAttributeValues = map[string]types.AttributeValue{ + ":id": &types.AttributeValueMemberS{ + Value: shard.ID, }, - ":assigned_to": { - S: aws.String(assignedTo), + ":assigned_to": &types.AttributeValueMemberS{ + Value: assignedTo, }, - ":lease_timeout": { - S: aws.String(leaseTimeout), + ":lease_timeout": &types.AttributeValueMemberS{ + Value: leaseTimeout, }, } } - marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ - LeaseKeyKey: { - S: aws.String(shard.ID), + marshalledCheckpoint := map[string]types.AttributeValue{ + LeaseKeyKey: &types.AttributeValueMemberS{ + Value: shard.ID, }, - LeaseOwnerKey: { - S: aws.String(newAssignTo), + LeaseOwnerKey: &types.AttributeValueMemberS{ + Value: 
newAssignTo, }, - LeaseTimeoutKey: { - S: aws.String(newLeaseTimeoutString), + LeaseTimeoutKey: &types.AttributeValueMemberS{ + Value: newLeaseTimeoutString, }, } if len(shard.ParentShardId) > 0 { - marshalledCheckpoint[ParentShardIdKey] = &dynamodb.AttributeValue{S: aws.String(shard.ParentShardId)} + marshalledCheckpoint[ParentShardIdKey] = &types.AttributeValueMemberS{ + Value: shard.ParentShardId, + } } if checkpoint := shard.GetCheckpoint(); checkpoint != "" { - marshalledCheckpoint[SequenceNumberKey] = &dynamodb.AttributeValue{ - S: aws.String(checkpoint), + marshalledCheckpoint[SequenceNumberKey] = &types.AttributeValueMemberS{ + Value: checkpoint, } } if checkpointer.kclConfig.EnableLeaseStealing { if claimRequest != "" && claimRequest == newAssignTo && !isClaimRequestExpired { if expressionAttributeValues == nil { - expressionAttributeValues = make(map[string]*dynamodb.AttributeValue) + expressionAttributeValues = make(map[string]types.AttributeValue) } conditionalExpression = conditionalExpression + " AND ClaimRequest = :claim_request" - expressionAttributeValues[":claim_request"] = &dynamodb.AttributeValue{ - S: &claimRequest, + expressionAttributeValues[":claim_request"] = &types.AttributeValueMemberS{ + Value: claimRequest, } } } err = checkpointer.conditionalUpdate(conditionalExpression, expressionAttributeValues, marshalledCheckpoint) if err != nil { - if utils.AWSErrCode(err) == dynamodb.ErrCodeConditionalCheckFailedException { - return ErrLeaseNotAcquired{dynamodb.ErrCodeConditionalCheckFailedException} + var conditionalCheckErr *types.ConditionalCheckFailedException + if errors.As(err, &conditionalCheckErr) { + return ErrLeaseNotAcquired{conditionalCheckErr.ErrorMessage()} } return err } @@ -237,23 +251,23 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign // CheckpointSequence writes a checkpoint at the designated sequence ID func (checkpointer *DynamoCheckpoint) CheckpointSequence(shard *par.ShardStatus) error { 
leaseTimeout := shard.GetLeaseTimeout().UTC().Format(time.RFC3339) - marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ - LeaseKeyKey: { - S: aws.String(shard.ID), + marshalledCheckpoint := map[string]types.AttributeValue{ + LeaseKeyKey: &types.AttributeValueMemberS{ + Value: shard.ID, }, - SequenceNumberKey: { - S: aws.String(shard.GetCheckpoint()), + SequenceNumberKey: &types.AttributeValueMemberS{ + Value: shard.GetCheckpoint(), }, - LeaseOwnerKey: { - S: aws.String(shard.GetLeaseOwner()), + LeaseOwnerKey: &types.AttributeValueMemberS{ + Value: shard.GetLeaseOwner(), }, - LeaseTimeoutKey: { - S: aws.String(leaseTimeout), + LeaseTimeoutKey: &types.AttributeValueMemberS{ + Value: leaseTimeout, }, } if len(shard.ParentShardId) > 0 { - marshalledCheckpoint[ParentShardIdKey] = &dynamodb.AttributeValue{S: &shard.ParentShardId} + marshalledCheckpoint[ParentShardIdKey] = &types.AttributeValueMemberS{Value: shard.ParentShardId} } return checkpointer.saveItem(marshalledCheckpoint) @@ -270,16 +284,17 @@ func (checkpointer *DynamoCheckpoint) FetchCheckpoint(shard *par.ShardStatus) er if !ok { return ErrSequenceIDNotFound } - checkpointer.log.Debugf("Retrieved Shard Iterator %s", *sequenceID.S) - shard.SetCheckpoint(aws.StringValue(sequenceID.S)) + + checkpointer.log.Debugf("Retrieved Shard Iterator %s", sequenceID.(*types.AttributeValueMemberS).Value) + shard.SetCheckpoint(sequenceID.(*types.AttributeValueMemberS).Value) if assignedTo, ok := checkpoint[LeaseOwnerKey]; ok { - shard.SetLeaseOwner(aws.StringValue(assignedTo.S)) + shard.SetLeaseOwner(assignedTo.(*types.AttributeValueMemberS).Value) } // Use up-to-date leaseTimeout to avoid ConditionalCheckFailedException when claiming - if leaseTimeout, ok := checkpoint[LeaseTimeoutKey]; ok && leaseTimeout.S != nil { - currentLeaseTimeout, err := time.Parse(time.RFC3339, aws.StringValue(leaseTimeout.S)) + if leaseTimeout, ok := checkpoint[LeaseTimeoutKey]; ok && leaseTimeout.(*types.AttributeValueMemberS).Value != "" 
{ + currentLeaseTimeout, err := time.Parse(time.RFC3339, leaseTimeout.(*types.AttributeValueMemberS).Value) if err != nil { return err } @@ -306,21 +321,21 @@ func (checkpointer *DynamoCheckpoint) RemoveLeaseInfo(shardID string) error { func (checkpointer *DynamoCheckpoint) RemoveLeaseOwner(shardID string) error { input := &dynamodb.UpdateItemInput{ TableName: aws.String(checkpointer.TableName), - Key: map[string]*dynamodb.AttributeValue{ - LeaseKeyKey: { - S: aws.String(shardID), + Key: map[string]types.AttributeValue{ + LeaseKeyKey: &types.AttributeValueMemberS{ + Value: shardID, }, }, UpdateExpression: aws.String("remove " + LeaseOwnerKey), - ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ - ":assigned_to": { - S: aws.String(checkpointer.kclConfig.WorkerID), + ExpressionAttributeValues: map[string]types.AttributeValue{ + ":assigned_to": &types.AttributeValueMemberS{ + Value: checkpointer.kclConfig.WorkerID, }, }, ConditionExpression: aws.String("AssignedTo = :assigned_to"), } - _, err := checkpointer.svc.UpdateItem(input) + _, err := checkpointer.svc.UpdateItem(context.TODO(), input) return err } @@ -343,6 +358,7 @@ func (checkpointer *DynamoCheckpoint) ListActiveWorkers(shardStatus map[string]* checkpointer.log.Debugf("Shard Not Assigned Error. 
ShardID: %s, WorkerID: %s", shard.ID, checkpointer.kclConfig.WorkerID) return nil, ErrShardNotAssigned } + if w, ok := workers[leaseOwner]; ok { workers[leaseOwner] = append(w, shard) } else { @@ -361,54 +377,54 @@ func (checkpointer *DynamoCheckpoint) ClaimShard(shard *par.ShardStatus, claimID leaseTimeoutString := shard.GetLeaseTimeout().Format(time.RFC3339) conditionalExpression := `ShardID = :id AND LeaseTimeout = :lease_timeout AND attribute_not_exists(ClaimRequest)` - expressionAttributeValues := map[string]*dynamodb.AttributeValue{ - ":id": { - S: aws.String(shard.ID), + expressionAttributeValues := map[string]types.AttributeValue{ + ":id": &types.AttributeValueMemberS{ + Value: shard.ID, }, - ":lease_timeout": { - S: aws.String(leaseTimeoutString), + ":lease_timeout": &types.AttributeValueMemberS{ + Value: leaseTimeoutString, }, } - marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ - LeaseKeyKey: { - S: &shard.ID, + marshalledCheckpoint := map[string]types.AttributeValue{ + LeaseKeyKey: &types.AttributeValueMemberS{ + Value: shard.ID, }, - LeaseTimeoutKey: { - S: &leaseTimeoutString, + LeaseTimeoutKey: &types.AttributeValueMemberS{ + Value: leaseTimeoutString, }, - SequenceNumberKey: { - S: &shard.Checkpoint, + SequenceNumberKey: &types.AttributeValueMemberS{ + Value: shard.Checkpoint, }, - ClaimRequestKey: { - S: &claimID, + ClaimRequestKey: &types.AttributeValueMemberS{ + Value: claimID, }, } if leaseOwner := shard.GetLeaseOwner(); leaseOwner == "" { conditionalExpression += " AND attribute_not_exists(AssignedTo)" } else { - marshalledCheckpoint[LeaseOwnerKey] = &dynamodb.AttributeValue{S: &leaseOwner} + marshalledCheckpoint[LeaseOwnerKey] = &types.AttributeValueMemberS{Value: leaseOwner} conditionalExpression += "AND AssignedTo = :assigned_to" - expressionAttributeValues[":assigned_to"] = &dynamodb.AttributeValue{S: &leaseOwner} + expressionAttributeValues[":assigned_to"] = &types.AttributeValueMemberS{Value: leaseOwner} } if checkpoint := 
shard.GetCheckpoint(); checkpoint == "" { conditionalExpression += " AND attribute_not_exists(Checkpoint)" } else if checkpoint == ShardEnd { conditionalExpression += " AND Checkpoint <> :checkpoint" - expressionAttributeValues[":checkpoint"] = &dynamodb.AttributeValue{S: aws.String(ShardEnd)} + expressionAttributeValues[":checkpoint"] = &types.AttributeValueMemberS{Value: ShardEnd} } else { conditionalExpression += " AND Checkpoint = :checkpoint" - expressionAttributeValues[":checkpoint"] = &dynamodb.AttributeValue{S: &checkpoint} + expressionAttributeValues[":checkpoint"] = &types.AttributeValueMemberS{Value: checkpoint} } if shard.ParentShardId == "" { conditionalExpression += " AND attribute_not_exists(ParentShardId)" } else { - marshalledCheckpoint[ParentShardIdKey] = &dynamodb.AttributeValue{S: aws.String(shard.ParentShardId)} + marshalledCheckpoint[ParentShardIdKey] = &types.AttributeValueMemberS{Value: shard.ParentShardId} conditionalExpression += " AND ParentShardId = :parent_shard" - expressionAttributeValues[":parent_shard"] = &dynamodb.AttributeValue{S: &shard.ParentShardId} + expressionAttributeValues[":parent_shard"] = &types.AttributeValueMemberS{Value: shard.ParentShardId} } return checkpointer.conditionalUpdate(conditionalExpression, expressionAttributeValues, marshalledCheckpoint) @@ -424,27 +440,25 @@ func (checkpointer *DynamoCheckpoint) syncLeases(shardStatus map[string]*par.Sha checkpointer.lastLeaseSync = time.Now() input := &dynamodb.ScanInput{ ProjectionExpression: aws.String(fmt.Sprintf("%s,%s,%s", LeaseKeyKey, LeaseOwnerKey, SequenceNumberKey)), - Select: aws.String("SPECIFIC_ATTRIBUTES"), + Select: "SPECIFIC_ATTRIBUTES", TableName: aws.String(checkpointer.kclConfig.TableName), } - err := checkpointer.svc.ScanPages(input, - func(pages *dynamodb.ScanOutput, lastPage bool) bool { - results := pages.Items - for _, result := range results { - shardId, foundShardId := result[LeaseKeyKey] - assignedTo, foundAssignedTo := result[LeaseOwnerKey] - 
checkpoint, foundCheckpoint := result[SequenceNumberKey] - if !foundShardId || !foundAssignedTo || !foundCheckpoint { - continue - } - if shard, ok := shardStatus[aws.StringValue(shardId.S)]; ok { - shard.SetLeaseOwner(aws.StringValue(assignedTo.S)) - shard.SetCheckpoint(aws.StringValue(checkpoint.S)) - } - } - return !lastPage - }) + scanOutput, err := checkpointer.svc.Scan(context.TODO(), input) + results := scanOutput.Items + for _, result := range results { + shardId, foundShardId := result[LeaseKeyKey] + assignedTo, foundAssignedTo := result[LeaseOwnerKey] + checkpoint, foundCheckpoint := result[SequenceNumberKey] + if !foundShardId || !foundAssignedTo || !foundCheckpoint { + continue + } + + if shard, ok := shardStatus[shardId.(*types.AttributeValueMemberS).Value]; ok { + shard.SetLeaseOwner(assignedTo.(*types.AttributeValueMemberS).Value) + shard.SetCheckpoint(checkpoint.(*types.AttributeValueMemberS).Value) + } + } if err != nil { log.Debugf("Error performing SyncLeases. Error: %+v ", err) @@ -456,25 +470,26 @@ func (checkpointer *DynamoCheckpoint) syncLeases(shardStatus map[string]*par.Sha func (checkpointer *DynamoCheckpoint) createTable() error { input := &dynamodb.CreateTableInput{ - AttributeDefinitions: []*dynamodb.AttributeDefinition{ + AttributeDefinitions: []types.AttributeDefinition{ { AttributeName: aws.String(LeaseKeyKey), - AttributeType: aws.String("S"), + AttributeType: types.ScalarAttributeTypeS, }, }, - KeySchema: []*dynamodb.KeySchemaElement{ + KeySchema: []types.KeySchemaElement{ { AttributeName: aws.String(LeaseKeyKey), - KeyType: aws.String("HASH"), + KeyType: types.KeyTypeHash, }, }, - ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ + ProvisionedThroughput: &types.ProvisionedThroughput{ ReadCapacityUnits: aws.Int64(checkpointer.leaseTableReadCapacity), WriteCapacityUnits: aws.Int64(checkpointer.leaseTableWriteCapacity), }, TableName: aws.String(checkpointer.TableName), } - _, err := checkpointer.svc.CreateTable(input) + _, err 
:= checkpointer.svc.CreateTable(context.Background(), input) + return err } @@ -482,18 +497,19 @@ func (checkpointer *DynamoCheckpoint) doesTableExist() bool { input := &dynamodb.DescribeTableInput{ TableName: aws.String(checkpointer.TableName), } - _, err := checkpointer.svc.DescribeTable(input) + _, err := checkpointer.svc.DescribeTable(context.Background(), input) + return err == nil } -func (checkpointer *DynamoCheckpoint) saveItem(item map[string]*dynamodb.AttributeValue) error { +func (checkpointer *DynamoCheckpoint) saveItem(item map[string]types.AttributeValue) error { return checkpointer.putItem(&dynamodb.PutItemInput{ TableName: aws.String(checkpointer.TableName), Item: item, }) } -func (checkpointer *DynamoCheckpoint) conditionalUpdate(conditionExpression string, expressionAttributeValues map[string]*dynamodb.AttributeValue, item map[string]*dynamodb.AttributeValue) error { +func (checkpointer *DynamoCheckpoint) conditionalUpdate(conditionExpression string, expressionAttributeValues map[string]types.AttributeValue, item map[string]types.AttributeValue) error { return checkpointer.putItem(&dynamodb.PutItemInput{ ConditionExpression: aws.String(conditionExpression), TableName: aws.String(checkpointer.TableName), @@ -503,30 +519,38 @@ func (checkpointer *DynamoCheckpoint) conditionalUpdate(conditionExpression stri } func (checkpointer *DynamoCheckpoint) putItem(input *dynamodb.PutItemInput) error { - _, err := checkpointer.svc.PutItem(input) + _, err := checkpointer.svc.PutItem(context.Background(), input) return err } -func (checkpointer *DynamoCheckpoint) getItem(shardID string) (map[string]*dynamodb.AttributeValue, error) { - item, err := checkpointer.svc.GetItem(&dynamodb.GetItemInput{ - TableName: aws.String(checkpointer.TableName), - Key: map[string]*dynamodb.AttributeValue{ - LeaseKeyKey: { - S: aws.String(shardID), +func (checkpointer *DynamoCheckpoint) getItem(shardID string) (map[string]types.AttributeValue, error) { + item, err := 
checkpointer.svc.GetItem(context.Background(), &dynamodb.GetItemInput{ + TableName: aws.String(checkpointer.TableName), + ConsistentRead: aws.Bool(true), + Key: map[string]types.AttributeValue{ + LeaseKeyKey: &types.AttributeValueMemberS{ + Value: shardID, }, }, }) + + // fix problem when starts the environment from scratch (dynamo table is empty) + if item == nil { + return nil, err + } + return item.Item, err } func (checkpointer *DynamoCheckpoint) removeItem(shardID string) error { - _, err := checkpointer.svc.DeleteItem(&dynamodb.DeleteItemInput{ + _, err := checkpointer.svc.DeleteItem(context.Background(), &dynamodb.DeleteItemInput{ TableName: aws.String(checkpointer.TableName), - Key: map[string]*dynamodb.AttributeValue{ - LeaseKeyKey: { - S: aws.String(shardID), + Key: map[string]types.AttributeValue{ + LeaseKeyKey: &types.AttributeValueMemberS{ + Value: shardID, }, }, }) + return err } diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go index 38da0b3..1f0a8f1 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer_test.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer_test.go @@ -28,15 +28,15 @@ package checkpoint import ( + "context" "errors" "sync" "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" "github.com/stretchr/testify/assert" cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" @@ -44,24 +44,24 @@ import ( ) func TestDoesTableExist(t *testing.T) { - svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} + svc := &mockDynamoDB{client: nil, tableExist: true, item: map[string]types.AttributeValue{}} checkpoint := &DynamoCheckpoint{ TableName: 
"TableName", - svc: svc, + svc: svc.client, } if !checkpoint.doesTableExist() { t.Error("Table exists but returned false") } svc = &mockDynamoDB{tableExist: false} - checkpoint.svc = svc + checkpoint.svc = svc.client if checkpoint.doesTableExist() { t.Error("Table does not exist but returned true") } } -func TestGetLeaseNotAquired(t *testing.T) { - svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} +func TestGetLeaseNotAcquired(t *testing.T) { + svc := &mockDynamoDB{tableExist: true, item: map[string]types.AttributeValue{}} kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). @@ -69,8 +69,8 @@ func TestGetLeaseNotAquired(t *testing.T) { WithShardSyncIntervalMillis(5000). WithFailoverTimeMillis(300000) - checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) - checkpoint.Init() + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client) + _ = checkpoint.Init() err := checkpoint.GetLease(&par.ShardStatus{ ID: "0001", Checkpoint: "", @@ -92,7 +92,7 @@ func TestGetLeaseNotAquired(t *testing.T) { } func TestGetLeaseAquired(t *testing.T) { - svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} + svc := &mockDynamoDB{tableExist: true, item: map[string]types.AttributeValue{}} kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). @@ -100,27 +100,28 @@ func TestGetLeaseAquired(t *testing.T) { WithShardSyncIntervalMillis(5000). 
WithFailoverTimeMillis(300000) - checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) - checkpoint.Init() - marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ - LeaseKeyKey: { - S: aws.String("0001"), + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client) + _ = checkpoint.Init() + marshalledCheckpoint := map[string]types.AttributeValue{ + LeaseKeyKey: &types.AttributeValueMemberS{ + Value: "0001", }, - LeaseOwnerKey: { - S: aws.String("abcd-efgh"), + LeaseOwnerKey: &types.AttributeValueMemberS{ + Value: "abcd-efgh", }, - LeaseTimeoutKey: { - S: aws.String(time.Now().AddDate(0, -1, 0).UTC().Format(time.RFC3339)), + LeaseTimeoutKey: &types.AttributeValueMemberS{ + Value: time.Now().AddDate(0, -1, 0).UTC().Format(time.RFC3339), }, - SequenceNumberKey: { - S: aws.String("deadbeef"), + SequenceNumberKey: &types.AttributeValueMemberS{ + Value: "deadbeef", }, } input := &dynamodb.PutItemInput{ TableName: aws.String("TableName"), Item: marshalledCheckpoint, } - checkpoint.svc.PutItem(input) + _, _ = checkpoint.svc.PutItem(context.TODO(), input) + shard := &par.ShardStatus{ ID: "0001", Checkpoint: "deadbeef", @@ -135,8 +136,8 @@ func TestGetLeaseAquired(t *testing.T) { id, ok := svc.item[SequenceNumberKey] if !ok { t.Error("Expected checkpoint to be set by GetLease") - } else if *id.S != "deadbeef" { - t.Errorf("Expected checkpoint to be deadbeef. Got '%s'", *id.S) + } else if id.(*types.AttributeValueMemberS).Value != "deadbeef" { + t.Errorf("Expected checkpoint to be deadbeef. 
Got '%s'", id.(*types.AttributeValueMemberS).Value) } // release owner info @@ -147,7 +148,7 @@ func TestGetLeaseAquired(t *testing.T) { ID: shard.ID, Mux: &sync.RWMutex{}, } - checkpoint.FetchCheckpoint(status) + _ = checkpoint.FetchCheckpoint(status) // checkpointer and parent shard id should be the same assert.Equal(t, shard.Checkpoint, status.Checkpoint) @@ -161,9 +162,9 @@ func TestGetLeaseShardClaimed(t *testing.T) { leaseTimeout := time.Now().Add(-100 * time.Second).UTC() svc := &mockDynamoDB{ tableExist: true, - item: map[string]*dynamodb.AttributeValue{ - ClaimRequestKey: {S: aws.String("ijkl-mnop")}, - LeaseTimeoutKey: {S: aws.String(leaseTimeout.Format(time.RFC3339))}, + item: map[string]types.AttributeValue{ + ClaimRequestKey: &types.AttributeValueMemberS{Value: "ijkl-mnop"}, + LeaseTimeoutKey: &types.AttributeValueMemberS{Value: leaseTimeout.Format(time.RFC3339)}, }, } kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). @@ -174,8 +175,8 @@ func TestGetLeaseShardClaimed(t *testing.T) { WithFailoverTimeMillis(300000). 
WithLeaseStealing(true) - checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) - checkpoint.Init() + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client) + _ = checkpoint.Init() err := checkpoint.GetLease(&par.ShardStatus{ ID: "0001", Checkpoint: "", @@ -214,15 +215,15 @@ func TestGetLeaseClaimRequestExpiredOwner(t *testing.T) { svc := &mockDynamoDB{ tableExist: true, - item: map[string]*dynamodb.AttributeValue{ - LeaseOwnerKey: {S: aws.String("abcd-efgh")}, - ClaimRequestKey: {S: aws.String("ijkl-mnop")}, - LeaseTimeoutKey: {S: aws.String(leaseTimeout.Format(time.RFC3339))}, + item: map[string]types.AttributeValue{ + LeaseOwnerKey: &types.AttributeValueMemberS{Value: "abcd-efgh"}, + ClaimRequestKey: &types.AttributeValueMemberS{Value: "ijkl-mnop"}, + LeaseTimeoutKey: &types.AttributeValueMemberS{Value: leaseTimeout.Format(time.RFC3339)}, }, } - checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) - checkpoint.Init() + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client) + _ = checkpoint.Init() err := checkpoint.GetLease(&par.ShardStatus{ ID: "0001", Checkpoint: "", @@ -251,15 +252,15 @@ func TestGetLeaseClaimRequestExpiredClaimer(t *testing.T) { svc := &mockDynamoDB{ tableExist: true, - item: map[string]*dynamodb.AttributeValue{ - LeaseOwnerKey: {S: aws.String("abcd-efgh")}, - ClaimRequestKey: {S: aws.String("ijkl-mnop")}, - LeaseTimeoutKey: {S: aws.String(leaseTimeout.Format(time.RFC3339))}, + item: map[string]types.AttributeValue{ + LeaseOwnerKey: &types.AttributeValueMemberS{Value: "abcd-efgh"}, + ClaimRequestKey: &types.AttributeValueMemberS{Value: "ijkl-mnop"}, + LeaseTimeoutKey: &types.AttributeValueMemberS{Value: leaseTimeout.Format(time.RFC3339)}, }, } - checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) - checkpoint.Init() + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client) + _ = checkpoint.Init() err := checkpoint.GetLease(&par.ShardStatus{ ID: "0001", Checkpoint: "", 
@@ -276,11 +277,11 @@ func TestFetchCheckpointWithStealing(t *testing.T) { svc := &mockDynamoDB{ tableExist: true, - item: map[string]*dynamodb.AttributeValue{ - SequenceNumberKey: {S: aws.String("deadbeef")}, - LeaseOwnerKey: {S: aws.String("abcd-efgh")}, - LeaseTimeoutKey: { - S: aws.String(future.Format(time.RFC3339)), + item: map[string]types.AttributeValue{ + SequenceNumberKey: &types.AttributeValueMemberS{Value: "deadbeef"}, + LeaseOwnerKey: &types.AttributeValueMemberS{Value: "abcd-efgh"}, + LeaseTimeoutKey: &types.AttributeValueMemberS{ + Value: future.Format(time.RFC3339), }, }, } @@ -293,8 +294,8 @@ func TestFetchCheckpointWithStealing(t *testing.T) { WithFailoverTimeMillis(300000). WithLeaseStealing(true) - checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) - checkpoint.Init() + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client) + _ = checkpoint.Init() status := &par.ShardStatus{ ID: "0001", @@ -303,14 +304,14 @@ func TestFetchCheckpointWithStealing(t *testing.T) { Mux: &sync.RWMutex{}, } - checkpoint.FetchCheckpoint(status) + _ = checkpoint.FetchCheckpoint(status) - leaseTimeout, _ := time.Parse(time.RFC3339, *svc.item[LeaseTimeoutKey].S) + leaseTimeout, _ := time.Parse(time.RFC3339, svc.item[LeaseTimeoutKey].(*types.AttributeValueMemberS).Value) assert.Equal(t, leaseTimeout, status.LeaseTimeout) } func TestGetLeaseConditional(t *testing.T) { - svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} + svc := &mockDynamoDB{tableExist: true, item: map[string]types.AttributeValue{}} kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). @@ -319,30 +320,31 @@ func TestGetLeaseConditional(t *testing.T) { WithFailoverTimeMillis(300000). 
WithLeaseStealing(true) - checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) - checkpoint.Init() - marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ - LeaseKeyKey: { - S: aws.String("0001"), + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client) + _ = checkpoint.Init() + marshalledCheckpoint := map[string]types.AttributeValue{ + LeaseKeyKey: &types.AttributeValueMemberS{ + Value: "0001", }, - LeaseOwnerKey: { - S: aws.String("abcd-efgh"), + LeaseOwnerKey: &types.AttributeValueMemberS{ + Value: "abcd-efgh", }, - LeaseTimeoutKey: { - S: aws.String(time.Now().Add(-1 * time.Second).UTC().Format(time.RFC3339)), + LeaseTimeoutKey: &types.AttributeValueMemberS{ + Value: time.Now().Add(-1 * time.Second).UTC().Format(time.RFC3339), }, - SequenceNumberKey: { - S: aws.String("deadbeef"), + SequenceNumberKey: &types.AttributeValueMemberS{ + Value: "deadbeef", }, - ClaimRequestKey: { - S: aws.String("ijkl-mnop"), + ClaimRequestKey: &types.AttributeValueMemberS{ + Value: "ijkl-mnop", }, } input := &dynamodb.PutItemInput{ TableName: aws.String("TableName"), Item: marshalledCheckpoint, } - checkpoint.svc.PutItem(input) + _, _ = checkpoint.svc.PutItem(context.TODO(), input) + shard := &par.ShardStatus{ ID: "0001", Checkpoint: "deadbeef", @@ -358,26 +360,149 @@ func TestGetLeaseConditional(t *testing.T) { if err != nil { t.Errorf("Lease not aquired after timeout %s", err) } - assert.Equal(t, *svc.expressionAttributeValues[":claim_request"].S, "ijkl-mnop") + assert.Equal(t, svc.expressionAttributeValues[":claim_request"].(*types.AttributeValueMemberS).Value, "ijkl-mnop") assert.Contains(t, svc.conditionalExpression, " AND ClaimRequest = :claim_request") } -type mockDynamoDB struct { - dynamodbiface.DynamoDBAPI - tableExist bool - item map[string]*dynamodb.AttributeValue - conditionalExpression string - expressionAttributeValues map[string]*dynamodb.AttributeValue +func TestListActiveWorkers(t *testing.T) { + svc := &mockDynamoDB{tableExist: 
true, item: map[string]types.AttributeValue{}} + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithLeaseStealing(true) + + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client) + err := checkpoint.Init() + if err != nil { + t.Errorf("Checkpoint initialization failed: %+v", err) + } + + shardStatus := map[string]*par.ShardStatus{ + "0000": {ID: "0000", AssignedTo: "worker_1", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0001": {ID: "0001", AssignedTo: "worker_2", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0002": {ID: "0002", AssignedTo: "worker_4", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0003": {ID: "0003", AssignedTo: "worker_0", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0004": {ID: "0004", AssignedTo: "worker_1", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0005": {ID: "0005", AssignedTo: "worker_3", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0006": {ID: "0006", AssignedTo: "worker_3", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0007": {ID: "0007", AssignedTo: "worker_0", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0008": {ID: "0008", AssignedTo: "worker_4", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0009": {ID: "0009", AssignedTo: "worker_2", Checkpoint: "", Mux: &sync.RWMutex{}}, + "0010": {ID: "0010", AssignedTo: "worker_0", Checkpoint: ShardEnd, Mux: &sync.RWMutex{}}, + } + + workers, err := checkpoint.ListActiveWorkers(shardStatus) + if err != nil { + t.Error(err) + } + + for workerID, shards := range workers { + assert.Equal(t, 2, len(shards)) + for _, shard := range shards { + assert.Equal(t, workerID, shard.AssignedTo) + } + } } -func (m *mockDynamoDB) ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error { +func TestListActiveWorkersErrShardNotAssigned(t *testing.T) { + svc := &mockDynamoDB{tableExist: true, item: map[string]types.AttributeValue{}} + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). 
+ WithLeaseStealing(true) + + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client) + err := checkpoint.Init() + if err != nil { + t.Errorf("Checkpoint initialization failed: %+v", err) + } + + shardStatus := map[string]*par.ShardStatus{ + "0000": {ID: "0000", Mux: &sync.RWMutex{}}, + } + + _, err = checkpoint.ListActiveWorkers(shardStatus) + if err != ErrShardNotAssigned { + t.Error("Expected ErrShardNotAssigned when shard is missing AssignedTo value") + } +} + +func TestClaimShard(t *testing.T) { + svc := &mockDynamoDB{tableExist: true, item: map[string]types.AttributeValue{}} + kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). + WithInitialPositionInStream(cfg.LATEST). + WithMaxRecords(10). + WithMaxLeasesForWorker(1). + WithShardSyncIntervalMillis(5000). + WithFailoverTimeMillis(300000). + WithLeaseStealing(true) + + checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc.client) + _ = checkpoint.Init() + + marshalledCheckpoint := map[string]types.AttributeValue{ + "ShardID": &types.AttributeValueMemberS{ + Value: "0001", + }, + "AssignedTo": &types.AttributeValueMemberS{ + Value: "abcd-efgh", + }, + "LeaseTimeout": &types.AttributeValueMemberS{ + Value: time.Now().AddDate(0, -1, 0).UTC().Format(time.RFC3339), + }, + "Checkpoint": &types.AttributeValueMemberS{ + Value: "deadbeef", + }, + } + input := &dynamodb.PutItemInput{ + TableName: aws.String("TableName"), + Item: marshalledCheckpoint, + } + _, _ = checkpoint.svc.PutItem(context.TODO(), input) + + shard := &par.ShardStatus{ + ID: "0001", + Checkpoint: "deadbeef", + Mux: &sync.RWMutex{}, + } + + err := checkpoint.ClaimShard(shard, "ijkl-mnop") + if err != nil { + t.Errorf("Shard not claimed %s", err) + } + + claimRequest, ok := svc.item[ClaimRequestKey] + if !ok { + t.Error("Expected claimRequest to be set by ClaimShard") + } else if claimRequest.(*types.AttributeValueMemberS).Value != "ijkl-mnop" { + t.Errorf("Expected checkpoint to be ijkl-mnop. 
Got '%s'", claimRequest.(*types.AttributeValueMemberS).Value) + } + + status := &par.ShardStatus{ + ID: shard.ID, + Mux: &sync.RWMutex{}, + } + _ = checkpoint.FetchCheckpoint(status) + + // asiggnedTo, checkpointer, and parent shard id should be the same + assert.Equal(t, shard.AssignedTo, status.AssignedTo) + assert.Equal(t, shard.Checkpoint, status.Checkpoint) + assert.Equal(t, shard.ParentShardId, status.ParentShardId) +} + +type mockDynamoDB struct { + client *dynamodb.Client + tableExist bool + item map[string]types.AttributeValue + conditionalExpression string + expressionAttributeValues map[string]types.AttributeValue +} + +func (m *mockDynamoDB) ScanPages(_ *dynamodb.ScanInput, _ func(*dynamodb.ScanOutput, bool) bool) error { return nil } -func (m *mockDynamoDB) DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error) { +func (m *mockDynamoDB) DescribeTable(_ *dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error) { if !m.tableExist { - return &dynamodb.DescribeTableOutput{}, awserr.New(dynamodb.ErrCodeResourceNotFoundException, "doesNotExist", errors.New("")) + return &dynamodb.DescribeTableOutput{}, &types.ResourceNotFoundException{Message: aws.String("doesNotExist")} } + return &dynamodb.DescribeTableOutput{}, nil } @@ -417,7 +542,7 @@ func (m *mockDynamoDB) PutItem(input *dynamodb.PutItemInput) (*dynamodb.PutItemO return nil, nil } -func (m *mockDynamoDB) GetItem(input *dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) { +func (m *mockDynamoDB) GetItem(_ *dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error) { return &dynamodb.GetItemOutput{ Item: m.item, }, nil @@ -426,134 +551,13 @@ func (m *mockDynamoDB) GetItem(input *dynamodb.GetItemInput) (*dynamodb.GetItemO func (m *mockDynamoDB) UpdateItem(input *dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error) { exp := input.UpdateExpression - if aws.StringValue(exp) == "remove "+LeaseOwnerKey { + if aws.ToString(exp) == "remove "+LeaseOwnerKey { 
delete(m.item, LeaseOwnerKey) } return nil, nil } -func (m *mockDynamoDB) CreateTable(input *dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) { +func (m *mockDynamoDB) CreateTable(_ *dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error) { return &dynamodb.CreateTableOutput{}, nil } - -func TestListActiveWorkers(t *testing.T) { - svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} - kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). - WithLeaseStealing(true) - - checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) - err := checkpoint.Init() - if err != nil { - t.Errorf("Checkpoint initialization failed: %+v", err) - } - - shardStatus := map[string]*par.ShardStatus{ - "0000": {ID: "0000", AssignedTo: "worker_1", Checkpoint: "", Mux: &sync.RWMutex{}}, - "0001": {ID: "0001", AssignedTo: "worker_2", Checkpoint: "", Mux: &sync.RWMutex{}}, - "0002": {ID: "0002", AssignedTo: "worker_4", Checkpoint: "", Mux: &sync.RWMutex{}}, - "0003": {ID: "0003", AssignedTo: "worker_0", Checkpoint: "", Mux: &sync.RWMutex{}}, - "0004": {ID: "0004", AssignedTo: "worker_1", Checkpoint: "", Mux: &sync.RWMutex{}}, - "0005": {ID: "0005", AssignedTo: "worker_3", Checkpoint: "", Mux: &sync.RWMutex{}}, - "0006": {ID: "0006", AssignedTo: "worker_3", Checkpoint: "", Mux: &sync.RWMutex{}}, - "0007": {ID: "0007", AssignedTo: "worker_0", Checkpoint: "", Mux: &sync.RWMutex{}}, - "0008": {ID: "0008", AssignedTo: "worker_4", Checkpoint: "", Mux: &sync.RWMutex{}}, - "0009": {ID: "0009", AssignedTo: "worker_2", Checkpoint: "", Mux: &sync.RWMutex{}}, - "0010": {ID: "0010", AssignedTo: "worker_0", Checkpoint: ShardEnd, Mux: &sync.RWMutex{}}, - } - - workers, err := checkpoint.ListActiveWorkers(shardStatus) - if err != nil { - t.Error(err) - } - - for workerID, shards := range workers { - assert.Equal(t, 2, len(shards)) - for _, shard := range shards { - assert.Equal(t, workerID, shard.AssignedTo) - } - } -} - 
-func TestListActiveWorkersErrShardNotAssigned(t *testing.T) { - svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} - kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). - WithLeaseStealing(true) - - checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) - err := checkpoint.Init() - if err != nil { - t.Errorf("Checkpoint initialization failed: %+v", err) - } - - shardStatus := map[string]*par.ShardStatus{ - "0000": {ID: "0000", Mux: &sync.RWMutex{}}, - } - - _, err = checkpoint.ListActiveWorkers(shardStatus) - if err != ErrShardNotAssigned { - t.Error("Expected ErrShardNotAssigned when shard is missing AssignedTo value") - } -} - -func TestClaimShard(t *testing.T) { - svc := &mockDynamoDB{tableExist: true, item: map[string]*dynamodb.AttributeValue{}} - kclConfig := cfg.NewKinesisClientLibConfig("appName", "test", "us-west-2", "abc"). - WithInitialPositionInStream(cfg.LATEST). - WithMaxRecords(10). - WithMaxLeasesForWorker(1). - WithShardSyncIntervalMillis(5000). - WithFailoverTimeMillis(300000). 
- WithLeaseStealing(true) - - checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc) - checkpoint.Init() - - marshalledCheckpoint := map[string]*dynamodb.AttributeValue{ - "ShardID": { - S: aws.String("0001"), - }, - "AssignedTo": { - S: aws.String("abcd-efgh"), - }, - "LeaseTimeout": { - S: aws.String(time.Now().AddDate(0, -1, 0).UTC().Format(time.RFC3339)), - }, - "Checkpoint": { - S: aws.String("deadbeef"), - }, - } - input := &dynamodb.PutItemInput{ - TableName: aws.String("TableName"), - Item: marshalledCheckpoint, - } - checkpoint.svc.PutItem(input) - shard := &par.ShardStatus{ - ID: "0001", - Checkpoint: "deadbeef", - Mux: &sync.RWMutex{}, - } - - err := checkpoint.ClaimShard(shard, "ijkl-mnop") - if err != nil { - t.Errorf("Shard not claimed %s", err) - } - - claimRequest, ok := svc.item[ClaimRequestKey] - if !ok { - t.Error("Expected claimRequest to be set by ClaimShard") - } else if *claimRequest.S != "ijkl-mnop" { - t.Errorf("Expected checkpoint to be ijkl-mnop. Got '%s'", *claimRequest.S) - } - - status := &par.ShardStatus{ - ID: shard.ID, - Mux: &sync.RWMutex{}, - } - checkpoint.FetchCheckpoint(status) - - // asiggnedTo, checkpointer, and parent shard id should be the same - assert.Equal(t, shard.AssignedTo, status.AssignedTo) - assert.Equal(t, shard.Checkpoint, status.Checkpoint) - assert.Equal(t, shard.ParentShardId, status.ParentShardId) -} diff --git a/clientlibrary/config/config.go b/clientlibrary/config/config.go index f6e061d..35cb6ad 100644 --- a/clientlibrary/config/config.go +++ b/clientlibrary/config/config.go @@ -41,8 +41,8 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - creds "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" "github.com/vmware/vmware-go-kcl/logger" @@ -173,10 +173,10 @@ type ( KinesisEndpoint string // KinesisCredentials is used to access Kinesis - KinesisCredentials 
*creds.Credentials + KinesisCredentials *credentials.StaticCredentialsProvider // DynamoDBCredentials is used to access DynamoDB - DynamoDBCredentials *creds.Credentials + DynamoDBCredentials *credentials.StaticCredentialsProvider // TableName is name of the dynamo db table for managing kinesis stream default to ApplicationName TableName string diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index 6751f6a..45a6a2a 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -39,7 +39,7 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go-v2/credentials" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" "github.com/vmware/vmware-go-kcl/clientlibrary/utils" @@ -54,13 +54,13 @@ func NewKinesisClientLibConfig(applicationName, streamName, regionName, workerID // NewKinesisClientLibConfigWithCredential creates a default KinesisClientLibConfiguration based on the required fields and unique credentials. func NewKinesisClientLibConfigWithCredential(applicationName, streamName, regionName, workerID string, - creds *credentials.Credentials) *KinesisClientLibConfiguration { + creds *credentials.StaticCredentialsProvider) *KinesisClientLibConfiguration { return NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID, creds, creds) } // NewKinesisClientLibConfigWithCredentials creates a default KinesisClientLibConfiguration based on the required fields and specific credentials for each service. 
func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID string, - kiniesisCreds, dynamodbCreds *credentials.Credentials) *KinesisClientLibConfiguration { + kinesisCreds, dynamodbCreds *credentials.StaticCredentialsProvider) *KinesisClientLibConfiguration { checkIsValueNotEmpty("ApplicationName", applicationName) checkIsValueNotEmpty("StreamName", streamName) checkIsValueNotEmpty("RegionName", regionName) @@ -70,31 +70,31 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regio } // populate the KCL configuration with default values - return &KinesisClientLibConfiguration{ - ApplicationName: applicationName, - KinesisCredentials: kiniesisCreds, - DynamoDBCredentials: dynamodbCreds, - TableName: applicationName, - EnhancedFanOutConsumerName: applicationName, - StreamName: streamName, - RegionName: regionName, - WorkerID: workerID, - InitialPositionInStream: DefaultInitialPositionInStream, - InitialPositionInStreamExtended: *newInitialPosition(DefaultInitialPositionInStream), - FailoverTimeMillis: DefaultFailoverTimeMillis, - LeaseRefreshPeriodMillis: DefaultLeaseRefreshPeriodMillis, - MaxRecords: DefaultMaxRecords, - IdleTimeBetweenReadsInMillis: DefaultIdletimeBetweenReadsMillis, - CallProcessRecordsEvenForEmptyRecordList: DefaultDontCallProcessRecordsForEmptyRecordList, - ParentShardPollIntervalMillis: DefaultParentShardPollIntervalMillis, - ShardSyncIntervalMillis: DefaultShardSyncIntervalMillis, - CleanupTerminatedShardsBeforeExpiry: DefaultCleanupLeasesUponShardsCompletion, - TaskBackoffTimeMillis: DefaultTaskBackoffTimeMillis, - ValidateSequenceNumberBeforeCheckpointing: DefaultValidateSequenceNumberBeforeCheckpointing, - ShutdownGraceMillis: DefaultShutdownGraceMillis, - MaxLeasesForWorker: DefaultMaxLeasesForWorker, - MaxLeasesToStealAtOneTime: DefaultMaxLeasesToStealAtOneTime, - InitialLeaseTableReadCapacity: DefaultInitialLeaseTableReadCapacity, + return &KinesisClientLibConfiguration { + 
ApplicationName: applicationName, + KinesisCredentials: kinesisCreds, + DynamoDBCredentials: dynamodbCreds, + TableName: applicationName, + EnhancedFanOutConsumerName: applicationName, + StreamName: streamName, + RegionName: regionName, + WorkerID: workerID, + InitialPositionInStream: DefaultInitialPositionInStream, + InitialPositionInStreamExtended: *newInitialPosition(DefaultInitialPositionInStream), + FailoverTimeMillis: DefaultFailoverTimeMillis, + LeaseRefreshPeriodMillis: DefaultLeaseRefreshPeriodMillis, + MaxRecords: DefaultMaxRecords, + IdleTimeBetweenReadsInMillis: DefaultIdleTimeBetweenReadsMillis, + CallProcessRecordsEvenForEmptyRecordList: DefaultDontCallProcessRecordsForEmptyRecordList, + ParentShardPollIntervalMillis: DefaultParentShardPollIntervalMillis, + ShardSyncIntervalMillis: DefaultShardSyncIntervalMillis, + CleanupTerminatedShardsBeforeExpiry: DefaultCleanupLeasesUponShardsCompletion, + TaskBackoffTimeMillis: DefaultTaskBackoffTimeMillis, + ValidateSequenceNumberBeforeCheckpointing: DefaultValidateSequenceNumberBeforeCheckpointing, + ShutdownGraceMillis: DefaultShutdownGraceMillis, + MaxLeasesForWorker: DefaultMaxLeasesForWorker, + MaxLeasesToStealAtOneTime: DefaultMaxLeasesToStealAtOneTime, + InitialLeaseTableReadCapacity: DefaultInitialLeaseTableReadCapacity, InitialLeaseTableWriteCapacity: DefaultInitialLeaseTableWriteCapacity, SkipShardSyncAtWorkerInitializationIfLeasesExist: DefaultSkipShardSyncAtStartupIfLeasesExist, EnableLeaseStealing: DefaultEnableLeaseStealing, diff --git a/clientlibrary/interfaces/inputs.go b/clientlibrary/interfaces/inputs.go index 385a3d2..2336af8 100644 --- a/clientlibrary/interfaces/inputs.go +++ b/clientlibrary/interfaces/inputs.go @@ -38,20 +38,20 @@ package interfaces import ( "time" - "github.com/aws/aws-sdk-go/aws" - ks "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" ) const ( - /** - * Indicates that the entire 
application is being shutdown, and if desired the record processor will be given a + /* + * REQUESTED Indicates that the entire application is being shutdown, and if desired the record processor will be given a * final chance to checkpoint. This state will not trigger a direct call to * {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#shutdown(ShutdownInput)}, but * instead depend on a different interface for backward compatibility. */ REQUESTED ShutdownReason = iota + 1 - /** + /* * Terminate processing for this RecordProcessor (resharding use case). * Indicates that the shard is closed and all records from the shard have been delivered to the application. * Applications SHOULD checkpoint their progress to indicate that they have successfully processed all records @@ -95,7 +95,7 @@ type ( CacheExitTime *time.Time // The records received from Kinesis. These records may have been de-aggregated if they were published by the KPL. - Records []*ks.Record + Records []types.Record // A checkpointer that the RecordProcessor can use to checkpoint its progress. 
Checkpointer IRecordProcessorCheckpointer diff --git a/clientlibrary/metrics/cloudwatch/cloudwatch.go b/clientlibrary/metrics/cloudwatch/cloudwatch.go index 2a1731d..144ce85 100644 --- a/clientlibrary/metrics/cloudwatch/cloudwatch.go +++ b/clientlibrary/metrics/cloudwatch/cloudwatch.go @@ -30,27 +30,28 @@ package cloudwatch import ( + "context" "sync" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - cwatch "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/credentials" + + cwatch "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types" "github.com/vmware/vmware-go-kcl/logger" ) -// Buffer metrics for at most this long before publishing to CloudWatch. -const DEFAULT_CLOUDWATCH_METRICS_BUFFER_DURATION = 10 * time.Second +// DefaultCloudwatchMetricsBufferDuration Buffer metrics for at most this long before publishing to CloudWatch. +const DefaultCloudwatchMetricsBufferDuration = 10 * time.Second type MonitoringService struct { appName string streamName string workerID string region string - credentials *credentials.Credentials + credentials *credentials.StaticCredentialsProvider logger logger.Logger // control how often to publish to CloudWatch @@ -58,7 +59,7 @@ type MonitoringService struct { stop *chan struct{} waitGroup *sync.WaitGroup - svc cloudwatchiface.CloudWatchAPI + svc *cwatch.Client shardMetrics *sync.Map } @@ -75,13 +76,13 @@ type cloudWatchMetrics struct { } // NewMonitoringService returns a Monitoring service publishing metrics to CloudWatch. 
-func NewMonitoringService(region string, creds *credentials.Credentials) *MonitoringService { - return NewMonitoringServiceWithOptions(region, creds, logger.GetDefaultLogger(), DEFAULT_CLOUDWATCH_METRICS_BUFFER_DURATION) +func NewMonitoringService(region string, creds *credentials.StaticCredentialsProvider) *MonitoringService { + return NewMonitoringServiceWithOptions(region, creds, logger.GetDefaultLogger(), DefaultCloudwatchMetricsBufferDuration) } // NewMonitoringServiceWithOptions returns a Monitoring service publishing metrics to // CloudWatch with the provided credentials, buffering duration and logger. -func NewMonitoringServiceWithOptions(region string, creds *credentials.Credentials, logger logger.Logger, bufferDur time.Duration) *MonitoringService { +func NewMonitoringServiceWithOptions(region string, creds *credentials.StaticCredentialsProvider, logger logger.Logger, bufferDur time.Duration) *MonitoringService { return &MonitoringService{ region: region, credentials: creds, @@ -95,15 +96,11 @@ func (cw *MonitoringService) Init(appName, streamName, workerID string) error { cw.streamName = streamName cw.workerID = workerID - cfg := &aws.Config{Region: aws.String(cw.region)} + cfg := &aws.Config{Region: cw.region} cfg.Credentials = cw.credentials - s, err := session.NewSession(cfg) - if err != nil { - cw.logger.Errorf("Error in creating session for cloudwatch. 
%+v", err) - return err - } - cw.svc = cwatch.New(s) - cw.shardMetrics = new(sync.Map) + + cw.svc = cwatch.NewFromConfig(*cfg) + cw.shardMetrics = &sync.Map{} stopChan := make(chan struct{}) cw.stop = &stopChan @@ -150,7 +147,7 @@ func (cw *MonitoringService) eventloop() { func (cw *MonitoringService) flushShard(shard string, metric *cloudWatchMetrics) bool { metric.Lock() - defaultDimensions := []*cwatch.Dimension{ + defaultDimensions := []types.Dimension{ { Name: aws.String("Shard"), Value: &shard, @@ -161,7 +158,7 @@ func (cw *MonitoringService) flushShard(shard string, metric *cloudWatchMetrics) }, } - leaseDimensions := []*cwatch.Dimension{ + leaseDimensions := []types.Dimension{ { Name: aws.String("Shard"), Value: &shard, @@ -177,44 +174,44 @@ func (cw *MonitoringService) flushShard(shard string, metric *cloudWatchMetrics) } metricTimestamp := time.Now() - data := []*cwatch.MetricDatum{ + data := []types.MetricDatum{ { Dimensions: defaultDimensions, MetricName: aws.String("RecordsProcessed"), - Unit: aws.String("Count"), + Unit: types.StandardUnitCount, Timestamp: &metricTimestamp, Value: aws.Float64(float64(metric.processedRecords)), }, { Dimensions: defaultDimensions, MetricName: aws.String("DataBytesProcessed"), - Unit: aws.String("Bytes"), + Unit: types.StandardUnitBytes, Timestamp: &metricTimestamp, Value: aws.Float64(float64(metric.processedBytes)), }, { Dimensions: leaseDimensions, MetricName: aws.String("RenewLease.Success"), - Unit: aws.String("Count"), + Unit: types.StandardUnitCount, Timestamp: &metricTimestamp, Value: aws.Float64(float64(metric.leaseRenewals)), }, { Dimensions: leaseDimensions, MetricName: aws.String("CurrentLeases"), - Unit: aws.String("Count"), + Unit: types.StandardUnitCount, Timestamp: &metricTimestamp, Value: aws.Float64(float64(metric.leasesHeld)), }, } if len(metric.behindLatestMillis) > 0 { - data = append(data, &cwatch.MetricDatum{ + data = append(data, types.MetricDatum{ Dimensions: defaultDimensions, MetricName: 
aws.String("MillisBehindLatest"), - Unit: aws.String("Milliseconds"), + Unit: types.StandardUnitMilliseconds, Timestamp: &metricTimestamp, - StatisticValues: &cwatch.StatisticSet{ + StatisticValues: &types.StatisticSet{ SampleCount: aws.Float64(float64(len(metric.behindLatestMillis))), Sum: sumFloat64(metric.behindLatestMillis), Maximum: maxFloat64(metric.behindLatestMillis), @@ -223,12 +220,12 @@ func (cw *MonitoringService) flushShard(shard string, metric *cloudWatchMetrics) } if len(metric.getRecordsTime) > 0 { - data = append(data, &cwatch.MetricDatum{ + data = append(data, types.MetricDatum{ Dimensions: defaultDimensions, MetricName: aws.String("KinesisDataFetcher.getRecords.Time"), - Unit: aws.String("Milliseconds"), + Unit: types.StandardUnitMilliseconds, Timestamp: &metricTimestamp, - StatisticValues: &cwatch.StatisticSet{ + StatisticValues: &types.StatisticSet{ SampleCount: aws.Float64(float64(len(metric.getRecordsTime))), Sum: sumFloat64(metric.getRecordsTime), Maximum: maxFloat64(metric.getRecordsTime), @@ -237,12 +234,12 @@ func (cw *MonitoringService) flushShard(shard string, metric *cloudWatchMetrics) } if len(metric.processRecordsTime) > 0 { - data = append(data, &cwatch.MetricDatum{ + data = append(data, types.MetricDatum{ Dimensions: defaultDimensions, MetricName: aws.String("RecordProcessor.processRecords.Time"), - Unit: aws.String("Milliseconds"), + Unit: types.StandardUnitMilliseconds, Timestamp: &metricTimestamp, - StatisticValues: &cwatch.StatisticSet{ + StatisticValues: &types.StatisticSet{ SampleCount: aws.Float64(float64(len(metric.processRecordsTime))), Sum: sumFloat64(metric.processRecordsTime), Maximum: maxFloat64(metric.processRecordsTime), @@ -251,7 +248,7 @@ func (cw *MonitoringService) flushShard(shard string, metric *cloudWatchMetrics) } // Publish metrics data to cloud watch - _, err := cw.svc.PutMetricData(&cwatch.PutMetricDataInput{ + _, err := cw.svc.PutMetricData(context.TODO(), &cwatch.PutMetricDataInput{ Namespace: 
aws.String(cw.appName), MetricData: data, }) diff --git a/clientlibrary/worker/common-shard-consumer.go b/clientlibrary/worker/common-shard-consumer.go index 416ac13..a638f5f 100644 --- a/clientlibrary/worker/common-shard-consumer.go +++ b/clientlibrary/worker/common-shard-consumer.go @@ -16,22 +16,24 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ + +// Package worker package worker import ( "sync" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" - deagg "github.com/awslabs/kinesis-aggregation/go/deaggregator" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" "github.com/vmware/vmware-go-kcl/clientlibrary/config" kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" + deagg "github.com/vmware/vmware-go-kcl/internal/deaggregator" ) type shardConsumer interface { @@ -41,7 +43,7 @@ type shardConsumer interface { // commonShardConsumer implements common functionality for regular and enhanced fan-out consumers type commonShardConsumer struct { shard *par.ShardStatus - kc kinesisiface.KinesisAPI + kc *kinesis.Client checkpointer chk.Checkpointer recordProcessor kcl.IRecordProcessor kclConfig *config.KinesisClientLibConfiguration @@ -66,7 +68,7 @@ func (sc *commonShardConsumer) releaseLease() { // getStartingPosition gets kinesis stating position. // First try to fetch checkpoint. 
If checkpoint is not found use InitialPositionInStream -func (sc *commonShardConsumer) getStartingPosition() (*kinesis.StartingPosition, error) { +func (sc *commonShardConsumer) getStartingPosition() (*types.StartingPosition, error) { err := sc.checkpointer.FetchCheckpoint(sc.shard) if err != nil && err != chk.ErrSequenceIDNotFound { return nil, err @@ -75,24 +77,29 @@ func (sc *commonShardConsumer) getStartingPosition() (*kinesis.StartingPosition, checkpoint := sc.shard.GetCheckpoint() if checkpoint != "" { sc.kclConfig.Logger.Debugf("Start shard: %v at checkpoint: %v", sc.shard.ID, checkpoint) - return &kinesis.StartingPosition{ - Type: aws.String("AFTER_SEQUENCE_NUMBER"), + return &types.StartingPosition{ + Type: types.ShardIteratorTypeAfterSequenceNumber, SequenceNumber: &checkpoint, }, nil } shardIteratorType := config.InitalPositionInStreamToShardIteratorType(sc.kclConfig.InitialPositionInStream) - sc.kclConfig.Logger.Debugf("No checkpoint recorded for shard: %v, starting with: %v", sc.shard.ID, aws.StringValue(shardIteratorType)) - + sc.kclConfig.Logger.Debugf("No checkpoint recorded for shard: %v, starting with: %v", sc.shard.ID, aws.ToString(shardIteratorType)) if sc.kclConfig.InitialPositionInStream == config.AT_TIMESTAMP { - return &kinesis.StartingPosition{ - Type: shardIteratorType, + return &types.StartingPosition{ + Type: types.ShardIteratorTypeAtTimestamp, Timestamp: sc.kclConfig.InitialPositionInStreamExtended.Timestamp, }, nil } - return &kinesis.StartingPosition{ - Type: shardIteratorType, + if *shardIteratorType == "TRIM_HORIZON" { + return &types.StartingPosition{ + Type: types.ShardIteratorTypeTrimHorizon, + }, nil + } + + return &types.StartingPosition{ + Type: types.ShardIteratorTypeLatest, }, nil } @@ -121,7 +128,7 @@ func (sc *commonShardConsumer) waitOnParentShard() error { } } -func (sc *commonShardConsumer) processRecords(getRecordsStartTime time.Time, records []*kinesis.Record, millisBehindLatest *int64, recordCheckpointer 
kcl.IRecordProcessorCheckpointer) { +func (sc *commonShardConsumer) processRecords(getRecordsStartTime time.Time, records []types.Record, millisBehindLatest *int64, recordCheckpointer kcl.IRecordProcessorCheckpointer) { log := sc.kclConfig.Logger getRecordsTime := time.Since(getRecordsStartTime).Milliseconds() @@ -139,7 +146,7 @@ func (sc *commonShardConsumer) processRecords(getRecordsStartTime time.Time, rec input := &kcl.ProcessRecordsInput{ Records: dars, - MillisBehindLatest: aws.Int64Value(millisBehindLatest), + MillisBehindLatest: *millisBehindLatest, Checkpointer: recordCheckpointer, } diff --git a/clientlibrary/worker/fan-out-shard-consumer.go b/clientlibrary/worker/fan-out-shard-consumer.go index ba4484c..76827f3 100644 --- a/clientlibrary/worker/fan-out-shard-consumer.go +++ b/clientlibrary/worker/fan-out-shard-consumer.go @@ -16,14 +16,18 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +// Package worker package worker import ( + "context" "errors" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" @@ -61,11 +65,11 @@ func (sc *FanOutShardConsumer) getRecords() error { return err } defer func() { - if shardSub == nil || shardSub.EventStream == nil { + if shardSub == nil || shardSub.GetStream() == nil { log.Debugf("Nothing to close, EventStream is nil") return } - err = shardSub.EventStream.Close() + err = shardSub.GetStream().Close() if err != nil { log.Errorf("Unable to close event stream for %s: %v", sc.shard.ID, err) } @@ -99,7 +103,7 @@ func (sc *FanOutShardConsumer) getRecords() error { return err } refreshLeaseTimer = time.After(time.Until(sc.shard.LeaseTimeout.Add(-time.Duration(sc.kclConfig.LeaseRefreshPeriodMillis) * time.Millisecond))) - case event, ok := <-shardSub.EventStream.Events(): + case event, ok := <-shardSub.GetStream().Events(): if !ok { // need to resubscribe to shard log.Debugf("Event stream ended, refreshing subscription on shard: %s for worker: %s", sc.shard.ID, sc.consumerID) @@ -113,13 +117,13 @@ func (sc *FanOutShardConsumer) getRecords() error { } continue } - subEvent, ok := event.(*kinesis.SubscribeToShardEvent) + subEvent, ok := event.(*types.SubscribeToShardEventStreamMemberSubscribeToShardEvent) if !ok { log.Errorf("Received unexpected event type: %T", event) continue } - continuationSequenceNumber = subEvent.ContinuationSequenceNumber - sc.processRecords(getRecordsStartTime, subEvent.Records, subEvent.MillisBehindLatest, recordCheckpointer) + continuationSequenceNumber = subEvent.Value.ContinuationSequenceNumber + sc.processRecords(getRecordsStartTime, subEvent.Value.Records, subEvent.Value.MillisBehindLatest, 
recordCheckpointer) // The shard has been closed, so no new records can be read from it if continuationSequenceNumber == nil { @@ -138,7 +142,7 @@ func (sc *FanOutShardConsumer) subscribeToShard() (*kinesis.SubscribeToShardOutp return nil, err } - return sc.kc.SubscribeToShard(&kinesis.SubscribeToShardInput{ + return sc.kc.SubscribeToShard(context.TODO(), &kinesis.SubscribeToShardInput{ ConsumerARN: &sc.consumerARN, ShardId: &sc.shard.ID, StartingPosition: startPosition, @@ -146,16 +150,16 @@ func (sc *FanOutShardConsumer) subscribeToShard() (*kinesis.SubscribeToShardOutp } func (sc *FanOutShardConsumer) resubscribe(shardSub *kinesis.SubscribeToShardOutput, continuationSequence *string) (*kinesis.SubscribeToShardOutput, error) { - err := shardSub.EventStream.Close() + err := shardSub.GetStream().Close() if err != nil { sc.kclConfig.Logger.Errorf("Unable to close event stream for %s: %v", sc.shard.ID, err) return nil, err } - startPosition := &kinesis.StartingPosition{ - Type: aws.String("AFTER_SEQUENCE_NUMBER"), + startPosition := &types.StartingPosition{ + Type: types.ShardIteratorTypeAfterSequenceNumber, SequenceNumber: continuationSequence, } - shardSub, err = sc.kc.SubscribeToShard(&kinesis.SubscribeToShardInput{ + shardSub, err = sc.kc.SubscribeToShard(context.TODO(), &kinesis.SubscribeToShardInput{ ConsumerARN: &sc.consumerARN, ShardId: &sc.shard.ID, StartingPosition: startPosition, diff --git a/clientlibrary/worker/polling-shard-consumer.go b/clientlibrary/worker/polling-shard-consumer.go index 90371b0..f0a1d9e 100644 --- a/clientlibrary/worker/polling-shard-consumer.go +++ b/clientlibrary/worker/polling-shard-consumer.go @@ -16,6 +16,8 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +// Package worker // The implementation is derived from https://github.com/patrobinson/gokini // // Copyright 2018 Patrick robinson @@ -28,17 +30,18 @@ package worker import ( + "context" "errors" "math" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" "github.com/vmware/vmware-go-kcl/clientlibrary/metrics" - "github.com/vmware/vmware-go-kcl/clientlibrary/utils" ) // PollingShardConsumer is responsible for polling data records from a (specified) shard. @@ -56,6 +59,7 @@ func (sc *PollingShardConsumer) getShardIterator() (*string, error) { if err != nil { return nil, err } + shardIterArgs := &kinesis.GetShardIteratorInput{ ShardId: &sc.shard.ID, ShardIteratorType: startPosition.Type, @@ -63,14 +67,16 @@ func (sc *PollingShardConsumer) getShardIterator() (*string, error) { Timestamp: startPosition.Timestamp, StreamName: &sc.streamName, } - iterResp, err := sc.kc.GetShardIterator(shardIterArgs) + + iterResp, err := sc.kc.GetShardIterator(context.TODO(), shardIterArgs) if err != nil { return nil, err } + return iterResp.ShardIterator, nil } -// getRecords continously poll one shard for data record +// getRecords continuously poll one shard for data record // Precondition: it currently has the lease on the shard. 
func (sc *PollingShardConsumer) getRecords() error { defer sc.releaseLease() @@ -120,15 +126,19 @@ func (sc *PollingShardConsumer) getRecords() error { getRecordsStartTime := time.Now() - log.Debugf("Trying to read %d record from iterator: %v", sc.kclConfig.MaxRecords, aws.StringValue(shardIterator)) + log.Debugf("Trying to read %d record from iterator: %v", sc.kclConfig.MaxRecords, aws.ToString(shardIterator)) getRecordsArgs := &kinesis.GetRecordsInput{ - Limit: aws.Int64(int64(sc.kclConfig.MaxRecords)), + Limit: aws.Int32(int32(sc.kclConfig.MaxRecords)), ShardIterator: shardIterator, } + // Get records from stream and retry as needed - getResp, err := sc.kc.GetRecords(getRecordsArgs) + getResp, err := sc.kc.GetRecords(context.TODO(), getRecordsArgs) if err != nil { - if utils.AWSErrCode(err) == kinesis.ErrCodeProvisionedThroughputExceededException || utils.AWSErrCode(err) == kinesis.ErrCodeKMSThrottlingException { + //aws-sdk-go-v2 https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md#error-handling + var throughputExceededErr *types.ProvisionedThroughputExceededException + var kmsThrottlingErr *types.KMSThrottlingException + if errors.As(err, &throughputExceededErr) || errors.As(err, &kmsThrottlingErr) { log.Errorf("Error getting records from shard %v: %+v", sc.shard.ID, err) retriedErrors++ // exponential backoff @@ -156,7 +166,7 @@ func (sc *PollingShardConsumer) getRecords() error { // Idle between each read, the user is responsible for checkpoint the progress // This value is only used when no records are returned; if records are returned, it should immediately // retrieve the next set of records. 
- if len(getResp.Records) == 0 && aws.Int64Value(getResp.MillisBehindLatest) < int64(sc.kclConfig.IdleTimeBetweenReadsInMillis) { + if len(getResp.Records) == 0 && aws.ToInt64(getResp.MillisBehindLatest) < int64(sc.kclConfig.IdleTimeBetweenReadsInMillis) { time.Sleep(time.Duration(sc.kclConfig.IdleTimeBetweenReadsInMillis) * time.Millisecond) } diff --git a/clientlibrary/worker/record-processor-checkpointer.go b/clientlibrary/worker/record-processor-checkpointer.go index cf96ea9..c89dc4a 100644 --- a/clientlibrary/worker/record-processor-checkpointer.go +++ b/clientlibrary/worker/record-processor-checkpointer.go @@ -21,6 +21,8 @@ package worker import ( + "github.com/aws/aws-sdk-go-v2/aws" + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" par "github.com/vmware/vmware-go-kcl/clientlibrary/partition" @@ -71,13 +73,12 @@ func (rc *RecordProcessorCheckpointer) Checkpoint(sequenceNumber *string) error if sequenceNumber == nil { rc.shard.SetCheckpoint(chk.ShardEnd) } else { - rc.shard.SetCheckpoint(aws.StringValue(sequenceNumber)) + rc.shard.SetCheckpoint(aws.ToString(sequenceNumber)) } return rc.checkpoint.CheckpointSequence(rc.shard) } -func (rc *RecordProcessorCheckpointer) PrepareCheckpoint(sequenceNumber *string) (kcl.IPreparedCheckpointer, error) { +func (rc *RecordProcessorCheckpointer) PrepareCheckpoint(_ *string) (kcl.IPreparedCheckpointer, error) { return &PreparedCheckpointer{}, nil - } diff --git a/clientlibrary/worker/worker-fan-out.go b/clientlibrary/worker/worker-fan-out.go index 0725671..a7943b3 100644 --- a/clientlibrary/worker/worker-fan-out.go +++ b/clientlibrary/worker/worker-fan-out.go @@ -16,16 +16,19 @@ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ + +// Package worker package worker import ( + "context" + "errors" "fmt" "math" "time" - "github.com/aws/aws-sdk-go/service/kinesis" - - "github.com/vmware/vmware-go-kcl/clientlibrary/utils" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" ) // fetchConsumerARNWithRetry tries to fetch consumer ARN. Retries 10 times with exponential backoff in case of an error @@ -50,27 +53,34 @@ func (w *Worker) fetchConsumerARNWithRetry() (string, error) { func (w *Worker) fetchConsumerARN() (string, error) { log := w.kclConfig.Logger log.Debugf("Fetching stream consumer ARN") - streamDescription, err := w.kc.DescribeStream(&kinesis.DescribeStreamInput{ + + streamDescription, err := w.kc.DescribeStream(context.TODO(), &kinesis.DescribeStreamInput{ StreamName: &w.kclConfig.StreamName, }) + if err != nil { log.Errorf("Could not describe stream: %v", err) return "", err } - streamConsumerDescription, err := w.kc.DescribeStreamConsumer(&kinesis.DescribeStreamConsumerInput{ + + streamConsumerDescription, err := w.kc.DescribeStreamConsumer(context.TODO(), &kinesis.DescribeStreamConsumerInput{ ConsumerName: &w.kclConfig.EnhancedFanOutConsumerName, StreamARN: streamDescription.StreamDescription.StreamARN, }) + if err == nil { - log.Infof("Enhanced fan-out consumer found, consumer status: %s", *streamConsumerDescription.ConsumerDescription.ConsumerStatus) - if *streamConsumerDescription.ConsumerDescription.ConsumerStatus != kinesis.ConsumerStatusActive { - return "", fmt.Errorf("consumer is not in active status yet, current status: %s", *streamConsumerDescription.ConsumerDescription.ConsumerStatus) + log.Infof("Enhanced fan-out consumer found, consumer status: %s", streamConsumerDescription.ConsumerDescription.ConsumerStatus) + if streamConsumerDescription.ConsumerDescription.ConsumerStatus != types.ConsumerStatusActive { + return "", fmt.Errorf("consumer is not in active status yet, current status: %s", 
streamConsumerDescription.ConsumerDescription.ConsumerStatus) } return *streamConsumerDescription.ConsumerDescription.ConsumerARN, nil } - if utils.AWSErrCode(err) == kinesis.ErrCodeResourceNotFoundException { + + //aws-sdk-go-v2 https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md#error-handling + var notFoundErr *types.ResourceNotFoundException + if errors.As(err, ¬FoundErr) { log.Infof("Enhanced fan-out consumer not found, registering new consumer with name: %s", w.kclConfig.EnhancedFanOutConsumerName) - out, err := w.kc.RegisterStreamConsumer(&kinesis.RegisterStreamConsumerInput{ + out, err := w.kc.RegisterStreamConsumer(context.TODO(), &kinesis.RegisterStreamConsumerInput{ ConsumerName: &w.kclConfig.EnhancedFanOutConsumerName, StreamARN: streamDescription.StreamDescription.StreamARN, }) @@ -78,11 +88,13 @@ func (w *Worker) fetchConsumerARN() (string, error) { log.Errorf("Could not register enhanced fan-out consumer: %v", err) return "", err } - if *out.Consumer.ConsumerStatus != kinesis.ConsumerStatusActive { - return "", fmt.Errorf("consumer is not in active status yet, current status: %s", *out.Consumer.ConsumerStatus) + if out.Consumer.ConsumerStatus != types.ConsumerStatusActive { + return "", fmt.Errorf("consumer is not in active status yet, current status: %s", out.Consumer.ConsumerStatus) } return *out.Consumer.ConsumerARN, nil } - log.Errorf("Could not describe stream consumer: %v", err) + + log.Errorf("Could not describe stream consumer: %v", err) //%w should we unwrap the underlying error? 
+ return "", err } diff --git a/clientlibrary/worker/worker.go b/clientlibrary/worker/worker.go index c5b39f4..01db8f5 100644 --- a/clientlibrary/worker/worker.go +++ b/clientlibrary/worker/worker.go @@ -30,16 +30,18 @@ package worker import ( + "context" "crypto/rand" "errors" "math/big" "sync" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awsConfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/kinesis" chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" "github.com/vmware/vmware-go-kcl/clientlibrary/config" @@ -59,7 +61,7 @@ type Worker struct { processorFactory kcl.IRecordProcessorFactory kclConfig *config.KinesisClientLibConfiguration - kc kinesisiface.KinesisAPI + kc *kinesis.Client checkpointer chk.Checkpointer mService metrics.MonitoringService @@ -94,7 +96,7 @@ func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisCli } // WithKinesis is used to provide Kinesis service for either custom implementation or unit testing. 
-func (w *Worker) WithKinesis(svc kinesisiface.KinesisAPI) *Worker { +func (w *Worker) WithKinesis(svc *kinesis.Client) *Worker { w.kc = svc return w } @@ -153,22 +155,38 @@ func (w *Worker) initialize() error { log := w.kclConfig.Logger log.Infof("Worker initialization in progress...") - // Create default Kinesis session + // Create default Kinesis client if w.kc == nil { // create session for Kinesis - log.Infof("Creating Kinesis session") + log.Infof("Creating Kinesis client") - s, err := session.NewSession(&aws.Config{ - Region: aws.String(w.regionName), - Endpoint: &w.kclConfig.KinesisEndpoint, - Credentials: w.kclConfig.KinesisCredentials, + resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { + return aws.Endpoint{ + PartitionID: "aws", + URL: w.kclConfig.KinesisEndpoint, + SigningRegion: w.regionName, + }, nil }) + cfg, err := awsConfig.LoadDefaultConfig( + context.TODO(), + awsConfig.WithRegion(w.regionName), + awsConfig.WithCredentialsProvider( + credentials.NewStaticCredentialsProvider( + w.kclConfig.KinesisCredentials.Value.AccessKeyID, + w.kclConfig.KinesisCredentials.Value.SecretAccessKey, + w.kclConfig.KinesisCredentials.Value.SessionToken)), + awsConfig.WithEndpointResolver(resolver), + awsConfig.WithRetryer(func() aws.Retryer { + return retry.AddWithMaxBackoffDelay(retry.NewStandard(), retry.DefaultMaxBackoff) + }), + ) + if err != nil { // no need to move forward - log.Fatalf("Failed in getting Kinesis session for creating Worker: %+v", err) + log.Fatalf("Failed in loading Kinesis default config for creating Worker: %+v", err) } - w.kc = kinesis.New(s) + w.kc = kinesis.NewFromConfig(cfg) } else { log.Infof("Use custom Kinesis service.") } @@ -460,7 +478,7 @@ func (w *Worker) getShardIDs(nextToken string, shardInfo map[string]bool) error args.StreamName = aws.String(w.streamName) } - listShards, err := w.kc.ListShards(args) + listShards, err := w.kc.ListShards(context.TODO(), args) if err != nil { 
log.Errorf("Error in ListShards: %s Error: %+v Request: %s", w.streamName, err, args) return err @@ -475,16 +493,16 @@ func (w *Worker) getShardIDs(nextToken string, shardInfo map[string]bool) error log.Infof("Found new shard with id %s", *s.ShardId) w.shardStatus[*s.ShardId] = &par.ShardStatus{ ID: *s.ShardId, - ParentShardId: aws.StringValue(s.ParentShardId), + ParentShardId: aws.ToString(s.ParentShardId), Mux: &sync.RWMutex{}, - StartingSequenceNumber: aws.StringValue(s.SequenceNumberRange.StartingSequenceNumber), - EndingSequenceNumber: aws.StringValue(s.SequenceNumberRange.EndingSequenceNumber), + StartingSequenceNumber: aws.ToString(s.SequenceNumberRange.StartingSequenceNumber), + EndingSequenceNumber: aws.ToString(s.SequenceNumberRange.EndingSequenceNumber), } } } if listShards.NextToken != nil { - err := w.getShardIDs(aws.StringValue(listShards.NextToken), shardInfo) + err := w.getShardIDs(aws.ToString(listShards.NextToken), shardInfo) if err != nil { log.Errorf("Error in ListShards: %s Error: %+v Request: %s", w.streamName, err, args) return err diff --git a/go.mod b/go.mod index 264834c..1f6152c 100644 --- a/go.mod +++ b/go.mod @@ -3,13 +3,18 @@ module github.com/vmware/vmware-go-kcl go 1.17 require ( - github.com/aws/aws-sdk-go v1.41.7 - github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f + github.com/aws/aws-sdk-go-v2 v1.11.0 + github.com/aws/aws-sdk-go-v2/config v1.10.0 + github.com/aws/aws-sdk-go-v2/credentials v1.6.0 + github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.4.0 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.10.0 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.7.0 + github.com/aws/aws-sdk-go-v2/service/kinesis v1.8.0 github.com/golang/protobuf v1.5.2 github.com/google/uuid v1.3.0 github.com/prometheus/client_golang v1.11.0 github.com/prometheus/common v0.32.1 - github.com/rs/zerolog v1.25.0 + github.com/rs/zerolog v1.26.0 github.com/sirupsen/logrus v1.8.1 github.com/stretchr/testify v1.7.0 
go.uber.org/zap v1.19.1 @@ -18,6 +23,19 @@ require ( require ( github.com/BurntSushi/toml v0.4.1 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.3.0 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.6.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.6.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.9.0 // indirect + github.com/aws/smithy-go v1.9.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -26,9 +44,10 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect + github.com/stretchr/objx v0.3.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.7.0 // indirect - golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac // indirect + golang.org/x/sys v0.0.0-20211106132015-ebca88c72f68 // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/go.sum b/go.sum index 8fc9eba..22eed71 100644 --- a/go.sum +++ b/go.sum @@ -12,37 +12,150 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go 
v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod 
h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/aws/aws-sdk-go v1.19.48 h1:YhKzuc9xggUt8jNDc5CmIBeB8GmGtazzq0aCXO4sj6w= github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.41.7 
h1:vlpR8Cky3ZxUVNINgeRZS6N0p6zmFvu/ZqRRwrTI25U= github.com/aws/aws-sdk-go v1.41.7/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2 v1.10.0 h1:+dCJ5W2HiZNa4UtaIc5ljKNulm0dK0vS5dxb5LdDOAA= +github.com/aws/aws-sdk-go-v2 v1.10.0/go.mod h1:U/EyyVvKtzmFeQQcca7eBotKdlpcP2zzU6bXBYcf7CE= +github.com/aws/aws-sdk-go-v2 v1.11.0 h1:HxyD62DyNhCfiFGUHqJ/xITD6rAjJ7Dm/2nLxLmO4Ag= +github.com/aws/aws-sdk-go-v2 v1.11.0/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 h1:yVUAwvJC/0WNPbyl0nA3j1L6CW1CN8wBubCRqtG7JLI= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0/go.mod h1:Xn6sxgRuIDflLRJFj5Ev7UxABIkNbccFPV/p8itDReM= +github.com/aws/aws-sdk-go-v2/config v1.6.1 h1:qrZINaORyr78syO1zfD4l7r4tZjy0Z1l0sy4jiysyOM= +github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= +github.com/aws/aws-sdk-go-v2/config v1.9.0 h1:SkREVSwi+J8MSdjhJ96jijZm5ZDNleI0E4hHCNivh7s= +github.com/aws/aws-sdk-go-v2/config v1.9.0/go.mod h1:qhK5NNSgo9/nOSMu3HyE60WHXZTWTHTgd5qtIF44vOQ= +github.com/aws/aws-sdk-go-v2/config v1.10.0 h1:4i+/7DmCQCAls5Z61giur0LOPZ3PXFwnSIw7hRamzws= +github.com/aws/aws-sdk-go-v2/config v1.10.0/go.mod h1:xuqoV5etD3N3B8Ts9je4ijgAv6mb+6NiOPFMUhwRcjA= +github.com/aws/aws-sdk-go-v2/credentials v1.3.3 h1:A13QPatmUl41SqUfnuT3V0E3XiNGL6qNTOINbE8cZL4= +github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= +github.com/aws/aws-sdk-go-v2/credentials v1.5.0 h1:r6470olsn2qyOe2aLzK6q+wfO3dzNcMujRT3gqBgBB8= +github.com/aws/aws-sdk-go-v2/credentials v1.5.0/go.mod h1:kvqTkpzQmzri9PbsiTY+LvwFzM0gY19emlAWwBOJMb0= +github.com/aws/aws-sdk-go-v2/credentials v1.6.0 h1:L3O6osQTlzLKRmiTphw2QJuD21EFapWCX4IipiRJhAE= 
+github.com/aws/aws-sdk-go-v2/credentials v1.6.0/go.mod h1:rQkYdQPDXRrvPLeEuCNwSgtwMzBo9eDGWlTNC69Sh/0= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 h1:8kvinmbIDObqsWegKP0JjeanYPiA4GUVpAtciNWE+jw= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.3.0 h1:jEWmr4fcoAdoDo34DKMED/lEgPyyGE6/Xhwbgs6+NS8= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.3.0/go.mod h1:YjXozu6rHksfG22T5ZZASTrFOLzI0AoyuEC+GU9I3Lw= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.4.0 h1:J8Zgr+z0RjxidWB6vjX6sEB8TU/y6ELWoYhNoJ99d+M= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.4.0/go.mod h1:gWzcyoZ5LNkx1Xhluc25HU9eWIdcwiaymHuJnwO6ELs= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.3.0 h1:Nm2gF15BCti2SRfE/G6rS7KbTD8mQTVIwGFjMZIlie0= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.3.0/go.mod h1:GQd3X3up0vqgHmt2jca0vyM7rbZj1KkJBDHlb6Oc1Eg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1 h1:rc+fRGvlKbeSd9IFhFS1KWBs0XjTkq0CfK5xqyLgIp0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.7.0 h1:FKaqk7geL3oIqSwGJt5SWUKj8uJ+qLZNqlBuqq6sFyA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.7.0/go.mod h1:KqEkRkxm/+1Pd/rENRNbQpfblDBYeg5HDSqjB6ks8hA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0 h1:OpZjuUy8Jt3CA1WgJgBC5Bz+uOjE5Ppx4NFTRaooUuA= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0/go.mod h1:5E1J3/TTYy6z909QNR0QnXGBpfESYGDqd3O0zqONghU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 h1:IM9b6hlCcVFJFydPoyphs/t7YrHfqKy7T4/7AG5Eprs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.7 
h1:/0GQVY8J25hww4J9a+rYKDr9ryGh2KdIdR8YHBP54h0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.7/go.mod h1:QXoZAXmBEHeMIFiBr3XumpTyoNTXTQbqPV+qaGX7gfY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0 h1:zY8cNmbBXt3pzjgWgdIbzpQ6qxoCwt+Nx9JbrAf2mbY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0/go.mod h1:NO3Q5ZTTQtO2xIg2+xTXYDiT7knSejfeDm7WGDaOo0U= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0 h1:Z3aR/OXBnkYK9zXkNkfitHX6SmUBzSsx8VMHbH4Lvhw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0/go.mod h1:anlUzBoEWglcUxUQwZA7HQOEVEnQALVZsizAapB2hq8= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1 h1:IkqRRUZTKaS16P2vpX+FNc2jq3JWa3c478gykQp4ow4= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.5 h1:zPxLGWALExNepElO0gYgoqsbqTlt4ZCrhZ7XlfJ+Qlw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.5/go.mod h1:6ZBTuDmvpCOD4Sf1i2/I3PgftlEcDGgvi8ocq64oQEg= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0 h1:c10Z7fWxtJCoyc8rv06jdh9xrKnu7bAJiRaKWvTb2mU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0/go.mod h1:6oXGy4GLpypD3uCh8wcqztigGgmhLToMfjavgh+VySg= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.9.0/go.mod h1:iX4tSMY8NP1mzU2PMS6arLyB/Yufz2LxCkn9DsgiWEI= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.10.0 h1:MNNV0fi3J5Lxxhx8iDlKdRZJrtBv/0FyganA3nBYe8Q= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.10.0/go.mod h1:Oiwhs3Fo9amYOGsJggWBPU6bwa/u0xVpEdOS5HlouPg= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 h1:SGwKUQaJudQQZE72dDQlL2FGuHNAEK1CyqKLTjh6mqE= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0/go.mod h1:XY5YhCS9SLul3JSQ08XG/nfxXxrkh6RR21XPq/J//NY= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.6.0 h1:HDp8hUQlGU5fgNoNDp0BOthk57AuTXMTaAK1mb9c27I= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.6.0/go.mod h1:t8pYXJHxfOe/088CcNeuqQbucpq9SwO1yjheCieDDnI= 
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.7.0 h1:S3X6RWl0TfMxNXsIzz8r3Y6YVA1HWGSx6M345Q3mQ+I= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.7.0/go.mod h1:Hh0zJ3419ET9xQBeR+y0lHIkObJwAKPbzV9nTZ0yrJ0= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 h1:QbFWJr2SAyVYvyoOHvJU6sCGLnqNT94ZbWElJMEI1JY= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0/go.mod h1:bYsEP8w5YnbYyrx/Zi5hy4hTwRRQISSJS3RWrsGRijg= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.5.0 h1:At4HitvrEFdSA5rNS1KHA65BYizq2p+gLtASYtoAH2A= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.5.0/go.mod h1:9u/PDp7T3XzjGA8XmYJcffjqPJmXeofDXHUyHqp2lYc= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.6.0 h1:Z893Baw1+7PfK+KtYgrHu+V2n/Ae9S0jG1dZGe4WQ7o= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.6.0/go.mod h1:PmJdIbYf6UjqnAJwZPi6CNG8JHXdzc/Y0Y8bWfPy0Yw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.4.0 h1:EtQ6hVAgNsWTiO+u9e+ziaEYyOAlEkAwLskpL40U6pQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.4.0/go.mod h1:vEkJTjJ8vnv0uWy2tAp7DSydWFpudMGWPQ2SFucoN1k= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 h1:lPLbw4Gn59uoKqvOfSnkJr54XWk5Ak1NK20ZEiSWb3U= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0/go.mod h1:80NaCIH9YU3rzTTs/J/ECATjXuRqzo/wB6ukO6MZ0XY= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0 h1:QCPbsMPMcM4iGbui5SH6O4uxvZffPoBJ4CIGX7dU0l4= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0/go.mod h1:enkU5tq2HoXY+ZMiQprgF3Q83T3PbO77E83yXXzRZWE= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.2.0 h1:uxy31f/H1bkUV2aircA9hTQT8s093u1eOeErsOXIY90= 
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.2.0/go.mod h1:wLLzEoPune3u08rkvNBm3BprebkWRmmCkMtTeujM3Fs= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.3.0 h1:A2aUh9d38A2ECh76ahOQUdpJFe+Jhjk8qrfV+YbNYGY= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.3.0/go.mod h1:5h2rxfLN22pLTQ1ZoOza87rp2SnN/9UDYdYBQRmIrsE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3 h1:VxFCgxsqWe7OThOwJ5IpFX3xrObtuIH9Hg/NW7oot1Y= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.4.0 h1:/T5wKsw/po118HEDvnSE8YU7TESxvZbYM2rnn+Oi7Kk= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.4.0/go.mod h1:X5/JuOxPLU/ogICgDTtnpfaQzdQJO0yKDcpoxWLLJ8Y= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0 h1:qGZWS/WgiFY+Zgad2u0gwBHpJxz6Ne401JE7iQI1nKs= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0/go.mod h1:Mq6AEc+oEjCUlBuLiK5YwW4shSOAKCQ3tXN0sQeYoBA= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.7.0 h1:BR1dH17nltcO/bRr7sW+BTOY1OekCf3KadPBTHWPIY4= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.7.0/go.mod h1:IKVo9L4q2SkAfCZik0P9fdCZWppIE06l2ZfPktUNUXQ= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.8.0 h1:Cz26j4wGD1tJ2w/M8iLhaS81AkAGY3gEYRt0xQWjEIs= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.8.0/go.mod h1:QyNCg1xtWFJVL++i6ZyVcwXZCiKTNeXHH9zZu3NHOdU= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.3 h1:K2gCnGvAASpz+jqP9iyr+F/KNjmTYf8aWOtTQzhmZ5w= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= +github.com/aws/aws-sdk-go-v2/service/sso v1.5.0 h1:VnrCAJTp1bDxU79UuW/D4z7bwZ7xOc7JjDKpqXL/m04= +github.com/aws/aws-sdk-go-v2/service/sso v1.5.0/go.mod h1:GsqaJOJeOfeYD88/2vHWKXegvDRofDqWwC5i48A2kgs= +github.com/aws/aws-sdk-go-v2/service/sso v1.6.0 
h1:JDgKIUZOmLFu/Rv6zXLrVTWCmzA0jcTdvsT8iFIKrAI= +github.com/aws/aws-sdk-go-v2/service/sso v1.6.0/go.mod h1:Q/l0ON1annSU+mc0JybDy1Gy6dnJxIcWjphO6qJPzvM= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.2 h1:l504GWCoQi1Pk68vSUFGLmDIEMzRfVGNgLakDK+Uj58= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= +github.com/aws/aws-sdk-go-v2/service/sts v1.8.0 h1:7N7RsEVvUcvEg7jrWKU5AnSi4/6b6eY9+wG1g6W4ExE= +github.com/aws/aws-sdk-go-v2/service/sts v1.8.0/go.mod h1:dOlm91B439le5y1vtPCk5yJtbx3RdT3hRGYRY8TYKvQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.9.0 h1:rBLCnL8hQ7Sv1S4XCPYgTMI7Uhg81BkvzIiK+/of2zY= +github.com/aws/aws-sdk-go-v2/service/sts v1.9.0/go.mod h1:jLKCFqS+1T4i7HDqCP9GM4Uk75YW1cS0o82LdxpMyOE= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.8.1 h1:9Y6qxtzgEODaLNGN+oN2QvcHvKUe4jsH8w4M+8LXzGk= +github.com/aws/smithy-go v1.8.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.9.0 h1:c7FUdEqrQA1/UVKKCNDFQPNKGp4FQg3YW4Ck5SLTG58= +github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= @@ -51,38 +164,57 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 
+github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= 
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -90,6 +222,7 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -109,6 +242,7 
@@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -120,8 +254,13 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -129,44 +268,60 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath 
v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pkg/errors 
v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -195,17 +350,24 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.25.0 h1:Rj7XygbUHKUlDPcVdoLyR91fJBsduXj5fRxyqIQj/II= github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI= +github.com/rs/zerolog v1.26.0 h1:ORM4ibhEZeTeQlCojCK2kPz1ogAY4bGs4tD+SaAdGaE= +github.com/rs/zerolog v1.26.0/go.mod h1:yBiM87lvSqX8h0Ww4sdzNSkVYZ8dL2xjZJG1lAuGZEo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= +github.com/stretchr/objx v0.3.0/go.mod 
h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -214,11 +376,15 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0 h1:OtISOGfH6sOWa1/qXqqAiOIAO6Z5J3AEAE18WAq6BiQ= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= @@ -235,6 +401,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -245,8 +412,10 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -257,8 +426,10 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod 
h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= @@ -266,6 +437,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -296,14 +468,18 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -314,6 +490,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -353,8 +530,20 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 h1:2B5p2L5IfGiD7+b9BOoRMC6DgObAVZV+Fsp050NqXik= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk= +golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211102061401-a2f17f7b995c h1:QOfDMdrf/UwlVR0UBq2Mpr58UzNtvgJRXA4BgPfFACs= +golang.org/x/sys v0.0.0-20211102061401-a2f17f7b995c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c h1:+8miTPjMCTXwih7BQmvWwd0PjdBZq2MKp/qQaahSzEM= +golang.org/x/sys 
v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211106132015-ebca88c72f68 h1:Ywe/f3fNleF8I6F6qv3MeFoSZ6CTf2zBMMa/7qVML8M= +golang.org/x/sys v0.0.0-20211106132015-ebca88c72f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -365,6 +554,7 @@ golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -406,7 +596,10 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.1.5 
h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -427,12 +620,14 @@ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
@@ -462,6 +657,7 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhKa1AAvY53xsvLB1cWorMjslvY3VA8= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -474,6 +670,7 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -489,11 +686,13 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -514,7 +713,11 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git 
a/test/lease_stealing_util_test.go b/test/lease_stealing_util_test.go index 21b8ab3..aab57ae 100644 --- a/test/lease_stealing_util_test.go +++ b/test/lease_stealing_util_test.go @@ -1,16 +1,18 @@ package test import ( + "context" "fmt" "sync" "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" + "github.com/aws/aws-sdk-go-v2/service/kinesis" "github.com/stretchr/testify/assert" + chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint" cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config" wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker" @@ -18,10 +20,10 @@ import ( type LeaseStealingTest struct { t *testing.T - config *TestClusterConfig + config *TestClusterConfig cluster *TestCluster - kc kinesisiface.KinesisAPI - dc dynamodbiface.DynamoDBAPI + kc *kinesis.Client + dc *dynamodb.Client backOffSeconds int maxRetries int @@ -83,21 +85,19 @@ func (lst *LeaseStealingTest) getShardCountByWorker() map[string]int { } shardsByWorker := map[string]map[string]bool{} - err := lst.dc.ScanPages(input, func(out *dynamodb.ScanOutput, lastPage bool) bool { - for _, result := range out.Items { - if shardID, ok := result[chk.LeaseKeyKey]; !ok { - continue - } else if assignedTo, ok := result[chk.LeaseOwnerKey]; !ok { - continue - } else { - if _, ok := shardsByWorker[*assignedTo.S]; !ok { - shardsByWorker[*assignedTo.S] = map[string]bool{} - } - shardsByWorker[*assignedTo.S][*shardID.S] = true + scan, err := lst.dc.Scan(context.TODO(), input) + for _, result := range scan.Items { + if shardID, ok := result[chk.LeaseKeyKey]; !ok { + continue + } else if assignedTo, ok := result[chk.LeaseOwnerKey]; !ok { + continue + } else { + if _, ok := 
shardsByWorker[assignedTo.(*types.AttributeValueMemberS).Value]; !ok { + shardsByWorker[assignedTo.(*types.AttributeValueMemberS).Value] = map[string]bool{} } + shardsByWorker[assignedTo.(*types.AttributeValueMemberS).Value][shardID.(*types.AttributeValueMemberS).Value] = true } - return !lastPage - }) + } assert.Nil(lst.t, err) shardCountByWorker := map[string]int{} @@ -108,12 +108,12 @@ func (lst *LeaseStealingTest) getShardCountByWorker() map[string]int { } type LeaseStealingAssertions struct { - expectedLeasesForIntialWorker int - expectedLeasesPerWorker int + expectedLeasesForInitialWorker int + expectedLeasesPerWorker int } func (lst *LeaseStealingTest) Run(assertions LeaseStealingAssertions) { - // Publish records onto stream thoughtout the entire duration of the test + // Publish records onto stream throughout the entire duration of the test stop := lst.publishSomeData() defer stop() @@ -126,16 +126,16 @@ func (lst *LeaseStealingTest) Run(assertions LeaseStealingAssertions) { time.Sleep(time.Duration(lst.backOffSeconds) * time.Second) shardCountByWorker := lst.getShardCountByWorker() - if shardCount, ok := shardCountByWorker[worker1]; ok && shardCount == assertions.expectedLeasesForIntialWorker { + if shardCount, ok := shardCountByWorker[worker1]; ok && shardCount == assertions.expectedLeasesForInitialWorker { worker1ShardCount = shardCount break } } // Assert correct number of leases - assert.Equal(lst.t, assertions.expectedLeasesForIntialWorker, worker1ShardCount) + assert.Equal(lst.t, assertions.expectedLeasesForInitialWorker, worker1ShardCount) - // Spawn Remaining Wokers + // Spawn Remaining Workers for i := 0; i < lst.config.numWorkers-1; i++ { lst.cluster.SpawnWorker() } diff --git a/test/record_processor_test.go b/test/record_processor_test.go index 2e37368..ff4fef3 100644 --- a/test/record_processor_test.go +++ b/test/record_processor_test.go @@ -23,7 +23,9 @@ package test import ( "testing" + "github.com/aws/aws-sdk-go-v2/aws" 
"github.com/stretchr/testify/assert" + kc "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces" ) @@ -50,7 +52,7 @@ type dumpRecordProcessor struct { } func (dd *dumpRecordProcessor) Initialize(input *kc.InitializationInput) { - dd.t.Logf("Processing SharId: %v at checkpoint: %v", input.ShardId, aws.StringValue(input.ExtendedSequenceNumber.SequenceNumber)) + dd.t.Logf("Processing SharId: %v at checkpoint: %v", input.ShardId, aws.ToString(input.ExtendedSequenceNumber.SequenceNumber)) shardID = input.ShardId dd.count = 0 } @@ -76,18 +78,18 @@ func (dd *dumpRecordProcessor) ProcessRecords(input *kc.ProcessRecordsInput) { // Calculate the time taken from polling records and delivering to record processor for a batch. diff := input.CacheExitTime.Sub(*input.CacheEntryTime) dd.t.Logf("Checkpoint progress at: %v, MillisBehindLatest = %v, KCLProcessTime = %v", lastRecordSequenceNumber, input.MillisBehindLatest, diff) - input.Checkpointer.Checkpoint(lastRecordSequenceNumber) + _ = input.Checkpointer.Checkpoint(lastRecordSequenceNumber) } func (dd *dumpRecordProcessor) Shutdown(input *kc.ShutdownInput) { - dd.t.Logf("Shutdown Reason: %v", aws.StringValue(kc.ShutdownReasonMessage(input.ShutdownReason))) + dd.t.Logf("Shutdown Reason: %v", aws.ToString(kc.ShutdownReasonMessage(input.ShutdownReason))) dd.t.Logf("Processed Record Count = %d", dd.count) // When the value of {@link ShutdownInput#getShutdownReason()} is // {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason#TERMINATE} it is required that you // checkpoint. Failure to do so will result in an IllegalArgumentException, and the KCL no longer making progress. 
if input.ShutdownReason == kc.TERMINATE { - input.Checkpointer.Checkpoint(nil) + _ = input.Checkpointer.Checkpoint(nil) } assert.True(dd.t, dd.count > 0) diff --git a/test/record_publisher_test.go b/test/record_publisher_test.go index baaac57..85017a3 100644 --- a/test/record_publisher_test.go +++ b/test/record_publisher_test.go @@ -19,61 +19,99 @@ package test import ( + "context" "crypto/md5" "fmt" "sync" + "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface" - rec "github.com/awslabs/kinesis-aggregation/go/records" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awsConfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/dynamodb" + "github.com/aws/aws-sdk-go-v2/service/kinesis" + "github.com/aws/aws-sdk-go-v2/service/kinesis/types" "github.com/golang/protobuf/proto" - "github.com/vmware/vmware-go-kcl/clientlibrary/utils" - "testing" + "github.com/vmware/vmware-go-kcl/clientlibrary/utils" + rec "github.com/vmware/vmware-go-kcl/internal/records" ) const specstr = `{"name":"kube-qQyhk","networking":{"containerNetworkCidr":"10.2.0.0/16"},"orgName":"BVT-Org-cLQch","projectName":"project-tDSJd","serviceLevel":"DEVELOPER","size":{"count":1},"version":"1.8.1-4"}` // NewKinesisClient to create a Kinesis Client. 
-func NewKinesisClient(t *testing.T, regionName, endpoint string, credentials *credentials.Credentials) *kinesis.Kinesis { - s, err := session.NewSession(&aws.Config{ - Region: aws.String(regionName), - Endpoint: aws.String(endpoint), - Credentials: credentials, +func NewKinesisClient(t *testing.T, regionName, endpoint string, creds *credentials.StaticCredentialsProvider) *kinesis.Client { + // create session for Kinesis + t.Logf("Creating Kinesis client") + + resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { + return aws.Endpoint { + PartitionID: "aws", + URL: endpoint, + SigningRegion: regionName, + }, nil }) + cfg, err := awsConfig.LoadDefaultConfig( + context.TODO(), + awsConfig.WithRegion(regionName), + awsConfig.WithCredentialsProvider( + credentials.NewStaticCredentialsProvider( + creds.Value.AccessKeyID, + creds.Value.SecretAccessKey, + creds.Value.SessionToken)), + awsConfig.WithEndpointResolver(resolver), + awsConfig.WithRetryer(func() aws.Retryer { + return retry.AddWithMaxBackoffDelay(retry.NewStandard(), retry.DefaultMaxBackoff) + }), + ) + if err != nil { // no need to move forward - t.Fatalf("Failed in getting Kinesis session for creating Worker: %+v", err) + t.Fatalf("Failed in loading Kinesis default config for creating Worker: %+v", err) } - return kinesis.New(s) + + return kinesis.NewFromConfig(cfg) } // NewDynamoDBClient to create a Kinesis Client. 
-func NewDynamoDBClient(t *testing.T, regionName, endpoint string, credentials *credentials.Credentials) *dynamodb.DynamoDB { - s, err := session.NewSession(&aws.Config{ - Region: aws.String(regionName), - Endpoint: aws.String(endpoint), - Credentials: credentials, +func NewDynamoDBClient(t *testing.T, regionName, endpoint string, creds *credentials.StaticCredentialsProvider) *dynamodb.Client { + resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { + return aws.Endpoint { + PartitionID: "aws", + URL: endpoint, + SigningRegion: regionName, + }, nil }) + cfg, err := awsConfig.LoadDefaultConfig( + context.TODO(), + awsConfig.WithRegion(regionName), + awsConfig.WithCredentialsProvider( + credentials.NewStaticCredentialsProvider( + creds.Value.AccessKeyID, + creds.Value.SecretAccessKey, + creds.Value.SessionToken)), + awsConfig.WithEndpointResolver(resolver), + awsConfig.WithRetryer(func() aws.Retryer { + return retry.AddWithMaxBackoffDelay(retry.NewStandard(), retry.DefaultMaxBackoff) + }), + ) + if err != nil { - // no need to move forward - t.Fatalf("Failed in getting DynamoDB session for creating Worker: %+v", err) + t.Fatalf("unable to load SDK config, %v", err) } - return dynamodb.New(s) + + return dynamodb.NewFromConfig(cfg) } -func continuouslyPublishSomeData(t *testing.T, kc kinesisiface.KinesisAPI) func() { - shards := []*kinesis.Shard{} +func continuouslyPublishSomeData(t *testing.T, kc *kinesis.Client) func() { + var shards []types.Shard var nextToken *string for { - out, err := kc.ListShards(&kinesis.ListShardsInput{ + out, err := kc.ListShards(context.TODO(), &kinesis.ListShardsInput { StreamName: aws.String(streamName), NextToken: nextToken, }) @@ -112,7 +150,7 @@ func continuouslyPublishSomeData(t *testing.T, kc kinesisiface.KinesisAPI) func( } } -func publishToAllShards(t *testing.T, kc kinesisiface.KinesisAPI, shards []*kinesis.Shard) { +func publishToAllShards(t *testing.T, kc *kinesis.Client, shards []types.Shard) 
{ // Put records to all shards for i := 0; i < 10; i++ { for _, shard := range shards { @@ -122,7 +160,7 @@ func publishToAllShards(t *testing.T, kc kinesisiface.KinesisAPI, shards []*kine } // publishSomeData to put some records into Kinesis stream -func publishSomeData(t *testing.T, kc kinesisiface.KinesisAPI) { +func publishSomeData(t *testing.T, kc *kinesis.Client) { // Put some data into stream. t.Log("Putting data into stream using PutRecord API...") for i := 0; i < 50; i++ { @@ -146,8 +184,8 @@ func publishSomeData(t *testing.T, kc kinesisiface.KinesisAPI) { } // publishRecord to put a record into Kinesis stream using PutRecord API. -func publishRecord(t *testing.T, kc kinesisiface.KinesisAPI, hashKey *string) { - input := &kinesis.PutRecordInput{ +func publishRecord(t *testing.T, kc *kinesis.Client, hashKey *string) { + input := &kinesis.PutRecordInput { Data: []byte(specstr), StreamName: aws.String(streamName), PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), @@ -156,7 +194,7 @@ func publishRecord(t *testing.T, kc kinesisiface.KinesisAPI, hashKey *string) { input.ExplicitHashKey = hashKey } // Use random string as partition key to ensure even distribution across shards - _, err := kc.PutRecord(input) + _, err := kc.PutRecord(context.TODO(), input) if err != nil { t.Errorf("Error in PutRecord. %+v", err) @@ -164,19 +202,19 @@ func publishRecord(t *testing.T, kc kinesisiface.KinesisAPI, hashKey *string) { } // publishRecord to put a record into Kinesis stream using PutRecords API. 
-func publishRecords(t *testing.T, kc kinesisiface.KinesisAPI) { +func publishRecords(t *testing.T, kc *kinesis.Client) { // Use random string as partition key to ensure even distribution across shards - records := make([]*kinesis.PutRecordsRequestEntry, 5) + records := make([]types.PutRecordsRequestEntry, 5) for i := 0; i < 5; i++ { - record := &kinesis.PutRecordsRequestEntry{ + record := types.PutRecordsRequestEntry { Data: []byte(specstr), PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), } records[i] = record } - _, err := kc.PutRecords(&kinesis.PutRecordsInput{ + _, err := kc.PutRecords(context.TODO(), &kinesis.PutRecordsInput{ Records: records, StreamName: aws.String(streamName), }) @@ -187,10 +225,10 @@ func publishRecords(t *testing.T, kc kinesisiface.KinesisAPI) { } // publishRecord to put a record into Kinesis stream using PutRecord API. -func publishAggregateRecord(t *testing.T, kc kinesisiface.KinesisAPI) { +func publishAggregateRecord(t *testing.T, kc *kinesis.Client) { data := generateAggregateRecord(5, specstr) // Use random string as partition key to ensure even distribution across shards - _, err := kc.PutRecord(&kinesis.PutRecordInput{ + _, err := kc.PutRecord(context.TODO(), &kinesis.PutRecordInput { Data: data, StreamName: aws.String(streamName), PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), diff --git a/test/worker_custom_test.go b/test/worker_custom_test.go index 19a6fb7..a513b56 100644 --- a/test/worker_custom_test.go +++ b/test/worker_custom_test.go @@ -19,14 +19,14 @@ package test import ( + "context" "os" "sync" "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/kinesis" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/kinesis" log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -77,7 +77,8 @@ func TestWorkerInjectCheckpointer(t *testing.T) { ID: shardID, Mux: &sync.RWMutex{}, } - 
checkpointer.FetchCheckpoint(status) + + _ = checkpointer.FetchCheckpoint(status) // checkpointer should be the same assert.NotEmpty(t, status.Checkpoint) @@ -104,12 +105,13 @@ func TestWorkerInjectKinesis(t *testing.T) { // configure cloudwatch as metrics system kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem)) - // create custom Kinesis - s, err := session.NewSession(&aws.Config{ - Region: aws.String(regionName), - }) + defaultConfig, err := config.LoadDefaultConfig( + context.TODO(), + config.WithRegion(regionName), + ) + assert.Nil(t, err) - kc := kinesis.New(s) + kc := kinesis.NewFromConfig(defaultConfig) // Put some data into stream. // publishSomeData(t, kc) @@ -146,11 +148,13 @@ func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) { kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem)) // create custom Kinesis - s, err := session.NewSession(&aws.Config{ - Region: aws.String(regionName), - }) + defaultConfig, err := config.LoadDefaultConfig( + context.TODO(), + config.WithRegion(regionName), + ) + assert.Nil(t, err) - kc := kinesis.New(s) + kc := kinesis.NewFromConfig(defaultConfig) // Put some data into stream. 
// publishSomeData(t, kc) diff --git a/test/worker_lease_stealing_test.go b/test/worker_lease_stealing_test.go index c35974c..3742a8f 100644 --- a/test/worker_lease_stealing_test.go +++ b/test/worker_lease_stealing_test.go @@ -20,8 +20,8 @@ func TestLeaseStealing(t *testing.T) { } test := NewLeaseStealingTest(t, config, newLeaseStealingWorkerFactory(t)) test.Run(LeaseStealingAssertions{ - expectedLeasesForIntialWorker: config.numShards, - expectedLeasesPerWorker: config.numShards / config.numWorkers, + expectedLeasesForInitialWorker: config.numShards, + expectedLeasesPerWorker: config.numShards / config.numWorkers, }) } @@ -55,7 +55,7 @@ func (wf *leaseStealingWorkerFactory) CreateKCLConfig(workerID string, config *T WithLogger(log) } -func (wf *leaseStealingWorkerFactory) CreateWorker(workerID string, kclConfig *cfg.KinesisClientLibConfiguration) *wk.Worker { +func (wf *leaseStealingWorkerFactory) CreateWorker(_ string, kclConfig *cfg.KinesisClientLibConfiguration) *wk.Worker { worker := wk.NewWorker(recordProcessorFactory(wf.t), kclConfig) return worker } @@ -71,8 +71,8 @@ func TestLeaseStealingInjectCheckpointer(t *testing.T) { } test := NewLeaseStealingTest(t, config, newleaseStealingWorkerFactoryCustomChk(t)) test.Run(LeaseStealingAssertions{ - expectedLeasesForIntialWorker: config.numShards, - expectedLeasesPerWorker: config.numShards / config.numWorkers, + expectedLeasesForInitialWorker: config.numShards, + expectedLeasesPerWorker: config.numShards / config.numWorkers, }) } @@ -101,10 +101,10 @@ func TestLeaseStealingWithMaxLeasesForWorker(t *testing.T) { regionName: regionName, workerIDTemplate: workerID + "-%v", } - test := NewLeaseStealingTest(t, config, newleaseStealingWorkerFactoryMaxLeases(t, config.numShards-1)) + test := NewLeaseStealingTest(t, config, newLeaseStealingWorkerFactoryMaxLeases(t, config.numShards-1)) test.Run(LeaseStealingAssertions{ - expectedLeasesForIntialWorker: config.numShards - 1, - expectedLeasesPerWorker: 2, + 
expectedLeasesForInitialWorker: config.numShards - 1, + expectedLeasesPerWorker: 2, }) } @@ -113,7 +113,7 @@ type leaseStealingWorkerFactoryMaxLeases struct { *leaseStealingWorkerFactory } -func newleaseStealingWorkerFactoryMaxLeases(t *testing.T, maxLeases int) *leaseStealingWorkerFactoryMaxLeases { +func newLeaseStealingWorkerFactoryMaxLeases(t *testing.T, maxLeases int) *leaseStealingWorkerFactoryMaxLeases { return &leaseStealingWorkerFactoryMaxLeases{ maxLeases, newLeaseStealingWorkerFactory(t), diff --git a/test/worker_test.go b/test/worker_test.go index a445a59..680a7c6 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -26,9 +26,7 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/stscreds" - "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go-v2/credentials" "github.com/prometheus/common/expfmt" "github.com/stretchr/testify/assert" @@ -47,6 +45,8 @@ const ( regionName = "us-west-2" workerID = "test-worker" consumerName = "enhanced-fan-out-consumer" + kinesisEndpoint = "https://kinesis.eu-west-1.amazonaws.com" + dynamoEndpoint = "https://dynamodb.eu-west-1.amazonaws.com" ) const metricsSystem = "cloudwatch" @@ -76,7 +76,8 @@ func TestWorker(t *testing.T) { WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). WithFailoverTimeMillis(300000). - WithLogger(log) + WithLogger(log). + WithKinesisEndpoint(kinesisEndpoint) runTest(kclConfig, false, t) } @@ -98,7 +99,8 @@ func TestWorkerWithTimestamp(t *testing.T) { WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). WithFailoverTimeMillis(300000). - WithLogger(log) + WithLogger(log). + WithKinesisEndpoint(kinesisEndpoint) runTest(kclConfig, false, t) } @@ -128,24 +130,28 @@ func TestWorkerWithSigInt(t *testing.T) { WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). WithFailoverTimeMillis(300000). - WithLogger(log) + WithLogger(log). 
+ WithKinesisEndpoint(kinesisEndpoint) runTest(kclConfig, true, t) } func TestWorkerStatic(t *testing.T) { - t.Skip("Need to provide actual credentials") + //t.Skip("Need to provide actual credentials") // Fill in the credentials for accessing Kinesis and DynamoDB. // Note: use empty string as SessionToken for long-term credentials. - creds := credentials.NewStaticCredentials("AccessKeyId", "SecretAccessKey", "SessionToken") + kinesisCreds := credentials.NewStaticCredentialsProvider("", "", "") + dynamoCreds := credentials.NewStaticCredentialsProvider("", "", "") - kclConfig := cfg.NewKinesisClientLibConfigWithCredential(appName, streamName, regionName, workerID, creds). + kclConfig := cfg.NewKinesisClientLibConfigWithCredentials(appName, streamName, regionName, workerID, &kinesisCreds, &dynamoCreds). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). - WithFailoverTimeMillis(300000) + WithFailoverTimeMillis(300000). + WithKinesisEndpoint(kinesisEndpoint). + WithDynamoDBEndpoint(dynamoEndpoint) runTest(kclConfig, false, t) } @@ -155,25 +161,29 @@ func TestWorkerAssumeRole(t *testing.T) { // Initial credentials loaded from SDK's default credential chain. Such as // the environment, shared credentials (~/.aws/credentials), or EC2 Instance - // Role. These credentials will be used to to make the STS Assume Role API. - sess := session.Must(session.NewSession()) + // Role. These credentials will be used to make the STS Assume Role API. + //sess := session.Must(session.NewSession()) // Create the credentials from AssumeRoleProvider to assume the role // referenced by the "myRoleARN" ARN. 
- creds := stscreds.NewCredentials(sess, "arn:aws:iam::*:role/kcl-test-publisher") + //kinesisCreds := stscreds.NewAssumeRoleProvider(sess, "arn:aws:iam::*:role/kcl-test-publisher") + kinesisCreds := credentials.NewStaticCredentialsProvider("", "", "") + dynamoCreds := credentials.NewStaticCredentialsProvider("", "", "") - kclConfig := cfg.NewKinesisClientLibConfigWithCredential(appName, streamName, regionName, workerID, creds). + kclConfig := cfg.NewKinesisClientLibConfigWithCredentials(appName, streamName, regionName, workerID, &kinesisCreds, &dynamoCreds). WithInitialPositionInStream(cfg.LATEST). WithMaxRecords(10). WithMaxLeasesForWorker(1). WithShardSyncIntervalMillis(5000). - WithFailoverTimeMillis(300000) + WithFailoverTimeMillis(300000). + WithKinesisEndpoint(kinesisEndpoint). + WithDynamoDBEndpoint(dynamoEndpoint) runTest(kclConfig, false, t) } func TestEnhancedFanOutConsumer(t *testing.T) { - // At miminal, use standard logrus logger + // At minimal, use standard logrus logger // log := logger.NewLogrusLogger(logrus.StandardLogger()) // // In order to have precise control over logging. Use logger with config @@ -202,7 +212,7 @@ func TestEnhancedFanOutConsumer(t *testing.T) { } func TestEnhancedFanOutConsumerDefaultConsumerName(t *testing.T) { - // At miminal, use standard logrus logger + // At minimal, use standard logrus logger // log := logger.NewLogrusLogger(logrus.StandardLogger()) // // In order to have precise control over logging. Use logger with config @@ -234,7 +244,7 @@ func TestEnhancedFanOutConsumerARN(t *testing.T) { t.Skip("Need to provide actual consumerARN") consumerARN := "arn:aws:kinesis:*:stream/kcl-test/consumer/fanout-poc-consumer-test:*" - // At miminal, use standard logrus logger + // At minimal, use standard logrus logger // log := logger.NewLogrusLogger(logrus.StandardLogger()) // // In order to have precise control over logging. 
Use logger with config @@ -294,13 +304,14 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t if triggersig { t.Log("Trigger signal SIGINT") p, _ := os.FindProcess(os.Getpid()) - p.Signal(os.Interrupt) + _ = p.Signal(os.Interrupt) } // wait a few seconds before shutdown processing time.Sleep(30 * time.Second) - if metricsSystem == "prometheus" { + switch metricsSystem { + case "prometheus": res, err := http.Get("http://localhost:8080/metrics") if err != nil { t.Fatalf("Error scraping Prometheus endpoint %s", err) @@ -308,12 +319,12 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t var parser expfmt.TextParser parsed, err := parser.TextToMetricFamilies(res.Body) - res.Body.Close() + _ = res.Body.Close() if err != nil { t.Errorf("Error reading monitoring response %s", err) } - t.Logf("Prometheus: %+v", parsed) + t.Logf("Prometheus: %+v", parsed) } t.Log("Calling normal shutdown at the end of application.") @@ -327,7 +338,7 @@ func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service stri return cloudwatch.NewMonitoringServiceWithOptions(kclConfig.RegionName, kclConfig.KinesisCredentials, kclConfig.Logger, - cloudwatch.DEFAULT_CLOUDWATCH_METRICS_BUFFER_DURATION) + cloudwatch.DefaultCloudwatchMetricsBufferDuration) } if service == "prometheus" { From a44513ef081fdd8ef1b3a636fc75cb9f7681ae84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Mon, 8 Nov 2021 16:28:32 +0100 Subject: [PATCH 79/90] add parameters names in order to serve as suggestions and ignore explicitly bellow to avoid lint msgs. 
--- clientlibrary/metrics/interfaces.go | 38 ++++++++++++++--------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/clientlibrary/metrics/interfaces.go b/clientlibrary/metrics/interfaces.go index 6e06108..8762a49 100644 --- a/clientlibrary/metrics/interfaces.go +++ b/clientlibrary/metrics/interfaces.go @@ -32,29 +32,29 @@ package metrics type MonitoringService interface { Init(appName, streamName, workerID string) error Start() error - IncrRecordsProcessed(string, int) - IncrBytesProcessed(string, int64) - MillisBehindLatest(string, float64) - LeaseGained(string) - LeaseLost(string) - LeaseRenewed(string) - RecordGetRecordsTime(string, float64) - RecordProcessRecordsTime(string, float64) + IncrRecordsProcessed(shard string, count int) + IncrBytesProcessed(shard string, count int64) + MillisBehindLatest(shard string, milliSeconds float64) + LeaseGained(shard string) + LeaseLost(shard string) + LeaseRenewed(shard string) + RecordGetRecordsTime(shard string, time float64) + RecordProcessRecordsTime(shard string, time float64) Shutdown() } // NoopMonitoringService implements MonitoringService by does nothing. 
type NoopMonitoringService struct{} -func (NoopMonitoringService) Init(appName, streamName, workerID string) error { return nil } -func (NoopMonitoringService) Start() error { return nil } -func (NoopMonitoringService) Shutdown() {} +func (NoopMonitoringService) Init(_, _, _ string) error { return nil } +func (NoopMonitoringService) Start() error { return nil } +func (NoopMonitoringService) Shutdown() {} -func (NoopMonitoringService) IncrRecordsProcessed(shard string, count int) {} -func (NoopMonitoringService) IncrBytesProcessed(shard string, count int64) {} -func (NoopMonitoringService) MillisBehindLatest(shard string, millSeconds float64) {} -func (NoopMonitoringService) LeaseGained(shard string) {} -func (NoopMonitoringService) LeaseLost(shard string) {} -func (NoopMonitoringService) LeaseRenewed(shard string) {} -func (NoopMonitoringService) RecordGetRecordsTime(shard string, time float64) {} -func (NoopMonitoringService) RecordProcessRecordsTime(shard string, time float64) {} +func (NoopMonitoringService) IncrRecordsProcessed(_ string, _ int) {} +func (NoopMonitoringService) IncrBytesProcessed(_ string, _ int64) {} +func (NoopMonitoringService) MillisBehindLatest(_ string, _ float64) {} +func (NoopMonitoringService) LeaseGained(_ string) {} +func (NoopMonitoringService) LeaseLost(_ string) {} +func (NoopMonitoringService) LeaseRenewed(_ string) {} +func (NoopMonitoringService) RecordGetRecordsTime(_ string, _ float64) {} +func (NoopMonitoringService) RecordProcessRecordsTime(_ string, _ float64) {} From 2bf65b43862f17ff7690f7d4ead7fe3a1c01ce69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Mon, 8 Nov 2021 18:39:27 +0100 Subject: [PATCH 80/90] update after tidy --- go.mod | 6 +- go.sum | 175 +-------------------------------------------------------- 2 files changed, 3 insertions(+), 178 deletions(-) diff --git a/go.mod b/go.mod index 1f6152c..5ec001c 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,6 @@ require ( github.com/aws/aws-sdk-go-v2 
v1.11.0 github.com/aws/aws-sdk-go-v2/config v1.10.0 github.com/aws/aws-sdk-go-v2/credentials v1.6.0 - github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.4.0 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.10.0 github.com/aws/aws-sdk-go-v2/service/dynamodb v1.7.0 github.com/aws/aws-sdk-go-v2/service/kinesis v1.8.0 @@ -24,12 +23,10 @@ require ( require ( github.com/BurntSushi/toml v0.4.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 // indirect - github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.3.0 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0 // indirect - github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.6.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.3.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0 // indirect @@ -44,10 +41,9 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect - github.com/stretchr/objx v0.3.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.7.0 // indirect - golang.org/x/sys v0.0.0-20211106132015-ebca88c72f68 // indirect + golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42 // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/go.sum b/go.sum index 22eed71..9a7eb1b 100644 --- a/go.sum +++ b/go.sum @@ -12,209 +12,106 @@ cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bP cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod 
h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0 h1:Dg9iHVQfrhq82rUNu9ZxUDrJLaxFUe/HlCVaLyRruq8= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= 
-cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/aws/aws-sdk-go v1.19.48 h1:YhKzuc9xggUt8jNDc5CmIBeB8GmGtazzq0aCXO4sj6w= -github.com/aws/aws-sdk-go v1.19.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.41.7 h1:vlpR8Cky3ZxUVNINgeRZS6N0p6zmFvu/ZqRRwrTI25U= 
-github.com/aws/aws-sdk-go v1.41.7/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go-v2 v1.8.1/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= -github.com/aws/aws-sdk-go-v2 v1.9.0/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= -github.com/aws/aws-sdk-go-v2 v1.10.0 h1:+dCJ5W2HiZNa4UtaIc5ljKNulm0dK0vS5dxb5LdDOAA= -github.com/aws/aws-sdk-go-v2 v1.10.0/go.mod h1:U/EyyVvKtzmFeQQcca7eBotKdlpcP2zzU6bXBYcf7CE= github.com/aws/aws-sdk-go-v2 v1.11.0 h1:HxyD62DyNhCfiFGUHqJ/xITD6rAjJ7Dm/2nLxLmO4Ag= github.com/aws/aws-sdk-go-v2 v1.11.0/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 h1:yVUAwvJC/0WNPbyl0nA3j1L6CW1CN8wBubCRqtG7JLI= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0/go.mod h1:Xn6sxgRuIDflLRJFj5Ev7UxABIkNbccFPV/p8itDReM= -github.com/aws/aws-sdk-go-v2/config v1.6.1 h1:qrZINaORyr78syO1zfD4l7r4tZjy0Z1l0sy4jiysyOM= -github.com/aws/aws-sdk-go-v2/config v1.6.1/go.mod h1:t/y3UPu0XEDy0cEw6mvygaBQaPzWiYAxfP2SzgtvclA= -github.com/aws/aws-sdk-go-v2/config v1.9.0 h1:SkREVSwi+J8MSdjhJ96jijZm5ZDNleI0E4hHCNivh7s= -github.com/aws/aws-sdk-go-v2/config v1.9.0/go.mod h1:qhK5NNSgo9/nOSMu3HyE60WHXZTWTHTgd5qtIF44vOQ= github.com/aws/aws-sdk-go-v2/config v1.10.0 h1:4i+/7DmCQCAls5Z61giur0LOPZ3PXFwnSIw7hRamzws= github.com/aws/aws-sdk-go-v2/config v1.10.0/go.mod h1:xuqoV5etD3N3B8Ts9je4ijgAv6mb+6NiOPFMUhwRcjA= -github.com/aws/aws-sdk-go-v2/credentials v1.3.3 h1:A13QPatmUl41SqUfnuT3V0E3XiNGL6qNTOINbE8cZL4= -github.com/aws/aws-sdk-go-v2/credentials v1.3.3/go.mod h1:oVieKMT3m9BSfqhOfuQ+E0j/yN84ZAJ7Qv8Sfume/ak= -github.com/aws/aws-sdk-go-v2/credentials v1.5.0 h1:r6470olsn2qyOe2aLzK6q+wfO3dzNcMujRT3gqBgBB8= -github.com/aws/aws-sdk-go-v2/credentials v1.5.0/go.mod h1:kvqTkpzQmzri9PbsiTY+LvwFzM0gY19emlAWwBOJMb0= github.com/aws/aws-sdk-go-v2/credentials v1.6.0 h1:L3O6osQTlzLKRmiTphw2QJuD21EFapWCX4IipiRJhAE= github.com/aws/aws-sdk-go-v2/credentials v1.6.0/go.mod 
h1:rQkYdQPDXRrvPLeEuCNwSgtwMzBo9eDGWlTNC69Sh/0= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0 h1:8kvinmbIDObqsWegKP0JjeanYPiA4GUVpAtciNWE+jw= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.2.0/go.mod h1:UVFtSYSWCHj2+brBLDHUdlJXmz8LxUpZhA+Ewypc+xQ= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.3.0 h1:jEWmr4fcoAdoDo34DKMED/lEgPyyGE6/Xhwbgs6+NS8= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.3.0/go.mod h1:YjXozu6rHksfG22T5ZZASTrFOLzI0AoyuEC+GU9I3Lw= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.4.0 h1:J8Zgr+z0RjxidWB6vjX6sEB8TU/y6ELWoYhNoJ99d+M= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.4.0/go.mod h1:gWzcyoZ5LNkx1Xhluc25HU9eWIdcwiaymHuJnwO6ELs= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.3.0 h1:Nm2gF15BCti2SRfE/G6rS7KbTD8mQTVIwGFjMZIlie0= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression v1.3.0/go.mod h1:GQd3X3up0vqgHmt2jca0vyM7rbZj1KkJBDHlb6Oc1Eg= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1 h1:rc+fRGvlKbeSd9IFhFS1KWBs0XjTkq0CfK5xqyLgIp0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.1/go.mod h1:+GTydg3uHmVlQdkRoetz6VHKbOMEYof70m19IpMLifc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.7.0 h1:FKaqk7geL3oIqSwGJt5SWUKj8uJ+qLZNqlBuqq6sFyA= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.7.0/go.mod h1:KqEkRkxm/+1Pd/rENRNbQpfblDBYeg5HDSqjB6ks8hA= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0 h1:OpZjuUy8Jt3CA1WgJgBC5Bz+uOjE5Ppx4NFTRaooUuA= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0/go.mod h1:5E1J3/TTYy6z909QNR0QnXGBpfESYGDqd3O0zqONghU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4 h1:IM9b6hlCcVFJFydPoyphs/t7YrHfqKy7T4/7AG5Eprs= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.4/go.mod h1:W5gGbtNXFpF9/ssYZTaItzG/B+j0bjTnwStiCP2AtWU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.7 h1:/0GQVY8J25hww4J9a+rYKDr9ryGh2KdIdR8YHBP54h0= 
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.0.7/go.mod h1:QXoZAXmBEHeMIFiBr3XumpTyoNTXTQbqPV+qaGX7gfY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0 h1:zY8cNmbBXt3pzjgWgdIbzpQ6qxoCwt+Nx9JbrAf2mbY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0/go.mod h1:NO3Q5ZTTQtO2xIg2+xTXYDiT7knSejfeDm7WGDaOo0U= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0 h1:Z3aR/OXBnkYK9zXkNkfitHX6SmUBzSsx8VMHbH4Lvhw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0/go.mod h1:anlUzBoEWglcUxUQwZA7HQOEVEnQALVZsizAapB2hq8= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1 h1:IkqRRUZTKaS16P2vpX+FNc2jq3JWa3c478gykQp4ow4= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.1/go.mod h1:Pv3WenDjI0v2Jl7UaMFIIbPOBbhn33RmmAmGgkXDoqY= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.5 h1:zPxLGWALExNepElO0gYgoqsbqTlt4ZCrhZ7XlfJ+Qlw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.2.5/go.mod h1:6ZBTuDmvpCOD4Sf1i2/I3PgftlEcDGgvi8ocq64oQEg= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0 h1:c10Z7fWxtJCoyc8rv06jdh9xrKnu7bAJiRaKWvTb2mU= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0/go.mod h1:6oXGy4GLpypD3uCh8wcqztigGgmhLToMfjavgh+VySg= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.9.0/go.mod h1:iX4tSMY8NP1mzU2PMS6arLyB/Yufz2LxCkn9DsgiWEI= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.10.0 h1:MNNV0fi3J5Lxxhx8iDlKdRZJrtBv/0FyganA3nBYe8Q= github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.10.0/go.mod h1:Oiwhs3Fo9amYOGsJggWBPU6bwa/u0xVpEdOS5HlouPg= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0 h1:SGwKUQaJudQQZE72dDQlL2FGuHNAEK1CyqKLTjh6mqE= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.5.0/go.mod h1:XY5YhCS9SLul3JSQ08XG/nfxXxrkh6RR21XPq/J//NY= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.6.0 h1:HDp8hUQlGU5fgNoNDp0BOthk57AuTXMTaAK1mb9c27I= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.6.0/go.mod h1:t8pYXJHxfOe/088CcNeuqQbucpq9SwO1yjheCieDDnI= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.7.0 
h1:S3X6RWl0TfMxNXsIzz8r3Y6YVA1HWGSx6M345Q3mQ+I= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.7.0/go.mod h1:Hh0zJ3419ET9xQBeR+y0lHIkObJwAKPbzV9nTZ0yrJ0= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0 h1:QbFWJr2SAyVYvyoOHvJU6sCGLnqNT94ZbWElJMEI1JY= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.4.0/go.mod h1:bYsEP8w5YnbYyrx/Zi5hy4hTwRRQISSJS3RWrsGRijg= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.5.0 h1:At4HitvrEFdSA5rNS1KHA65BYizq2p+gLtASYtoAH2A= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.5.0/go.mod h1:9u/PDp7T3XzjGA8XmYJcffjqPJmXeofDXHUyHqp2lYc= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.6.0 h1:Z893Baw1+7PfK+KtYgrHu+V2n/Ae9S0jG1dZGe4WQ7o= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.6.0/go.mod h1:PmJdIbYf6UjqnAJwZPi6CNG8JHXdzc/Y0Y8bWfPy0Yw= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.4.0 h1:EtQ6hVAgNsWTiO+u9e+ziaEYyOAlEkAwLskpL40U6pQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.4.0/go.mod h1:vEkJTjJ8vnv0uWy2tAp7DSydWFpudMGWPQ2SFucoN1k= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 h1:lPLbw4Gn59uoKqvOfSnkJr54XWk5Ak1NK20ZEiSWb3U= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0/go.mod h1:80NaCIH9YU3rzTTs/J/ECATjXuRqzo/wB6ukO6MZ0XY= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0 h1:QCPbsMPMcM4iGbui5SH6O4uxvZffPoBJ4CIGX7dU0l4= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.1.0/go.mod h1:enkU5tq2HoXY+ZMiQprgF3Q83T3PbO77E83yXXzRZWE= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.2.0 h1:uxy31f/H1bkUV2aircA9hTQT8s093u1eOeErsOXIY90= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery 
v1.2.0/go.mod h1:wLLzEoPune3u08rkvNBm3BprebkWRmmCkMtTeujM3Fs= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.3.0 h1:A2aUh9d38A2ECh76ahOQUdpJFe+Jhjk8qrfV+YbNYGY= github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.3.0/go.mod h1:5h2rxfLN22pLTQ1ZoOza87rp2SnN/9UDYdYBQRmIrsE= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3 h1:VxFCgxsqWe7OThOwJ5IpFX3xrObtuIH9Hg/NW7oot1Y= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.3/go.mod h1:7gcsONBmFoCcKrAqrm95trrMd2+C/ReYKP7Vfu8yHHA= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.4.0 h1:/T5wKsw/po118HEDvnSE8YU7TESxvZbYM2rnn+Oi7Kk= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.4.0/go.mod h1:X5/JuOxPLU/ogICgDTtnpfaQzdQJO0yKDcpoxWLLJ8Y= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0 h1:qGZWS/WgiFY+Zgad2u0gwBHpJxz6Ne401JE7iQI1nKs= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0/go.mod h1:Mq6AEc+oEjCUlBuLiK5YwW4shSOAKCQ3tXN0sQeYoBA= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.7.0 h1:BR1dH17nltcO/bRr7sW+BTOY1OekCf3KadPBTHWPIY4= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.7.0/go.mod h1:IKVo9L4q2SkAfCZik0P9fdCZWppIE06l2ZfPktUNUXQ= github.com/aws/aws-sdk-go-v2/service/kinesis v1.8.0 h1:Cz26j4wGD1tJ2w/M8iLhaS81AkAGY3gEYRt0xQWjEIs= github.com/aws/aws-sdk-go-v2/service/kinesis v1.8.0/go.mod h1:QyNCg1xtWFJVL++i6ZyVcwXZCiKTNeXHH9zZu3NHOdU= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.3 h1:K2gCnGvAASpz+jqP9iyr+F/KNjmTYf8aWOtTQzhmZ5w= -github.com/aws/aws-sdk-go-v2/service/sso v1.3.3/go.mod h1:Jgw5O+SK7MZ2Yi9Yvzb4PggAPYaFSliiQuWR0hNjexk= -github.com/aws/aws-sdk-go-v2/service/sso v1.5.0 h1:VnrCAJTp1bDxU79UuW/D4z7bwZ7xOc7JjDKpqXL/m04= -github.com/aws/aws-sdk-go-v2/service/sso v1.5.0/go.mod h1:GsqaJOJeOfeYD88/2vHWKXegvDRofDqWwC5i48A2kgs= github.com/aws/aws-sdk-go-v2/service/sso v1.6.0 h1:JDgKIUZOmLFu/Rv6zXLrVTWCmzA0jcTdvsT8iFIKrAI= github.com/aws/aws-sdk-go-v2/service/sso v1.6.0/go.mod 
h1:Q/l0ON1annSU+mc0JybDy1Gy6dnJxIcWjphO6qJPzvM= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.2 h1:l504GWCoQi1Pk68vSUFGLmDIEMzRfVGNgLakDK+Uj58= -github.com/aws/aws-sdk-go-v2/service/sts v1.6.2/go.mod h1:RBhoMJB8yFToaCnbe0jNq5Dcdy0jp6LhHqg55rjClkM= -github.com/aws/aws-sdk-go-v2/service/sts v1.8.0 h1:7N7RsEVvUcvEg7jrWKU5AnSi4/6b6eY9+wG1g6W4ExE= -github.com/aws/aws-sdk-go-v2/service/sts v1.8.0/go.mod h1:dOlm91B439le5y1vtPCk5yJtbx3RdT3hRGYRY8TYKvQ= github.com/aws/aws-sdk-go-v2/service/sts v1.9.0 h1:rBLCnL8hQ7Sv1S4XCPYgTMI7Uhg81BkvzIiK+/of2zY= github.com/aws/aws-sdk-go-v2/service/sts v1.9.0/go.mod h1:jLKCFqS+1T4i7HDqCP9GM4Uk75YW1cS0o82LdxpMyOE= -github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/aws/smithy-go v1.8.1 h1:9Y6qxtzgEODaLNGN+oN2QvcHvKUe4jsH8w4M+8LXzGk= -github.com/aws/smithy-go v1.8.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/aws/smithy-go v1.9.0 h1:c7FUdEqrQA1/UVKKCNDFQPNKGp4FQg3YW4Ck5SLTG58= github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= -github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f h1:Pf0BjJDga7C98f0vhw+Ip5EaiE07S3lTKpIYPNS0nMo= -github.com/awslabs/kinesis-aggregation/go v0.0.0-20210630091500-54e17340d32f/go.mod h1:SghidfnxvX7ribW6nHI7T+IBbc9puZ9kk5Tx/88h8P4= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 
-github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= 
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -222,7 +119,6 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -242,7 
+138,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -252,15 +147,11 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof 
v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -268,60 +159,43 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
-github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f 
h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -350,12 +224,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/rs/zerolog v1.25.0 h1:Rj7XygbUHKUlDPcVdoLyR91fJBsduXj5fRxyqIQj/II= -github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI= github.com/rs/zerolog v1.26.0 h1:ORM4ibhEZeTeQlCojCK2kPz1ogAY4bGs4tD+SaAdGaE= github.com/rs/zerolog v1.26.0/go.mod h1:yBiM87lvSqX8h0Ww4sdzNSkVYZ8dL2xjZJG1lAuGZEo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -364,10 +234,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
-github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= -github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -376,15 +243,12 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0 h1:OtISOGfH6sOWa1/qXqqAiOIAO6Z5J3AEAE18WAq6BiQ= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= @@ -401,7 +265,6 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -412,10 +275,8 @@ golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -426,10 +287,8 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= @@ -437,7 +296,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -468,18 +326,13 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -490,7 +343,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -531,30 +383,19 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 h1:2B5p2L5IfGiD7+b9BOoRMC6DgObAVZV+Fsp050NqXik= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk= -golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211102061401-a2f17f7b995c h1:QOfDMdrf/UwlVR0UBq2Mpr58UzNtvgJRXA4BgPfFACs= -golang.org/x/sys v0.0.0-20211102061401-a2f17f7b995c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c h1:+8miTPjMCTXwih7BQmvWwd0PjdBZq2MKp/qQaahSzEM= 
-golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211106132015-ebca88c72f68 h1:Ywe/f3fNleF8I6F6qv3MeFoSZ6CTf2zBMMa/7qVML8M= golang.org/x/sys v0.0.0-20211106132015-ebca88c72f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42 h1:G2DDmludOQZoWbpCr7OKDxnl478ZBGMcOhrv+ooX/Q4= +golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -596,9 
+437,7 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -620,14 +459,12 @@ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 
h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -657,7 +494,6 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhKa1AAvY53xsvLB1cWorMjslvY3VA8= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -670,7 +506,6 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -686,13 
+521,11 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -713,11 +546,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod 
h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= From 7538535bffc48699518f5f5778f04352153090f3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Mon, 8 Nov 2021 22:36:18 +0100 Subject: [PATCH 81/90] remove debug code --- clientlibrary/checkpoint/dynamodb-checkpointer.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 4465f5a..091700b 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -140,9 +140,6 @@ func (checkpointer *DynamoCheckpoint) GetLease(shard *par.ShardStatus, newAssign var claimRequest string if checkpointer.kclConfig.EnableLeaseStealing { - if currentCheckpointClaimRequest, ok := currentCheckpoint[ClaimRequestKey]; ok { - fmt.Printf("aaaaaa %v", currentCheckpointClaimRequest) - } if currentCheckpointClaimRequest, ok := currentCheckpoint[ClaimRequestKey]; ok && currentCheckpointClaimRequest.(*types.AttributeValueMemberS).Value != "" { claimRequest = currentCheckpointClaimRequest.(*types.AttributeValueMemberS).Value From 6f0fbfe4c73070e7dbb2a10be589329124a8d5a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Thu, 11 Nov 2021 23:08:59 +0100 Subject: [PATCH 82/90] improve ci (remove hypermake, add makefile and github pipeline) --- .github/workflows/vmware-go-kcl-v2-ci.yml | 42 +++++++++ .gitignore | 3 +- HyperMake | 91 ------------------- Makefile | 59 ++++++++++++ _support/scripts/ci.sh | 105 ++++++++++++++++++++++ _support/scripts/sonar-scan.sh | 63 +++++++++++++ go.sum | 2 - support/scripts/check.sh | 75 ---------------- 
support/scripts/ci.sh | 7 -- support/scripts/functions.sh | 59 ------------ support/scripts/test.sh | 5 -- support/toolchain/HyperMake | 28 ------ support/toolchain/docker/Dockerfile | 8 -- 13 files changed, 271 insertions(+), 276 deletions(-) create mode 100755 .github/workflows/vmware-go-kcl-v2-ci.yml delete mode 100644 HyperMake create mode 100644 Makefile create mode 100755 _support/scripts/ci.sh create mode 100644 _support/scripts/sonar-scan.sh delete mode 100755 support/scripts/check.sh delete mode 100755 support/scripts/ci.sh delete mode 100644 support/scripts/functions.sh delete mode 100755 support/scripts/test.sh delete mode 100644 support/toolchain/HyperMake delete mode 100644 support/toolchain/docker/Dockerfile diff --git a/.github/workflows/vmware-go-kcl-v2-ci.yml b/.github/workflows/vmware-go-kcl-v2-ci.yml new file mode 100755 index 0000000..137e2f0 --- /dev/null +++ b/.github/workflows/vmware-go-kcl-v2-ci.yml @@ -0,0 +1,42 @@ +name: vmware-go-kcl-v2 + +on: + push: + branches: [ master ] + paths-ignore: [ README.md ] + pull_request: + branches: [ master ] + paths-ignore: [ README.md ] + +jobs: + build: + name: Build + runs-on: ubuntu-latest + steps: + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Build + shell: bash + run: | + make build + +# - name: Test +# shell: bash +# run: | +# make test + + - name: Format Check + shell: bash + run: | + make format-check + + - name: Lint + shell: bash + run: | + make lint + + - name: Scan + shell: bash + run: | + make scan \ No newline at end of file diff --git a/.gitignore b/.gitignore index a3b9045..e537833 100644 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,5 @@ *_mock_test.go filenames -.DS_Store \ No newline at end of file +.DS_Store +.scannerwork/ \ No newline at end of file diff --git a/HyperMake b/HyperMake deleted file mode 100644 index cffd91b..0000000 --- a/HyperMake +++ /dev/null @@ -1,91 +0,0 @@ ---- -format: hypermake.v0 - -name: cascade-kinesis-client 
-description: Kinesis Client in Go - -targets: - rebuild-toolchain: - description: build toolchain image - watches: - - support/toolchain/docker - build: support/toolchain/docker - - toolchain: - description: placeholder for additional toolchain dependencies - - deps: - description: download dependencies to local cache - after: - - toolchain - watches: - - go.mod - cmds: - - go mod download - - go mod vendor - - go mod tidy - - build: - description: build source code - after: - - 'build-*' - - test: - description: run unit tests - after: - - deps - - check - always: true - cmds: - - ./support/scripts/test.sh - - ci: - description: run CI tests - after: - - deps - cmds: - - ./support/scripts/ci.sh - - checkfmt: - description: check code format - after: - - toolchain - watches: - - support/scripts/check.sh - always: true - cmds: - - ./support/scripts/check.sh fmt - - lint: - description: run lint to check code - after: - - toolchain - watches: - - support/scripts/check.sh - always: true - cmds: - - ./support/scripts/check.sh lint - - scanast: - description: run Go AST security scan - after: - - toolchain - watches: - - '**/**/*.go' - - './support/scripts/check.sh' - cmds: - - ./support/scripts/check.sh scanast - - check: - description: run all code checks - after: - - checkfmt - - lint - - scanast - -settings: - default-targets: - - test - docker: - image: 'vmware/go-kcl-toolchain:0.1.4' - src-volume: /go/src/github.com/vmware/vmware-go-kcl diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..78026f1 --- /dev/null +++ b/Makefile @@ -0,0 +1,59 @@ +.PHONY: help +help: ## - Show this help message + @printf "\033[32m\xE2\x9c\x93 usage: make [target]\n\n\033[0m" + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +.PHONY: up +up: ## - start docker compose + @ cd _support/docker && docker-compose -f docker-compose.yml up + +.PHONY: build-common +build-common: ## - 
execute build common tasks clean and mod tidy + @ go version + @ go clean + @ go mod download && go mod tidy + @ go mod verify + +.PHONY: build +build: build-common ## - build a debug binary to the current platform (windows, linux or darwin(mac)) + @ echo building + @ go build -v ./... + @ echo "done" + +.PHONY: format-check +format-check: ## - check files format using gofmt + @ ./_support/scripts/ci.sh fmtcheck + +.PHONY: format-check +format: ## - apply golang file format using gofmt + @ ./_support/scripts/ci.sh format + +.PHONY: test +test: build-common ## - execute go test command for unit and mocked tests + @ ./_support/scripts/ci.sh unittest + +.PHONY: integration-test +integration-test: ## - execute go test command for integration tests (aws credentials needed) + @ go test -v -cover -race ./test + +.PHONY: scan +scan: ## - execute static code analysis + @ ./_support/scripts/ci.sh scan + +.PHONY: lint +lint: ## - runs golangci-lint + @ ./_support/scripts/ci.sh lint + +.PHONY: ci-lint-docker +lint-docker: ## - runs golangci-lint with docker container + @ docker run --rm -v "$(shell pwd)":/app -w /app ${LINT_IMAGE} golangci-lint run ${LINT_FLAGS} + +.PHONY: sonar-scan +sonar-scan: ## - start sonar qube locally with docker (you will need docker installed in your machine) + @ # after start, setup a new project with the name sms-local and a new token sms-token, fill the token against the -Dsonar.login= parameter. 
+ @ # login with user: admin pwd: vmware + @ $(SHELL) _support/scripts/sonar-scan.sh + +.PHONY: sonar-stop +sonar-stop: ## - stop sonar qube docker container + @ docker stop sonarqube diff --git a/_support/scripts/ci.sh b/_support/scripts/ci.sh new file mode 100755 index 0000000..e53d247 --- /dev/null +++ b/_support/scripts/ci.sh @@ -0,0 +1,105 @@ +#!/usr/bin/env bash + +function local_go_pkgs() { + find './clientlibrary' -name '*.go' | \ + grep -Fv '/vendor/' | \ + grep -Fv '/go/' | \ + grep -Fv '/gen/' | \ + grep -Fv '/tmp/' | \ + grep -Fv '/run/' | \ + grep -Fv '/tests/' | \ + sed -r 's|(.+)/[^/]+\.go$|\1|g' | \ + sort -u +} + +function checkfmt() { + local files="" + files="$(find . -type f -iname "*.go" -exec gofmt -l {} \;)" + + if [ -n "$files" ]; then + echo "You need to run \"gofmt -w ./\" to fix your formatting." + echo "$files" >&2 + return 1 + fi +} + +function go_format() { + echo "go formatting..." + gofmt -w ./ + echo "done" +} + +function lint() { + # golangci-lint run --enable-all -D forbidigo -D gochecknoglobals -D gofumpt -D gofmt -D nlreturn + golangci-lint run \ + --skip-files=_mock.go \ + --disable=golint \ + --skip-dirs=test \ + --fast \ + --timeout=600s \ + --verbose \ + "$(local_go_pkgs)" +} + +function test() { + go list ./... | grep -v /test | \ + xargs -L 1 -I% bash -c 'echo -e "\n**************** Package: % ****************" && go test % -v -cover -race ./...' +} + +function scanast() { + gosec version + gosec ./... > security.log 2>&1 + + local issues="" + issues=$(grep -c 'Severity: MEDIUM' security.log | grep -v deaggregator | grep -c _) + if [ -n "$issues" ] && [ "$issues" -gt 0 ]; then + echo "" + echo "Medium Severity Issues:" + grep -e "Severity: MEDIUM" -A 1 security.log + echo "$issues" "medium severity issues found." 
+ fi + + local issues="" + local issues_count="" + issues="$(grep -E 'Severity: HIGH' security.log | grep -v vendor)" + issues_count="$(grep -E 'Severity: HIGH' security.log | grep -v vendor | grep -c _)" + if [ -n "$issues_count" ] && [ "$issues_count" -gt 0 ]; then + echo "" + echo "High Severity Issues:" + grep -E "Severity: HIGH" -A 1 security.log + echo "$issues_count" "high severity issues found." + echo "$issues" + echo "You need to resolve the high severity issues at the least." + exit 1 + fi + + local issues="" + local issues_count="" + issues="$(grep -E 'Errors unhandled' security.log | grep -v vendor | grep -v /src/go/src)" + issues_count="$(grep -E 'Errors unhandled' security.log | grep -v vendor | grep -v /src/go/src | grep -c _)" + if [ -n "$issues_count" ] && [ "$issues_count" -gt 0 ]; then + echo "" + echo "Unhandled errors:" + grep -E "Errors unhandled" security.log + echo "$issues_count" "unhandled errors, please indicate with the right comment that this case is ok, or handle the error." + echo "$issues" + echo "You need to resolve the all unhandled errors." + exit 1 + fi + + rm -f security.log +} + +function usage() { + echo "check.sh fmt|lint" >&2 + exit 2 +} + +case "$1" in + fmtcheck) checkfmt ;; + format) go_format ;; + lint) lint ;; + unittest) test ;; + scan) scanast ;; + *) usage ;; +esac diff --git a/_support/scripts/sonar-scan.sh b/_support/scripts/sonar-scan.sh new file mode 100644 index 0000000..4fcb34a --- /dev/null +++ b/_support/scripts/sonar-scan.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +######################## +# requirements: # +# 0. docker # +# 1. wget # +# 2. curl # +# 3. jq # +# 4. 
sonar-scanner # +######################## + +set -e + +projectKey="vmware-go-kcl-v2" +user_tokenName="local_token" +username="admin" +user_password="admin" +new_password="vmware" +url="http://localhost" +port="9000" + +if [[ "$( docker container inspect -f '{{.State.Running}}' sonarqube )" == "true" ]]; +then + docker ps +else + docker run --rm -d --name sonarqube -e SONAR_ES_BOOTSTRAP_CHECKS_DISABLE=true -p 9000:9000 sonarqube +fi + +echo "waiting for sonarqube starts..." +wget -q -O - "$@" http://localhost:9000 | awk '/STARTING/{ print $0 }' | xargs + +STATUS="$(wget -q -O - "$@" http://localhost:9000 | awk '/UP/{ print $0 }')" +while [ -z "$STATUS" ] +do + sleep 2 + STATUS="$(wget -q -O - "$@" http://localhost:9000 | awk '/UP/{ print $0 }')" + printf "." +done + +printf '\n %s' "${STATUS}" | xargs +echo "" + +# change the default password to avoid create a new one when login for the very first time +curl -u ${username}:${user_password} -X POST "${url}:${port}/api/users/change_password?login=${username}&previousPassword=${user_password}&password=${new_password}" + +# search the specific user tokens for SonarQube +hasToken=$(curl --silent -u ${username}:${new_password} -X GET "${url}:${port}/api/user_tokens/search") +if [[ -n "${hasToken}" ]]; then + # Revoke the user token for SonarQube + curl -X POST -H "Content-Type: application/x-www-form-urlencoded" -d "name=${user_tokenName}" -u ${username}:${new_password} "${url}:${port}"/api/user_tokens/revoke +fi + +# generate new token +token=$(curl --silent -X POST -H "Content-Type: application/x-www-form-urlencoded" -d "name=${user_tokenName}" -u ${username}:${new_password} "${url}:${port}"/api/user_tokens/generate | jq '.token' | xargs) + +# scan and push the results to localhost docker container +sonar-scanner -Dsonar.projectKey="${projectKey}" \ + -Dsonar.projectName="${projectKey}" \ + -Dsonar.sources=. 
\ + -Dsonar.exclusions="internal/records/**, test/**" \ + -Dsonar.host.url="${url}:${port}" \ + -Dsonar.login="${token}" + diff --git a/go.sum b/go.sum index 9a7eb1b..6030020 100644 --- a/go.sum +++ b/go.sum @@ -383,8 +383,6 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211106132015-ebca88c72f68 h1:Ywe/f3fNleF8I6F6qv3MeFoSZ6CTf2zBMMa/7qVML8M= -golang.org/x/sys v0.0.0-20211106132015-ebca88c72f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42 h1:G2DDmludOQZoWbpCr7OKDxnl478ZBGMcOhrv+ooX/Q4= golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/support/scripts/check.sh b/support/scripts/check.sh deleted file mode 100755 index eb87f6b..0000000 --- a/support/scripts/check.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env bash - -. support/scripts/functions.sh - -checkfmt() { - local files="$(gofmt -l $(local_go_pkgs))" - if [ -n "$files" ]; then - echo "You need to run \"gofmt -w ./\" to fix your formating." - echo "$files" >&2 - return 1 - fi -} - -lint() { - golangci-lint run \ - --skip-files=_mock.go \ - --disable=golint \ - --skip-dirs=test \ - --fast \ - --timeout=600s \ - --verbose \ - $(local_go_pkgs) -} - -scanast() { - set +e - gosec version - gosec ./... 
> security.log 2>&1 - set -e - - local issues="$(grep -E 'Severity: MEDIUM' security.log | wc -l)" - if [ -n $issues ] && [ $issues -gt 0 ]; then - echo "" - echo "Medium Severity Issues:" - grep -E "Severity: MEDIUM" -A 1 security.log - echo $issues "medium severity issues found." - fi - - local issues="$(grep -E 'Severity: HIGH' security.log | grep -v vendor)" - local issues_count="$(grep -E 'Severity: HIGH' security.log | grep -v vendor | wc -l)" - if [ -n $issues_count ] && [ $issues_count -gt 0 ]; then - echo "" - echo "High Severity Issues:" - grep -E "Severity: HIGH" -A 1 security.log - echo $issues_count "high severity issues found." - echo $issues - echo "You need to resolve the high severity issues at the least." - exit 1 - fi - - local issues="$(grep -E 'Errors unhandled' security.log | grep -v vendor | grep -v /src/go/src)" - local issues_count="$(grep -E 'Errors unhandled' security.log | grep -v vendor | grep -v /src/go/src | wc -l)" - if [ -n $issues_count ] && [ $issues_count -gt 0 ]; then - echo "" - echo "Unhandled errors:" - grep -E "Errors unhandled" security.log - echo $issues_count "unhandled errors, please indicate with the right comment that this case is ok, or handle the error." - echo $issues - echo "You need to resolve the all unhandled errors." - exit 1 - fi - rm security.log -} - -usage() { - echo "check.sh fmt|lint" >&2 - exit 2 -} - -case "$1" in - fmt) checkfmt ;; - lint) lint ;; - scanast) scanast;; - *) usage ;; -esac diff --git a/support/scripts/ci.sh b/support/scripts/ci.sh deleted file mode 100755 index 420bde0..0000000 --- a/support/scripts/ci.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -# Run only the integration tests -# go test -race ./test -echo "Warning: Cannot find a good way to inject AWS credential to hmake container" -echo "Don't use hmake ci. 
Use the following command directly" -echo "go test -race ./test" diff --git a/support/scripts/functions.sh b/support/scripts/functions.sh deleted file mode 100644 index 489de81..0000000 --- a/support/scripts/functions.sh +++ /dev/null @@ -1,59 +0,0 @@ -set -ex - -# PROJ_ROOT specifies the project root -export PROJ_ROOT="$HMAKE_PROJECT_DIR" - -# Add /go in GOPATH because that's the original GOPATH in toolchain -export GOPATH=/go:$PROJ_ROOT - -local_go_pkgs() { - find './clientlibrary/' -name '*.go' | \ - grep -Fv '/vendor/' | \ - grep -Fv '/go/' | \ - grep -Fv '/gen/' | \ - grep -Fv '/tmp/' | \ - grep -Fv '/run/' | \ - grep -Fv '/tests/' | \ - sed -r 's|(.+)/[^/]+\.go$|\1|g' | \ - sort -u -} - -version_suffix() { - local suffix=$(git log -1 --format=%h 2>/dev/null || true) - if [ -n "$suffix" ]; then - test -z "$(git status --porcelain 2>/dev/null || true)" || suffix="${suffix}+" - echo -n "-g${suffix}" - else - echo -n -dev - fi -} - -git_commit_hash() { - echo $(git rev-parse --short HEAD) -} - -# Due to Go plugin genhash algorithm simply takes full source path -# from archive, it generates different plugin hash if source path of -# shared pkg is different, and causes load failure. -# as a workaround, lookup shared pkg and place it to fixed path -FIX_GOPATH=/tmp/go - -fix_go_pkg() { - local pkg="$1" base - for p in ${GOPATH//:/ }; do - if [ -d "$p/src/$pkg" ]; then - base="$p" - break - fi - done - - if [ -z "$base" ]; then - echo "Package $pkg not found in GOPATH: $GOPATH" >&2 - return 1 - fi - - local fix_pkg_path="$FIX_GOPATH/src/$pkg" - rm -f "$fix_pkg_path" - mkdir -p "$(dirname $fix_pkg_path)" - ln -s "$base/src/$pkg" "$fix_pkg_path" -} diff --git a/support/scripts/test.sh b/support/scripts/test.sh deleted file mode 100755 index ee8226e..0000000 --- a/support/scripts/test.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -. 
support/scripts/functions.sh - -# Run only the unit tests and not integration tests -go test -cover -race $(local_go_pkgs) diff --git a/support/toolchain/HyperMake b/support/toolchain/HyperMake deleted file mode 100644 index c294e5a..0000000 --- a/support/toolchain/HyperMake +++ /dev/null @@ -1,28 +0,0 @@ ---- -format: hypermake.v0 - -name: go-kcl -description: VMWare Go-KCL Amazon Kinesis Client Library in Go - -targets: - rebuild-toolchain: - description: build toolchain image - watches: - - docker - build: docker - cache: false - tags: - - vmware/go-kcl-toolchain:latest - - push-toolchain: - description: push toolchain image - after: - - rebuild-toolchain - push: - - vmware/go-kcl-toolchain:latest - -settings: - default-targets: - - rebuild-toolchain - docker: - image: 'vmware/go-kcl-toolchain:0.1.4' diff --git a/support/toolchain/docker/Dockerfile b/support/toolchain/docker/Dockerfile deleted file mode 100644 index 47a5d12..0000000 --- a/support/toolchain/docker/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM golang:1.17 -ENV PATH /go/bin:/src/bin:/root/go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go:/src -RUN go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.42.1 && \ - go install golang.org/x/tools/cmd/...@latest && \ - go install github.com/go-delve/delve/cmd/dlv@latest && \ - curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s v2.8.1 && \ - chmod -R a+rw /go From f9ced84cbdad09f80f7a77b6a7795af8f66f1ac7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Thu, 11 Nov 2021 23:11:30 +0100 Subject: [PATCH 83/90] improve gofmt --- .../checkpoint/dynamodb-checkpointer.go | 2 +- clientlibrary/config/kcl-config.go | 87 +++++++++---------- internal/deaggregator/deaggregator.go | 2 +- internal/deaggregator/deaggregator_test.go | 4 +- internal/records/records.pb.go | 2 +- test/lease_stealing_util_test.go | 2 +- test/record_publisher_test.go | 14 +-- 7 files changed, 56 insertions(+), 57 deletions(-) 
diff --git a/clientlibrary/checkpoint/dynamodb-checkpointer.go b/clientlibrary/checkpoint/dynamodb-checkpointer.go index 091700b..dc7e8cd 100644 --- a/clientlibrary/checkpoint/dynamodb-checkpointer.go +++ b/clientlibrary/checkpoint/dynamodb-checkpointer.go @@ -61,7 +61,7 @@ type DynamoCheckpoint struct { LeaseDuration int svc *dynamodb.Client - kclConfig *config.KinesisClientLibConfiguration + kclConfig *config.KinesisClientLibConfiguration Retries int lastLeaseSync time.Time } diff --git a/clientlibrary/config/kcl-config.go b/clientlibrary/config/kcl-config.go index 45a6a2a..ae4935c 100644 --- a/clientlibrary/config/kcl-config.go +++ b/clientlibrary/config/kcl-config.go @@ -70,31 +70,31 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regio } // populate the KCL configuration with default values - return &KinesisClientLibConfiguration { - ApplicationName: applicationName, - KinesisCredentials: kinesisCreds, - DynamoDBCredentials: dynamodbCreds, - TableName: applicationName, - EnhancedFanOutConsumerName: applicationName, - StreamName: streamName, - RegionName: regionName, - WorkerID: workerID, - InitialPositionInStream: DefaultInitialPositionInStream, - InitialPositionInStreamExtended: *newInitialPosition(DefaultInitialPositionInStream), - FailoverTimeMillis: DefaultFailoverTimeMillis, - LeaseRefreshPeriodMillis: DefaultLeaseRefreshPeriodMillis, - MaxRecords: DefaultMaxRecords, - IdleTimeBetweenReadsInMillis: DefaultIdleTimeBetweenReadsMillis, - CallProcessRecordsEvenForEmptyRecordList: DefaultDontCallProcessRecordsForEmptyRecordList, - ParentShardPollIntervalMillis: DefaultParentShardPollIntervalMillis, - ShardSyncIntervalMillis: DefaultShardSyncIntervalMillis, - CleanupTerminatedShardsBeforeExpiry: DefaultCleanupLeasesUponShardsCompletion, - TaskBackoffTimeMillis: DefaultTaskBackoffTimeMillis, - ValidateSequenceNumberBeforeCheckpointing: DefaultValidateSequenceNumberBeforeCheckpointing, - ShutdownGraceMillis: 
DefaultShutdownGraceMillis, - MaxLeasesForWorker: DefaultMaxLeasesForWorker, - MaxLeasesToStealAtOneTime: DefaultMaxLeasesToStealAtOneTime, - InitialLeaseTableReadCapacity: DefaultInitialLeaseTableReadCapacity, + return &KinesisClientLibConfiguration{ + ApplicationName: applicationName, + KinesisCredentials: kinesisCreds, + DynamoDBCredentials: dynamodbCreds, + TableName: applicationName, + EnhancedFanOutConsumerName: applicationName, + StreamName: streamName, + RegionName: regionName, + WorkerID: workerID, + InitialPositionInStream: DefaultInitialPositionInStream, + InitialPositionInStreamExtended: *newInitialPosition(DefaultInitialPositionInStream), + FailoverTimeMillis: DefaultFailoverTimeMillis, + LeaseRefreshPeriodMillis: DefaultLeaseRefreshPeriodMillis, + MaxRecords: DefaultMaxRecords, + IdleTimeBetweenReadsInMillis: DefaultIdleTimeBetweenReadsMillis, + CallProcessRecordsEvenForEmptyRecordList: DefaultDontCallProcessRecordsForEmptyRecordList, + ParentShardPollIntervalMillis: DefaultParentShardPollIntervalMillis, + ShardSyncIntervalMillis: DefaultShardSyncIntervalMillis, + CleanupTerminatedShardsBeforeExpiry: DefaultCleanupLeasesUponShardsCompletion, + TaskBackoffTimeMillis: DefaultTaskBackoffTimeMillis, + ValidateSequenceNumberBeforeCheckpointing: DefaultValidateSequenceNumberBeforeCheckpointing, + ShutdownGraceMillis: DefaultShutdownGraceMillis, + MaxLeasesForWorker: DefaultMaxLeasesForWorker, + MaxLeasesToStealAtOneTime: DefaultMaxLeasesToStealAtOneTime, + InitialLeaseTableReadCapacity: DefaultInitialLeaseTableReadCapacity, InitialLeaseTableWriteCapacity: DefaultInitialLeaseTableWriteCapacity, SkipShardSyncAtWorkerInitializationIfLeasesExist: DefaultSkipShardSyncAtStartupIfLeasesExist, EnableLeaseStealing: DefaultEnableLeaseStealing, @@ -167,25 +167,24 @@ func (c *KinesisClientLibConfiguration) WithMaxLeasesForWorker(n int) *KinesisCl return c } -/* WithIdleTimeBetweenReadsInMillis - Controls how long the KCL will sleep if no records are returned from 
Kinesis - -

- This value is only used when no records are returned; if records are returned, the {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ProcessTask} will - immediately retrieve the next set of records after the call to - {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#processRecords(ProcessRecordsInput)} - has returned. Setting this value to high may result in the KCL being unable to catch up. If you are changing this - value it's recommended that you enable {@link #withCallProcessRecordsEvenForEmptyRecordList(boolean)}, and - monitor how far behind the records retrieved are by inspecting - {@link com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput#getMillisBehindLatest()}, and the - CloudWatch - Metric: GetRecords.MillisBehindLatest -

- - @param IdleTimeBetweenReadsInMillis: how long to sleep between GetRecords calls when no records are returned. - @return KinesisClientLibConfiguration - */ +// WithIdleTimeBetweenReadsInMillis +// Controls how long the KCL will sleep if no records are returned from Kinesis +// +//

+// This value is only used when no records are returned; if records are returned, the {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ProcessTask} will +// immediately retrieve the next set of records after the call to +// {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#processRecords(ProcessRecordsInput)} +// has returned. Setting this value to high may result in the KCL being unable to catch up. If you are changing this +// value it's recommended that you enable {@link #withCallProcessRecordsEvenForEmptyRecordList(boolean)}, and +// monitor how far behind the records retrieved are by inspecting +// {@link com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput#getMillisBehindLatest()}, and the +// CloudWatch +// Metric: GetRecords.MillisBehindLatest +//

+// +// @param IdleTimeBetweenReadsInMillis: how long to sleep between GetRecords calls when no records are returned. +// @return KinesisClientLibConfiguration func (c *KinesisClientLibConfiguration) WithIdleTimeBetweenReadsInMillis(idleTimeBetweenReadsInMillis int) *KinesisClientLibConfiguration { checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis) c.IdleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis diff --git a/internal/deaggregator/deaggregator.go b/internal/deaggregator/deaggregator.go index 6aa8905..91a5ad5 100644 --- a/internal/deaggregator/deaggregator.go +++ b/internal/deaggregator/deaggregator.go @@ -93,4 +93,4 @@ func createUserRecord(partitionKeys []string, aggRec *rec.Record, record types.R PartitionKey: &partitionKey, SequenceNumber: record.SequenceNumber, } -} \ No newline at end of file +} diff --git a/internal/deaggregator/deaggregator_test.go b/internal/deaggregator/deaggregator_test.go index 5bc8be6..a933baa 100644 --- a/internal/deaggregator/deaggregator_test.go +++ b/internal/deaggregator/deaggregator_test.go @@ -61,7 +61,7 @@ func generateKinesisRecord(data []byte) types.Record { encryptionType := types.EncryptionTypeNone partitionKey := "1234" sequenceNumber := "21269319989900637946712965403778482371" - return types.Record { + return types.Record{ ApproximateArrivalTimestamp: ¤tTime, Data: data, EncryptionType: encryptionType, @@ -199,4 +199,4 @@ func TestRecordWithMismatchMd5SumReturnsSingleRecord(t *testing.T) { // A byte record with an MD5 sum that does not match with the md5.Sum(record) // will be marked as a non-aggregate record and return a single record assert.Equal(t, 1, len(dars), "Mismatch md5 sum test should return length of 1.") -} \ No newline at end of file +} diff --git a/internal/records/records.pb.go b/internal/records/records.pb.go index 689a1c8..89abba5 100644 --- a/internal/records/records.pb.go +++ b/internal/records/records.pb.go @@ -212,4 +212,4 @@ var 
fileDescriptor_6ae0159314830e16 = []byte{ 0x20, 0x1a, 0x0c, 0xa8, 0x78, 0xbe, 0xd0, 0xe7, 0x26, 0xcd, 0x52, 0x81, 0x08, 0x68, 0x47, 0x25, 0x92, 0x94, 0x28, 0x2b, 0xb4, 0x26, 0x6d, 0x3e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x87, 0x3e, 0x63, 0x69, 0x7d, 0x01, 0x00, 0x00, -} \ No newline at end of file +} diff --git a/test/lease_stealing_util_test.go b/test/lease_stealing_util_test.go index aab57ae..cbd01aa 100644 --- a/test/lease_stealing_util_test.go +++ b/test/lease_stealing_util_test.go @@ -20,7 +20,7 @@ import ( type LeaseStealingTest struct { t *testing.T - config *TestClusterConfig + config *TestClusterConfig cluster *TestCluster kc *kinesis.Client dc *dynamodb.Client diff --git a/test/record_publisher_test.go b/test/record_publisher_test.go index 85017a3..5bd061a 100644 --- a/test/record_publisher_test.go +++ b/test/record_publisher_test.go @@ -47,7 +47,7 @@ func NewKinesisClient(t *testing.T, regionName, endpoint string, creds *credenti t.Logf("Creating Kinesis client") resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { - return aws.Endpoint { + return aws.Endpoint{ PartitionID: "aws", URL: endpoint, SigningRegion: regionName, @@ -73,13 +73,13 @@ func NewKinesisClient(t *testing.T, regionName, endpoint string, creds *credenti t.Fatalf("Failed in loading Kinesis default config for creating Worker: %+v", err) } - return kinesis.NewFromConfig(cfg) + return kinesis.NewFromConfig(cfg) } // NewDynamoDBClient to create a Kinesis Client. 
func NewDynamoDBClient(t *testing.T, regionName, endpoint string, creds *credentials.StaticCredentialsProvider) *dynamodb.Client { resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) { - return aws.Endpoint { + return aws.Endpoint{ PartitionID: "aws", URL: endpoint, SigningRegion: regionName, @@ -111,7 +111,7 @@ func continuouslyPublishSomeData(t *testing.T, kc *kinesis.Client) func() { var shards []types.Shard var nextToken *string for { - out, err := kc.ListShards(context.TODO(), &kinesis.ListShardsInput { + out, err := kc.ListShards(context.TODO(), &kinesis.ListShardsInput{ StreamName: aws.String(streamName), NextToken: nextToken, }) @@ -185,7 +185,7 @@ func publishSomeData(t *testing.T, kc *kinesis.Client) { // publishRecord to put a record into Kinesis stream using PutRecord API. func publishRecord(t *testing.T, kc *kinesis.Client, hashKey *string) { - input := &kinesis.PutRecordInput { + input := &kinesis.PutRecordInput{ Data: []byte(specstr), StreamName: aws.String(streamName), PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), @@ -207,7 +207,7 @@ func publishRecords(t *testing.T, kc *kinesis.Client) { records := make([]types.PutRecordsRequestEntry, 5) for i := 0; i < 5; i++ { - record := types.PutRecordsRequestEntry { + record := types.PutRecordsRequestEntry{ Data: []byte(specstr), PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), } @@ -228,7 +228,7 @@ func publishRecords(t *testing.T, kc *kinesis.Client) { func publishAggregateRecord(t *testing.T, kc *kinesis.Client) { data := generateAggregateRecord(5, specstr) // Use random string as partition key to ensure even distribution across shards - _, err := kc.PutRecord(context.TODO(), &kinesis.PutRecordInput { + _, err := kc.PutRecord(context.TODO(), &kinesis.PutRecordInput{ Data: data, StreamName: aws.String(streamName), PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)), From 7abd86ac6e7d339690f7367ab32381d463812530 Mon Sep 17 00:00:00 2001 
From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Thu, 11 Nov 2021 23:24:51 +0100 Subject: [PATCH 84/90] add more branches --- .github/workflows/vmware-go-kcl-v2-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/vmware-go-kcl-v2-ci.yml b/.github/workflows/vmware-go-kcl-v2-ci.yml index 137e2f0..7612d20 100755 --- a/.github/workflows/vmware-go-kcl-v2-ci.yml +++ b/.github/workflows/vmware-go-kcl-v2-ci.yml @@ -2,7 +2,7 @@ name: vmware-go-kcl-v2 on: push: - branches: [ master ] + branches: [ master, upgrade/aws-sdk-to-v2 ] paths-ignore: [ README.md ] pull_request: branches: [ master ] From b20dd33ae25c9471ef7b585632dec4de6641dc80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Thu, 11 Nov 2021 23:30:28 +0100 Subject: [PATCH 85/90] upgrade packages --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5ec001c..d6133f7 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/prometheus/procfs v0.7.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.7.0 // indirect - golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42 // indirect + golang.org/x/sys v0.0.0-20211111213525-f221eed1c01e // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) diff --git a/go.sum b/go.sum index 6030020..a170946 100644 --- a/go.sum +++ b/go.sum @@ -383,8 +383,8 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42 h1:G2DDmludOQZoWbpCr7OKDxnl478ZBGMcOhrv+ooX/Q4= 
-golang.org/x/sys v0.0.0-20211107104306-e0b2ad06fe42/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211111213525-f221eed1c01e h1:zeJt6jBtVDK23XK9QXcmG0FvO0elikp0dYZQZOeL1y0= +golang.org/x/sys v0.0.0-20211111213525-f221eed1c01e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 7b0c198c0f21d9042b204fd83ade39576f964f68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Thu, 11 Nov 2021 23:31:05 +0100 Subject: [PATCH 86/90] setup go 1.17 --- .github/workflows/vmware-go-kcl-v2-ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/vmware-go-kcl-v2-ci.yml b/.github/workflows/vmware-go-kcl-v2-ci.yml index 7612d20..5f2b6a9 100755 --- a/.github/workflows/vmware-go-kcl-v2-ci.yml +++ b/.github/workflows/vmware-go-kcl-v2-ci.yml @@ -16,6 +16,12 @@ jobs: - name: Check out code into the Go module directory uses: actions/checkout@v2 + - name: Set up Go 1.17.x + uses: actions/setup-go@v2 + with: + go-version: ^1.17 + id: go + - name: Build shell: bash run: | From fdbaa68af74f6e22e25addde85a07cebad968654 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Thu, 11 Nov 2021 23:33:44 +0100 Subject: [PATCH 87/90] fix format --- test/worker_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/worker_test.go b/test/worker_test.go index 680a7c6..935ed45 100644 --- a/test/worker_test.go +++ b/test/worker_test.go @@ -40,13 +40,13 @@ import ( ) const ( - appName = "appName" - streamName = "kcl-test" - regionName = "us-west-2" - workerID = "test-worker" - consumerName = "enhanced-fan-out-consumer" + appName = "appName" + streamName = "kcl-test" + 
regionName = "us-west-2" + workerID = "test-worker" + consumerName = "enhanced-fan-out-consumer" kinesisEndpoint = "https://kinesis.eu-west-1.amazonaws.com" - dynamoEndpoint = "https://dynamodb.eu-west-1.amazonaws.com" + dynamoEndpoint = "https://dynamodb.eu-west-1.amazonaws.com" ) const metricsSystem = "cloudwatch" From 0906af7baf2eb4871b15b474363cc8439ca11767 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Fri, 12 Nov 2021 01:07:08 +0100 Subject: [PATCH 88/90] improve lint --- .github/workflows/vmware-go-kcl-v2-ci.yml | 2 +- Makefile | 4 ++-- _support/scripts/ci.sh | 28 ++++++++++++++++------- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/.github/workflows/vmware-go-kcl-v2-ci.yml b/.github/workflows/vmware-go-kcl-v2-ci.yml index 5f2b6a9..bcdd40e 100755 --- a/.github/workflows/vmware-go-kcl-v2-ci.yml +++ b/.github/workflows/vmware-go-kcl-v2-ci.yml @@ -40,7 +40,7 @@ jobs: - name: Lint shell: bash run: | - make lint + make lint-docker - name: Scan shell: bash diff --git a/Makefile b/Makefile index 78026f1..0823e84 100644 --- a/Makefile +++ b/Makefile @@ -44,9 +44,9 @@ scan: ## - execute static code analysis lint: ## - runs golangci-lint @ ./_support/scripts/ci.sh lint -.PHONY: ci-lint-docker +.PHONY: lint-docker lint-docker: ## - runs golangci-lint with docker container - @ docker run --rm -v "$(shell pwd)":/app -w /app ${LINT_IMAGE} golangci-lint run ${LINT_FLAGS} + @ ./_support/scripts/ci.sh lintDocker .PHONY: sonar-scan sonar-scan: ## - start sonar qube locally with docker (you will need docker installed in your machine) diff --git a/_support/scripts/ci.sh b/_support/scripts/ci.sh index e53d247..beaba8a 100755 --- a/_support/scripts/ci.sh +++ b/_support/scripts/ci.sh @@ -31,14 +31,25 @@ function go_format() { function lint() { # golangci-lint run --enable-all -D forbidigo -D gochecknoglobals -D gofumpt -D gofmt -D nlreturn - golangci-lint run \ - --skip-files=_mock.go \ - --disable=golint \ - --skip-dirs=test \ - --fast \ 
- --timeout=600s \ - --verbose \ - "$(local_go_pkgs)" + + golangci-lint run \ + --skip-files=_mock.go \ + --skip-dirs=test \ + --skip-dirs=internal \ + --timeout=600s \ + --verbose +} + +function lintDocker() { + lintVersion="1.41.1" + lintImage="golangci/golangci-lint:v$lintVersion-alpine" + + docker run --rm -v "${PWD}":/app -w /app "$lintImage" golangci-lint run \ + --skip-files=_mock.go \ + --skip-dirs=test \ + --skip-dirs=internal \ + --timeout=600s \ + --verbose } function test() { @@ -99,6 +110,7 @@ case "$1" in fmtcheck) checkfmt ;; format) go_format ;; lint) lint ;; + lintDocker) lintDocker ;; unittest) test ;; scan) scanast ;; *) usage ;; From eaf26900ef279e7eb55a531ff82b67be1e4ed670 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Fri, 12 Nov 2021 02:02:50 +0100 Subject: [PATCH 89/90] add security scan --- .github/workflows/vmware-go-kcl-v2-ci.yml | 78 ++++++++++++++--------- .gitignore | 3 +- Makefile | 4 ++ _support/scripts/ci.sh | 21 ++++-- 4 files changed, 71 insertions(+), 35 deletions(-) diff --git a/.github/workflows/vmware-go-kcl-v2-ci.yml b/.github/workflows/vmware-go-kcl-v2-ci.yml index bcdd40e..3c5a8d3 100755 --- a/.github/workflows/vmware-go-kcl-v2-ci.yml +++ b/.github/workflows/vmware-go-kcl-v2-ci.yml @@ -7,42 +7,62 @@ on: pull_request: branches: [ master ] paths-ignore: [ README.md ] - + jobs: build: - name: Build + name: Continous Integration runs-on: ubuntu-latest steps: - - name: Check out code into the Go module directory - uses: actions/checkout@v2 + - name: Check out code into the Go module directory + uses: actions/checkout@v2 - - name: Set up Go 1.17.x - uses: actions/setup-go@v2 - with: - go-version: ^1.17 - id: go + - name: Set up Go 1.17.x + uses: actions/setup-go@v2 + with: + go-version: ^1.17 + id: go - - name: Build - shell: bash - run: | - make build + - name: Build + shell: bash + run: | + make build -# - name: Test -# shell: bash -# run: | -# make test + # - name: Test + # shell: bash + # run: | + # 
make test - - name: Format Check - shell: bash - run: | - make format-check + scans: + name: Checks, Lints and Scans + runs-on: ubuntu-latest + steps: + - name: Check out code into the Go module directory + uses: actions/checkout@v2 - - name: Lint - shell: bash - run: | - make lint-docker + - name: Set up Go 1.17.x + uses: actions/setup-go@v2 + with: + go-version: ^1.17 + id: go - - name: Scan - shell: bash - run: | - make scan \ No newline at end of file + - name: Format Check + shell: bash + run: | + make format-check + + - name: Lint + shell: bash + run: | + make lint-docker + + - name: Run Gosec Security Scanner + uses: securego/gosec@master + with: + # let the report trigger content trigger a failure using the GitHub Security features. + args: '-no-fail -fmt sarif -out results.sarif -exclude-dir internal -exclude-dir vendor -severity high ./...' + + - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@v1 + with: + # path to SARIF file relative to the root of the repository + sarif_file: results.sarif \ No newline at end of file diff --git a/.gitignore b/.gitignore index e537833..85afb96 100644 --- a/.gitignore +++ b/.gitignore @@ -22,4 +22,5 @@ filenames .DS_Store -.scannerwork/ \ No newline at end of file +.scannerwork/ +*.sarif \ No newline at end of file diff --git a/Makefile b/Makefile index 0823e84..ef02f13 100644 --- a/Makefile +++ b/Makefile @@ -40,6 +40,10 @@ integration-test: ## - execute go test command for integration tests (aws creden scan: ## - execute static code analysis @ ./_support/scripts/ci.sh scan +.PHONY: local-scan +local-scan: ## - execute static code analysis locally + @ ./_support/scripts/ci.sh localScan + .PHONY: lint lint: ## - runs golangci-lint @ ./_support/scripts/ci.sh lint diff --git a/_support/scripts/ci.sh b/_support/scripts/ci.sh index beaba8a..245802f 100755 --- a/_support/scripts/ci.sh +++ b/_support/scripts/ci.sh @@ -23,7 +23,7 @@ function checkfmt() { fi } -function go_format() { +function goFormat() { echo 
"go formatting..." gofmt -w ./ echo "done" @@ -52,7 +52,7 @@ function lintDocker() { --verbose } -function test() { +function unitTest() { go list ./... | grep -v /test | \ xargs -L 1 -I% bash -c 'echo -e "\n**************** Package: % ****************" && go test % -v -cover -race ./...' } @@ -101,6 +101,16 @@ function scanast() { rm -f security.log } +function Scan() { + gosec -fmt=sarif -out=results.sarif -exclude-dir=internal -exclude-dir=vendor -severity=high ./... +} + +function localScan() { + # you can use the vs code plugin https://marketplace.visualstudio.com/items?itemName=MS-SarifVSCode.sarif-viewer + # to navigate against the issues + gosec -fmt=sarif -out=results.sarif -exclude-dir=internal -exclude-dir=vendor ./... +} + function usage() { echo "check.sh fmt|lint" >&2 exit 2 @@ -108,10 +118,11 @@ function usage() { case "$1" in fmtcheck) checkfmt ;; - format) go_format ;; + format) goFormat ;; lint) lint ;; lintDocker) lintDocker ;; - unittest) test ;; - scan) scanast ;; + unittest) unitTest ;; + scan) scan ;; + localScan) localScan ;; *) usage ;; esac From dd8ec339820c287403ce420d0effbf00c31d4059 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabiano=20Grac=CC=A7as?= Date: Fri, 12 Nov 2021 02:03:10 +0100 Subject: [PATCH 90/90] add build badge --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 920a975..af388e4 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,7 @@ # vmware-go-kcl-v2 +[![vmware-go-kcl-v2](https://github.com/fafg/vmware-go-kcl/actions/workflows/vmware-go-kcl-v2-ci.yml/badge.svg)](https://github.com/fafg/vmware-go-kcl/actions/workflows/vmware-go-kcl-v2-ci.yml) + ## Overview ## Try it out