diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 6bdaa999..00000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,6 +0,0 @@ -*Issue #, if available:* - -*Description of changes:* - - -By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. diff --git a/.travis.yml b/.travis.yml index 320f811c..ebb7a2ac 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,6 @@ language: java jdk: - - openjdk8 + - openjdk7 + - oraclejdk7 - oraclejdk8 -sudo: false -dist: trusty \ No newline at end of file +sudo: false \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index b22ef573..00000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,460 +0,0 @@ -# Changelog - -### Release 2.0.5 (November 12, 2018) -[Milestone #26](https://github.com/awslabs/amazon-kinesis-client/milestone/26?closed=1) -* Fixed a deadlock condition that could occur when using the polling model. - When using the `PollingConfig` and a slower record processor it was possible to hit a deadlock in the retrieval of records. - * [PR #462](https://github.com/awslabs/amazon-kinesis-client/pull/462) - * [Issue #448](https://github.com/awslabs/amazon-kinesis-client/issues/448) -* Adjusted `RetrievalConfig`, and `FanOutConfig` to use accessors instead of direct member access. - * [PR #453](https://github.com/awslabs/amazon-kinesis-client/pull/453) - - -### Release 2.0.4 (October 18, 2018) -[Milestone #25](https://github.com/awslabs/amazon-kinesis-client/milestone/25) -* Added method to retrieve leases from the LeaseCoordinator and LeaseTaker. - * [PR #428](https://github.com/awslabs/amazon-kinesis-client/pull/428) -* Fixed a race condition shutting down the Scheduler before it has completed initialization. 
- * [PR #439](https://github.com/awslabs/amazon-kinesis-client/pull/439) - * [Issue #427](https://github.com/awslabs/amazon-kinesis-client/issues/427) -* Added `HierarchicalShardSyncer` which replaces the static `ShardSyncer`. - `HierarchicalShardSyncer` removes the contention between multiple instances of the Scheduler when running under a single JVM. - * [PR #395](https://github.com/awslabs/amazon-kinesis-client/pull/395) - * [Issue #415](https://github.com/awslabs/amazon-kinesis-client/issues/415) -* Added `TaskExecutionListener` which allows monitoring of tasks being executed by the `ShardConsumer`. - The listener is invoked before and after a task is executed by the `ShardConsumer`. - * [PR #417](https://github.com/awslabs/amazon-kinesis-client/pull/417) - -### Release 2.0.3 (October 8, 2018) -[Milestone #23](https://github.com/awslabs/amazon-kinesis-client/milestone/23) -* Fixed an issue where the `KinesisAsyncClient` could be misconfigured to use HTTP 1.1. - Using HTTP 1.1 with `SubscribeToShard` is unsupported, and could cause misdelivery of records to the record processor. - * [Issue #391](https://github.com/awslabs/amazon-kinesis-client/issues/391) - * [PR #434](https://github.com/awslabs/amazon-kinesis-client/pull/434) - * [PR #433](https://github.com/awslabs/amazon-kinesis-client/pull/433) -* Lower the severity of `ReadTimeout` exceptions. - `ReadTimeout` exceptions can occur if the client is unable to request data from Kinesis for more than client timeout, which defaults to 30 seconds. This can occur if the record processor blocks for more than the timeout period. `ReadTimeout` could also occur as part of [Issue #391](https://github.com/awslabs/amazon-kinesis-client/issues/391). - * [Issue #399](https://github.com/awslabs/amazon-kinesis-client/issues/399) - * [PR #403](https://github.com/awslabs/amazon-kinesis-client/pull/403) -* Added a callback that allows applications to take actions after DynamoDB table creation. 
- Applications can now install a callback that is called after creating the DynamoDB table by implementing `TableCreatorCallback`. - * [PR #413](https://github.com/awslabs/amazon-kinesis-client/pull/413) -* Updated the guava dependency to 26.0-jre. - * [PR #420](https://github.com/awslabs/amazon-kinesis-client/pull/420) - * [Issue #416](https://github.com/awslabs/amazon-kinesis-client/issues/416) -* Added some additional debug logging around the initialization of the `FanOutRecordsPublisher`. - * [PR #398](https://github.com/awslabs/amazon-kinesis-client/pull/398) -* Upgraded AWS SDK version to 2.0.6 - * [PR #434](https://github.com/awslabs/amazon-kinesis-client/pull/434) - - -### Release 2.0.2 (September 4, 2018) -[Milestone #22](https://github.com/awslabs/amazon-kinesis-client/milestone/22) -* Fixed an issue where the a warning would be logged every second if `logWarningForTaskAfterMillis` was set. - The logging for last time of data arrival now respects the value of `logWarningForTaskAfterMillis`. - * [PR #383](https://github.com/awslabs/amazon-kinesis-client/pull/383) - * [Issue #381](https://github.com/awslabs/amazon-kinesis-client/issues/381) -* Moved creation of `WorkerStateChangedListener` and `GracefulShutdownCoordinator` to the `CoordinatorConfig`. - Originally the `WorkerStateChangedListener` and `GracefulShutdownCoordinator` were created by methods on the `SchedulerCoordinatorFactory`, but they should have been configuration options. - The original methods have been deprecated, and may be removed at a later date. - * [PR #385](https://github.com/awslabs/amazon-kinesis-client/pull/385) - * [PR #388](https://github.com/awslabs/amazon-kinesis-client/pull/388) -* Removed dependency on Apache Commons Lang 2.6. - The dependency on Apache Commons Lang 2.6 has removed, and all usages updated to use Apache Commons Lang 3.7. 
- * [PR #386](https://github.com/awslabs/amazon-kinesis-client/pull/386) - * [Issue #370](https://github.com/awslabs/amazon-kinesis-client/issues/370) -* Fixed a typo in the MutliLang Daemon shutdown hook. - * [PR #387](https://github.com/awslabs/amazon-kinesis-client/pull/387) -* Added method `onAllInitializationAttemptsFailed(Throwable)` to `WorkerStateChangedListener` to report when all initialization attempts have failed. - This method is a default method, and it isn't require to implement the method. This method is only called after all attempts to initialize the `Scheduler` have failed. - * [PR #369](https://github.com/awslabs/amazon-kinesis-client/pull/369) - -### Release 2.0.1 (August 21, 2018) -* Mark certain internal components with `@KinesisClientInternalApi` attribute. - Components marked as internal may be deprecated at a faster rate than public components. - * [PR #358](https://github.com/awslabs/amazon-kinesis-client/pull/358) -* Fixed an issue where `ResourceNotFoundException` on subscription to a shard was not triggering end of shard handling. - If a lease table contains a shard that is no longer present in the stream attempt to subscribe to that shard will trigger a `ResourceNotFoundException`. These exception are treated the same as reaching the end of a shard. - * [PR #359](https://github.com/awslabs/amazon-kinesis-client/pull/359) -* Fixed an issue where the KCL would not Use the configured DynamoDB IOPs when creating the lease table. - * [PR #360](https://github.com/awslabs/amazon-kinesis-client/pull/360) -* Make the maximum number of Scheduler initialization attempts configurable. - The maximum number of `Scheduler` initialization attempts can be configured via `CoordinatorConfig#maxInitializationAttempts`. 
- * [PR #363](https://github.com/awslabs/amazon-kinesis-client/pull/363) - * [PR #368](https://github.com/awslabs/amazon-kinesis-client/pull/368) -* Fixed an issue where it was possible to get a duplicate record when resubscribing to a shard. - Subscribe to shard requires periodic resubscribing, and uses a new concept of a continuation sequence number. If the continuation sequence number was equal to the last record that record would be processed a second time. Resubscribing now uses `AFTER_SEQUENCE_NUMBER` to ensure that only later records are returned. - * [PR #371](https://github.com/awslabs/amazon-kinesis-client/pull/371) -* Upgraded to AWS SDK 2.0.1 - * [PR #372](https://github.com/awslabs/amazon-kinesis-client/pull/372) -* Fixed an issue where time based restart of the subscription wasn't resetting the `lastRequestTime`. - If a subscription hasn't delivered any data for more than 30 seconds it will be canceled and restarted. This detection is based of the `lastRequestTime` which wasn't getting reset after the restart was triggered. - * [PR #373](https://github.com/awslabs/amazon-kinesis-client/pull/373) -* Fixed an issue where requesting on the subscription from the `FanOutRecordsPublisher` could trigger an unexpected failure. - Due to a race condition the underlying flow in the subscription could be set to something else. The method is now synchronized, and verifies that the subscriber it was created with is still the subscriber in affect. - This issue generally would only appear when multiple errors were occurring while connecting to Kinesis. - * [PR #374](https://github.com/awslabs/amazon-kinesis-client/pull/374) -* Fixed an issue where the number of requested items could exceed the capacity of the RxJava queue. - There was an off by one issue when determining whether to make a request to the SDK subscription. This changes the calculation to represent the capacity as a queue. 
- * [PR #375](https://github.com/awslabs/amazon-kinesis-client/pull/375) - -### Release 2.0.0 (August 02, 2018) -* The Maven `groupId`, along with the `version`, for the Amazon Kinesis Client has changed from `com.amazonaws` to `software.amazon.kinesis`. - To add a dependency on the new version of the Amazon Kinesis Client: - ``` xml - - software.amazon.kinesis - amazon-kinesis-client - 2.0.0 - - ``` -* Added support for Enhanced Fan Out. - Enhanced Fan Out provides for lower end to end latency, and increased number of consumers per stream. - * Records are now delivered via streaming, reducing end-to-end latency. - * The Amazon Kinesis Client will automatically register a new consumer if required. - When registering a new consumer, the Kinesis Client will default to the application name unless configured otherwise. - * `SubscribeToShard` maintains long lived connections with Kinesis, which in the AWS Java SDK 2.0 is limited by default. - The `KinesisClientUtil` has been added to assist configuring the `maxConcurrency` of the `KinesisAsyncClient`. 
- __WARNING: The Amazon Kinesis Client may see significantly increased latency, unless the `KinesisAsyncClient` is configured to have a `maxConcurrency` high enough to allow all leases plus additional usages of the `KinesisAsyncClient`.__ - * The Amazon Kinesis Client now uses 3 additional Kinesis API's: - __WARNING: If using a restrictive Kinesis IAM policy you may need to add the following API methods to the policy.__ - * [`SubscribeToShard`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SubscribeToShard.html) - * [`DescribeStreamSummary`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_DescribeStreamSummary.html) - * [`DescribeStreamConsumer`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_DescribeStreamConsumer.html) - * [`RegisterStreamConsumer`](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_RegisterStreamConsumer.html) - * New configuration options are available to configure Enhanced Fan Out. - - | Name | Default | Description | - |-----------------|---------|---------------------------------------------------------------------------------------------------------------------| - | consumerArn | Unset | The ARN for an already created consumer. If this is set, the Kinesis Client will not attempt to create a consumer. | - | streamName | Unset | The name of the stream that a consumer should be create for if necessary | - | consumerName | Unset | The name of the consumer to create. If this is not set the applicationName will be used instead. | - | applicationName | Unset | The name of the application. This is used as the name of the consumer unless consumerName is set. | - -* Modular Configuration of the Kinesis Client - The Kinesis Client has migrated to a modular configuration system, and the `KinesisClientLibConfiguration` class has been removed. - Configuration has been split into 7 classes. Default versions of the configuration can be created from the `ConfigsBuilder`. 
- Please [see the migration guide for more information][migration-guide]. - * `CheckpointConfig` - * `CoordinatorConfig` - * `LeaseManagementConfig` - * `LifecycleConfig` - * `MetricsConfig` - * `ProcessorConfig` - * `RetrievalConfig` - -* Upgraded to AWS Java SDK 2.0 - The Kinesis Client now uses the AWS Java SDK 2.0. The dependency on AWS Java SDK 1.11 has been removed. - All configurations will only accept 2.0 clients. - * When configuring the `KinesisAsyncClient` the `KinesisClientUtil#createKinesisAsyncClient` can be used to configure the Kinesis Client - * __If you need support for AWS Java SDK 1.11 you will need to add a direct dependency.__ - __When adding a dependency you must ensure that the 1.11 versions of Jackson dependencies are excluded__ - [Please see the migration guide for more information][migration-guide] - -* MultiLangDaemon is now a separate module - The MultiLangDaemon has been separated to its own Maven module and is no longer available in `amazon-kinesis-client`. To include the MultiLangDaemon, add a dependency on `amazon-kinesis-client-multilang`. - -## Release 1.9.1 (April 30, 2018) -* Added the ability to create a prepared checkpoint when at `SHARD_END`. - * [PR #301](https://github.com/awslabs/amazon-kinesis-client/pull/301) -* Added the ability to subscribe to worker state change events. - * [PR #291](https://github.com/awslabs/amazon-kinesis-client/pull/291) -* Added support for custom lease managers. - A custom `LeaseManager` can be provided to `Worker.Builder` that will be used to provide lease services. - This makes it possible to implement custom lease management systems in addition to the default DynamoDB system. - * [PR #297](https://github.com/awslabs/amazon-kinesis-client/pull/297) -* Updated the version of the AWS Java SDK to 1.11.219 - -## Release 1.9.0 (February 6, 2018) -* Introducing support for ListShards API. This API is used in place of DescribeStream API to provide more throughput during ShardSyncTask. 
Please consult the [AWS Documentation for ListShards](https://docs.aws.amazon.com/kinesis/latest/APIReference/API_ListShards.html) for more information. - * ListShards supports higher call rate, which should reduce instances of throttling when attempting to synchronize the shard list. - * __WARNING: `ListShards` is a new API, and may require updating any explicit IAM policies__ - * Added configuration parameters for ListShards usage - - | Name | Default | Description | - | ---- | ------- | ----------- | - | [listShardsBackoffTimeInMillis](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1412) | 1500 ms | This is the default backoff time between 2 ListShards calls when throttled. | - | [listShardsRetryAttempts](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1423) | 50 | This is the maximum number of times the KinesisProxy will retry to make ListShards calls on being throttled. | - -* Updating the version of AWS Java SDK to 1.11.272. - * Version 1.11.272 is now the minimum support version of the SDK. -* Deprecating the following methods, and classes. These methods, and classes will be removed in a future release. - * Deprecated [IKinesisProxy#getStreamInfo](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxy.java#L48-L62). - * Deprecated [IKinesisProxyFactory](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxyFactory.java). 
- * Deprecated [KinesisProxyFactory](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxyFactory.java). - * Deprecated certain [KinesisProxy](https://github.com/awslabs/amazon-kinesis-client/blob/3ae916c5fcdccd6b835c86ba7f6f53dd5b4c8b04/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxy.java) constructors. - * [PR #293](https://github.com/awslabs/amazon-kinesis-client/pull/293) - -## Release 1.8.10 -* Allow providing a custom IKinesisProxy implementation. - * [PR #274](https://github.com/awslabs/amazon-kinesis-client/pull/274) -* Checkpointing on a different thread should no longer emit a warning about NullMetricsScope. - * [PR #284](https://github.com/awslabs/amazon-kinesis-client/pull/284) - * [Issue #48](https://github.com/awslabs/amazon-kinesis-client/issues/48) -* Upgraded the AWS Java SDK to version 1.11.271 - * [PR #287](https://github.com/awslabs/amazon-kinesis-client/pull/287) - -## Release 1.8.9 -* Allow disabling check for the case where a child shard has an open parent shard. - There is a race condition where it's possible for the a parent shard to appear open, while having child shards. This check can now be disabled by setting [`ignoreUnexpectedChildShards`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1037) to true. - * [PR #240](https://github.com/awslabs/amazon-kinesis-client/pull/240) - * [Issue #210](https://github.com/awslabs/amazon-kinesis-client/issues/210) -* Upgraded the AWS SDK for Java to 1.11.261 - * [PR #281](https://github.com/awslabs/amazon-kinesis-client/pull/281) - -## Release 1.8.8 -* Fixed issues with leases losses due to `ExpiredIteratorException` in `PrefetchGetRecordsCache` and `AsynchronousFetchingStrategy`. 
- PrefetchGetRecordsCache will request for a new iterator and start fetching data again. - * [PR#263](https://github.com/awslabs/amazon-kinesis-client/pull/263) -* Added warning message for long running tasks. - Logging long running tasks can be enabled by setting the following configuration property: - - | Name | Default | Description | - | ---- | ------- | ----------- | - | [`logWarningForTaskAfterMillis`](https://github.com/awslabs/amazon-kinesis-client/blob/3de901ea9327370ed732af86c4d4999c8d99541c/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1367) | Not set | Milliseconds after which the logger will log a warning message for the long running task | - - * [PR#259](https://github.com/awslabs/amazon-kinesis-client/pull/259) -* Handling spurious lease renewal failures gracefully. - Added better handling of DynamoDB failures when updating leases. These failures would occur when a request to DynamoDB appeared to fail, but was actually successful. - * [PR#247](https://github.com/awslabs/amazon-kinesis-client/pull/247) -* ShutdownTask gets retried if the previous attempt on the ShutdownTask fails. - * [PR#267](https://github.com/awslabs/amazon-kinesis-client/pull/267) -* Fix for using maxRecords from `KinesisClientLibConfiguration` in `GetRecordsCache` for fetching records. - * [PR#264](https://github.com/awslabs/amazon-kinesis-client/pull/264) - -## Release 1.8.7 -* Don't add a delay for synchronous requests to Kinesis - Removes a delay that had been added for synchronous `GetRecords` calls to Kinesis. - * [PR #256](https://github.com/awslabs/amazon-kinesis-client/pull/256) - -## Release 1.8.6 -* Add prefetching of records from Kinesis - Prefetching will retrieve and queue additional records from Kinesis while the application is processing existing records. 
- Prefetching can be enabled by setting [`dataFetchingStrategy`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1317) to `PREFETCH_CACHED`. Once enabled an additional fetching thread will be started to retrieve records from Kinesis. Retrieved records will be held in a queue until the application is ready to process them. - Pre-fetching supports the following configuration values: - - | Name | Default | Description | - | ---- | ------- | ----------- | - | [`dataFetchingStrategy`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1317) | `DEFAULT` | Which data fetching strategy to use | - | [`maxPendingProcessRecordsInput`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1296) | 3 | The maximum number of process records input that can be queued | - | [`maxCacheByteSize`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1307) | 8 MiB | The maximum number of bytes that can be queued | - | [`maxRecordsCount`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1326) | 30,000 | The maximum number of records that can be queued | - | [`idleMillisBetweenCalls`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L1353) | 1,500 ms | The amount of time to wait between calls to Kinesis | - - * [PR #246](https://github.com/awslabs/amazon-kinesis-client/pull/246) - -## Release 1.8.5 
(September 26, 2017) -* Only advance the shard iterator for the accepted response. - This fixes a race condition in the `KinesisDataFetcher` when it's being used to make asynchronous requests. The shard iterator is now only advanced when the retriever calls `DataFetcherResult#accept()`. - * [PR #230](https://github.com/awslabs/amazon-kinesis-client/pull/230) - * [Issue #231](https://github.com/awslabs/amazon-kinesis-client/issues/231) - -## Release 1.8.4 (September 22, 2017) -* Create a new completion service for each request. - This ensures that canceled tasks are discarded. This will prevent a cancellation exception causing issues processing records. - * [PR #227](https://github.com/awslabs/amazon-kinesis-client/pull/227) - * [Issue #226](https://github.com/awslabs/amazon-kinesis-client/issues/226) - -## Release 1.8.3 (September 22, 2017) -* Call shutdown on the retriever when the record processor is being shutdown - This fixes a bug that could leak threads if using the [`AsynchronousGetRecordsRetrievalStrategy`](https://github.com/awslabs/amazon-kinesis-client/blob/9a82b6bd05b3c9c5f8581af007141fa6d5f0fc4e/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/AsynchronousGetRecordsRetrievalStrategy.java#L42) is being used. - The asynchronous retriever is only used when [`KinesisClientLibConfiguration#retryGetRecordsInSeconds`](https://github.com/awslabs/amazon-kinesis-client/blob/01d2688bc6e68fd3fe5cb698cb65299d67ac930d/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L227), and [`KinesisClientLibConfiguration#maxGetRecordsThreadPool`](https://github.com/awslabs/amazon-kinesis-client/blob/01d2688bc6e68fd3fe5cb698cb65299d67ac930d/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java#L230) are set. 
- * [PR #222](https://github.com/awslabs/amazon-kinesis-client/pull/222) - -## Release 1.8.2 (September 20, 2017) -* Add support for two phase checkpoints - Applications can now set a pending checkpoint, before completing the checkpoint operation. Once the application has completed its checkpoint steps, the final checkpoint will clear the pending checkpoint. - Should the checkpoint fail the attempted sequence number is provided in the [`InitializationInput#getPendingCheckpointSequenceNumber`](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/types/InitializationInput.java#L81) otherwise the value will be null. - * [PR #188](https://github.com/awslabs/amazon-kinesis-client/pull/188) -* Support timeouts, and retry for GetRecords calls. - Applications can now set timeouts for GetRecord calls to Kinesis. As part of setting the timeout, the application must also provide a thread pool size for concurrent requests. - * [PR #214](https://github.com/awslabs/amazon-kinesis-client/pull/214) -* Notification when the lease table is throttled - When writes, or reads, to the lease table are throttled a warning will be emitted. If you're seeing this warning you should increase the IOPs for your lease table to prevent processing delays. - * [PR #212](https://github.com/awslabs/amazon-kinesis-client/pull/212) -* Support configuring the graceful shutdown timeout for MultiLang Clients - This adds support for setting the timeout that the Java process will wait for the MutliLang client to complete graceful shutdown. The timeout can be configured by adding `shutdownGraceMillis` to the properties file set to the number of milliseconds to wait. - * [PR #204](https://github.com/awslabs/amazon-kinesis-client/pull/204) - -## Release 1.8.1 (August 2, 2017) -* Support timeouts for calls to the MultiLang Daemon - This adds support for setting a timeout when dispatching records to the client record processor. 
If the record processor doesn't respond within the timeout the parent Java process will be terminated. This is a temporary fix to handle cases where the KCL becomes blocked while waiting for a client record processor. - The timeout for the this can be set by adding `timeoutInSeconds = `. The default for this is no timeout. - __Setting this can cause the KCL to exit suddenly, before using this ensure that you have an automated restart for your application__ - * [PR #195](https://github.com/awslabs/amazon-kinesis-client/pull/195) - * [Issue #185](https://github.com/awslabs/amazon-kinesis-client/issues/185) - -## Release 1.8.0 (July 25, 2017) -* Execute graceful shutdown on its own thread - * [PR #191](https://github.com/awslabs/amazon-kinesis-client/pull/191) - * [Issue #167](https://github.com/awslabs/amazon-kinesis-client/issues/167) -* Added support for controlling the size of the lease renewer thread pool - * [PR #177](https://github.com/awslabs/amazon-kinesis-client/pull/177) - * [Issue #171](https://github.com/awslabs/amazon-kinesis-client/issues/171) -* Require Java 8 and later - __Java 8 is now required for versions 1.8.0 of the amazon-kinesis-client and later.__ - * [PR #176](https://github.com/awslabs/amazon-kinesis-client/issues/176) - -## Release 1.7.6 (June 21, 2017) -* Added support for graceful shutdown in MultiLang Clients - * [PR #174](https://github.com/awslabs/amazon-kinesis-client/pull/174) - * [PR #182](https://github.com/awslabs/amazon-kinesis-client/pull/182) -* Updated documentation for `v2.IRecordProcessor#shutdown`, and `KinesisClientLibConfiguration#idleTimeBetweenReadsMillis` - * [PR #170](https://github.com/awslabs/amazon-kinesis-client/pull/170) -* Updated to version 1.11.151 of the AWS Java SDK - * [PR #183](https://github.com/awslabs/amazon-kinesis-client/pull/183) - -## Release 1.7.5 (April 7, 2017) -* Correctly handle throttling for DescribeStream, and save accumulated progress from individual calls. 
- * [PR #152](https://github.com/awslabs/amazon-kinesis-client/pull/152) -* Upgrade to version 1.11.115 of the AWS Java SDK - * [PR #155](https://github.com/awslabs/amazon-kinesis-client/pull/155) - -## Release 1.7.4 (February 27, 2017) -* Fixed an issue building JavaDoc for Java 8. - * [Issue #18](https://github.com/awslabs/amazon-kinesis-client/issues/18) - * [PR #141](https://github.com/awslabs/amazon-kinesis-client/pull/141) -* Reduce Throttling Messages to WARN, unless throttling occurs 6 times consecutively. - * [Issue #4](https://github.com/awslabs/amazon-kinesis-client/issues/4) - * [PR #140](https://github.com/awslabs/amazon-kinesis-client/pull/140) -* Fixed two bugs occurring in requestShutdown. - * Fixed a bug that prevented the worker from shutting down, via requestShutdown, when no leases were held. - * [Issue #128](https://github.com/awslabs/amazon-kinesis-client/issues/128) - * Fixed a bug that could trigger a NullPointerException if leases changed during requestShutdown. - * [Issue #129](https://github.com/awslabs/amazon-kinesis-client/issues/129) - * [PR #139](https://github.com/awslabs/amazon-kinesis-client/pull/139) -* Upgraded the AWS SDK Version to 1.11.91 - * [PR #138](https://github.com/awslabs/amazon-kinesis-client/pull/138) -* Use an executor returned from `ExecutorService.newFixedThreadPool` instead of constructing it by hand. - * [PR #135](https://github.com/awslabs/amazon-kinesis-client/pull/135) -* Correctly initialize DynamoDB client, when endpoint is explicitly set. - * [PR #142](https://github.com/awslabs/amazon-kinesis-client/pull/142) - -## Release 1.7.3 (January 9, 2017) -* Upgrade to the newest AWS Java SDK. - * [Amazon Kinesis Client Issue #27](https://github.com/awslabs/amazon-kinesis-client-python/issues/27) - * [PR #126](https://github.com/awslabs/amazon-kinesis-client/pull/126) - * [PR #125](https://github.com/awslabs/amazon-kinesis-client/pull/125) -* Added a direct dependency on commons-logging. 
- * [Issue #123](https://github.com/awslabs/amazon-kinesis-client/issues/123) - * [PR #124](https://github.com/awslabs/amazon-kinesis-client/pull/124) -* Make ShardInfo public to allow for custom ShardPrioritization strategies. - * [Issue #120](https://github.com/awslabs/amazon-kinesis-client/issues/120) - * [PR #127](https://github.com/awslabs/amazon-kinesis-client/pull/127) - -## Release 1.7.2 (November 7, 2016) -* MultiLangDaemon Feature Updates - The MultiLangDaemon has been upgraded to use the v2 interfaces, which allows access to enhanced checkpointing, and more information during record processor initialization. The MultiLangDaemon clients must be updated before they can take advantage of these new features. - -## Release 1.7.1 (November 3, 2016) -* General - * Allow disabling shard synchronization at startup. - * Applications can disable shard synchronization at startup. Disabling shard synchronization can application startup times for very large streams. - * [PR #102](https://github.com/awslabs/amazon-kinesis-client/pull/102) - * Applications can now request a graceful shutdown, and record processors that implement the IShutdownNotificationAware will be given a chance to checkpoint before being shutdown. - * This adds a [new interface](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IShutdownNotificationAware.java), and a [new method on Worker](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/Worker.java#L539). - * [PR #109](https://github.com/awslabs/amazon-kinesis-client/pull/109) - * Solves [Issue #79](https://github.com/awslabs/amazon-kinesis-client/issues/79) -* MultiLangDaemon - * Applications can now use credential provides that accept string parameters. 
- * [PR #99](https://github.com/awslabs/amazon-kinesis-client/pull/99) - * Applications can now use different credentials for each service. - * [PR #111](https://github.com/awslabs/amazon-kinesis-client/pull/111) - -## Release 1.7.0 (August 22, 2016) -* Add support for time based iterators ([See GetShardIterator Documentation](http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html)) - * [PR #94](https://github.com/awslabs/amazon-kinesis-client/pull/94) - The `KinesisClientLibConfiguration` now supports providing an initial time stamp position. - * This position is only used if there is no current checkpoint for the shard. - * This setting cannot be used with DynamoDB Streams - Resolves [Issue #88](https://github.com/awslabs/amazon-kinesis-client/issues/88) -* Allow Prioritization of Parent Shards for Task Assignment - * [PR #95](https://github.com/awslabs/amazon-kinesis-client/pull/95) - The `KinesisClientLibconfiguration` now supports providing a `ShardPrioritization` strategy. This strategy controls how the `Worker` determines which `ShardConsumer` to call next. This can improve processing for streams that split often, such as DynamoDB Streams. -* Remove direct dependency on `aws-java-sdk-core`, to allow independent versioning. - * [PR #92](https://github.com/awslabs/amazon-kinesis-client/pull/92) - **You may need to add a direct dependency on aws-java-sdk-core if other dependencies include an older version.** - -## Release 1.6.5 (July 25, 2016) -* Change LeaseManager to call DescribeTable before attempting to create the lease table. 
- * [Issue #36](https://github.com/awslabs/amazon-kinesis-client/issues/36) - * [PR #41](https://github.com/awslabs/amazon-kinesis-client/pull/41) - * [PR #67](https://github.com/awslabs/amazon-kinesis-client/pull/67) -* Allow DynamoDB lease table name to be specified - * [PR #61](https://github.com/awslabs/amazon-kinesis-client/pull/61) -* Add approximateArrivalTimestamp for JsonFriendlyRecord - * [PR #86](https://github.com/awslabs/amazon-kinesis-client/pull/86) -* Shutdown lease renewal thread pool on exit. - * [PR #84](https://github.com/awslabs/amazon-kinesis-client/pull/84) -* Wait for CloudWatch publishing thread to finish before exiting. - * [PR #82](https://github.com/awslabs/amazon-kinesis-client/pull/82) -* Added unit, and integration tests for the library. - -## Release 1.6.4 (July 6, 2016) -* Upgrade to AWS SDK for Java 1.11.14 - * [Issue #74](https://github.com/awslabs/amazon-kinesis-client/issues/74) - * [Issue #73](https://github.com/awslabs/amazon-kinesis-client/issues/73) -* **Maven Artifact Signing Change** - * Artifacts are now signed by the identity `Amazon Kinesis Tools ` - -## Release 1.6.3 (May 12, 2016) -* Fix format exception caused by DEBUG log in LeaseTaker [Issue # 68](https://github.com/awslabs/amazon-kinesis-client/issues/68) - -## Release 1.6.2 (March 23, 2016) -* Support for specifying max leases per worker and max leases to steal at a time. -* Support for specifying initial DynamoDB table read and write capacity. -* Support for parallel lease renewal. -* Support for graceful worker shutdown. -* Change DefaultCWMetricsPublisher log level to debug. [PR # 49](https://github.com/awslabs/amazon-kinesis-client/pull/49) -* Avoid NPE in MLD record processor shutdown if record processor was not initialized. 
[Issue # 29](https://github.com/awslabs/amazon-kinesis-client/issues/29) - -## Release 1.6.1 (September 23, 2015) -* Expose [approximateArrivalTimestamp](http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) for Records in processRecords API call. - -## Release 1.6.0 (July 31, 2015) -* Restores compatibility with [dynamodb-streams-kinesis-adapter](https://github.com/awslabs/dynamodb-streams-kinesis-adapter) (which was broken in 1.4.0). - -## Release 1.5.1 (July 20, 2015) -* KCL maven artifact 1.5.0 does not work with JDK 7. This release addresses this issue. - -## Release 1.5.0 (July 9, 2015) -* **[Metrics Enhancements][kinesis-guide-monitoring-with-kcl]** - * Support metrics level and dimension configurations to control CloudWatch metrics emitted by the KCL. - * Add new metrics that track time spent in record processor methods. - * Disable WorkerIdentifier dimension by default. -* **Exception Reporting** — Do not silently ignore exceptions in ShardConsumer. -* **AWS SDK Component Dependencies** — Depend only on AWS SDK components that are used. - -## Release 1.4.0 (June 2, 2015) -* Integration with the **[Kinesis Producer Library (KPL)][kinesis-guide-kpl]** - * Automatically de-aggregate records put into the Kinesis stream using the KPL. - * Support checkpointing at the individual user record level when multiple user records are aggregated into one Kinesis record using the KPL. - - See [Consumer De-aggregation with the KCL][kinesis-guide-consumer-deaggregation] for details. - -## Release 1.3.0 (May 22, 2015) -* A new metric called "MillisBehindLatest", which tracks how far consumers are from real time, is now uploaded to CloudWatch. - -## Release 1.2.1 (January 26, 2015) -* **MultiLangDaemon** — Changes to the MultiLangDaemon to make it easier to provide a custom worker. 
- -## Release 1.2 (October 21, 2014) -* **Multi-Language Support** — Amazon KCL now supports implementing record processors in any language by communicating with the daemon over [STDIN and STDOUT][multi-lang-protocol]. Python developers can directly use the [Amazon Kinesis Client Library for Python][kclpy] to write their data processing applications. - -## Release 1.1 (June 30, 2014) -* **Checkpointing at a specific sequence number** — The IRecordProcessorCheckpointer interface now supports checkpointing at a sequence number specified by the record processor. -* **Set region** — KinesisClientLibConfiguration now supports setting the region name to indicate the location of the Amazon Kinesis service. The Amazon DynamoDB table and Amazon CloudWatch metrics associated with your application will also use this region setting. - -[kinesis]: http://aws.amazon.com/kinesis -[kinesis-forum]: http://developer.amazonwebservices.com/connect/forum.jspa?forumID=169 -[kinesis-client-library-issues]: https://github.com/awslabs/amazon-kinesis-client/issues -[docs-signup]: http://docs.aws.amazon.com/AWSSdkDocsJava/latest/DeveloperGuide/java-dg-setup.html -[kinesis-guide]: http://docs.aws.amazon.com/kinesis/latest/dev/introduction.html -[kinesis-guide-begin]: http://docs.aws.amazon.com/kinesis/latest/dev/before-you-begin.html -[kinesis-guide-create]: http://docs.aws.amazon.com/kinesis/latest/dev/step-one-create-stream.html -[kinesis-guide-applications]: http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-record-processor-app.html -[kinesis-guide-monitoring-with-kcl]: http://docs.aws.amazon.com//kinesis/latest/dev/monitoring-with-kcl.html -[kinesis-guide-kpl]: http://docs.aws.amazon.com//kinesis/latest/dev/developing-producers-with-kpl.html -[kinesis-guide-consumer-deaggregation]: http://docs.aws.amazon.com//kinesis/latest/dev/kinesis-kpl-consumer-deaggregation.html -[kclpy]: https://github.com/awslabs/amazon-kinesis-client-python -[multi-lang-protocol]: 
https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/multilang/package-info.java diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 3b644668..00000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,4 +0,0 @@ -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). -For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index ed9eb3e2..00000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,61 +0,0 @@ -# Contributing Guidelines - -Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional -documentation, we greatly value feedback and contributions from our community. - -Please read through this document before submitting any issues or pull requests to ensure we have all the necessary -information to effectively respond to your bug report or contribution. - - -## Reporting Bugs/Feature Requests - -We welcome you to use the GitHub issue tracker to report bugs or suggest features. - -When filing an issue, please check [existing open](https://github.com/awslabs/amazon-kinesis-client/issues), or [recently closed](https://github.com/awslabs/amazon-kinesis-client/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already -reported the issue. Please try to include as much information as you can. 
Details like these are incredibly useful: - -* A reproducible test case or series of steps -* The version of our code being used -* Any modifications you've made relevant to the bug -* Anything unusual about your environment or deployment - - -## Contributing via Pull Requests -Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: - -1. You are working against the latest source on the *master* branch. -2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. -3. You open an issue to discuss any significant work - we would hate for your time to be wasted. - -To send us a pull request, please: - -1. Fork the repository. -2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. -3. Ensure local tests pass. -4. Commit to your fork using clear commit messages. -5. Send us a pull request, answering any default questions in the pull request interface. -6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. - -GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and -[creating a pull request](https://help.github.com/articles/creating-a-pull-request/). - - -## Finding contributions to work on -Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/amazon-kinesis-client/labels/help%20wanted) issues is a great place to start. - - -## Code of Conduct -This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 
-For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact -opensource-codeofconduct@amazon.com with any additional questions or comments. - - -## Security issue notifications -If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. - - -## Licensing - -See the [LICENSE](https://github.com/awslabs/amazon-kinesis-client/blob/master/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. - -We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. diff --git a/META-INF/MANIFEST.MF b/META-INF/MANIFEST.MF index 4b6733f2..dea3446e 100644 --- a/META-INF/MANIFEST.MF +++ b/META-INF/MANIFEST.MF @@ -2,9 +2,9 @@ Manifest-Version: 1.0 Bundle-ManifestVersion: 2 Bundle-Name: Amazon Kinesis Client Library for Java Bundle-SymbolicName: com.amazonaws.kinesisclientlibrary;singleton:=true -Bundle-Version: 2.0.0 +Bundle-Version: 1.7.4 Bundle-Vendor: Amazon Technologies, Inc -Bundle-RequiredExecutionEnvironment: JavaSE-1.8 +Bundle-RequiredExecutionEnvironment: JavaSE-1.7 Require-Bundle: org.apache.commons.codec;bundle-version="1.6", org.apache.commons.logging;bundle-version="1.1.3";visibility:=reexport, com.fasterxml.jackson.core.jackson-databind;bundle-version="2.5.3", @@ -12,16 +12,16 @@ Require-Bundle: org.apache.commons.codec;bundle-version="1.6", com.fasterxml.jackson.core.jackson-annotations;bundle-version="2.5.0", org.apache.httpcomponents.httpcore;bundle-version="4.3.3", org.apache.httpcomponents.httpclient;bundle-version="4.3.6" - com.amazonaws.sdk;bundle-version="1.11.319", + com.amazonaws.sdk;bundle-version="1.11.14", Export-Package: com.amazonaws.services.kinesis, 
com.amazonaws.services.kinesis.clientlibrary, - com.amazonaws.services.kinesis.clientlibrary.kinesisClientLibConfiguration, + com.amazonaws.services.kinesis.clientlibrary.config, com.amazonaws.services.kinesis.clientlibrary.exceptions, com.amazonaws.services.kinesis.clientlibrary.exceptions.internal, com.amazonaws.services.kinesis.clientlibrary.interfaces, com.amazonaws.services.kinesis.clientlibrary.lib, com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint, - com.amazonaws.services.kinesis.clientlibrary.lib.scheduler, + com.amazonaws.services.kinesis.clientlibrary.lib.worker, com.amazonaws.services.kinesis.clientlibrary.proxies, com.amazonaws.services.kinesis.clientlibrary.types, com.amazonaws.services.kinesis.leases, diff --git a/README.md b/README.md index a1523497..ba784a1f 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ -# Amazon Kinesis Client Library for Java -[![Build Status](https://travis-ci.org/awslabs/amazon-kinesis-client.svg?branch=master)](https://travis-ci.org/awslabs/amazon-kinesis-client) ![BuildStatus](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiaWo4bDYyUkpWaG9ZTy9zeFVoaVlWbEwxazdicDJLcmZwUUpFWVVBM0ZueEJSeFIzNkhURzdVbUd6WUZHcGNxa3BEUzNrL0I5Nzc4NE9rbXhvdEpNdlFRPSIsIml2UGFyYW1ldGVyU3BlYyI6IlZDaVZJSTM1QW95bFRTQnYiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master) +# Amazon Kinesis Client Library for Java [![Build Status](https://travis-ci.org/awslabs/amazon-kinesis-client.svg?branch=master)](https://travis-ci.org/awslabs/amazon-kinesis-client) The **Amazon Kinesis Client Library for Java** (Amazon KCL) enables Java developers to easily consume and process data from [Amazon Kinesis][kinesis]. 
@@ -7,13 +6,6 @@ The **Amazon Kinesis Client Library for Java** (Amazon KCL) enables Java develop * [Forum][kinesis-forum] * [Issues][kinesis-client-library-issues] -### Recommended Upgrade for All Users of the 2.x Amazon Kinesis Client -**:warning: It's highly recommended for users of version 2.0 of the Amazon Kinesis Client to upgrade to version 2.0.3 or later. A [bug has been](https://github.com/awslabs/amazon-kinesis-client/issues/391) identified in versions prior to 2.0.3 that could cause records to be delivered to the wrong record processor.** - -**:information_source: Amazon Kinesis Client versions 1.x are not impacted.** - -Please open an issue if you have any questions. - ## Features * Provides an easy-to-use programming model for processing data using Amazon Kinesis @@ -23,7 +15,7 @@ Please open an issue if you have any questions. 1. **Sign up for AWS** — Before you begin, you need an AWS account. For more information about creating an AWS account and retrieving your AWS credentials, see [AWS Account and Credentials][docs-signup] in the AWS SDK for Java Developer Guide. 1. **Sign up for Amazon Kinesis** — Go to the Amazon Kinesis console to sign up for the service and create an Amazon Kinesis stream. For more information, see [Create an Amazon Kinesis Stream][kinesis-guide-create] in the Amazon Kinesis Developer Guide. -1. **Minimum requirements** — To use the Amazon Kinesis Client Library, you'll need **Java 1.8+**. For more information about Amazon Kinesis Client Library requirements, see [Before You Begin][kinesis-guide-begin] in the Amazon Kinesis Developer Guide. +1. **Minimum requirements** — To use the Amazon Kinesis Client Library, you'll need **Java 1.7+**. For more information about Amazon Kinesis Client Library requirements, see [Before You Begin][kinesis-guide-begin] in the Amazon Kinesis Developer Guide. 1. 
**Using the Amazon Kinesis Client Library** — The best way to get familiar with the Amazon Kinesis Client Library is to read [Developing Record Consumer Applications][kinesis-guide-applications] in the Amazon Kinesis Developer Guide. ## Building from Source @@ -31,46 +23,146 @@ Please open an issue if you have any questions. After you've downloaded the code from GitHub, you can build it using Maven. To disable GPG signing in the build, use this command: `mvn clean install -Dgpg.skip=true` ## Integration with the Kinesis Producer Library -For producer-side developers using the **[Kinesis Producer Library (KPL)][kinesis-guide-kpl]**, the KCL integrates without additional effort. When the KCL retrieves an aggregated Amazon Kinesis record consisting of multiple KPL user records, it will automatically invoke the KPL to extract the individual user records before returning them to the user. +For producer-side developers using the **[Kinesis Producer Library (KPL)][kinesis-guide-kpl]**, the KCL integrates without additional effort.  When the KCL retrieves an aggregated Amazon Kinesis record consisting of multiple KPL user records, it will automatically invoke the KPL to extract the individual user records before returning them to the user. ## Amazon KCL support for other languages To make it easier for developers to write record processors in other languages, we have implemented a Java based daemon, called MultiLangDaemon that does all the heavy lifting. Our approach has the daemon spawn a sub-process, which in turn runs the record processor, which can be written in any language. The MultiLangDaemon process and the record processor sub-process communicate with each other over [STDIN and STDOUT using a defined protocol][multi-lang-protocol]. There will be a one to one correspondence amongst record processors, child processes, and shards. 
For Python developers specifically, we have abstracted these implementation details away and [expose an interface][kclpy] that enables you to focus on writing record processing logic in Python. This approach enables KCL to be language agnostic, while providing identical features and similar parallel processing model across all languages. -## Using the KCL -The recommended way to use the KCL for Java is to consume it from Maven. - -### Version 2.x - ``` xml - - software.amazon.kinesis - amazon-kinesis-client - 2.0.4 - - ``` - -### Version 1.x -[Version 1.x tracking branch](https://github.com/awslabs/amazon-kinesis-client/tree/v1.x) - ``` xml - - com.amazonaws - amazon-kinesis-client - 1.9.2 - - ``` - - ## Release Notes +### Release 1.7.4 (February 27, 2017) +* Fixed an issue building JavaDoc for Java 8. + * [Issue #18](https://github.com/awslabs/amazon-kinesis-client/issues/18) + * [PR #141](https://github.com/awslabs/amazon-kinesis-client/pull/141) +* Reduce Throttling Messages to WARN, unless throttling occurs 6 times consecutively. + * [Issue #4](https://github.com/awslabs/amazon-kinesis-client/issues/4) + * [PR #140](https://github.com/awslabs/amazon-kinesis-client/pull/140) +* Fixed two bugs occurring in requestShutdown. + * Fixed a bug that prevented the worker from shutting down, via requestShutdown, when no leases were held. + * [Issue #128](https://github.com/awslabs/amazon-kinesis-client/issues/128) + * Fixed a bug that could trigger a NullPointerException if leases changed during requestShutdown. + * [Issue #129](https://github.com/awslabs/amazon-kinesis-client/issues/129) + * [PR #139](https://github.com/awslabs/amazon-kinesis-client/pull/139) +* Upgraded the AWS SDK Version to 1.11.91 + * [PR #138](https://github.com/awslabs/amazon-kinesis-client/pull/138) +* Use an executor returned from `ExecutorService.newFixedThreadPool` instead of constructing it by hand. 
+ * [PR #135](https://github.com/awslabs/amazon-kinesis-client/pull/135) +* Correctly initialize DynamoDB client, when endpoint is explicitly set. + * [PR #142](https://github.com/awslabs/amazon-kinesis-client/pull/142) -### Latest Release (2.0.5 - November 12, 2018) -[Milestone #26](https://github.com/awslabs/amazon-kinesis-client/milestone/26?closed=1) -* Fixed a deadlock condition that could occur when using the polling model. - It was possible to hit a deadlock in the retrieval of records When using the `PollingConfig` and a slow running record processor. - * [PR #462](https://github.com/awslabs/amazon-kinesis-client/pull/462) - * [Issue #448](https://github.com/awslabs/amazon-kinesis-client/issues/448) -* Adjusted `RetrievalConfig`, and `FanOutConfig` to use accessors instead of direct member access. - * [PR #453](https://github.com/awslabs/amazon-kinesis-client/pull/453) +### Release 1.7.3 (January 9, 2017) +* Upgrade to the newest AWS Java SDK. + * [Amazon Kinesis Client Issue #27](https://github.com/awslabs/amazon-kinesis-client-python/issues/27) + * [PR #126](https://github.com/awslabs/amazon-kinesis-client/pull/126) + * [PR #125](https://github.com/awslabs/amazon-kinesis-client/pull/125) +* Added a direct dependency on commons-logging. + * [Issue #123](https://github.com/awslabs/amazon-kinesis-client/issues/123) + * [PR #124](https://github.com/awslabs/amazon-kinesis-client/pull/124) +* Make ShardInfo public to allow for custom ShardPrioritization strategies. + * [Issue #120](https://github.com/awslabs/amazon-kinesis-client/issues/120) + * [PR #127](https://github.com/awslabs/amazon-kinesis-client/pull/127) -### For remaining release notes check **[CHANGELOG.md][changelog-md]**. +### Release 1.7.2 (November 7, 2016) +* MultiLangDaemon Feature Updates + The MultiLangDaemon has been upgraded to use the v2 interfaces, which allows access to enhanced checkpointing, and more information during record processor initialization. 
The MultiLangDaemon clients must be updated before they can take advantage of these new features. + +### Release 1.7.1 (November 3, 2016) +* General + * Allow disabling shard synchronization at startup. + * Applications can disable shard synchronization at startup. Disabling shard synchronization can application startup times for very large streams. + * [PR #102](https://github.com/awslabs/amazon-kinesis-client/pull/102) + * Applications can now request a graceful shutdown, and record processors that implement the IShutdownNotificationAware will be given a chance to checkpoint before being shutdown. + * This adds a [new interface](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IShutdownNotificationAware.java), and a [new method on Worker](https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/Worker.java#L539). + * [PR #109](https://github.com/awslabs/amazon-kinesis-client/pull/109) + * Solves [Issue #79](https://github.com/awslabs/amazon-kinesis-client/issues/79) +* MultiLangDaemon + * Applications can now use credential provides that accept string parameters. + * [PR #99](https://github.com/awslabs/amazon-kinesis-client/pull/99) + * Applications can now use different credentials for each service. + * [PR #111](https://github.com/awslabs/amazon-kinesis-client/pull/111) + +### Release 1.7.0 (August 22, 2016) +* Add support for time based iterators ([See GetShardIterator Documentation](http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html)) + * [PR #94](https://github.com/awslabs/amazon-kinesis-client/pull/94) + The `KinesisClientLibConfiguration` now supports providing an initial time stamp position. + * This position is only used if there is no current checkpoint for the shard. 
+ * This setting cannot be used with DynamoDB Streams + Resolves [Issue #88](https://github.com/awslabs/amazon-kinesis-client/issues/88) +* Allow Prioritization of Parent Shards for Task Assignment + * [PR #95](https://github.com/awslabs/amazon-kinesis-client/pull/95) + The `KinesisClientLibconfiguration` now supports providing a `ShardPrioritization` strategy. This strategy controls how the `Worker` determines which `ShardConsumer` to call next. This can improve processing for streams that split often, such as DynamoDB Streams. +* Remove direct dependency on `aws-java-sdk-core`, to allow independent versioning. + * [PR #92](https://github.com/awslabs/amazon-kinesis-client/pull/92) + **You may need to add a direct dependency on aws-java-sdk-core if other dependencies include an older version.** + +### Release 1.6.5 (July 25, 2016) +* Change LeaseManager to call DescribeTable before attempting to create the lease table. + * [Issue #36](https://github.com/awslabs/amazon-kinesis-client/issues/36) + * [PR #41](https://github.com/awslabs/amazon-kinesis-client/pull/41) + * [PR #67](https://github.com/awslabs/amazon-kinesis-client/pull/67) +* Allow DynamoDB lease table name to be specified + * [PR #61](https://github.com/awslabs/amazon-kinesis-client/pull/61) +* Add approximateArrivalTimestamp for JsonFriendlyRecord + * [PR #86](https://github.com/awslabs/amazon-kinesis-client/pull/86) +* Shutdown lease renewal thread pool on exit. + * [PR #84](https://github.com/awslabs/amazon-kinesis-client/pull/84) +* Wait for CloudWatch publishing thread to finish before exiting. + * [PR #82](https://github.com/awslabs/amazon-kinesis-client/pull/82) +* Added unit, and integration tests for the library. 
+ +### Release 1.6.4 (July 6, 2016) +* Upgrade to AWS SDK for Java 1.11.14 + * [Issue #74](https://github.com/awslabs/amazon-kinesis-client/issues/74) + * [Issue #73](https://github.com/awslabs/amazon-kinesis-client/issues/73) +* **Maven Artifact Signing Change** + * Artifacts are now signed by the identity `Amazon Kinesis Tools ` + +### Release 1.6.3 (May 12, 2016) +* Fix format exception caused by DEBUG log in LeaseTaker [Issue # 68](https://github.com/awslabs/amazon-kinesis-client/issues/68) + +### Release 1.6.2 (March 23, 2016) +* Support for specifying max leases per worker and max leases to steal at a time. +* Support for specifying initial DynamoDB table read and write capacity. +* Support for parallel lease renewal. +* Support for graceful worker shutdown. +* Change DefaultCWMetricsPublisher log level to debug. [PR # 49](https://github.com/awslabs/amazon-kinesis-client/pull/49) +* Avoid NPE in MLD record processor shutdown if record processor was not initialized. [Issue # 29](https://github.com/awslabs/amazon-kinesis-client/issues/29) + +### Release 1.6.1 (September 23, 2015) +* Expose [approximateArrivalTimestamp](http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) for Records in processRecords API call. + +### Release 1.6.0 (July 31, 2015) +* Restores compatibility with [dynamodb-streams-kinesis-adapter](https://github.com/awslabs/dynamodb-streams-kinesis-adapter) (which was broken in 1.4.0). + +### Release 1.5.1 (July 20, 2015) +* KCL maven artifact 1.5.0 does not work with JDK 7. This release addresses this issue. + +### Release 1.5.0 (July 9, 2015) +* **[Metrics Enhancements][kinesis-guide-monitoring-with-kcl]** + * Support metrics level and dimension configurations to control CloudWatch metrics emitted by the KCL. + * Add new metrics that track time spent in record processor methods. + * Disable WorkerIdentifier dimension by default. +* **Exception Reporting** — Do not silently ignore exceptions in ShardConsumer. 
+* **AWS SDK Component Dependencies** — Depend only on AWS SDK components that are used. + +### Release 1.4.0 (June 2, 2015) +* Integration with the **[Kinesis Producer Library (KPL)][kinesis-guide-kpl]** + * Automatically de-aggregate records put into the Kinesis stream using the KPL. + * Support checkpointing at the individual user record level when multiple user records are aggregated into one Kinesis record using the KPL. + + See [Consumer De-aggregation with the KCL][kinesis-guide-consumer-deaggregation] for details. + +### Release 1.3.0 (May 22, 2015) +* A new metric called "MillisBehindLatest", which tracks how far consumers are from real time, is now uploaded to CloudWatch. + +### Release 1.2.1 (January 26, 2015) +* **MultiLangDaemon** — Changes to the MultiLangDaemon to make it easier to provide a custom worker. + +### Release 1.2 (October 21, 2014) +* **Multi-Language Support** — Amazon KCL now supports implementing record processors in any language by communicating with the daemon over [STDIN and STDOUT][multi-lang-protocol]. Python developers can directly use the [Amazon Kinesis Client Library for Python][kclpy] to write their data processing applications. + +### Release 1.1 (June 30, 2014) +* **Checkpointing at a specific sequence number** — The IRecordProcessorCheckpointer interface now supports checkpointing at a sequence number specified by the record processor. +* **Set region** — KinesisClientLibConfiguration now supports setting the region name to indicate the location of the Amazon Kinesis service. The Amazon DynamoDB table and Amazon CloudWatch metrics associated with your application will also use this region setting. [kinesis]: http://aws.amazon.com/kinesis [kinesis-forum]: http://developer.amazonwebservices.com/connect/forum.jspa?forumID=169 @@ -85,5 +177,4 @@ The recommended way to use the KCL for Java is to consume it from Maven. 
[kinesis-guide-consumer-deaggregation]: http://docs.aws.amazon.com//kinesis/latest/dev/kinesis-kpl-consumer-deaggregation.html [kclpy]: https://github.com/awslabs/amazon-kinesis-client-python [multi-lang-protocol]: https://github.com/awslabs/amazon-kinesis-client/blob/master/src/main/java/com/amazonaws/services/kinesis/multilang/package-info.java -[changelog-md]: https://github.com/awslabs/amazon-kinesis-client/blob/master/CHANGELOG.md -[migration-guide]: https://docs.aws.amazon.com/streams/latest/dev/kcl-migration.html + diff --git a/amazon-kinesis-client-multilang/pom.xml b/amazon-kinesis-client-multilang/pom.xml deleted file mode 100644 index 8897a8b6..00000000 --- a/amazon-kinesis-client-multilang/pom.xml +++ /dev/null @@ -1,130 +0,0 @@ - - - - - amazon-kinesis-client-pom - software.amazon.kinesis - 2.0.5 - - 4.0.0 - - amazon-kinesis-client-multilang - - - - software.amazon.kinesis - amazon-kinesis-client - ${project.version} - - - - org.projectlombok - lombok - 1.16.20 - provided - - - - ch.qos.logback - logback-classic - 1.1.7 - - - - - junit - junit - 4.11 - test - - - - org.mockito - mockito-all - 1.10.19 - test - - - - org.hamcrest - hamcrest-all - 1.3 - test - - - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.2 - - 1.8 - 1.8 - UTF-8 - - - - - - - org.apache.maven.plugins - maven-javadoc-plugin - 2.10.3 - - - attach-javadocs - - jar - - - - - - org.apache.maven.plugins - maven-source-plugin - 3.0.1 - - - attach-sources - - jar - - - - - - - - - - disable-java8-doclint - - [1.8,) - - - -Xdoclint:none - - - - - - diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangRecordProcessorFactory.java b/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangRecordProcessorFactory.java deleted file mode 100644 index 734e6364..00000000 --- 
a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangRecordProcessorFactory.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package com.amazonaws.services.kinesis.multilang; - -import java.util.concurrent.ExecutorService; - -import com.fasterxml.jackson.databind.ObjectMapper; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; -import software.amazon.kinesis.processor.ShardRecordProcessor; - -/** - * Creates {@link MultiLangShardRecordProcessor}'s. - */ -@Slf4j -public class MultiLangRecordProcessorFactory implements ShardRecordProcessorFactory { - private static final String COMMAND_DELIMETER_REGEX = " +"; - - private final String command; - private final String[] commandArray; - - private final ObjectMapper objectMapper; - - private final ExecutorService executorService; - - private final KinesisClientLibConfiguration configuration; - - /** - * @param command The command that will do processing for this factory's record processors. - * @param executorService An executor service to use while processing inputs and outputs of the child process. 
- */ - public MultiLangRecordProcessorFactory(String command, ExecutorService executorService, - KinesisClientLibConfiguration configuration) { - this(command, executorService, new ObjectMapper(), configuration); - } - - /** - * @param command The command that will do processing for this factory's record processors. - * @param executorService An executor service to use while processing inputs and outputs of the child process. - * @param objectMapper An object mapper used to convert messages to json to be written to the child process - */ - public MultiLangRecordProcessorFactory(String command, ExecutorService executorService, ObjectMapper objectMapper, - KinesisClientLibConfiguration configuration) { - this.command = command; - this.commandArray = command.split(COMMAND_DELIMETER_REGEX); - this.executorService = executorService; - this.objectMapper = objectMapper; - this.configuration = configuration; - } - - @Override - public ShardRecordProcessor shardRecordProcessor() { - log.debug("Creating new record processor for client executable: {}", command); - /* - * Giving ProcessBuilder the command as an array of Strings allows users to specify command line arguments. - */ - return new MultiLangShardRecordProcessor(new ProcessBuilder(commandArray), executorService, this.objectMapper, - this.configuration); - } - - String[] getCommandArray() { - return commandArray; - } -} diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/BooleanPropertyValueDecoder.java b/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/BooleanPropertyValueDecoder.java deleted file mode 100644 index e57413dd..00000000 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/BooleanPropertyValueDecoder.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package com.amazonaws.services.kinesis.multilang.config; - -import java.util.Arrays; -import java.util.List; - -/** - * Provide boolean property. - */ -class BooleanPropertyValueDecoder implements IPropertyValueDecoder { - - /** - * Constructor. - */ - BooleanPropertyValueDecoder() { - } - - /** - * @param value property value as String - * @return corresponding variable in correct type - */ - @Override - public Boolean decodeValue(String value) { - return Boolean.parseBoolean(value); - } - - /** - * @return list of supported types - */ - @Override - public List> getSupportedTypes() { - return Arrays.asList(boolean.class, Boolean.class); - } - -} diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/InitializeMessage.java b/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/InitializeMessage.java deleted file mode 100644 index 4774e59a..00000000 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/InitializeMessage.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package com.amazonaws.services.kinesis.multilang.messages; - -import lombok.Getter; -import lombok.Setter; -import software.amazon.kinesis.lifecycle.events.InitializationInput; - -/** - * An initialize message is sent to the client's subprocess to indicate that it should perform its initialization steps. - */ -@Getter -@Setter -public class InitializeMessage extends Message { - /** - * The name used for the action field in {@link Message}. - */ - public static final String ACTION = "initialize"; - - /** - * The shard id that this processor is getting initialized for. - */ - private String shardId; - private String sequenceNumber; - private Long subSequenceNumber; - - /** - * Default constructor. - */ - public InitializeMessage() { - } - - /** - * Convenience constructor. 
- * - * @param initializationInput {@link InitializationInput} - */ - public InitializeMessage(InitializationInput initializationInput) { - this.shardId = initializationInput.shardId(); - if (initializationInput.extendedSequenceNumber() != null) { - this.sequenceNumber = initializationInput.extendedSequenceNumber().sequenceNumber(); - this.subSequenceNumber = initializationInput.extendedSequenceNumber().subSequenceNumber(); - } else { - this.sequenceNumber = null; - this.subSequenceNumber = null; - } - } - -} diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/JsonFriendlyRecord.java b/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/JsonFriendlyRecord.java deleted file mode 100644 index 5d4b0031..00000000 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/JsonFriendlyRecord.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package com.amazonaws.services.kinesis.multilang.messages; - -import java.time.Instant; - -import com.fasterxml.jackson.annotation.JsonProperty; - -import lombok.AllArgsConstructor; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.NoArgsConstructor; -import lombok.NonNull; -import lombok.Setter; -import lombok.ToString; -import lombok.experimental.Accessors; -import software.amazon.kinesis.retrieval.KinesisClientRecord; - -/** - * Class for encoding Record objects to json. Needed because Records have byte buffers for their data field which causes - * problems for the json library we're using. - */ -@NoArgsConstructor -@AllArgsConstructor -@Getter -@Setter -@EqualsAndHashCode -@ToString -public class JsonFriendlyRecord { - private byte[] data; - private String partitionKey; - private String sequenceNumber; - private Instant approximateArrivalTimestamp; - private Long subSequenceNumber; - - public static String ACTION = "record"; - - public static JsonFriendlyRecord fromKinesisClientRecord(@NonNull final KinesisClientRecord record) { - byte[] data = record.data() == null ? null : record.data().array(); - return new JsonFriendlyRecord(data, record.partitionKey(), record.sequenceNumber(), - record.approximateArrivalTimestamp(), record.subSequenceNumber()); - } - - @JsonProperty - public String getAction() { - return ACTION; - } -} diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ShutdownRequestedMessage.java b/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ShutdownRequestedMessage.java deleted file mode 100644 index 941a8f7e..00000000 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ShutdownRequestedMessage.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package com.amazonaws.services.kinesis.multilang.messages; - -import lombok.NoArgsConstructor; - -/** - * A message to indicate to the client's process that shutdown is requested. - */ -@NoArgsConstructor -public class ShutdownRequestedMessage extends Message { - /** - * The name used for the action field in {@link Message}. - */ - public static final String ACTION = "shutdownRequested"; -} diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java deleted file mode 100644 index 11413e44..00000000 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java +++ /dev/null @@ -1,1384 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.coordinator; - -import java.util.Date; -import java.util.Optional; -import java.util.Set; - -import org.apache.commons.lang3.Validate; - -import com.google.common.collect.ImmutableSet; - -import lombok.Getter; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.leases.NoOpShardPrioritization; -import software.amazon.kinesis.leases.ShardPrioritization; -import software.amazon.kinesis.lifecycle.ProcessTask; -import software.amazon.kinesis.lifecycle.ShardConsumer; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.MetricsUtil; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.retrieval.DataFetchingStrategy; -import software.amazon.kinesis.retrieval.RecordsFetcherFactory; -import software.amazon.kinesis.retrieval.polling.SimpleRecordsFetcherFactory; - -/** - * Configuration for the Amazon Kinesis Client Library. - */ -public class KinesisClientLibConfiguration { - - private static final long EPSILON_MS = 25; - - /** - * The location in the shard from which the KinesisClientLibrary will start fetching records from - * when the application starts for the first time and there is no checkpoint for the shard. - */ - public static final InitialPositionInStream DEFAULT_INITIAL_POSITION_IN_STREAM = InitialPositionInStream.LATEST; - - /** - * Fail over time in milliseconds. A worker which does not renew it's lease within this time interval - * will be regarded as having problems and it's shards will be assigned to other workers. 
- * For applications that have a large number of shards, this may be set to a higher number to reduce - * the number of DynamoDB IOPS required for tracking leases. - */ - public static final long DEFAULT_FAILOVER_TIME_MILLIS = 10000L; - - /** - * Max records to fetch from Kinesis in a single GetRecords call. - */ - public static final int DEFAULT_MAX_RECORDS = 10000; - - /** - * The default value for how long the {@link ShardConsumer} should sleep if no records are returned from the call to - * {@link com.amazonaws.services.kinesis.AmazonKinesis#getRecords(com.amazonaws.services.kinesis.model.GetRecordsRequest)}. - */ - public static final long DEFAULT_IDLETIME_BETWEEN_READS_MILLIS = 1000L; - - /** - * Don't call processRecords() on the record processor for empty record lists. - */ - public static final boolean DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST = false; - - /** - * Interval in milliseconds between polling to check for parent shard completion. - * Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on - * completion of parent shards). - */ - public static final long DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS = 10000L; - - /** - * Shard sync interval in milliseconds - e.g. wait for this long between shard sync tasks. - */ - public static final long DEFAULT_SHARD_SYNC_INTERVAL_MILLIS = 60000L; - - /** - * Cleanup leases upon shards completion (don't wait until they expire in Kinesis). - * Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by default we try - * to delete the ones we don't need any longer. - */ - public static final boolean DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION = true; - - /** - * Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures). - */ - public static final long DEFAULT_TASK_BACKOFF_TIME_MILLIS = 500L; - - /** - * Buffer metrics for at most this long before publishing to CloudWatch. 
- */ - public static final long DEFAULT_METRICS_BUFFER_TIME_MILLIS = 10000L; - - /** - * Buffer at most this many metrics before publishing to CloudWatch. - */ - public static final int DEFAULT_METRICS_MAX_QUEUE_SIZE = 10000; - - /** - * Metrics level for which to enable CloudWatch metrics. - */ - public static final MetricsLevel DEFAULT_METRICS_LEVEL = MetricsLevel.DETAILED; - - /** - * Metrics dimensions that always will be enabled regardless of the config provided by user. - */ - public static final Set METRICS_ALWAYS_ENABLED_DIMENSIONS = ImmutableSet - .of(MetricsUtil.OPERATION_DIMENSION_NAME); - - /** - * Allowed dimensions for CloudWatch metrics. By default, worker ID dimension will be disabled. - */ - public static final Set DEFAULT_METRICS_ENABLED_DIMENSIONS = ImmutableSet. builder() - .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS).add(MetricsUtil.SHARD_ID_DIMENSION_NAME).build(); - - /** - * Metrics dimensions that signify all possible dimensions. - */ - public static final Set METRICS_DIMENSIONS_ALL = ImmutableSet.of(MetricsScope.METRICS_DIMENSIONS_ALL); - - /** - * User agent set when Amazon Kinesis Client Library makes AWS requests. - */ - public static final String KINESIS_CLIENT_LIB_USER_AGENT = "amazon-kinesis-client-library-java-1.9.0"; - - /** - * KCL will validate client provided sequence numbers with a call to Amazon Kinesis before checkpointing for calls - * to {@link ShardRecordProcessorCheckpointer#checkpoint(String)} by default. - */ - public static final boolean DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING = true; - - /** - * The max number of leases (shards) this worker should process. - * This can be useful to avoid overloading (and thrashing) a worker when a host has resource constraints - * or during deployment. - * NOTE: Setting this to a low value can cause data loss if workers are not able to pick up all shards in the - * stream due to the max limit. 
- */ - public static final int DEFAULT_MAX_LEASES_FOR_WORKER = Integer.MAX_VALUE; - - /** - * Max leases to steal from another worker at one time (for load balancing). - * Setting this to a higher number can allow for faster load convergence (e.g. during deployments, cold starts), - * but can cause higher churn in the system. - */ - public static final int DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME = 1; - - /** - * The Amazon DynamoDB table used for tracking leases will be provisioned with this read capacity. - */ - public static final int DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY = 10; - - /** - * The Amazon DynamoDB table used for tracking leases will be provisioned with this write capacity. - */ - public static final int DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY = 10; - - /** - * The Worker will skip shard sync during initialization if there are one or more leases in the lease table. This - * assumes that the shards and leases are in-sync. This enables customers to choose faster startup times (e.g. - * during incremental deployments of an application). - */ - public static final boolean DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST = false; - - /** - * Default Shard prioritization strategy. - */ - public static final ShardPrioritization DEFAULT_SHARD_PRIORITIZATION = new NoOpShardPrioritization(); - - /** - * The amount of milliseconds to wait before graceful shutdown forcefully terminates. - */ - public static final long DEFAULT_SHUTDOWN_GRACE_MILLIS = 5000L; - - /** - * The size of the thread pool to create for the lease renewer to use. - */ - public static final int DEFAULT_MAX_LEASE_RENEWAL_THREADS = 20; - - /** - * The sleep time between two listShards calls from the proxy when throttled. - */ - public static final long DEFAULT_LIST_SHARDS_BACKOFF_TIME_IN_MILLIS = 1500; - - /** - * The number of times the Proxy will retry listShards call when throttled. 
- */ - public static final int DEFAULT_MAX_LIST_SHARDS_RETRY_ATTEMPTS = 50; - - private String applicationName; - private String tableName; - private String streamName; - private String kinesisEndpoint; - private String dynamoDBEndpoint; - private InitialPositionInStream initialPositionInStream; - private AwsCredentialsProvider kinesisCredentialsProvider; - private AwsCredentialsProvider dynamoDBCredentialsProvider; - private AwsCredentialsProvider cloudWatchCredentialsProvider; - private long failoverTimeMillis; - private String workerIdentifier; - private long shardSyncIntervalMillis; - private int maxRecords; - private long idleTimeBetweenReadsInMillis; - // Enables applications flush/checkpoint (if they have some data "in progress", but don't get new data for while) - private boolean callProcessRecordsEvenForEmptyRecordList; - private long parentShardPollIntervalMillis; - private boolean cleanupLeasesUponShardCompletion; - private boolean ignoreUnexpectedChildShards; - private long taskBackoffTimeMillis; - private long metricsBufferTimeMillis; - private int metricsMaxQueueSize; - private MetricsLevel metricsLevel; - private Set metricsEnabledDimensions; - private boolean validateSequenceNumberBeforeCheckpointing; - private String regionName; - private int maxLeasesForWorker; - private int maxLeasesToStealAtOneTime; - private int initialLeaseTableReadCapacity; - private int initialLeaseTableWriteCapacity; - private InitialPositionInStreamExtended initialPositionInStreamExtended; - // This is useful for optimizing deployments to large fleets working on a stable stream. 
- private boolean skipShardSyncAtWorkerInitializationIfLeasesExist; - private ShardPrioritization shardPrioritization; - private long shutdownGraceMillis; - - @Getter - private Optional timeoutInSeconds = Optional.empty(); - - @Getter - private Optional retryGetRecordsInSeconds = Optional.empty(); - - @Getter - private Optional maxGetRecordsThreadPool = Optional.empty(); - - @Getter - private int maxLeaseRenewalThreads = DEFAULT_MAX_LEASE_RENEWAL_THREADS; - - @Getter - private RecordsFetcherFactory recordsFetcherFactory; - - @Getter - private Optional logWarningForTaskAfterMillis = Optional.empty(); - - @Getter - private long listShardsBackoffTimeInMillis = DEFAULT_LIST_SHARDS_BACKOFF_TIME_IN_MILLIS; - - @Getter - private int maxListShardsRetryAttempts = DEFAULT_MAX_LIST_SHARDS_RETRY_ATTEMPTS; - - /** - * Constructor. - * - * @param applicationName - * Name of the Amazon Kinesis application. - * By default the application name is included in the user agent string used to make AWS requests. This - * can assist with troubleshooting (e.g. distinguish requests made by separate applications). - * @param streamName - * Name of the Kinesis stream - * @param credentialsProvider - * Provides credentials used to sign AWS requests - * @param workerId - * Used to distinguish different workers/processes of a Kinesis application - */ - public KinesisClientLibConfiguration(String applicationName, String streamName, - AwsCredentialsProvider credentialsProvider, String workerId) { - this(applicationName, streamName, credentialsProvider, credentialsProvider, credentialsProvider, workerId); - } - - /** - * Constructor. - * - * @param applicationName - * Name of the Amazon Kinesis application - * By default the application name is included in the user agent string used to make AWS requests. This - * can assist with troubleshooting (e.g. distinguish requests made by separate applications). 
- * @param streamName - * Name of the Kinesis stream - * @param kinesisCredentialsProvider - * Provides credentials used to access Kinesis - * @param dynamoDBCredentialsProvider - * Provides credentials used to access DynamoDB - * @param cloudWatchCredentialsProvider - * Provides credentials used to access CloudWatch - * @param workerId - * Used to distinguish different workers/processes of a Kinesis application - */ - public KinesisClientLibConfiguration(String applicationName, String streamName, - AwsCredentialsProvider kinesisCredentialsProvider, AwsCredentialsProvider dynamoDBCredentialsProvider, - AwsCredentialsProvider cloudWatchCredentialsProvider, String workerId) { - this(applicationName, streamName, null, null, DEFAULT_INITIAL_POSITION_IN_STREAM, kinesisCredentialsProvider, - dynamoDBCredentialsProvider, cloudWatchCredentialsProvider, DEFAULT_FAILOVER_TIME_MILLIS, workerId, - DEFAULT_MAX_RECORDS, DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, - DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS, - DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, - DEFAULT_TASK_BACKOFF_TIME_MILLIS, DEFAULT_METRICS_BUFFER_TIME_MILLIS, DEFAULT_METRICS_MAX_QUEUE_SIZE, - DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, null, DEFAULT_SHUTDOWN_GRACE_MILLIS); - } - - /** - * @param applicationName - * Name of the Kinesis application - * By default the application name is included in the user agent string used to make AWS requests. This - * can assist with troubleshooting (e.g. distinguish requests made by separate applications). - * @param streamName - * Name of the Kinesis stream - * @param kinesisEndpoint - * Kinesis endpoint - * @param initialPositionInStream - * One of LATEST or TRIM_HORIZON. The KinesisClientLibrary will start fetching - * records from that location in the stream when an application starts up for the first time and there - * are no checkpoints. 
If there are checkpoints, then we start from the checkpoint position. - * @param kinesisCredentialsProvider - * Provides credentials used to access Kinesis - * @param dynamoDBCredentialsProvider - * Provides credentials used to access DynamoDB - * @param cloudWatchCredentialsProvider - * Provides credentials used to access CloudWatch - * @param failoverTimeMillis - * Lease duration (leases not renewed within this period will be claimed by others) - * @param workerId - * Used to distinguish different workers/processes of a Kinesis application - * @param maxRecords - * Max records to read per Kinesis getRecords() call - * @param idleTimeBetweenReadsInMillis - * Idle time between calls to fetch data from Kinesis - * @param callProcessRecordsEvenForEmptyRecordList - * Call the IRecordProcessor::processRecords() API even if - * GetRecords returned an empty record list. - * @param parentShardPollIntervalMillis - * Wait for this long between polls to check if parent shards are done - * @param shardSyncIntervalMillis - * Time between tasks to sync leases and Kinesis shards - * @param cleanupTerminatedShardsBeforeExpiry - * Clean up shards we've finished processing (don't wait for expiration - * in Kinesis) - * @param taskBackoffTimeMillis - * Backoff period when tasks encounter an exception - * @param metricsBufferTimeMillis - * Metrics are buffered for at most this long before publishing to CloudWatch - * @param metricsMaxQueueSize - * Max number of metrics to buffer before publishing to CloudWatch - * @param validateSequenceNumberBeforeCheckpointing - * whether KCL should validate client provided sequence numbers - * with a call to Amazon Kinesis before checkpointing for calls to - * {@link ShardRecordProcessorCheckpointer#checkpoint(String)} - * @param regionName - * The region name for the service - * @param shutdownGraceMillis - * The number of milliseconds before graceful shutdown terminates forcefully - */ - // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 26 LINES - 
// CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 26 LINES - public KinesisClientLibConfiguration(String applicationName, String streamName, String kinesisEndpoint, - InitialPositionInStream initialPositionInStream, AwsCredentialsProvider kinesisCredentialsProvider, - AwsCredentialsProvider dynamoDBCredentialsProvider, AwsCredentialsProvider cloudWatchCredentialsProvider, - long failoverTimeMillis, String workerId, int maxRecords, long idleTimeBetweenReadsInMillis, - boolean callProcessRecordsEvenForEmptyRecordList, long parentShardPollIntervalMillis, - long shardSyncIntervalMillis, boolean cleanupTerminatedShardsBeforeExpiry, long taskBackoffTimeMillis, - long metricsBufferTimeMillis, int metricsMaxQueueSize, boolean validateSequenceNumberBeforeCheckpointing, - String regionName, long shutdownGraceMillis) { - this(applicationName, streamName, kinesisEndpoint, null, initialPositionInStream, kinesisCredentialsProvider, - dynamoDBCredentialsProvider, cloudWatchCredentialsProvider, failoverTimeMillis, workerId, maxRecords, - idleTimeBetweenReadsInMillis, callProcessRecordsEvenForEmptyRecordList, parentShardPollIntervalMillis, - shardSyncIntervalMillis, cleanupTerminatedShardsBeforeExpiry, taskBackoffTimeMillis, - metricsBufferTimeMillis, metricsMaxQueueSize, validateSequenceNumberBeforeCheckpointing, regionName, - shutdownGraceMillis); - } - - /** - * @param applicationName - * Name of the Kinesis application - * By default the application name is included in the user agent string used to make AWS requests. This - * can assist with troubleshooting (e.g. distinguish requests made by separate applications). - * @param streamName - * Name of the Kinesis stream - * @param kinesisEndpoint - * Kinesis endpoint - * @param dynamoDBEndpoint - * DynamoDB endpoint - * @param initialPositionInStream - * One of LATEST or TRIM_HORIZON. 
The KinesisClientLibrary will start fetching - * records from that location in the stream when an application starts up for the first time and there - * are no checkpoints. If there are checkpoints, then we start from the checkpoint position. - * @param kinesisCredentialsProvider - * Provides credentials used to access Kinesis - * @param dynamoDBCredentialsProvider - * Provides credentials used to access DynamoDB - * @param cloudWatchCredentialsProvider - * Provides credentials used to access CloudWatch - * @param failoverTimeMillis - * Lease duration (leases not renewed within this period will be claimed by others) - * @param workerId - * Used to distinguish different workers/processes of a Kinesis application - * @param maxRecords - * Max records to read per Kinesis getRecords() call - * @param idleTimeBetweenReadsInMillis - * Idle time between calls to fetch data from Kinesis - * @param callProcessRecordsEvenForEmptyRecordList - * Call the IRecordProcessor::processRecords() API even if - * GetRecords returned an empty record list. 
- * @param parentShardPollIntervalMillis - * Wait for this long between polls to check if parent shards are done - * @param shardSyncIntervalMillis - * Time between tasks to sync leases and Kinesis shards - * @param cleanupTerminatedShardsBeforeExpiry - * Clean up shards we've finished processing (don't wait for expiration - * in Kinesis) - * @param taskBackoffTimeMillis - * Backoff period when tasks encounter an exception - * @param metricsBufferTimeMillis - * Metrics are buffered for at most this long before publishing to CloudWatch - * @param metricsMaxQueueSize - * Max number of metrics to buffer before publishing to CloudWatch - * @param validateSequenceNumberBeforeCheckpointing - * whether KCL should validate client provided sequence numbers - * with a call to Amazon Kinesis before checkpointing for calls to - * {@link ShardRecordProcessorCheckpointer#checkpoint(String)} - * @param regionName - * The region name for the service - */ - // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 26 LINES - // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 26 LINES - public KinesisClientLibConfiguration(String applicationName, String streamName, String kinesisEndpoint, - String dynamoDBEndpoint, InitialPositionInStream initialPositionInStream, - AwsCredentialsProvider kinesisCredentialsProvider, AwsCredentialsProvider dynamoDBCredentialsProvider, - AwsCredentialsProvider cloudWatchCredentialsProvider, long failoverTimeMillis, String workerId, - int maxRecords, long idleTimeBetweenReadsInMillis, boolean callProcessRecordsEvenForEmptyRecordList, - long parentShardPollIntervalMillis, long shardSyncIntervalMillis, - boolean cleanupTerminatedShardsBeforeExpiry, long taskBackoffTimeMillis, long metricsBufferTimeMillis, - int metricsMaxQueueSize, boolean validateSequenceNumberBeforeCheckpointing, String regionName, - long shutdownGraceMillis) { - // Check following values are greater than zero - checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis); - 
checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis); - checkIsValuePositive("ParentShardPollIntervalMillis", parentShardPollIntervalMillis); - checkIsValuePositive("ShardSyncIntervalMillis", shardSyncIntervalMillis); - checkIsValuePositive("MaxRecords", (long) maxRecords); - checkIsValuePositive("TaskBackoffTimeMillis", taskBackoffTimeMillis); - checkIsValuePositive("MetricsBufferTimeMills", metricsBufferTimeMillis); - checkIsValuePositive("MetricsMaxQueueSize", (long) metricsMaxQueueSize); - checkIsValuePositive("ShutdownGraceMillis", shutdownGraceMillis); - checkIsRegionNameValid(regionName); - this.applicationName = applicationName; - this.tableName = applicationName; - this.streamName = streamName; - this.kinesisEndpoint = kinesisEndpoint; - this.dynamoDBEndpoint = dynamoDBEndpoint; - this.initialPositionInStream = initialPositionInStream; - this.failoverTimeMillis = failoverTimeMillis; - this.maxRecords = maxRecords; - this.idleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis; - this.callProcessRecordsEvenForEmptyRecordList = callProcessRecordsEvenForEmptyRecordList; - this.parentShardPollIntervalMillis = parentShardPollIntervalMillis; - this.shardSyncIntervalMillis = shardSyncIntervalMillis; - this.cleanupLeasesUponShardCompletion = cleanupTerminatedShardsBeforeExpiry; - this.workerIdentifier = workerId; - this.taskBackoffTimeMillis = taskBackoffTimeMillis; - this.metricsBufferTimeMillis = metricsBufferTimeMillis; - this.metricsMaxQueueSize = metricsMaxQueueSize; - this.metricsLevel = DEFAULT_METRICS_LEVEL; - this.metricsEnabledDimensions = DEFAULT_METRICS_ENABLED_DIMENSIONS; - this.validateSequenceNumberBeforeCheckpointing = validateSequenceNumberBeforeCheckpointing; - this.regionName = regionName; - this.maxLeasesForWorker = DEFAULT_MAX_LEASES_FOR_WORKER; - this.maxLeasesToStealAtOneTime = DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME; - this.initialLeaseTableReadCapacity = DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY; - 
this.initialLeaseTableWriteCapacity = DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY; - this.initialPositionInStreamExtended = InitialPositionInStreamExtended - .newInitialPosition(initialPositionInStream); - this.skipShardSyncAtWorkerInitializationIfLeasesExist = DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST; - this.shardPrioritization = DEFAULT_SHARD_PRIORITIZATION; - this.recordsFetcherFactory = new SimpleRecordsFetcherFactory(); - } - - /** - * @param applicationName - * Name of the Kinesis application - * By default the application name is included in the user agent string used to make AWS requests. This - * can assist with troubleshooting (e.g. distinguish requests made by separate applications). - * @param streamName - * Name of the Kinesis stream - * @param kinesisEndpoint - * Kinesis endpoint - * @param dynamoDBEndpoint - * DynamoDB endpoint - * @param initialPositionInStream - * One of LATEST or TRIM_HORIZON. The KinesisClientLibrary will start fetching - * records from that location in the stream when an application starts up for the first time and there - * are no checkpoints. If there are checkpoints, then we start from the checkpoint position. - * @param kinesisCredentialsProvider - * Provides credentials used to access Kinesis - * @param dynamoDBCredentialsProvider - * Provides credentials used to access DynamoDB - * @param cloudWatchCredentialsProvider - * Provides credentials used to access CloudWatch - * @param failoverTimeMillis - * Lease duration (leases not renewed within this period will be claimed by others) - * @param workerId - * Used to distinguish different workers/processes of a Kinesis application - * @param maxRecords - * Max records to read per Kinesis getRecords() call - * @param idleTimeBetweenReadsInMillis - * Idle time between calls to fetch data from Kinesis - * @param callProcessRecordsEvenForEmptyRecordList - * Call the IRecordProcessor::processRecords() API even if - * GetRecords returned an empty record list. 
- * @param parentShardPollIntervalMillis - * Wait for this long between polls to check if parent shards are done - * @param shardSyncIntervalMillis - * Time between tasks to sync leases and Kinesis shards - * @param cleanupTerminatedShardsBeforeExpiry - * Clean up shards we've finished processing (don't wait for expiration - * in Kinesis) - * @param taskBackoffTimeMillis - * Backoff period when tasks encounter an exception - * @param metricsBufferTimeMillis - * Metrics are buffered for at most this long before publishing to CloudWatch - * @param metricsMaxQueueSize - * Max number of metrics to buffer before publishing to CloudWatch - * @param validateSequenceNumberBeforeCheckpointing - * whether KCL should validate client provided sequence numbers - * with a call to Amazon Kinesis before checkpointing for calls to - * {@link ShardRecordProcessorCheckpointer#checkpoint(String)} - * @param regionName - * The region name for the service - */ - // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 26 LINES - // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 26 LINES - public KinesisClientLibConfiguration(String applicationName, String streamName, String kinesisEndpoint, - String dynamoDBEndpoint, InitialPositionInStream initialPositionInStream, - AwsCredentialsProvider kinesisCredentialsProvider, AwsCredentialsProvider dynamoDBCredentialsProvider, - AwsCredentialsProvider cloudWatchCredentialsProvider, long failoverTimeMillis, String workerId, - int maxRecords, long idleTimeBetweenReadsInMillis, boolean callProcessRecordsEvenForEmptyRecordList, - long parentShardPollIntervalMillis, long shardSyncIntervalMillis, - boolean cleanupTerminatedShardsBeforeExpiry, long taskBackoffTimeMillis, long metricsBufferTimeMillis, - int metricsMaxQueueSize, boolean validateSequenceNumberBeforeCheckpointing, String regionName, - RecordsFetcherFactory recordsFetcherFactory) { - // Check following values are greater than zero - checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis); - 
checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis); - checkIsValuePositive("ParentShardPollIntervalMillis", parentShardPollIntervalMillis); - checkIsValuePositive("ShardSyncIntervalMillis", shardSyncIntervalMillis); - checkIsValuePositive("MaxRecords", (long) maxRecords); - checkIsValuePositive("TaskBackoffTimeMillis", taskBackoffTimeMillis); - checkIsValuePositive("MetricsBufferTimeMills", metricsBufferTimeMillis); - checkIsValuePositive("MetricsMaxQueueSize", (long) metricsMaxQueueSize); - checkIsRegionNameValid(regionName); - this.applicationName = applicationName; - this.tableName = applicationName; - this.streamName = streamName; - this.kinesisEndpoint = kinesisEndpoint; - this.dynamoDBEndpoint = dynamoDBEndpoint; - this.initialPositionInStream = initialPositionInStream; - this.kinesisCredentialsProvider = kinesisCredentialsProvider; - this.dynamoDBCredentialsProvider = dynamoDBCredentialsProvider; - this.cloudWatchCredentialsProvider = cloudWatchCredentialsProvider; - this.failoverTimeMillis = failoverTimeMillis; - this.maxRecords = maxRecords; - this.idleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis; - this.callProcessRecordsEvenForEmptyRecordList = callProcessRecordsEvenForEmptyRecordList; - this.parentShardPollIntervalMillis = parentShardPollIntervalMillis; - this.shardSyncIntervalMillis = shardSyncIntervalMillis; - this.cleanupLeasesUponShardCompletion = cleanupTerminatedShardsBeforeExpiry; - this.workerIdentifier = workerId; - this.taskBackoffTimeMillis = taskBackoffTimeMillis; - this.metricsBufferTimeMillis = metricsBufferTimeMillis; - this.metricsMaxQueueSize = metricsMaxQueueSize; - this.metricsLevel = DEFAULT_METRICS_LEVEL; - this.metricsEnabledDimensions = DEFAULT_METRICS_ENABLED_DIMENSIONS; - this.validateSequenceNumberBeforeCheckpointing = validateSequenceNumberBeforeCheckpointing; - this.regionName = regionName; - this.maxLeasesForWorker = DEFAULT_MAX_LEASES_FOR_WORKER; - this.maxLeasesToStealAtOneTime = 
DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME; - this.initialLeaseTableReadCapacity = DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY; - this.initialLeaseTableWriteCapacity = DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY; - this.initialPositionInStreamExtended = InitialPositionInStreamExtended - .newInitialPosition(initialPositionInStream); - this.skipShardSyncAtWorkerInitializationIfLeasesExist = DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST; - this.shardPrioritization = DEFAULT_SHARD_PRIORITIZATION; - this.recordsFetcherFactory = recordsFetcherFactory; - this.shutdownGraceMillis = shutdownGraceMillis; - } - - // Check if value is positive, otherwise throw an exception - private void checkIsValuePositive(String key, long value) { - if (value <= 0) { - throw new IllegalArgumentException( - "Value of " + key + " should be positive, but current value is " + value); - } - } - - private void checkIsRegionNameValid(String regionNameToCheck) { - // - // TODO: Should it come back? - // - // if (regionNameToCheck != null && RegionUtils.getRegion(regionNameToCheck) == null) { - // throw new IllegalArgumentException("The specified region name is not valid"); - // } - } - - /** - * @return Name of the application - */ - public String getApplicationName() { - return applicationName; - } - - /** - * @return Name of the table to use in DynamoDB - */ - public String getTableName() { - return tableName; - } - - /** - * @return Time within which a worker should renew a lease (else it is assumed dead) - */ - public long getFailoverTimeMillis() { - return failoverTimeMillis; - } - - /** - * @return Credentials provider used to access Kinesis - */ - public AwsCredentialsProvider getKinesisCredentialsProvider() { - return kinesisCredentialsProvider; - } - - /** - * @return Credentials provider used to access DynamoDB - */ - public AwsCredentialsProvider getDynamoDBCredentialsProvider() { - return dynamoDBCredentialsProvider; - } - - /** - * @return Credentials provider used to access CloudWatch 
- */ - public AwsCredentialsProvider getCloudWatchCredentialsProvider() { - return cloudWatchCredentialsProvider; - } - - /** - * @return workerIdentifier - */ - public String getWorkerIdentifier() { - return workerIdentifier; - } - - /** - * @return the shardSyncIntervalMillis - */ - public long getShardSyncIntervalMillis() { - return shardSyncIntervalMillis; - } - - /** - * @return Max records to fetch per Kinesis getRecords call - */ - public int getMaxRecords() { - return maxRecords; - } - - /** - * @return Idle time between calls to fetch data from Kinesis - */ - public long getIdleTimeBetweenReadsInMillis() { - return idleTimeBetweenReadsInMillis; - } - - /** - * @return true if processRecords() should be called even for empty record lists - */ - public boolean shouldCallProcessRecordsEvenForEmptyRecordList() { - return callProcessRecordsEvenForEmptyRecordList; - } - - /** - * @return Epsilon milliseconds (used for lease timing margins) - */ - public long getEpsilonMillis() { - return EPSILON_MS; - } - - /** - * @return stream name - */ - public String getStreamName() { - return streamName; - } - - /** - * @return Kinesis endpoint - */ - public String getKinesisEndpoint() { - return kinesisEndpoint; - } - - /** - * @return DynamoDB endpoint - */ - public String getDynamoDBEndpoint() { - return dynamoDBEndpoint; - } - - /** - * @return the initialPositionInStream - */ - public InitialPositionInStream getInitialPositionInStream() { - return initialPositionInStream; - } - - /** - * @return interval between polls for parent shard completion - */ - public long getParentShardPollIntervalMillis() { - return parentShardPollIntervalMillis; - } - - /** - * @return backoff time when tasks encounter exceptions - */ - public long getTaskBackoffTimeMillis() { - return taskBackoffTimeMillis; - } - - /** - * @return Metrics are buffered for at most this long before publishing. 
- */ - public long getMetricsBufferTimeMillis() { - return metricsBufferTimeMillis; - } - - /** - * @return Max number of metrics to buffer before publishing. - */ - public int getMetricsMaxQueueSize() { - return metricsMaxQueueSize; - } - - /** - * @return Metrics level enabled for metrics. - */ - public MetricsLevel getMetricsLevel() { - return metricsLevel; - } - - /** - * @return Enabled dimensions for metrics. - */ - public Set getMetricsEnabledDimensions() { - // Unmodifiable set. - return metricsEnabledDimensions; - } - - /** - * @return true if we should clean up leases of shards after processing is complete (don't wait for expiration) - */ - public boolean shouldCleanupLeasesUponShardCompletion() { - return cleanupLeasesUponShardCompletion; - } - - /** - * @return true if we should ignore child shards which have open parents - */ - public boolean shouldIgnoreUnexpectedChildShards() { - return ignoreUnexpectedChildShards; - } - - /** - * @return true if KCL should validate client provided sequence numbers with a call to Amazon Kinesis before - * checkpointing for calls to {@link ShardRecordProcessorCheckpointer#checkpoint(String)} - */ - public boolean shouldValidateSequenceNumberBeforeCheckpointing() { - return validateSequenceNumberBeforeCheckpointing; - } - - /** - * @return Region for the service - */ - public String getRegionName() { - return regionName; - } - - /** - * @return true if Worker should skip syncing shards and leases at startup if leases are present - */ - public boolean getSkipShardSyncAtWorkerInitializationIfLeasesExist() { - return skipShardSyncAtWorkerInitializationIfLeasesExist; - } - - /** - * @return Max leases this Worker can handle at a time - */ - public int getMaxLeasesForWorker() { - return maxLeasesForWorker; - } - - /** - * @return Max leases to steal at one time (for load balancing) - */ - public int getMaxLeasesToStealAtOneTime() { - return maxLeasesToStealAtOneTime; - } - - /** - * @return Read capacity to provision when 
creating the lease table. - */ - public int getInitialLeaseTableReadCapacity() { - return initialLeaseTableReadCapacity; - } - - /** - * @return Write capacity to provision when creating the lease table. - */ - public int getInitialLeaseTableWriteCapacity() { - return initialLeaseTableWriteCapacity; - } - - /** - * Keeping it protected to forbid outside callers from depending on this internal object. - * - * @return The initialPositionInStreamExtended object. - */ - protected InitialPositionInStreamExtended getInitialPositionInStreamExtended() { - return initialPositionInStreamExtended; - } - - /** - * @return The timestamp from where we need to start the application. - * Valid only for initial position of type AT_TIMESTAMP, returns null for other positions. - */ - public Date getTimestampAtInitialPositionInStream() { - return initialPositionInStreamExtended.getTimestamp(); - } - - /** - * @return Shard prioritization strategy. - */ - public ShardPrioritization getShardPrioritizationStrategy() { - return shardPrioritization; - } - - /** - * @return Graceful shutdown timeout - */ - public long getShutdownGraceMillis() { - return shutdownGraceMillis; - } - - /* - * // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 190 LINES - * /** - * - * @param tableName name of the lease table in DynamoDB - * - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withTableName(String tableName) { - this.tableName = tableName; - return this; - } - - /** - * @param kinesisEndpoint - * Kinesis endpoint - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withKinesisEndpoint(String kinesisEndpoint) { - this.kinesisEndpoint = kinesisEndpoint; - return this; - } - - /** - * @param dynamoDBEndpoint - * DynamoDB endpoint - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withDynamoDBEndpoint(String dynamoDBEndpoint) { - this.dynamoDBEndpoint = dynamoDBEndpoint; - return this; - } - - /** - * 
@param initialPositionInStream - * One of LATEST or TRIM_HORIZON. The Amazon Kinesis Client Library - * will start fetching records from this position when the application starts up if there are no - * checkpoints. - * If there are checkpoints, we will process records from the checkpoint position. - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withInitialPositionInStream(InitialPositionInStream initialPositionInStream) { - this.initialPositionInStream = initialPositionInStream; - this.initialPositionInStreamExtended = InitialPositionInStreamExtended - .newInitialPosition(initialPositionInStream); - return this; - } - - /** - * @param timestamp - * The timestamp to use with the AT_TIMESTAMP value for initialPositionInStream. - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withTimestampAtInitialPositionInStream(Date timestamp) { - this.initialPositionInStream = InitialPositionInStream.AT_TIMESTAMP; - this.initialPositionInStreamExtended = InitialPositionInStreamExtended.newInitialPositionAtTimestamp(timestamp); - return this; - } - - /** - * @param failoverTimeMillis - * Lease duration (leases not renewed within this period will be claimed by others) - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withFailoverTimeMillis(long failoverTimeMillis) { - checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis); - this.failoverTimeMillis = failoverTimeMillis; - return this; - } - - /** - * @param shardSyncIntervalMillis - * Time between tasks to sync leases and Kinesis shards - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withShardSyncIntervalMillis(long shardSyncIntervalMillis) { - checkIsValuePositive("ShardSyncIntervalMillis", shardSyncIntervalMillis); - this.shardSyncIntervalMillis = shardSyncIntervalMillis; - return this; - } - - /** - * @param maxRecords - * Max records to fetch in a Kinesis getRecords() call - 
* @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withMaxRecords(int maxRecords) { - checkIsValuePositive("MaxRecords", (long) maxRecords); - this.maxRecords = maxRecords; - return this; - } - - /** - * Controls how long the KCL will sleep if no records are returned from Kinesis - * - *

- * This value is only used when no records are returned; if records are returned, the {@link ProcessTask} will - * immediately retrieve the next set of records after the call to - * {@link ShardRecordProcessor#processRecords(ProcessRecordsInput)} - * has returned. Setting this value to high may result in the KCL being unable to catch up. If you are changing this - * value it's recommended that you enable {@link #withCallProcessRecordsEvenForEmptyRecordList(boolean)}, and - * monitor how far behind the records retrieved are by inspecting - * {@link ProcessRecordsInput#millisBehindLatest()}, and the - * CloudWatch - * Metric: GetRecords.MillisBehindLatest - *

- * - * @param idleTimeBetweenReadsInMillis - * how long to sleep between GetRecords calls when no records are returned. - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withIdleTimeBetweenReadsInMillis(long idleTimeBetweenReadsInMillis) { - checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis); - this.idleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis; - return this; - } - - /** - * @param callProcessRecordsEvenForEmptyRecordList - * Call the ShardRecordProcessor::processRecords() API even if - * GetRecords returned an empty record list - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withCallProcessRecordsEvenForEmptyRecordList( - boolean callProcessRecordsEvenForEmptyRecordList) { - this.callProcessRecordsEvenForEmptyRecordList = callProcessRecordsEvenForEmptyRecordList; - return this; - } - - /** - * @param parentShardPollIntervalMillis - * Wait for this long between polls to check if parent shards are done - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withParentShardPollIntervalMillis(long parentShardPollIntervalMillis) { - checkIsValuePositive("ParentShardPollIntervalMillis", parentShardPollIntervalMillis); - this.parentShardPollIntervalMillis = parentShardPollIntervalMillis; - return this; - } - - /** - * @param cleanupLeasesUponShardCompletion - * Clean up shards we've finished processing (don't wait for expiration - * in Kinesis) - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withCleanupLeasesUponShardCompletion( - boolean cleanupLeasesUponShardCompletion) { - this.cleanupLeasesUponShardCompletion = cleanupLeasesUponShardCompletion; - return this; - } - - /** - * @param ignoreUnexpectedChildShards - * Ignore child shards with open parents. 
- * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withIgnoreUnexpectedChildShards(boolean ignoreUnexpectedChildShards) { - this.ignoreUnexpectedChildShards = ignoreUnexpectedChildShards; - return this; - } - - /** - * Override the default user agent (application name). - * - * @param userAgent - * User agent to use in AWS requests - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withUserAgent(String userAgent) { - String customizedUserAgent = userAgent + "," + KINESIS_CLIENT_LIB_USER_AGENT; - return this; - } - - /** - * @param taskBackoffTimeMillis - * Backoff period when tasks encounter an exception - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withTaskBackoffTimeMillis(long taskBackoffTimeMillis) { - checkIsValuePositive("TaskBackoffTimeMillis", taskBackoffTimeMillis); - this.taskBackoffTimeMillis = taskBackoffTimeMillis; - return this; - } - - /** - * @param metricsBufferTimeMillis - * Metrics are buffered for at most this long before publishing to CloudWatch - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withMetricsBufferTimeMillis(long metricsBufferTimeMillis) { - checkIsValuePositive("MetricsBufferTimeMillis", metricsBufferTimeMillis); - this.metricsBufferTimeMillis = metricsBufferTimeMillis; - return this; - } - - /** - * @param metricsMaxQueueSize - * Max number of metrics to buffer before publishing to CloudWatch - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withMetricsMaxQueueSize(int metricsMaxQueueSize) { - checkIsValuePositive("MetricsMaxQueueSize", (long) metricsMaxQueueSize); - this.metricsMaxQueueSize = metricsMaxQueueSize; - return this; - } - - /** - * @param metricsLevel - * Metrics level to enable. 
- * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withMetricsLevel(MetricsLevel metricsLevel) { - this.metricsLevel = metricsLevel == null ? DEFAULT_METRICS_LEVEL : metricsLevel; - return this; - } - - /** - * Sets metrics level that should be enabled. Possible values are: - * NONE - * SUMMARY - * DETAILED - * - * @param metricsLevel - * Metrics level to enable. - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withMetricsLevel(String metricsLevel) { - this.metricsLevel = MetricsLevel.fromName(metricsLevel); - return this; - } - - /** - * Sets the dimensions that are allowed to be emitted in metrics. - * - * @param metricsEnabledDimensions - * Set of dimensions that are allowed. - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withMetricsEnabledDimensions(Set metricsEnabledDimensions) { - if (metricsEnabledDimensions == null) { - this.metricsEnabledDimensions = METRICS_ALWAYS_ENABLED_DIMENSIONS; - } else if (metricsEnabledDimensions.contains(MetricsScope.METRICS_DIMENSIONS_ALL)) { - this.metricsEnabledDimensions = METRICS_DIMENSIONS_ALL; - } else { - this.metricsEnabledDimensions = ImmutableSet. builder().addAll(metricsEnabledDimensions) - .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS).build(); - } - return this; - } - - /** - * - * @param validateSequenceNumberBeforeCheckpointing - * whether KCL should validate client provided sequence numbers - * with a call to Amazon Kinesis before checkpointing for calls to - * {@link ShardRecordProcessorCheckpointer#checkpoint(String)}. 
- * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withValidateSequenceNumberBeforeCheckpointing( - boolean validateSequenceNumberBeforeCheckpointing) { - this.validateSequenceNumberBeforeCheckpointing = validateSequenceNumberBeforeCheckpointing; - return this; - } - - /** - * If set to true, the Worker will not sync shards and leases during initialization if there are one or more leases - * in the lease table. This assumes that the shards and leases are in-sync. - * This enables customers to choose faster startup times (e.g. during incremental deployments of an application). - * - * @param skipShardSyncAtStartupIfLeasesExist - * Should Worker skip syncing shards and leases at startup (Worker - * initialization). - * @return KinesisClientLibConfiguration - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withSkipShardSyncAtStartupIfLeasesExist( - boolean skipShardSyncAtStartupIfLeasesExist) { - this.skipShardSyncAtWorkerInitializationIfLeasesExist = skipShardSyncAtStartupIfLeasesExist; - return this; - } - - /** - * - * @param regionName - * The region name for the service - * @return KinesisClientLibConfiguration - */ - // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 2 LINES - public KinesisClientLibConfiguration withRegionName(String regionName) { - checkIsRegionNameValid(regionName); - this.regionName = regionName; - return this; - } - - /** - * Worker will not acquire more than the specified max number of leases even if there are more - * shards that need to be processed. This can be used in scenarios where a worker is resource constrained or - * to prevent lease thrashing when small number of workers pick up all leases for small amount of time during - * deployment. - * Note that setting a low value may cause data loss (e.g. if there aren't enough Workers to make progress on all - * shards). 
When setting the value for this property, one must ensure enough workers are present to process - * shards and should consider future resharding, child shards that may be blocked on parent shards, some workers - * becoming unhealthy, etc. - * - * @param maxLeasesForWorker - * Max leases this Worker can handle at a time - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withMaxLeasesForWorker(int maxLeasesForWorker) { - checkIsValuePositive("maxLeasesForWorker", maxLeasesForWorker); - this.maxLeasesForWorker = maxLeasesForWorker; - return this; - } - - /** - * Max leases to steal from a more loaded Worker at one time (for load balancing). - * Setting this to a higher number can allow for faster load convergence (e.g. during deployments, cold starts), - * but can cause higher churn in the system. - * - * @param maxLeasesToStealAtOneTime - * Steal up to this many leases at one time (for load balancing) - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withMaxLeasesToStealAtOneTime(int maxLeasesToStealAtOneTime) { - checkIsValuePositive("maxLeasesToStealAtOneTime", maxLeasesToStealAtOneTime); - this.maxLeasesToStealAtOneTime = maxLeasesToStealAtOneTime; - return this; - } - - /** - * @param initialLeaseTableReadCapacity - * Read capacity to provision when creating the lease table. - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withInitialLeaseTableReadCapacity(int initialLeaseTableReadCapacity) { - checkIsValuePositive("initialLeaseTableReadCapacity", initialLeaseTableReadCapacity); - this.initialLeaseTableReadCapacity = initialLeaseTableReadCapacity; - return this; - } - - /** - * @param initialLeaseTableWriteCapacity - * Write capacity to provision when creating the lease table. 
- * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withInitialLeaseTableWriteCapacity(int initialLeaseTableWriteCapacity) { - checkIsValuePositive("initialLeaseTableWriteCapacity", initialLeaseTableWriteCapacity); - this.initialLeaseTableWriteCapacity = initialLeaseTableWriteCapacity; - return this; - } - - /** - * @param shardPrioritization - * Implementation of ShardPrioritization interface that should be used during processing. - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withShardPrioritizationStrategy(ShardPrioritization shardPrioritization) { - if (shardPrioritization == null) { - throw new IllegalArgumentException("shardPrioritization cannot be null"); - } - this.shardPrioritization = shardPrioritization; - return this; - } - - /** - * Sets the size of the thread pool that will be used to renew leases. - * - * Setting this to low may starve the lease renewal process, and cause the worker to lose leases at a higher rate. - * - * @param maxLeaseRenewalThreads - * the maximum size of the lease renewal thread pool - * @throws IllegalArgumentException - * if maxLeaseRenewalThreads is <= 0 - * @return this configuration object - */ - public KinesisClientLibConfiguration withMaxLeaseRenewalThreads(int maxLeaseRenewalThreads) { - Validate.isTrue(maxLeaseRenewalThreads > 2, - "The maximum number of lease renewal threads must be greater than or equal to 2."); - this.maxLeaseRenewalThreads = maxLeaseRenewalThreads; - - return this; - } - - /** - * @param retryGetRecordsInSeconds - * the time in seconds to wait before the worker retries to get a record. - * @return this configuration object. 
- */ - public KinesisClientLibConfiguration withRetryGetRecordsInSeconds(final int retryGetRecordsInSeconds) { - checkIsValuePositive("retryGetRecordsInSeconds", retryGetRecordsInSeconds); - this.retryGetRecordsInSeconds = Optional.of(retryGetRecordsInSeconds); - return this; - } - - /** - * @param maxGetRecordsThreadPool - * the max number of threads in the getRecords thread pool. - * @return this configuration object - */ - public KinesisClientLibConfiguration withMaxGetRecordsThreadPool(final int maxGetRecordsThreadPool) { - checkIsValuePositive("maxGetRecordsThreadPool", maxGetRecordsThreadPool); - this.maxGetRecordsThreadPool = Optional.of(maxGetRecordsThreadPool); - return this; - } - - /** - * - * @param maxPendingProcessRecordsInput - * The max number of ProcessRecordsInput that can be stored in the cache before - * blocking - * @return this configuration object - */ - public KinesisClientLibConfiguration withMaxPendingProcessRecordsInput(final int maxPendingProcessRecordsInput) { - checkIsValuePositive("maxPendingProcessRecordsInput", maxPendingProcessRecordsInput); - this.recordsFetcherFactory.maxPendingProcessRecordsInput(maxPendingProcessRecordsInput); - return this; - } - - /** - * @param maxCacheByteSize - * Max byte size for the cache at any given point of time. After this threshold is crossed - * the KinesisDataFetcher will be blocked until the cache has more space available. - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withMaxCacheByteSize(final int maxCacheByteSize) { - checkIsValuePositive("maxCacheByteSize", maxCacheByteSize); - this.recordsFetcherFactory.maxByteSize(maxCacheByteSize); - return this; - } - - /** - * @param dataFetchingStrategy - * The strategy for fetching data from kinesis. 
- * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withDataFetchingStrategy(String dataFetchingStrategy) { - this.recordsFetcherFactory.dataFetchingStrategy(DataFetchingStrategy.valueOf(dataFetchingStrategy.toUpperCase())); - return this; - } - - /** - * @param maxRecordsCount - * The maximum number of records in the cache, accross all ProcessRecordInput objects - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withMaxRecordsCount(final int maxRecordsCount) { - checkIsValuePositive("maxRecordsCount", maxRecordsCount); - this.recordsFetcherFactory.maxRecordsCount(maxRecordsCount); - return this; - } - - /** - * @param timeoutInSeconds - * The timeout in seconds to wait for the MultiLangProtocol to wait for - */ - public void withTimeoutInSeconds(final int timeoutInSeconds) { - this.timeoutInSeconds = Optional.of(timeoutInSeconds); - } - - /** - * @param shutdownGraceMillis - * Time before gracefully shutdown forcefully terminates - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withShutdownGraceMillis(long shutdownGraceMillis) { - checkIsValuePositive("ShutdownGraceMillis", shutdownGraceMillis); - this.shutdownGraceMillis = shutdownGraceMillis; - return this; - } - - /** - * @param idleMillisBetweenCalls - * Idle time between 2 getcalls from the data fetcher. - * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withIdleMillisBetweenCalls(long idleMillisBetweenCalls) { - checkIsValuePositive("IdleMillisBetweenCalls", idleMillisBetweenCalls); - this.recordsFetcherFactory.idleMillisBetweenCalls(idleMillisBetweenCalls); - return this; - } - - /** - * @param logWarningForTaskAfterMillis - * Logs warn message if as task is held in a task for more than the set - * time. 
- * @return KinesisClientLibConfiguration - */ - public KinesisClientLibConfiguration withLogWarningForTaskAfterMillis(long logWarningForTaskAfterMillis) { - checkIsValuePositive("LogProcessTaskStatusAfterInMillis", logWarningForTaskAfterMillis); - this.logWarningForTaskAfterMillis = Optional.of(logWarningForTaskAfterMillis); - return this; - } - - /** - * @param listShardsBackoffTimeInMillis - * Max sleep between two listShards call when throttled - * in KinesisProxy. - * @return - */ - public KinesisClientLibConfiguration withListShardsBackoffTimeInMillis(long listShardsBackoffTimeInMillis) { - checkIsValuePositive("listShardsBackoffTimeInMillis", listShardsBackoffTimeInMillis); - this.listShardsBackoffTimeInMillis = listShardsBackoffTimeInMillis; - return this; - } - - /** - * @param maxListShardsRetryAttempts - * Max number of retries for listShards when throttled - * in KinesisProxy. - * @return - */ - public KinesisClientLibConfiguration withMaxListShardsRetryAttempts(int maxListShardsRetryAttempts) { - checkIsValuePositive("maxListShardsRetryAttempts", maxListShardsRetryAttempts); - this.maxListShardsRetryAttempts = maxListShardsRetryAttempts; - return this; - } -} diff --git a/amazon-kinesis-client-multilang/src/main/resources/logback.xml b/amazon-kinesis-client-multilang/src/main/resources/logback.xml deleted file mode 100644 index 46b45182..00000000 --- a/amazon-kinesis-client-multilang/src/main/resources/logback.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - %d [%thread] %-5level %logger{36} [%mdc{ShardId:-NONE}] - %msg %n - - - - - - - \ No newline at end of file diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocolTest.java b/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocolTest.java deleted file mode 100644 index 5e51cc05..00000000 --- 
a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocolTest.java +++ /dev/null @@ -1,259 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package com.amazonaws.services.kinesis.multilang; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyInt; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.argThat; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.runners.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; - -import software.amazon.kinesis.exceptions.InvalidStateException; -import 
software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.exceptions.ThrottlingException; -import com.amazonaws.services.kinesis.multilang.messages.CheckpointMessage; -import com.amazonaws.services.kinesis.multilang.messages.Message; -import com.amazonaws.services.kinesis.multilang.messages.ProcessRecordsMessage; -import com.amazonaws.services.kinesis.multilang.messages.StatusMessage; -import com.google.common.util.concurrent.SettableFuture; - -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownReason; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.retrieval.KinesisClientRecord; - -@RunWith(MockitoJUnitRunner.class) -public class MultiLangProtocolTest { - private static final List EMPTY_RECORD_LIST = Collections.emptyList(); - - @Mock - private MultiLangProtocol protocol; - @Mock - private MessageWriter messageWriter; - @Mock - private MessageReader messageReader; - private String shardId; - @Mock - private RecordProcessorCheckpointer checkpointer; - @Mock - private KinesisClientLibConfiguration configuration; - - @Before - public void setup() { - this.shardId = "shard-id-123"; - protocol = new MultiLangProtocolForTesting(messageReader, messageWriter, - InitializationInput.builder().shardId(shardId).build(), configuration); - - when(configuration.getTimeoutInSeconds()).thenReturn(Optional.empty()); - } - - private Future buildFuture(T value) { - SettableFuture future = SettableFuture.create(); - future.set(value); - return future; - } - - private Future buildFuture(T value, Class clazz) { - SettableFuture future = SettableFuture.create(); - future.set(value); - return future; - } - - @Test - 
public void initializeTest() throws InterruptedException, ExecutionException { - when(messageWriter - .writeInitializeMessage(argThat(Matchers.withInit(InitializationInput.builder() - .shardId(shardId).build())))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("initialize"), Message.class)); - assertThat(protocol.initialize(), equalTo(true)); - } - - @Test - public void processRecordsTest() throws InterruptedException, ExecutionException { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("processRecords"), Message.class)); - - assertThat(protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build()), - equalTo(true)); - } - - @Test - public void shutdownTest() throws InterruptedException, ExecutionException { - when(messageWriter.writeShutdownMessage(any(ShutdownReason.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("shutdown"), Message.class)); - - Mockito.doReturn(buildFuture(true)).when(messageWriter) - .writeShutdownMessage(any(ShutdownReason.class)); - Mockito.doReturn(buildFuture(new StatusMessage("shutdown"))) - .when(messageReader).getNextMessageFromSTDOUT(); - assertThat(protocol.shutdown(null, ShutdownReason.LEASE_LOST), equalTo(true)); - } - - @Test - public void shutdownRequestedTest() { - when(messageWriter.writeShutdownRequestedMessage()).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("shutdownRequested"), Message.class)); - Mockito.doReturn(buildFuture(true)).when(messageWriter) - .writeShutdownRequestedMessage(); - Mockito.doReturn(buildFuture(new StatusMessage("shutdownRequested"))) - .when(messageReader).getNextMessageFromSTDOUT(); - 
assertThat(protocol.shutdownRequested(null), equalTo(true)); - } - - private Answer> buildMessageAnswers(List messages) { - return new Answer>() { - - Iterator messageIterator; - Message message; - - Answer> init(List messages) { - messageIterator = messages.iterator(); - return this; - } - - @Override - public Future answer(InvocationOnMock invocation) throws Throwable { - if (this.messageIterator.hasNext()) { - message = this.messageIterator.next(); - } - return buildFuture(message); - } - - }.init(messages); - } - - @Test - public void processRecordsWithCheckpointsTest() throws InterruptedException, ExecutionException, - KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { - - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenAnswer(buildMessageAnswers(new ArrayList() { - { - this.add(new CheckpointMessage("123", 0L, null)); - this.add(new CheckpointMessage(null, 0L, null)); - /* - * This procesRecords message will be ignored by the read loop which only cares about status and - * checkpoint messages. All other lines and message types are ignored. By inserting it here, we check - * that this test succeeds even with unexpected messaging. 
- */ - this.add(new ProcessRecordsMessage()); - this.add(new StatusMessage("processRecords")); - } - })); - - boolean result = protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST) - .checkpointer(checkpointer).build()); - - assertThat(result, equalTo(true)); - - verify(checkpointer, timeout(1)).checkpoint(); - verify(checkpointer, timeout(1)).checkpoint("123", 0L); - } - - @Test - public void processRecordsWithABadCheckpointTest() throws InterruptedException, ExecutionException { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(buildFuture(false)); - when(messageReader.getNextMessageFromSTDOUT()).thenAnswer(buildMessageAnswers(new ArrayList() { - { - this.add(new CheckpointMessage("456", 0L, null)); - this.add(new StatusMessage("processRecords")); - } - })); - assertThat(protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST) - .checkpointer(checkpointer).build()), equalTo(false)); - } - - @Test(expected = NullPointerException.class) - public void waitForStatusMessageTimeoutTest() throws InterruptedException, TimeoutException, ExecutionException { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - Future future = Mockito.mock(Future.class); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(future); - when(configuration.getTimeoutInSeconds()).thenReturn(Optional.of(5)); - when(future.get(anyInt(), eq(TimeUnit.SECONDS))).thenThrow(TimeoutException.class); - protocol = new MultiLangProtocolForTesting(messageReader, - messageWriter, - InitializationInput.builder().shardId(shardId).build(), - configuration); - - protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build()); - } - - @Test - public void waitForStatusMessageSuccessTest() { - 
when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("processRecords"), Message.class)); - when(configuration.getTimeoutInSeconds()).thenReturn(Optional.of(5)); - - assertTrue(protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build())); - } - - private class MultiLangProtocolForTesting extends MultiLangProtocol { - /** - * Constructor. - * - * @param messageReader A message reader. - * @param messageWriter A message writer. - * @param initializationInput - * @param configuration - */ - MultiLangProtocolForTesting(final MessageReader messageReader, - final MessageWriter messageWriter, - final InitializationInput initializationInput, - final KinesisClientLibConfiguration configuration) { - super(messageReader, messageWriter, initializationInput, configuration); - } - - @Override - protected void haltJvm(final int exitStatus) { - throw new NullPointerException(); - } - } -} diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/StreamingShardRecordProcessorFactoryTest.java b/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/StreamingShardRecordProcessorFactoryTest.java deleted file mode 100644 index 2eba9833..00000000 --- a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/StreamingShardRecordProcessorFactoryTest.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package com.amazonaws.services.kinesis.multilang; - -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; -import org.junit.Assert; -import org.junit.Test; - -import software.amazon.kinesis.processor.ShardRecordProcessor; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -@RunWith(MockitoJUnitRunner.class) -public class StreamingShardRecordProcessorFactoryTest { - - @Mock - private KinesisClientLibConfiguration configuration; - - @Test - public void createProcessorTest() { - MultiLangRecordProcessorFactory factory = new MultiLangRecordProcessorFactory("somecommand", null, configuration); - ShardRecordProcessor processor = factory.shardRecordProcessor(); - - Assert.assertEquals("Should have constructed a StreamingRecordProcessor", MultiLangShardRecordProcessor.class, - processor.getClass()); - } -} diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoderTest.java b/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoderTest.java deleted file mode 100644 index 43b507d9..00000000 --- a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoderTest.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package com.amazonaws.services.kinesis.multilang.config; - -import static org.junit.Assert.assertEquals; - -import org.junit.Test; - -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain; - -public class AWSCredentialsProviderPropertyValueDecoderTest { - - private static final String TEST_ACCESS_KEY_ID = "123"; - private static final String TEST_SECRET_KEY = "456"; - - private String credentialName1 = "com.amazonaws.services.kinesis.multilang.config.AWSCredentialsProviderPropertyValueDecoderTest$AlwaysSucceedCredentialsProvider"; - private String credentialName2 = "com.amazonaws.services.kinesis.multilang.config.AWSCredentialsProviderPropertyValueDecoderTest$ConstructorCredentialsProvider"; - private AWSCredentialsProviderPropertyValueDecoder decoder = new AWSCredentialsProviderPropertyValueDecoder(); - - @Test - public void testSingleProvider() { - AwsCredentialsProvider provider = decoder.decodeValue(credentialName1); - assertEquals(provider.getClass(), AwsCredentialsProviderChain.class); - assertEquals(provider.resolveCredentials().accessKeyId(), TEST_ACCESS_KEY_ID); - assertEquals(provider.resolveCredentials().secretAccessKey(), TEST_SECRET_KEY); - } - - @Test - public void testTwoProviders() { - AwsCredentialsProvider provider = decoder.decodeValue(credentialName1 + "," + credentialName1); - assertEquals(provider.getClass(), AwsCredentialsProviderChain.class); - 
assertEquals(provider.resolveCredentials().accessKeyId(), TEST_ACCESS_KEY_ID); - assertEquals(provider.resolveCredentials().secretAccessKey(), TEST_SECRET_KEY); - } - - @Test - public void testProfileProviderWithOneArg() { - AwsCredentialsProvider provider = decoder.decodeValue(credentialName2 + "|arg"); - assertEquals(provider.getClass(), AwsCredentialsProviderChain.class); - assertEquals(provider.resolveCredentials().accessKeyId(), "arg"); - assertEquals(provider.resolveCredentials().secretAccessKey(), "blank"); - } - - @Test - public void testProfileProviderWithTwoArgs() { - AwsCredentialsProvider provider = decoder.decodeValue(credentialName2 + "|arg1|arg2"); - assertEquals(provider.getClass(), AwsCredentialsProviderChain.class); - assertEquals(provider.resolveCredentials().accessKeyId(), "arg1"); - assertEquals(provider.resolveCredentials().secretAccessKey(), "arg2"); - } - - /** - * This credentials provider will always succeed - */ - public static class AlwaysSucceedCredentialsProvider implements AwsCredentialsProvider { - - @Override - public AwsCredentials resolveCredentials() { - return AwsBasicCredentials.create(TEST_ACCESS_KEY_ID, TEST_SECRET_KEY); - } - - } - - /** - * This credentials provider needs a constructor call to instantiate it - */ - public static class ConstructorCredentialsProvider implements AwsCredentialsProvider { - - private String arg1; - private String arg2; - - public ConstructorCredentialsProvider(String arg1) { - this.arg1 = arg1; - this.arg2 = "blank"; - } - - public ConstructorCredentialsProvider(String arg1, String arg2) { - this.arg1 = arg1; - this.arg2 = arg2; - } - - @Override - public AwsCredentials resolveCredentials() { - return AwsBasicCredentials.create(arg1, arg2); - } - - } -} diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/config/DatePropertyValueDecoderTest.java 
b/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/config/DatePropertyValueDecoderTest.java deleted file mode 100644 index 7c5cd8f3..00000000 --- a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/config/DatePropertyValueDecoderTest.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package com.amazonaws.services.kinesis.multilang.config; - -import static org.junit.Assert.assertEquals; - -import java.util.Date; - -import org.junit.Test; - -public class DatePropertyValueDecoderTest { - - private DatePropertyValueDecoder decoder = new DatePropertyValueDecoder(); - - private static final String TEST_VALUE = "1527267472"; - - @Test - public void testNumericValue() { - Date timestamp = decoder.decodeValue(TEST_VALUE); - assertEquals(timestamp.getClass(), Date.class); - assertEquals(timestamp, new Date(Long.parseLong(TEST_VALUE) * 1000L)); - } - - @Test(expected = IllegalArgumentException.class) - public void testEmptyValue() { - Date timestamp = decoder.decodeValue(""); - } - - @Test(expected = IllegalArgumentException.class) - public void testNullValue() { - Date timestamp = decoder.decodeValue(null); - } - - @Test(expected = IllegalArgumentException.class) - public void testNonNumericValue() { - Date timestamp = decoder.decodeValue("123abc"); - } -} diff --git 
a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/config/KinesisClientLibConfiguratorTest.java b/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/config/KinesisClientLibConfiguratorTest.java deleted file mode 100644 index 687d8f40..00000000 --- a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/config/KinesisClientLibConfiguratorTest.java +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package com.amazonaws.services.kinesis.multilang.config; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.ByteArrayInputStream; -import java.io.InputStream; -import java.util.Optional; -import java.util.Set; - -import org.apache.commons.lang3.StringUtils; -import org.junit.Ignore; -import org.junit.Test; - -import com.google.common.collect.ImmutableSet; - -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; -import software.amazon.kinesis.metrics.MetricsLevel; - -public class KinesisClientLibConfiguratorTest { - - private String credentialName1 = "com.amazonaws.services.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProvider"; - private String credentialName2 = "com.amazonaws.services.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysFailCredentialsProvider"; - private String credentialNameKinesis = "com.amazonaws.services.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderKinesis"; - private String credentialNameDynamoDB = "com.amazonaws.services.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderDynamoDB"; - private String credentialNameCloudWatch = "com.amazonaws.services.kinesis.multilang.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderCloudWatch"; - private KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator(); - - @Test - public void testWithBasicSetup() { - KinesisClientLibConfiguration config = 
getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = " + credentialName1, "workerId = 123" }, '\n')); - assertEquals(config.getApplicationName(), "b"); - assertEquals(config.getStreamName(), "a"); - assertEquals(config.getWorkerIdentifier(), "123"); - assertEquals(config.getMaxGetRecordsThreadPool(), Optional.empty()); - assertEquals(config.getRetryGetRecordsInSeconds(), Optional.empty()); - } - - @Test - public void testWithLongVariables() { - KinesisClientLibConfiguration config = getConfiguration(StringUtils.join(new String[] { "applicationName = app", - "streamName = 123", "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, - "workerId = 123", "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, '\n')); - - assertEquals(config.getApplicationName(), "app"); - assertEquals(config.getStreamName(), "123"); - assertEquals(config.getWorkerIdentifier(), "123"); - assertEquals(config.getFailoverTimeMillis(), 100); - assertEquals(config.getShardSyncIntervalMillis(), 500); - } - - @Test - public void testWithUnsupportedClientConfigurationVariables() { - KinesisClientLibConfiguration config = getConfiguration(StringUtils.join( - new String[] { "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, "workerId = id", - "kinesisClientConfig = {}", "streamName = stream", "applicationName = b" }, - '\n')); - - assertEquals(config.getApplicationName(), "b"); - assertEquals(config.getStreamName(), "stream"); - assertEquals(config.getWorkerIdentifier(), "id"); - // by setting the configuration there is no effect on kinesisClientConfiguration variable. 
- } - - @Test - public void testWithIntVariables() { - KinesisClientLibConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = kinesis", - "AWSCredentialsProvider = " + credentialName2 + ", " + credentialName1, "workerId = w123", - "maxRecords = 10", "metricsMaxQueueSize = 20", "applicationName = kinesis", - "retryGetRecordsInSeconds = 2", "maxGetRecordsThreadPool = 1" }, '\n')); - - assertEquals(config.getApplicationName(), "kinesis"); - assertEquals(config.getStreamName(), "kinesis"); - assertEquals(config.getWorkerIdentifier(), "w123"); - assertEquals(config.getMaxRecords(), 10); - assertEquals(config.getMetricsMaxQueueSize(), 20); - assertEquals(config.getRetryGetRecordsInSeconds(), Optional.of(2)); - assertEquals(config.getMaxGetRecordsThreadPool(), Optional.of(1)); - } - - @Test - public void testWithBooleanVariables() { - KinesisClientLibConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD, " + credentialName1, "workerId = 0", - "cleanupLeasesUponShardCompletion = false", "validateSequenceNumberBeforeCheckpointing = true" }, - '\n')); - - assertEquals(config.getApplicationName(), "b"); - assertEquals(config.getStreamName(), "a"); - assertEquals(config.getWorkerIdentifier(), "0"); - assertFalse(config.shouldCleanupLeasesUponShardCompletion()); - assertTrue(config.shouldValidateSequenceNumberBeforeCheckpointing()); - } - - @Test - public void testWithStringVariables() { - KinesisClientLibConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 1", - "kinesisEndpoint = https://kinesis", "metricsLevel = SUMMARY" }, '\n')); - - assertEquals(config.getWorkerIdentifier(), "1"); - assertEquals(config.getKinesisEndpoint(), "https://kinesis"); - assertEquals(config.getMetricsLevel(), MetricsLevel.SUMMARY); - } - - @Test - 
public void testWithSetVariables() { - KinesisClientLibConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 1", - "metricsEnabledDimensions = ShardId, WorkerIdentifier" }, '\n')); - - Set expectedMetricsEnabledDimensions = ImmutableSet. builder() - .add("ShardId", "WorkerIdentifier") - .addAll(KinesisClientLibConfiguration.METRICS_ALWAYS_ENABLED_DIMENSIONS).build(); - assertEquals(config.getMetricsEnabledDimensions(), expectedMetricsEnabledDimensions); - } - - @Test - public void testWithInitialPositionInStreamVariables() { - KinesisClientLibConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon" }, '\n')); - - assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON); - } - - @Test - public void testSkippingNonKCLVariables() { - KinesisClientLibConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon", "abc = 1" }, '\n')); - - assertEquals(config.getApplicationName(), "b"); - assertEquals(config.getStreamName(), "a"); - assertEquals(config.getWorkerIdentifier(), "123"); - assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON); - } - - @Test - public void testEmptyOptionalVariables() { - KinesisClientLibConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon", "maxGetRecordsThreadPool = 1" }, '\n')); - assertEquals(config.getMaxGetRecordsThreadPool(), Optional.of(1)); - 
assertEquals(config.getRetryGetRecordsInSeconds(), Optional.empty()); - } - - @Test - public void testWithZeroValue() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon", "maxGetRecordsThreadPool = 0", - "retryGetRecordsInSeconds = 0" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - try { - configurator.getConfiguration(input); - } catch (Exception e) { - fail("Don't expect to fail on invalid variable value"); - - } - } - - @Test - public void testWithInvalidIntValue() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = 100nf" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - try { - configurator.getConfiguration(input); - } catch (Exception e) { - fail("Don't expect to fail on invalid variable value"); - } - } - - @Test - public void testWithNegativeIntValue() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = -12" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - try { - configurator.getConfiguration(input); - } catch (Exception e) { - fail("Don't expect to fail on invalid variable value"); - } - } - - @Test - public void testWithMissingCredentialsProvider() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", "workerId = 123", - "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - // separate input stream with getConfiguration to 
explicitly catch exception from the getConfiguration statement - try { - configurator.getConfiguration(input); - fail("expect failure with no credentials provider variables"); - } catch (Exception e) { - // succeed - } - } - - @Test - public void testWithMissingWorkerId() { - String test = StringUtils.join( - new String[] { "streamName = a", "applicationName = b", "AWSCredentialsProvider = " + credentialName1, - "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, - '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - KinesisClientLibConfiguration config = configurator.getConfiguration(input); - - // if workerId is not provided, configurator should assign one for it automatically - assertNotNull(config.getWorkerIdentifier()); - assertFalse(config.getWorkerIdentifier().isEmpty()); - } - - @Test - public void testWithMissingStreamName() { - String test = StringUtils.join(new String[] { "applicationName = b", - "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = 100" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - try { - configurator.getConfiguration(input); - fail("expect failure with no stream name variables"); - } catch (Exception e) { - // succeed - } - } - - @Test - public void testWithMissingApplicationName() { - String test = StringUtils.join(new String[] { "streamName = a", "AWSCredentialsProvider = " + credentialName1, - "workerId = 123", "failoverTimeMillis = 100" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - try { - configurator.getConfiguration(input); - fail("expect failure with no application variables"); - } catch (Exception e) { - // succeed - } - } - - @Test - public void 
testWithAWSCredentialsFailed() { - String test = StringUtils.join( - new String[] { "streamName = a", "applicationName = b", "AWSCredentialsProvider = " + credentialName2, - "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, - '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - try { - KinesisClientLibConfiguration config = configurator.getConfiguration(input); - config.getKinesisCredentialsProvider().resolveCredentials(); - fail("expect failure with wrong credentials provider"); - } catch (Exception e) { - // succeed - } - } - - // TODO: fix this test - @Test - @Ignore - public void testWithDifferentAWSCredentialsForDynamoDBAndCloudWatch() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialNameKinesis, - "AWSCredentialsProviderDynamoDB = " + credentialNameDynamoDB, - "AWSCredentialsProviderCloudWatch = " + credentialNameCloudWatch, "failoverTimeMillis = 100", - "shardSyncIntervalMillis = 500" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - KinesisClientLibConfiguration config = configurator.getConfiguration(input); - try { - config.getKinesisCredentialsProvider().resolveCredentials(); - } catch (Exception e) { - fail("Kinesis credential providers should not fail."); - } - try { - config.getDynamoDBCredentialsProvider().resolveCredentials(); - } catch (Exception e) { - fail("DynamoDB credential providers should not fail."); - } - try { - config.getCloudWatchCredentialsProvider().resolveCredentials(); - } catch (Exception e) { - fail("CloudWatch credential providers should not fail."); - } - } - - // TODO: fix this test - @Test - @Ignore - public void 
testWithDifferentAWSCredentialsForDynamoDBAndCloudWatchFailed() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialNameKinesis, - "AWSCredentialsProviderDynamoDB = " + credentialName1, - "AWSCredentialsProviderCloudWatch = " + credentialName1, "failoverTimeMillis = 100", - "shardSyncIntervalMillis = 500" }, '\n'); - InputStream input = new ByteArrayInputStream(test.getBytes()); - - // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - - // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement - KinesisClientLibConfiguration config = configurator.getConfiguration(input); - try { - config.getKinesisCredentialsProvider().resolveCredentials(); - } catch (Exception e) { - fail("Kinesis credential providers should not fail."); - } - try { - config.getDynamoDBCredentialsProvider().resolveCredentials(); - fail("DynamoDB credential providers should fail."); - } catch (Exception e) { - // succeed - } - try { - config.getCloudWatchCredentialsProvider().resolveCredentials(); - fail("CloudWatch credential providers should fail."); - } catch (Exception e) { - // succeed - } - } - - /** - * This credentials provider will always succeed - */ - public static class AlwaysSucceedCredentialsProvider implements AwsCredentialsProvider { - - @Override - public AwsCredentials resolveCredentials() { - return null; - } - - } - - /** - * This credentials provider will always succeed - */ - public static class AlwaysSucceedCredentialsProviderKinesis implements AwsCredentialsProvider { - - @Override - public AwsCredentials resolveCredentials() { - return AwsBasicCredentials.create("", ""); - } - - } - - /** - * This credentials provider will always succeed - */ - public static class AlwaysSucceedCredentialsProviderDynamoDB implements AwsCredentialsProvider { - - @Override - public AwsCredentials 
resolveCredentials() { - return AwsBasicCredentials.create("", ""); - } - - } - - /** - * This credentials provider will always succeed - */ - public static class AlwaysSucceedCredentialsProviderCloudWatch implements AwsCredentialsProvider { - - @Override - public AwsCredentials resolveCredentials() { - return AwsBasicCredentials.create("", ""); - } - - } - - /** - * This credentials provider will always fail - */ - public static class AlwaysFailCredentialsProvider implements AwsCredentialsProvider { - - @Override - public AwsCredentials resolveCredentials() { - throw new IllegalArgumentException(); - } - - } - - private KinesisClientLibConfiguration getConfiguration(String configString) { - InputStream input = new ByteArrayInputStream(configString.getBytes()); - KinesisClientLibConfiguration config = configurator.getConfiguration(input); - return config; - } -} diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/messages/MessageTest.java b/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/messages/MessageTest.java deleted file mode 100644 index 179c4ad8..00000000 --- a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/messages/MessageTest.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package com.amazonaws.services.kinesis.multilang.messages; - -import java.nio.ByteBuffer; -import java.util.Collections; - -import org.junit.Assert; -import org.junit.Test; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; - -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownReason; -import software.amazon.kinesis.retrieval.KinesisClientRecord; - -public class MessageTest { - - @Test - public void toStringTest() { - Message[] messages = new Message[]{ - new CheckpointMessage("1234567890", 0L, null), - new InitializeMessage(InitializationInput.builder().shardId("shard-123").build()), - new ProcessRecordsMessage(ProcessRecordsInput.builder() - .records(Collections.singletonList( - KinesisClientRecord.builder() - .data(ByteBuffer.wrap("cat".getBytes())) - .partitionKey("cat") - .sequenceNumber("555") - .build())) - .build()), - new ShutdownMessage(ShutdownReason.LEASE_LOST), - new StatusMessage("processRecords"), - new InitializeMessage(), - new ProcessRecordsMessage(), - new ShutdownRequestedMessage() - }; - -// TODO: fix this - for (int i = 0; i < messages.length; i++) { - System.out.println(messages[i].toString()); - Assert.assertTrue("Each message should contain the action field", messages[i].toString().contains("action")); - } - - // Hit this constructor - KinesisClientRecord defaultJsonFriendlyRecord = KinesisClientRecord.builder().build(); - Assert.assertNull(defaultJsonFriendlyRecord.partitionKey()); - Assert.assertNull(defaultJsonFriendlyRecord.data()); - Assert.assertNull(defaultJsonFriendlyRecord.sequenceNumber()); - Assert.assertNull(new ShutdownMessage(null).getReason()); - - // Hit the bad object mapping path - Message withBadMapper = new Message() { - }.withObjectMapper(new ObjectMapper() { - /** - * - */ - private static final long 
serialVersionUID = 1L; - - @Override - public String writeValueAsString(Object m) throws JsonProcessingException { - throw new JsonProcessingException(new Throwable()) { - }; - } - }); - String s = withBadMapper.toString(); - Assert.assertNotNull(s); - } -} diff --git a/amazon-kinesis-client-multilang/src/test/resources/logback.xml b/amazon-kinesis-client-multilang/src/test/resources/logback.xml deleted file mode 100644 index 46b45182..00000000 --- a/amazon-kinesis-client-multilang/src/test/resources/logback.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - %d [%thread] %-5level %logger{36} [%mdc{ShardId:-NONE}] - %msg %n - - - - - - - \ No newline at end of file diff --git a/amazon-kinesis-client/pom.xml b/amazon-kinesis-client/pom.xml deleted file mode 100644 index 1b791739..00000000 --- a/amazon-kinesis-client/pom.xml +++ /dev/null @@ -1,332 +0,0 @@ - - - 4.0.0 - - - software.amazon.kinesis - amazon-kinesis-client-pom - 2.0.5 - - - amazon-kinesis-client - jar - Amazon Kinesis Client Library for Java - - The Amazon Kinesis Client Library for Java enables Java developers to easily consume and process data - from Amazon Kinesis. 
- - https://aws.amazon.com/kinesis - - - https://github.com/awslabs/amazon-kinesis-client.git - - - - - Amazon Software License - https://aws.amazon.com/asl - repo - - - - - 1.11.272 - 2.0.6 - 1.0.392 - libsqlite4java - ${project.build.directory}/test-lib - 1.7.25 - - - - - software.amazon.awssdk - kinesis - ${awssdk.version} - - - software.amazon.awssdk - dynamodb - ${awssdk.version} - - - software.amazon.awssdk - cloudwatch - ${awssdk.version} - - - software.amazon.awssdk - netty-nio-client - ${awssdk.version} - - - com.google.guava - guava - 26.0-jre - - - com.google.protobuf - protobuf-java - 2.6.1 - - - org.apache.commons - commons-lang3 - 3.7 - - - org.slf4j - slf4j-api - ${slf4j.version} - - - - io.reactivex.rxjava2 - rxjava - 2.1.14 - - - - org.projectlombok - lombok - 1.16.20 - provided - - - - - junit - junit - 4.11 - test - - - - org.mockito - mockito-all - 1.10.19 - test - - - - org.hamcrest - hamcrest-all - 1.3 - test - - - - - - - - - - - - ch.qos.logback - logback-classic - 1.1.7 - test - - - - - - - - - - - - - - - amazonwebservices - Amazon Web Services - https://aws.amazon.com - - developer - - - - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.2 - - 1.8 - 1.8 - UTF-8 - - - - - - - - org.apache.maven.plugins - maven-surefire-plugin - 2.19.1 - - - **/*IntegrationTest.java - - - - sqlite4java.library.path - ${sqlite4java.libpath} - - - - - - org.apache.maven.plugins - maven-failsafe-plugin - 2.19.1 - - - **/*IntegrationTest.java - - - - - - integration-test - verify - - - - - - org.apache.maven.plugins - maven-dependency-plugin - - - copy - test-compile - - copy - - - - - - com.almworks.sqlite4java - ${sqlite4java.native}-osx - ${sqlite4java.version} - dylib - true - ${sqlite4java.libpath} - - - - - - com.almworks.sqlite4java - ${sqlite4java.native}-linux-i386 - ${sqlite4java.version} - so - true - ${sqlite4java.libpath} - - - - - com.almworks.sqlite4java - ${sqlite4java.native}-linux-amd64 - ${sqlite4java.version} - so - true - 
${sqlite4java.libpath} - - - - - - com.almworks.sqlite4java - sqlite4java-win32-x86 - ${sqlite4java.version} - dll - true - ${sqlite4java.libpath} - - - - - com.almworks.sqlite4java - sqlite4java-win32-x64 - ${sqlite4java.version} - dll - true - ${sqlite4java.libpath} - - - - - - - - org.apache.maven.plugins - maven-javadoc-plugin - 2.10.3 - - com.amazonaws.services.kinesis.producer.protobuf - - - - attach-javadocs - - jar - - - - - - org.apache.maven.plugins - maven-source-plugin - 3.0.1 - - - attach-sources - - jar - - - - - - - - - - disable-java8-doclint - - [1.8,) - - - -Xdoclint:none - - - - - diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/annotations/KinesisClientInternalApi.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/annotations/KinesisClientInternalApi.java deleted file mode 100644 index 32b322f0..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/annotations/KinesisClientInternalApi.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.annotations; - -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; - -/** - * Any class/method/variable marked with this annotation is subject to breaking changes between minor releases. 
- */ -@Retention(RetentionPolicy.CLASS) -public @interface KinesisClientInternalApi { -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/Checkpoint.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/Checkpoint.java deleted file mode 100644 index 0b11ee66..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/Checkpoint.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.checkpoint; - -import lombok.Data; -import lombok.experimental.Accessors; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * A class encapsulating the 2 pieces of state stored in a checkpoint. - */ -@Data -@Accessors(fluent = true) -public class Checkpoint { - private final ExtendedSequenceNumber checkpoint; - private final ExtendedSequenceNumber pendingCheckpoint; - - /** - * Constructor. - * - * @param checkpoint the checkpoint sequence number - cannot be null or empty. - * @param pendingCheckpoint the pending checkpoint sequence number - can be null. 
- */ - public Checkpoint(final ExtendedSequenceNumber checkpoint, final ExtendedSequenceNumber pendingCheckpoint) { - if (checkpoint == null || checkpoint.sequenceNumber().isEmpty()) { - throw new IllegalArgumentException("Checkpoint cannot be null or empty"); - } - this.checkpoint = checkpoint; - this.pendingCheckpoint = pendingCheckpoint; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointConfig.java deleted file mode 100644 index 13c0d153..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointConfig.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.checkpoint; - - -import lombok.Data; -import lombok.experimental.Accessors; -import software.amazon.kinesis.checkpoint.dynamodb.DynamoDBCheckpointFactory; - -/** - * Used by the KCL to manage checkpointing. 
- */ -@Data -@Accessors(fluent = true) -public class CheckpointConfig { - private CheckpointFactory checkpointFactory = new DynamoDBCheckpointFactory(); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointFactory.java deleted file mode 100644 index fe51584c..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointFactory.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.checkpoint; - -import software.amazon.kinesis.leases.LeaseCoordinator; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.processor.Checkpointer; - -/** - * - */ -public interface CheckpointFactory { - Checkpointer createCheckpointer(LeaseCoordinator leaseCoordinator, LeaseRefresher leaseRefresher); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/DoesNothingPreparedCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/DoesNothingPreparedCheckpointer.java deleted file mode 100644 index be211204..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/DoesNothingPreparedCheckpointer.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.checkpoint; - -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.exceptions.InvalidStateException; -import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.exceptions.ThrottlingException; -import software.amazon.kinesis.processor.PreparedCheckpointer; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * A special PreparedCheckpointer that does nothing, which can be used when preparing a checkpoint at the current - * checkpoint sequence number where it is never necessary to do another checkpoint. - * This simplifies programming by preventing application developers from having to reason about whether - * their application has processed records before calling prepareCheckpoint - * - * Here's why it's safe to do nothing: - * The only way to checkpoint at current checkpoint value is to have a record processor that gets - * initialized, processes 0 records, then calls prepareCheckpoint(). The value in the table is the same, so there's - * no reason to overwrite it with another copy of itself. - */ -@KinesisClientInternalApi -public class DoesNothingPreparedCheckpointer implements PreparedCheckpointer { - - private final ExtendedSequenceNumber sequenceNumber; - - /** - * Constructor. 
- * @param sequenceNumber the sequence number value - */ - public DoesNothingPreparedCheckpointer(ExtendedSequenceNumber sequenceNumber) { - this.sequenceNumber = sequenceNumber; - } - - /** - * {@inheritDoc} - */ - @Override - public ExtendedSequenceNumber pendingCheckpoint() { - return sequenceNumber; - } - - /** - * {@inheritDoc} - */ - @Override - public void checkpoint() - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { - // This method does nothing - } - -} - diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SequenceNumberValidator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SequenceNumberValidator.java deleted file mode 100644 index e18da9ec..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SequenceNumberValidator.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.checkpoint; - -import java.math.BigInteger; -import java.util.Collections; -import java.util.List; -import java.util.Optional; - -import org.apache.commons.lang3.StringUtils; - -import lombok.Data; -import lombok.experimental.Accessors; - -/** - * This supports extracting the shardId from a sequence number. - * - *

Warning

- * Sequence numbers are an opaque value used by Kinesis, and maybe changed at any time. Should validation stop - * working you may need to update your version of the KCL - * - */ -public class SequenceNumberValidator { - - @Data - @Accessors(fluent = true) - private static class SequenceNumberComponents { - final int version; - final int shardId; - } - - private interface SequenceNumberReader { - Optional read(String sequenceNumber); - } - - /** - * Reader for the v2 sequence number format. v1 sequence numbers are no longer used or available. - */ - private static class V2SequenceNumberReader implements SequenceNumberReader { - - private static final int VERSION = 2; - - private static final int EXPECTED_BIT_LENGTH = 186; - - private static final int VERSION_OFFSET = 184; - private static final long VERSION_MASK = (1 << 4) - 1; - - private static final int SHARD_ID_OFFSET = 4; - private static final long SHARD_ID_MASK = (1L << 32) - 1; - - @Override - public Optional read(String sequenceNumberString) { - BigInteger sequenceNumber = new BigInteger(sequenceNumberString, 10); - - // - // If the bit length of the sequence number isn't 186 it's impossible for the version numbers - // to be where we expect them. We treat this the same as an unknown version of the sequence number - // - // If the sequence number length isn't what we expect it's due to a new version of the sequence number or - // an invalid sequence number. This - // - if (sequenceNumber.bitLength() != EXPECTED_BIT_LENGTH) { - return Optional.empty(); - } - - // - // Read the 4 most significant bits of the sequence number, the 2 most significant bits are implicitly 0 - // (2 == 0b0011). 
If the version number doesn't match we give up and say we can't parse the sequence number - // - int version = readOffset(sequenceNumber, VERSION_OFFSET, VERSION_MASK); - if (version != VERSION) { - return Optional.empty(); - } - - // - // If we get here the sequence number is big enough, and the version matches so the shardId should be valid. - // - int shardId = readOffset(sequenceNumber, SHARD_ID_OFFSET, SHARD_ID_MASK); - return Optional.of(new SequenceNumberComponents(version, shardId)); - } - - private int readOffset(BigInteger sequenceNumber, int offset, long mask) { - long value = sequenceNumber.shiftRight(offset).longValue() & mask; - return (int) value; - } - } - - private static final List SEQUENCE_NUMBER_READERS = Collections - .singletonList(new V2SequenceNumberReader()); - - private Optional retrieveComponentsFor(String sequenceNumber) { - return SEQUENCE_NUMBER_READERS.stream().map(r -> r.read(sequenceNumber)).filter(Optional::isPresent).map(Optional::get).findFirst(); - } - - /** - * Attempts to retrieve the version for a sequence number. If no reader can be found for the sequence number this - * will return an empty Optional. - * - *

- * This will return an empty Optional if the it's unable to extract the version number. This can occur for - * multiple reasons including: - *

    - *
  • Kinesis has started using a new version of sequence numbers
  • - *
  • The provided sequence number isn't a valid Kinesis sequence number.
  • - *
- * - *

- * - * @param sequenceNumber - * the sequence number to extract the version from - * @return an Optional containing the version if a compatible sequence number reader can be found, an empty Optional - * otherwise. - */ - public Optional versionFor(String sequenceNumber) { - return retrieveComponentsFor(sequenceNumber).map(SequenceNumberComponents::version); - } - - /** - * Attempts to retrieve the shardId from a sequence number. If the version of the sequence number is unsupported - * this will return an empty optional. - * - * This will return an empty Optional if the sequence number isn't recognized. This can occur for multiple - * reasons including: - *
    - *
  • Kinesis has started using a new version of sequence numbers
  • - *
  • The provided sequence number isn't a valid Kinesis sequence number.
  • - *
- *
- *

- * This should always return a value if {@link #versionFor(String)} returns a value - *

- * - * @param sequenceNumber - * the sequence number to extract the shardId from - * @return an Optional containing the shardId if the version is supported, an empty Optional otherwise. - */ - public Optional shardIdFor(String sequenceNumber) { - return retrieveComponentsFor(sequenceNumber).map(s -> String.format("shardId-%012d", s.shardId())); - } - - /** - * Validates that the sequence number provided contains the given shardId. If the sequence number is unsupported - * this will return an empty Optional. - * - *

- * Validation of a sequence number will only occur if the sequence number can be parsed. It's possible to use - * {@link #versionFor(String)} to verify that the given sequence number is supported by this class. There are 3 - * possible validation states: - *

- *
Some(True)
- *
The sequence number can be parsed, and the shardId matches the one in the sequence number
- *
Some(False)
- *
THe sequence number can be parsed, and the shardId doesn't match the one in the sequence number
- *
None
- *
It wasn't possible to parse the sequence number so the validity of the sequence number is unknown
- *
- *

- * - *

- * Handling unknown validation causes is application specific, and not specific handling is - * provided. - *

- * - * @param sequenceNumber - * the sequence number to verify the shardId - * @param shardId - * the shardId that the sequence is expected to contain - * @return true if the sequence number contains the shardId, false if it doesn't. If the sequence number version is - * unsupported this will return an empty Optional - */ - public Optional validateSequenceNumberForShard(String sequenceNumber, String shardId) { - return shardIdFor(sequenceNumber).map(s -> StringUtils.equalsIgnoreCase(s, shardId)); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointer.java deleted file mode 100644 index 5a49aedf..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointer.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.checkpoint; - -import software.amazon.kinesis.exceptions.InvalidStateException; -import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.exceptions.ThrottlingException; -import software.amazon.kinesis.processor.PreparedCheckpointer; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Objects of this class are prepared to checkpoint at a specific sequence number. They use an - * RecordProcessorCheckpointer to do the actual checkpointing, so their checkpoint is subject to the same 'didn't go - * backwards' validation as a normal checkpoint. - */ -public class ShardPreparedCheckpointer implements PreparedCheckpointer { - - private final ExtendedSequenceNumber pendingCheckpointSequenceNumber; - private final RecordProcessorCheckpointer checkpointer; - - /** - * Constructor. 
- * - * @param pendingCheckpointSequenceNumber sequence number to checkpoint at - * @param checkpointer checkpointer to use - */ - public ShardPreparedCheckpointer(ExtendedSequenceNumber pendingCheckpointSequenceNumber, - RecordProcessorCheckpointer checkpointer) { - this.pendingCheckpointSequenceNumber = pendingCheckpointSequenceNumber; - this.checkpointer = checkpointer; - } - - /** - * {@inheritDoc} - */ - @Override - public ExtendedSequenceNumber pendingCheckpoint() { - return pendingCheckpointSequenceNumber; - } - - /** - * {@inheritDoc} - */ - @Override - public void checkpoint() - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { - checkpointer.checkpoint(pendingCheckpointSequenceNumber.sequenceNumber(), - pendingCheckpointSequenceNumber.subSequenceNumber()); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardRecordProcessorCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardRecordProcessorCheckpointer.java deleted file mode 100644 index ada04834..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardRecordProcessorCheckpointer.java +++ /dev/null @@ -1,323 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.checkpoint; - -import lombok.Getter; -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.experimental.Accessors; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.kinesis.model.Record; -import software.amazon.kinesis.exceptions.InvalidStateException; -import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.KinesisClientLibException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.exceptions.ThrottlingException; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.processor.PreparedCheckpointer; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * This class is used to enable RecordProcessors to checkpoint their progress. - * The Amazon Kinesis Client Library will instantiate an object and provide a reference to the application - * ShardRecordProcessor instance. Amazon Kinesis Client Library will create one instance per shard assignment. - */ -@RequiredArgsConstructor -@Slf4j -public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpointer { - @NonNull - private final ShardInfo shardInfo; - @NonNull - @Getter @Accessors(fluent = true) - private final Checkpointer checkpointer; - - // Set to the last value set via checkpoint(). - // Sample use: verify application shutdown() invoked checkpoint() at the end of a shard. 
- @Getter @Accessors(fluent = true) - private ExtendedSequenceNumber lastCheckpointValue; - @Getter @Accessors(fluent = true) - private ExtendedSequenceNumber largestPermittedCheckpointValue; - private ExtendedSequenceNumber sequenceNumberAtShardEnd; - - /** - * {@inheritDoc} - */ - @Override - public synchronized void checkpoint() - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { - if (log.isDebugEnabled()) { - log.debug("Checkpointing {}, token {} at largest permitted value {}", shardInfo.shardId(), - shardInfo.concurrencyToken(), this.largestPermittedCheckpointValue); - } - advancePosition(this.largestPermittedCheckpointValue); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized void checkpoint(Record record) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { - - // TODO: UserRecord Deprecation - if (record == null) { - throw new IllegalArgumentException("Could not checkpoint a null record"); - } /* else if (record instanceof UserRecord) { - checkpoint(record.sequenceNumber(), ((UserRecord) record).subSequenceNumber()); - } */ else { - checkpoint(record.sequenceNumber(), 0); - } - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized void checkpoint(String sequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { - checkpoint(sequenceNumber, 0); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized void checkpoint(String sequenceNumber, long subSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { - - if (subSequenceNumber < 0) { - throw new IllegalArgumentException("Could not checkpoint at invalid, negative subsequence number " - + subSequenceNumber); - } - - /* - * If there isn't 
a last checkpoint value, we only care about checking the upper bound. - * If there is a last checkpoint value, we want to check both the lower and upper bound. - */ - ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber(sequenceNumber, subSequenceNumber); - if ((lastCheckpointValue == null || lastCheckpointValue.compareTo(newCheckpoint) <= 0) - && newCheckpoint.compareTo(largestPermittedCheckpointValue) <= 0) { - - if (log.isDebugEnabled()) { - log.debug("Checkpointing {}, token {} at specific extended sequence number {}", shardInfo.shardId(), - shardInfo.concurrencyToken(), newCheckpoint); - } - this.advancePosition(newCheckpoint); - } else { - throw new IllegalArgumentException(String.format( - "Could not checkpoint at extended sequence number %s as it did not fall into acceptable range " - + "between the last checkpoint %s and the greatest extended sequence number passed to this " - + "record processor %s", - newCheckpoint, this.lastCheckpointValue, this.largestPermittedCheckpointValue)); - } - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized PreparedCheckpointer prepareCheckpoint() - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { - return this.prepareCheckpoint( - this.largestPermittedCheckpointValue.sequenceNumber(), - this.largestPermittedCheckpointValue.subSequenceNumber()); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized PreparedCheckpointer prepareCheckpoint(Record record) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { - // - // TODO: UserRecord Deprecation - // - if (record == null) { - throw new IllegalArgumentException("Could not prepare checkpoint a null record"); - } /*else if (record instanceof UserRecord) { - return prepareCheckpoint(record.sequenceNumber(), ((UserRecord) record).subSequenceNumber()); - } */ else { - return prepareCheckpoint(record.sequenceNumber(), 0); - } - 
} - - /** - * {@inheritDoc} - */ - @Override - public synchronized PreparedCheckpointer prepareCheckpoint(String sequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { - return prepareCheckpoint(sequenceNumber, 0); - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { - - if (subSequenceNumber < 0) { - throw new IllegalArgumentException("Could not checkpoint at invalid, negative subsequence number " - + subSequenceNumber); - } - - /* - * If there isn't a last checkpoint value, we only care about checking the upper bound. - * If there is a last checkpoint value, we want to check both the lower and upper bound. - */ - ExtendedSequenceNumber pendingCheckpoint = new ExtendedSequenceNumber(sequenceNumber, subSequenceNumber); - if ((lastCheckpointValue == null || lastCheckpointValue.compareTo(pendingCheckpoint) <= 0) - && pendingCheckpoint.compareTo(largestPermittedCheckpointValue) <= 0) { - - if (log.isDebugEnabled()) { - log.debug("Preparing checkpoint {}, token {} at specific extended sequence number {}", - shardInfo.shardId(), shardInfo.concurrencyToken(), pendingCheckpoint); - } - return doPrepareCheckpoint(pendingCheckpoint); - } else { - throw new IllegalArgumentException(String.format( - "Could not prepare checkpoint at extended sequence number %s as it did not fall into acceptable " - + "range between the last checkpoint %s and the greatest extended sequence number passed " - + "to this record processor %s", - pendingCheckpoint, this.lastCheckpointValue, this.largestPermittedCheckpointValue)); - } - } - - public synchronized void setInitialCheckpointValue(ExtendedSequenceNumber initialCheckpoint) { - lastCheckpointValue = initialCheckpoint; - } - - /** - * @param 
largestPermittedCheckpointValue the largest permitted checkpoint - */ - public synchronized void largestPermittedCheckpointValue(ExtendedSequenceNumber largestPermittedCheckpointValue) { - this.largestPermittedCheckpointValue = largestPermittedCheckpointValue; - } - - /** - * Used to remember the last extended sequence number before SHARD_END to allow us to prevent the checkpointer - * from checkpointing at the end of the shard twice (i.e. at the last extended sequence number and then again - * at SHARD_END). - * - * @param extendedSequenceNumber - */ - public synchronized void sequenceNumberAtShardEnd(ExtendedSequenceNumber extendedSequenceNumber) { - this.sequenceNumberAtShardEnd = extendedSequenceNumber; - } - - - /** - * Internal API - has package level access only for testing purposes. - * - * @param sequenceNumber - * - * @throws KinesisClientLibDependencyException - * @throws ThrottlingException - * @throws ShutdownException - * @throws InvalidStateException - */ - void advancePosition(String sequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { - advancePosition(new ExtendedSequenceNumber(sequenceNumber)); - } - - void advancePosition(ExtendedSequenceNumber extendedSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { - ExtendedSequenceNumber checkpointToRecord = extendedSequenceNumber; - if (sequenceNumberAtShardEnd != null && sequenceNumberAtShardEnd.equals(extendedSequenceNumber)) { - // If we are about to checkpoint the very last sequence number for this shard, we might as well - // just checkpoint at SHARD_END - checkpointToRecord = ExtendedSequenceNumber.SHARD_END; - } - - // Don't checkpoint a value we already successfully checkpointed - if (extendedSequenceNumber != null && !extendedSequenceNumber.equals(lastCheckpointValue)) { - try { - if (log.isDebugEnabled()) { - log.debug("Setting {}, token {} 
checkpoint to {}", shardInfo.shardId(), - shardInfo.concurrencyToken(), checkpointToRecord); - } - checkpointer.setCheckpoint(shardInfo.shardId(), checkpointToRecord, shardInfo.concurrencyToken()); - lastCheckpointValue = checkpointToRecord; - } catch (ThrottlingException | ShutdownException | InvalidStateException - | KinesisClientLibDependencyException e) { - throw e; - } catch (KinesisClientLibException e) { - log.warn("Caught exception setting checkpoint.", e); - throw new KinesisClientLibDependencyException("Caught exception while checkpointing", e); - } - } - } - - /** - * This method stores the given sequenceNumber as a pending checkpoint in the lease table without overwriting the - * current checkpoint, then returns a PreparedCheckpointer that is ready to checkpoint at the given sequence number. - * - * This method does not advance lastCheckpointValue, but calls to PreparedCheckpointer.checkpoint() on the returned - * objects do. This allows customers to 'discard' prepared checkpoints by calling any of the 4 checkpoint methods on - * this class before calling PreparedCheckpointer.checkpoint(). Some examples: - * - * 1) prepareCheckpoint(snA); checkpoint(snB). // this works regardless of whether snA or snB is bigger. It discards - * the prepared checkpoint at snA. - * 2) prepareCheckpoint(snA); prepareCheckpoint(snB). // this works regardless of whether snA or snB is bigger. It - * replaces the preparedCheckpoint at snA with a new one at snB. - * 3) checkpointA = prepareCheckpoint(snA); checkpointB = prepareCheckpoint(snB); checkpointB.checkpoint(); - * checkpointerA.checkpoint(); // This replaces the prepared checkpoint at snA with a new one at snB, then - * checkpoints at snB regardless of whether snA or snB is bigger. The checkpoint at snA only succeeds if snA > snB. - * - * @param extendedSequenceNumber the sequence number for the prepared checkpoint - * @return a prepared checkpoint that is ready to checkpoint at the given sequence number. 
- * @throws KinesisClientLibDependencyException - * @throws InvalidStateException - * @throws ThrottlingException - * @throws ShutdownException - */ - private PreparedCheckpointer doPrepareCheckpoint(ExtendedSequenceNumber extendedSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { - - ExtendedSequenceNumber newPrepareCheckpoint = extendedSequenceNumber; - if (sequenceNumberAtShardEnd != null && sequenceNumberAtShardEnd.equals(extendedSequenceNumber)) { - // If we are about to checkpoint the very last sequence number for this shard, we might as well - // just checkpoint at SHARD_END - newPrepareCheckpoint = ExtendedSequenceNumber.SHARD_END; - } - - // Don't actually prepare a checkpoint if they're trying to checkpoint at the current checkpointed value. - // The only way this can happen is if they call prepareCheckpoint() in a record processor that was initialized - // AND that has not processed any records since initialization. 
- if (newPrepareCheckpoint.equals(lastCheckpointValue)) { - return new DoesNothingPreparedCheckpointer(newPrepareCheckpoint); - } - - try { - checkpointer.prepareCheckpoint(shardInfo.shardId(), newPrepareCheckpoint, shardInfo.concurrencyToken()); - } catch (ThrottlingException | ShutdownException | InvalidStateException - | KinesisClientLibDependencyException e) { - throw e; - } catch (KinesisClientLibException e) { - log.warn("Caught exception setting prepareCheckpoint.", e); - throw new KinesisClientLibDependencyException("Caught exception while prepareCheckpointing", e); - } - - ShardPreparedCheckpointer result = new ShardPreparedCheckpointer(newPrepareCheckpoint, this); - return result; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointFactory.java deleted file mode 100644 index f0ba08e8..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointFactory.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.checkpoint.dynamodb; - -import lombok.Data; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.checkpoint.CheckpointFactory; -import software.amazon.kinesis.leases.LeaseCoordinator; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.processor.Checkpointer; - -/** - * - */ -@Data -@KinesisClientInternalApi -public class DynamoDBCheckpointFactory implements CheckpointFactory { - @Override - public Checkpointer createCheckpointer(final LeaseCoordinator leaseLeaseCoordinator, - final LeaseRefresher leaseRefresher) { - return new DynamoDBCheckpointer(leaseLeaseCoordinator, leaseRefresher); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointer.java deleted file mode 100644 index 9b05cd86..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointer.java +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.checkpoint.dynamodb; - -import java.util.Objects; -import java.util.UUID; - -import com.google.common.annotations.VisibleForTesting; - -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.checkpoint.Checkpoint; -import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.KinesisClientLibException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.exceptions.ThrottlingException; -import software.amazon.kinesis.exceptions.internal.KinesisClientLibIOException; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseCoordinator; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * - */ -@RequiredArgsConstructor -@Slf4j -@KinesisClientInternalApi -public class DynamoDBCheckpointer implements Checkpointer { - @NonNull - private final LeaseCoordinator leaseCoordinator; - @NonNull - private final LeaseRefresher leaseRefresher; - - private String operation; - - @Override - public void setCheckpoint(final String shardId, final ExtendedSequenceNumber checkpointValue, - final String concurrencyToken) throws KinesisClientLibException { - try { - boolean wasSuccessful = setCheckpoint(shardId, checkpointValue, UUID.fromString(concurrencyToken)); - if (!wasSuccessful) { - throw new ShutdownException("Can't update checkpoint - instance doesn't hold the lease for this shard"); - } - } catch 
(ProvisionedThroughputException e) { - throw new ThrottlingException("Got throttled while updating checkpoint.", e); - } catch (InvalidStateException e) { - String message = "Unable to save checkpoint for shardId " + shardId; - log.error(message, e); - throw new software.amazon.kinesis.exceptions.InvalidStateException(message, e); - } catch (DependencyException e) { - throw new KinesisClientLibDependencyException("Unable to save checkpoint for shardId " + shardId, e); - } - } - - @Override - public ExtendedSequenceNumber getCheckpoint(final String shardId) throws KinesisClientLibException { - try { - return leaseRefresher.getLease(shardId).checkpoint(); - } catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) { - String message = "Unable to fetch checkpoint for shardId " + shardId; - log.error(message, e); - throw new KinesisClientLibIOException(message, e); - } - } - - @Override - public Checkpoint getCheckpointObject(final String shardId) throws KinesisClientLibException { - try { - Lease lease = leaseRefresher.getLease(shardId); - log.debug("[{}] Retrieved lease => {}", shardId, lease); - return new Checkpoint(lease.checkpoint(), lease.pendingCheckpoint()); - } catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) { - String message = "Unable to fetch checkpoint for shardId " + shardId; - log.error(message, e); - throw new KinesisClientLibIOException(message, e); - } - } - - @Override - public void prepareCheckpoint(final String shardId, final ExtendedSequenceNumber pendingCheckpoint, - final String concurrencyToken) throws KinesisClientLibException { - try { - boolean wasSuccessful = - prepareCheckpoint(shardId, pendingCheckpoint, UUID.fromString(concurrencyToken)); - if (!wasSuccessful) { - throw new ShutdownException( - "Can't prepare checkpoint - instance doesn't hold the lease for this shard"); - } - } catch (ProvisionedThroughputException e) { - throw new ThrottlingException("Got throttled 
while preparing checkpoint.", e); - } catch (InvalidStateException e) { - String message = "Unable to prepare checkpoint for shardId " + shardId; - log.error(message, e); - throw new software.amazon.kinesis.exceptions.InvalidStateException(message, e); - } catch (DependencyException e) { - throw new KinesisClientLibDependencyException("Unable to prepare checkpoint for shardId " + shardId, e); - } - } - - @VisibleForTesting - public boolean setCheckpoint(String shardId, ExtendedSequenceNumber checkpoint, UUID concurrencyToken) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - Lease lease = leaseCoordinator.getCurrentlyHeldLease(shardId); - if (lease == null) { - log.info("Worker {} could not update checkpoint for shard {} because it does not hold the lease", - leaseCoordinator.workerIdentifier(), shardId); - return false; - } - - lease.checkpoint(checkpoint); - lease.pendingCheckpoint(null); - lease.ownerSwitchesSinceCheckpoint(0L); - - return leaseCoordinator.updateLease(lease, concurrencyToken, operation, shardId); - } - - boolean prepareCheckpoint(String shardId, ExtendedSequenceNumber pendingCheckpoint, UUID concurrencyToken) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - Lease lease = leaseCoordinator.getCurrentlyHeldLease(shardId); - if (lease == null) { - log.info("Worker {} could not prepare checkpoint for shard {} because it does not hold the lease", - leaseCoordinator.workerIdentifier(), shardId); - return false; - } - - lease.pendingCheckpoint(Objects.requireNonNull(pendingCheckpoint, "pendingCheckpoint should not be null")); - return leaseCoordinator.updateLease(lease, concurrencyToken, operation, shardId); - } - - @Override - public void operation(@NonNull final String operation) { - this.operation = operation; - } - - @Override - public String operation() { - return operation; - } -} diff --git 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ConfigsBuilder.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ConfigsBuilder.java deleted file mode 100644 index 4d252f5a..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ConfigsBuilder.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.common; - -import org.apache.commons.lang3.StringUtils; - -import lombok.Data; -import lombok.NonNull; -import lombok.experimental.Accessors; -import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; -import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.checkpoint.CheckpointConfig; -import software.amazon.kinesis.coordinator.CoordinatorConfig; -import software.amazon.kinesis.leases.LeaseManagementConfig; -import software.amazon.kinesis.lifecycle.LifecycleConfig; -import software.amazon.kinesis.metrics.MetricsConfig; -import software.amazon.kinesis.processor.ProcessorConfig; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; -import software.amazon.kinesis.retrieval.RetrievalConfig; - -/** - * This Builder is useful to create all configurations for the KCL with default values. 
- */ -@Data -@Accessors(fluent = true) -public class ConfigsBuilder { - /** - * Name of the stream to consume records from - */ - @NonNull - private final String streamName; - /** - * Application name for the KCL Worker - */ - @NonNull - private final String applicationName; - /** - * KinesisClient to be used to consumer records from Kinesis - */ - @NonNull - private final KinesisAsyncClient kinesisClient; - /** - * DynamoDBClient to be used to interact with DynamoDB service for lease management and checkpoiniting - */ - @NonNull - private final DynamoDbAsyncClient dynamoDBClient; - /** - * CloudWatchClient to be used to push KCL metrics to CloudWatch service - */ - @NonNull - private final CloudWatchAsyncClient cloudWatchClient; - /** - * KCL worker identifier to distinguish between 2 unique workers - */ - @NonNull - private final String workerIdentifier; - /** - * ShardRecordProcessorFactory to be used to create ShardRecordProcesor for processing records - */ - @NonNull - private final ShardRecordProcessorFactory shardRecordProcessorFactory; - - /** - * Lease table name used for lease management and checkpointing. - */ - private String tableName; - - /** - * Lease table name used for lease management and checkpointing. - * - * @return DynamoDB table name - */ - public String tableName() { - if (StringUtils.isEmpty(tableName)) { - tableName = applicationName(); - } - return tableName; - } - - /** - * CloudWatch namespace for KCL metrics. - */ - private String namespace; - - /** - * CloudWatch namespace for KCL metrics. 
- * - * @return CloudWatch namespace - */ - public String namespace() { - if (StringUtils.isEmpty(namespace)) { - namespace = applicationName(); - } - return namespace; - } - - /** - * Creates a new instance of CheckpointConfig - * - * @return CheckpointConfig - */ - public CheckpointConfig checkpointConfig() { - return new CheckpointConfig(); - } - - /** - * Creates a new instance of CoordinatorConfig - * - * @return CoordinatorConfig - */ - public CoordinatorConfig coordinatorConfig() { - return new CoordinatorConfig(applicationName()); - } - - /** - * Creates a new instance of LeaseManagementConfig - * - * @return LeaseManagementConfig - */ - public LeaseManagementConfig leaseManagementConfig() { - return new LeaseManagementConfig(tableName(), dynamoDBClient(), kinesisClient(), streamName(), - workerIdentifier()); - } - - /** - * Creates a new instance of LifecycleConfig - * - * @return LifecycleConfig - */ - public LifecycleConfig lifecycleConfig() { - return new LifecycleConfig(); - } - - /** - * Creates a new instance of MetricsConfig - * - * @return MetricsConfig - */ - public MetricsConfig metricsConfig() { - return new MetricsConfig(cloudWatchClient(), namespace()); - } - - - /** - * Creates a new instance of ProcessorConfig - * - * @return ProcessorConfigConfig - */ - public ProcessorConfig processorConfig() { - return new ProcessorConfig(shardRecordProcessorFactory()); - } - - /** - * Creates a new instance of RetrievalConfig - * - * @return RetrievalConfig - */ - public RetrievalConfig retrievalConfig() { - return new RetrievalConfig(kinesisClient(), streamName(), applicationName()); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStream.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStream.java deleted file mode 100644 index 5c8d26bb..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStream.java +++ 
/dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.common; - -/** - * Used to specify the position in the stream where a new application should start from. - * This is used during initial application bootstrap (when a checkpoint doesn't exist for a shard or its parents). - */ -public enum InitialPositionInStream { - /** - * Start after the most recent data record (fetch new data). - */ - LATEST, - - /** - * Start from the oldest available data record. - */ - TRIM_HORIZON, - - /** - * Start from the record at or after the specified server-side timestamp. - */ - AT_TIMESTAMP -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisClientUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisClientUtil.java deleted file mode 100644 index 634c9b01..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisClientUtil.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.common; - -import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClientBuilder; - -/** - * Utility to setup KinesisAsyncClient to be used with KCL. - */ -public class KinesisClientUtil { - - /** - * Creates a client from a builder. - * - * @param clientBuilder - * @return - */ - public static KinesisAsyncClient createKinesisAsyncClient(KinesisAsyncClientBuilder clientBuilder) { - return clientBuilder.httpClientBuilder(NettyNioAsyncHttpClient.builder().maxConcurrency(Integer.MAX_VALUE)) - .build(); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisRequestsBuilder.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisRequestsBuilder.java deleted file mode 100644 index 4a384c89..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisRequestsBuilder.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.common; - -import software.amazon.awssdk.awscore.AwsRequest; -import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration; -import software.amazon.awssdk.core.ApiName; -import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest; -import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest; -import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; -import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; -import software.amazon.awssdk.services.kinesis.model.ListShardsRequest; -import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest; -import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.retrieval.RetrievalConfig; - -/** - * - */ -@KinesisClientInternalApi -public class KinesisRequestsBuilder { - public static ListShardsRequest.Builder listShardsRequestBuilder() { - return appendUserAgent(ListShardsRequest.builder()); - } - - public static SubscribeToShardRequest.Builder subscribeToShardRequestBuilder() { - return appendUserAgent(SubscribeToShardRequest.builder()); - } - - public static GetRecordsRequest.Builder getRecordsRequestBuilder() { - return appendUserAgent(GetRecordsRequest.builder()); - } - - public static GetShardIteratorRequest.Builder getShardIteratorRequestBuilder() { - return appendUserAgent(GetShardIteratorRequest.builder()); - } - - public static DescribeStreamSummaryRequest.Builder describeStreamSummaryRequestBuilder() { - return appendUserAgent(DescribeStreamSummaryRequest.builder()); - } - - public static RegisterStreamConsumerRequest.Builder registerStreamConsumerRequestBuilder() { - return appendUserAgent(RegisterStreamConsumerRequest.builder()); - } - - public static DescribeStreamConsumerRequest.Builder describeStreamConsumerRequestBuilder() { - return 
appendUserAgent(DescribeStreamConsumerRequest.builder()); - } - - @SuppressWarnings("unchecked") - private static T appendUserAgent(final T builder) { - return (T) builder - .overrideConfiguration( - AwsRequestOverrideConfiguration.builder() - .addApiName(ApiName.builder().name(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT) - .version(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT_VERSION).build()) - .build()); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorConfig.java deleted file mode 100644 index 6098b2fa..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorConfig.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.coordinator; - -import lombok.Data; -import lombok.NonNull; -import lombok.experimental.Accessors; -import software.amazon.kinesis.leases.NoOpShardPrioritization; -import software.amazon.kinesis.leases.ShardPrioritization; - -/** - * Used by the KCL to configure the coordinator. - */ -@Data -@Accessors(fluent = true) -public class CoordinatorConfig { - /** - * Application name used by checkpointer to checkpoint. 
- * - * @return String - */ - @NonNull - private final String applicationName; - - /** - * The maximum number of attempts to initialize the Scheduler - * - *

Default value: 20

- */ - private int maxInitializationAttempts = 20; - - /** - * Interval in milliseconds between polling to check for parent shard completion. - * Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on - * completion of parent shards). - * - *

Default value: 10000L

- */ - private long parentShardPollIntervalMillis = 10000L; - - /** - * The Scheduler will skip shard sync during initialization if there are one or more leases in the lease table. This - * assumes that the shards and leases are in-sync. This enables customers to choose faster startup times (e.g. - * during incremental deployments of an application). - * - *

Default value: false

- */ - private boolean skipShardSyncAtWorkerInitializationIfLeasesExist = false; - - /** - * The number of milliseconds between polling of the shard consumer for triggering state changes, and health checks. - * - *

Default value: 1000 milliseconds

- */ - private long shardConsumerDispatchPollIntervalMillis = 1000L; - - /** - * Shard prioritization strategy. - * - *

Default value: {@link NoOpShardPrioritization}

- */ - private ShardPrioritization shardPrioritization = new NoOpShardPrioritization(); - - /** - * WorkerStateChangeListener to be used by the Scheduler. - * - *

Default value: {@link NoOpWorkerStateChangeListener}

- */ - private WorkerStateChangeListener workerStateChangeListener = new NoOpWorkerStateChangeListener(); - - /** - * GracefulShutdownCoordinator to be used by the Scheduler. - * - *

Default value: {@link GracefulShutdownCoordinator}

- */ - private GracefulShutdownCoordinator gracefulShutdownCoordinator = new GracefulShutdownCoordinator(); - - private CoordinatorFactory coordinatorFactory = new SchedulerCoordinatorFactory(); - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorFactory.java deleted file mode 100644 index 7a055bc7..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorFactory.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.coordinator; - -import java.util.concurrent.ExecutorService; - -import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.processor.Checkpointer; - -/** - * Used in the process of configuring and providing instances to the {@link Scheduler} - */ -public interface CoordinatorFactory { - /** - * Creates the executor service to be used by the Scheduler. - * - * @return ExecutorService - */ - ExecutorService createExecutorService(); - - /** - * Creates GracefulShutdownCoordinator to be used by the Scheduler. - * - *

Method Deprecated

- *

- * Note: This method has been deprecated, and will be removed in a future release. Use the configuration in - * {@link CoordinatorConfig#gracefulShutdownCoordinator}. Set the - * {@link CoordinatorConfig#gracefulShutdownCoordinator} to null in order to use this method. - *

- *

Resolution Order

- *
    - *
  1. {@link CoordinatorConfig#gracefulShutdownCoordinator()}
  2. - *
  3. {@link CoordinatorFactory#createGracefulShutdownCoordinator()}
  4. - *
- * - * - * @return a {@link GracefulShutdownCoordinator} that manages the process of shutting down the scheduler. - */ - @Deprecated - default GracefulShutdownCoordinator createGracefulShutdownCoordinator() { - return new GracefulShutdownCoordinator(); - } - - /** - * Creates a WorkerStateChangeListener to be used by the Scheduler. - * - *

Method Deprecated

- *

- * Note: This method has been deprecated, and will be removed in a future release. Use the configuration in - * {@link CoordinatorConfig#workerStateChangeListener}. Set the - * {@link CoordinatorConfig#workerStateChangeListener} to null in order to use this method. - *

- * - *

Resolution Order

- *
    - *
  1. {@link CoordinatorConfig#workerStateChangeListener()}
  2. - *
  3. {@link CoordinatorFactory#createWorkerStateChangeListener()}
  4. - *
- * - * @return a {@link WorkerStateChangeListener} instance that will be notified for specific {@link Scheduler} steps. - */ - @Deprecated - default WorkerStateChangeListener createWorkerStateChangeListener() { - return new NoOpWorkerStateChangeListener(); - } - - /** - * Creates a RecordProcessorChedckpointer to be used by the Scheduler. - * - * @param shardInfo ShardInfo to be used in order to create the ShardRecordProcessorCheckpointer - * @param checkpoint Checkpointer to be used in order to create Shardthe RecordProcessorCheckpointer - * @return ShardRecordProcessorCheckpointer - */ - ShardRecordProcessorCheckpointer createRecordProcessorCheckpointer(ShardInfo shardInfo, Checkpointer checkpoint); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownContext.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownContext.java deleted file mode 100644 index 02fca78a..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownContext.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.coordinator; - -import lombok.Data; -import lombok.experimental.Accessors; - -import java.util.concurrent.CountDownLatch; - -@Data -@Accessors(fluent = true) -class GracefulShutdownContext { - private final CountDownLatch shutdownCompleteLatch; - private final CountDownLatch notificationCompleteLatch; - private final Scheduler scheduler; - - static GracefulShutdownContext SHUTDOWN_ALREADY_COMPLETED = new GracefulShutdownContext(null, null, null); - - boolean isShutdownAlreadyCompleted() { - return shutdownCompleteLatch == null && notificationCompleteLatch == null && scheduler == null; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinator.java deleted file mode 100644 index 6dbc534d..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinator.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.coordinator; - -import java.util.concurrent.Callable; -import java.util.concurrent.Future; -import java.util.concurrent.FutureTask; -import java.util.concurrent.TimeUnit; - -import lombok.extern.slf4j.Slf4j; - -class GracefulShutdownCoordinator { - - Future startGracefulShutdown(Callable shutdownCallable) { - FutureTask task = new FutureTask<>(shutdownCallable); - Thread shutdownThread = new Thread(task, "RequestedShutdownThread"); - shutdownThread.start(); - return task; - - } - - Callable createGracefulShutdownCallable(Callable startWorkerShutdown) { - return new GracefulShutdownCallable(startWorkerShutdown); - } - - @Slf4j - static class GracefulShutdownCallable implements Callable { - private final Callable startWorkerShutdown; - - GracefulShutdownCallable(Callable startWorkerShutdown) { - this.startWorkerShutdown = startWorkerShutdown; - } - - private boolean isWorkerShutdownComplete(GracefulShutdownContext context) { - return context.scheduler().shutdownComplete() || context.scheduler().shardInfoShardConsumerMap().isEmpty(); - } - - private String awaitingLogMessage(GracefulShutdownContext context) { - long awaitingNotification = context.notificationCompleteLatch().getCount(); - long awaitingFinalShutdown = context.shutdownCompleteLatch().getCount(); - - return String.format( - "Waiting for %d record process to complete shutdown notification, and %d record processor to complete final shutdown ", - awaitingNotification, awaitingFinalShutdown); - } - - private String awaitingFinalShutdownMessage(GracefulShutdownContext context) { - long outstanding = context.shutdownCompleteLatch().getCount(); - return String.format("Waiting for %d record processors to complete final shutdown", outstanding); - } - - private boolean waitForRecordProcessors(GracefulShutdownContext context) { - - // - // Awaiting for all ShardConsumer/RecordProcessors to be notified that a shutdown has been requested. 
- // There is the possibility of a race condition where a lease is terminated after the shutdown request - // notification is started, but before the ShardConsumer is sent the notification. In this case the - // ShardConsumer would start the lease loss shutdown, and may never call the notification methods. - // - try { - while (!context.notificationCompleteLatch().await(1, TimeUnit.SECONDS)) { - if (Thread.interrupted()) { - throw new InterruptedException(); - } - log.info(awaitingLogMessage(context)); - if (workerShutdownWithRemaining(context.shutdownCompleteLatch().getCount(), context)) { - return false; - } - } - } catch (InterruptedException ie) { - log.warn("Interrupted while waiting for notification complete, terminating shutdown. {}", - awaitingLogMessage(context)); - return false; - } - - if (Thread.interrupted()) { - log.warn("Interrupted before worker shutdown, terminating shutdown"); - return false; - } - - // - // Once all record processors have been notified of the shutdown it is safe to allow the worker to - // start its shutdown behavior. Once shutdown starts it will stop renewer, and drop any remaining leases. - // - context.scheduler().shutdown(); - - if (Thread.interrupted()) { - log.warn("Interrupted after worker shutdown, terminating shutdown"); - return false; - } - - // - // Want to wait for all the remaining ShardConsumers/ShardRecordProcessor's to complete their final shutdown - // processing. This should really be a no-op since as part of the notification completion the lease for - // ShardConsumer is terminated. 
- // - try { - while (!context.shutdownCompleteLatch().await(1, TimeUnit.SECONDS)) { - if (Thread.interrupted()) { - throw new InterruptedException(); - } - log.info(awaitingFinalShutdownMessage(context)); - if (workerShutdownWithRemaining(context.shutdownCompleteLatch().getCount(), context)) { - return false; - } - } - } catch (InterruptedException ie) { - log.warn("Interrupted while waiting for shutdown completion, terminating shutdown. {}", - awaitingFinalShutdownMessage(context)); - return false; - } - return true; - } - - /** - * This checks to see if the worker has already hit it's shutdown target, while there is outstanding record - * processors. This maybe a little racy due to when the value of outstanding is retrieved. In general though the - * latch should be decremented before the shutdown completion. - * - * @param outstanding - * the number of record processor still awaiting shutdown. - */ - private boolean workerShutdownWithRemaining(long outstanding, GracefulShutdownContext context) { - if (isWorkerShutdownComplete(context)) { - if (outstanding != 0) { - log.info("Shutdown completed, but shutdownCompleteLatch still had outstanding {} with a current" - + " value of {}. 
shutdownComplete: {} -- Consumer Map: {}", outstanding, - context.shutdownCompleteLatch().getCount(), context.scheduler().shutdownComplete(), - context.scheduler().shardInfoShardConsumerMap().size()); - return true; - } - } - return false; - } - - @Override - public Boolean call() throws Exception { - GracefulShutdownContext context; - try { - context = startWorkerShutdown.call(); - } catch (Exception ex) { - log.warn("Caught exception while requesting initial worker shutdown.", ex); - throw ex; - } - return context.isShutdownAlreadyCompleted() || waitForRecordProcessors(context); - } - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/NoOpWorkerStateChangeListener.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/NoOpWorkerStateChangeListener.java deleted file mode 100644 index f316b351..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/NoOpWorkerStateChangeListener.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.coordinator; - -public class NoOpWorkerStateChangeListener implements WorkerStateChangeListener { - - /** - * Empty constructor for NoOp Worker State Change Listener - */ - public NoOpWorkerStateChangeListener() { - - } - - @Override - public void onWorkerStateChange(WorkerState newState) { - - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java deleted file mode 100644 index df7fdda4..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java +++ /dev/null @@ -1,657 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.coordinator; - -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; - -import com.google.common.annotations.VisibleForTesting; - -import lombok.AccessLevel; -import lombok.Getter; -import lombok.NoArgsConstructor; -import lombok.NonNull; -import lombok.experimental.Accessors; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.checkpoint.CheckpointConfig; -import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseCoordinator; -import software.amazon.kinesis.leases.LeaseManagementConfig; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.ShardDetector; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.leases.ShardPrioritization; -import software.amazon.kinesis.leases.ShardSyncTask; -import software.amazon.kinesis.leases.ShardSyncTaskManager; -import software.amazon.kinesis.leases.HierarchicalShardSyncer; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseCoordinator; -import software.amazon.kinesis.leases.exceptions.LeasingException; -import software.amazon.kinesis.lifecycle.LifecycleConfig; -import software.amazon.kinesis.lifecycle.ShardConsumer; -import software.amazon.kinesis.lifecycle.ShardConsumerArgument; -import software.amazon.kinesis.lifecycle.ShardConsumerShutdownNotification; -import software.amazon.kinesis.lifecycle.ShutdownNotification; -import software.amazon.kinesis.lifecycle.ShutdownReason; 
-import software.amazon.kinesis.lifecycle.TaskResult; -import software.amazon.kinesis.metrics.CloudWatchMetricsFactory; -import software.amazon.kinesis.metrics.MetricsCollectingTaskDecorator; -import software.amazon.kinesis.metrics.MetricsConfig; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.processor.ProcessorConfig; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; -import software.amazon.kinesis.processor.ShutdownNotificationAware; -import software.amazon.kinesis.retrieval.AggregatorUtil; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RetrievalConfig; - -/** - * - */ -@Getter -@Accessors(fluent = true) -@Slf4j -public class Scheduler implements Runnable { - - private SchedulerLog slog = new SchedulerLog(); - - private final CheckpointConfig checkpointConfig; - private final CoordinatorConfig coordinatorConfig; - private final LeaseManagementConfig leaseManagementConfig; - private final LifecycleConfig lifecycleConfig; - private final MetricsConfig metricsConfig; - private final ProcessorConfig processorConfig; - private final RetrievalConfig retrievalConfig; - - private final String applicationName; - private final int maxInitializationAttempts; - private final Checkpointer checkpoint; - private final long shardConsumerDispatchPollIntervalMillis; - // Backoff time when polling to check if application has finished processing - // parent shards - private final long parentShardPollIntervalMillis; - private final ExecutorService executorService; - // private final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy; - private final LeaseCoordinator leaseCoordinator; - private final ShardSyncTaskManager shardSyncTaskManager; - private final ShardPrioritization shardPrioritization; - private final boolean cleanupLeasesUponShardCompletion; - private final boolean 
skipShardSyncAtWorkerInitializationIfLeasesExist; - private final GracefulShutdownCoordinator gracefulShutdownCoordinator; - private final WorkerStateChangeListener workerStateChangeListener; - private final InitialPositionInStreamExtended initialPosition; - private final MetricsFactory metricsFactory; - private final long failoverTimeMillis; - private final long taskBackoffTimeMillis; - private final String streamName; - private final long listShardsBackoffTimeMillis; - private final int maxListShardsRetryAttempts; - private final LeaseRefresher leaseRefresher; - private final ShardDetector shardDetector; - private final boolean ignoreUnexpetedChildShards; - private final AggregatorUtil aggregatorUtil; - private final HierarchicalShardSyncer hierarchicalShardSyncer; - - // Holds consumers for shards the worker is currently tracking. Key is shard - // info, value is ShardConsumer. - private ConcurrentMap shardInfoShardConsumerMap = new ConcurrentHashMap<>(); - - private volatile boolean shutdown; - private volatile long shutdownStartTimeMillis; - private volatile boolean shutdownComplete = false; - - private final Object lock = new Object(); - - /** - * Used to ensure that only one requestedShutdown is in progress at a time. 
- */ - private Future gracefulShutdownFuture; - @VisibleForTesting - protected boolean gracefuleShutdownStarted = false; - - public Scheduler(@NonNull final CheckpointConfig checkpointConfig, - @NonNull final CoordinatorConfig coordinatorConfig, - @NonNull final LeaseManagementConfig leaseManagementConfig, - @NonNull final LifecycleConfig lifecycleConfig, - @NonNull final MetricsConfig metricsConfig, - @NonNull final ProcessorConfig processorConfig, - @NonNull final RetrievalConfig retrievalConfig) { - this.checkpointConfig = checkpointConfig; - this.coordinatorConfig = coordinatorConfig; - this.leaseManagementConfig = leaseManagementConfig; - this.lifecycleConfig = lifecycleConfig; - this.metricsConfig = metricsConfig; - this.processorConfig = processorConfig; - this.retrievalConfig = retrievalConfig; - - this.applicationName = this.coordinatorConfig.applicationName(); - this.maxInitializationAttempts = this.coordinatorConfig.maxInitializationAttempts(); - this.metricsFactory = this.metricsConfig.metricsFactory(); - this.leaseCoordinator = this.leaseManagementConfig.leaseManagementFactory() - .createLeaseCoordinator(this.metricsFactory); - this.leaseRefresher = this.leaseCoordinator.leaseRefresher(); - - // - // TODO: Figure out what to do with lease manage <=> checkpoint relationship - // - this.checkpoint = this.checkpointConfig.checkpointFactory().createCheckpointer(this.leaseCoordinator, - this.leaseRefresher); - - // - // TODO: Move this configuration to lifecycle - // - this.shardConsumerDispatchPollIntervalMillis = this.coordinatorConfig.shardConsumerDispatchPollIntervalMillis(); - this.parentShardPollIntervalMillis = this.coordinatorConfig.parentShardPollIntervalMillis(); - this.executorService = this.coordinatorConfig.coordinatorFactory().createExecutorService(); - - this.shardSyncTaskManager = this.leaseManagementConfig.leaseManagementFactory() - .createShardSyncTaskManager(this.metricsFactory); - this.shardPrioritization = 
this.coordinatorConfig.shardPrioritization(); - this.cleanupLeasesUponShardCompletion = this.leaseManagementConfig.cleanupLeasesUponShardCompletion(); - this.skipShardSyncAtWorkerInitializationIfLeasesExist = - this.coordinatorConfig.skipShardSyncAtWorkerInitializationIfLeasesExist(); - if (coordinatorConfig.gracefulShutdownCoordinator() != null) { - this.gracefulShutdownCoordinator = coordinatorConfig.gracefulShutdownCoordinator(); - } else { - this.gracefulShutdownCoordinator = this.coordinatorConfig.coordinatorFactory() - .createGracefulShutdownCoordinator(); - } - if (coordinatorConfig.workerStateChangeListener() != null) { - this.workerStateChangeListener = coordinatorConfig.workerStateChangeListener(); - } else { - this.workerStateChangeListener = this.coordinatorConfig.coordinatorFactory() - .createWorkerStateChangeListener(); - } - this.initialPosition = retrievalConfig.initialPositionInStreamExtended(); - this.failoverTimeMillis = this.leaseManagementConfig.failoverTimeMillis(); - this.taskBackoffTimeMillis = this.lifecycleConfig.taskBackoffTimeMillis(); -// this.retryGetRecordsInSeconds = this.retrievalConfig.retryGetRecordsInSeconds(); -// this.maxGetRecordsThreadPool = this.retrievalConfig.maxGetRecordsThreadPool(); - this.streamName = this.retrievalConfig.streamName(); - this.listShardsBackoffTimeMillis = this.retrievalConfig.listShardsBackoffTimeInMillis(); - this.maxListShardsRetryAttempts = this.retrievalConfig.maxListShardsRetryAttempts(); - this.shardDetector = this.shardSyncTaskManager.shardDetector(); - this.ignoreUnexpetedChildShards = this.leaseManagementConfig.ignoreUnexpectedChildShards(); - this.aggregatorUtil = this.lifecycleConfig.aggregatorUtil(); - this.hierarchicalShardSyncer = leaseManagementConfig.hierarchicalShardSyncer(); - } - - /** - * Start consuming data from the stream, and pass it to the application record processors. 
- */ - @Override - public void run() { - if (shutdown) { - return; - } - - try { - initialize(); - log.info("Initialization complete. Starting worker loop."); - } catch (RuntimeException e) { - log.error("Unable to initialize after {} attempts. Shutting down.", maxInitializationAttempts, e); - workerStateChangeListener.onAllInitializationAttemptsFailed(e); - shutdown(); - } - - while (!shouldShutdown()) { - runProcessLoop(); - } - - finalShutdown(); - log.info("Worker loop is complete. Exiting from worker."); - } - - private void initialize() { - synchronized (lock) { - workerStateChangeListener.onWorkerStateChange(WorkerStateChangeListener.WorkerState.INITIALIZING); - boolean isDone = false; - Exception lastException = null; - - for (int i = 0; (!isDone) && (i < maxInitializationAttempts); i++) { - try { - log.info("Initialization attempt {}", (i + 1)); - log.info("Initializing LeaseCoordinator"); - leaseCoordinator.initialize(); - - TaskResult result = null; - if (!skipShardSyncAtWorkerInitializationIfLeasesExist || leaseRefresher.isLeaseTableEmpty()) { - log.info("Syncing Kinesis shard info"); - ShardSyncTask shardSyncTask = new ShardSyncTask(shardDetector, leaseRefresher, initialPosition, - cleanupLeasesUponShardCompletion, ignoreUnexpetedChildShards, 0L, hierarchicalShardSyncer, - metricsFactory); - result = new MetricsCollectingTaskDecorator(shardSyncTask, metricsFactory).call(); - } else { - log.info("Skipping shard sync per configuration setting (and lease table is not empty)"); - } - - if (result == null || result.getException() == null) { - if (!leaseCoordinator.isRunning()) { - log.info("Starting LeaseCoordinator"); - leaseCoordinator.start(); - } else { - log.info("LeaseCoordinator is already running. 
No need to start it."); - } - isDone = true; - } else { - lastException = result.getException(); - } - } catch (LeasingException e) { - log.error("Caught exception when initializing LeaseCoordinator", e); - lastException = e; - } catch (Exception e) { - lastException = e; - } - - try { - Thread.sleep(parentShardPollIntervalMillis); - } catch (InterruptedException e) { - log.debug("Sleep interrupted while initializing worker."); - } - } - - if (!isDone) { - throw new RuntimeException(lastException); - } - workerStateChangeListener.onWorkerStateChange(WorkerStateChangeListener.WorkerState.STARTED); - } - } - - @VisibleForTesting - void runProcessLoop() { - try { - boolean foundCompletedShard = false; - Set assignedShards = new HashSet<>(); - for (ShardInfo shardInfo : getShardInfoForAssignments()) { - ShardConsumer shardConsumer = createOrGetShardConsumer(shardInfo, - processorConfig.shardRecordProcessorFactory()); - - if (shardConsumer.isShutdown() && shardConsumer.shutdownReason().equals(ShutdownReason.SHARD_END)) { - foundCompletedShard = true; - } else { - shardConsumer.executeLifecycle(); - } - assignedShards.add(shardInfo); - } - - if (foundCompletedShard) { - shardSyncTaskManager.syncShardAndLeaseInfo(); - } - - // clean up shard consumers for unassigned shards - cleanupShardConsumers(assignedShards); - - slog.info("Sleeping ..."); - Thread.sleep(shardConsumerDispatchPollIntervalMillis); - } catch (Exception e) { - log.error("Worker.run caught exception, sleeping for {} milli seconds!", - String.valueOf(shardConsumerDispatchPollIntervalMillis), e); - try { - Thread.sleep(shardConsumerDispatchPollIntervalMillis); - } catch (InterruptedException ex) { - log.info("Worker: sleep interrupted after catching exception ", ex); - } - } - slog.resetInfoLogging(); - } - - /** - * Returns whether worker can shutdown immediately. 
Note that this method is called from Worker's {{@link #run()} - * method before every loop run, so method must do minimum amount of work to not impact shard processing timings. - * - * @return Whether worker should shutdown immediately. - */ - @VisibleForTesting - boolean shouldShutdown() { - if (executorService.isShutdown()) { - log.error("Worker executor service has been shutdown, so record processors cannot be shutdown."); - return true; - } - if (shutdown) { - if (shardInfoShardConsumerMap.isEmpty()) { - log.info("All record processors have been shutdown successfully."); - return true; - } - if ((System.currentTimeMillis() - shutdownStartTimeMillis) >= failoverTimeMillis) { - log.info("Lease failover time is reached, so forcing shutdown."); - return true; - } - } - return false; - } - - /** - * Requests a graceful shutdown of the worker, notifying record processors, that implement - * {@link ShutdownNotificationAware}, of the impending shutdown. This gives the record processor a final chance to - * checkpoint. - * - * This will only create a single shutdown future. Additional attempts to start a graceful shutdown will return the - * previous future. - * - * It's possible that a record processor won't be notify before being shutdown. This can occur if the lease is - * lost after requesting shutdown, but before the notification is dispatched. - * - *

Requested Shutdown Process

When a shutdown process is requested it operates slightly differently to - * allow the record processors a chance to checkpoint a final time. - *
    - *
  1. Call to request shutdown invoked.
  2. - *
  3. Worker stops attempting to acquire new leases
  4. - *
  5. Record Processor Shutdown Begins - *
      - *
    1. Record processor is notified of the impending shutdown, and given a final chance to checkpoint
    2. - *
    3. The lease for the record processor is then dropped.
    4. - *
    5. The record processor enters into an idle state waiting for the worker to complete final termination
    6. - *
    7. The worker will detect a record processor that has lost it's lease, and will terminate the record processor - * with {@link ShutdownReason#LEASE_LOST}
    8. - *
    - *
  6. - *
  7. The worker will shutdown all record processors.
  8. - *
  9. Once all record processors have been terminated, the worker will terminate all owned resources.
  10. - *
  11. Once the worker shutdown is complete, the returned future is completed.
  12. - *
- * - * @return a future that will be set once the shutdown has completed. True indicates that the graceful shutdown - * completed successfully. A false value indicates that a non-exception case caused the shutdown process to - * terminate early. - */ - public Future startGracefulShutdown() { - synchronized (this) { - if (gracefulShutdownFuture == null) { - gracefulShutdownFuture = gracefulShutdownCoordinator - .startGracefulShutdown(createGracefulShutdownCallable()); - } - } - return gracefulShutdownFuture; - } - - /** - * Creates a callable that will execute the graceful shutdown process. This callable can be used to execute graceful - * shutdowns in your own executor, or execute the shutdown synchronously. - * - * @return a callable that run the graceful shutdown process. This may return a callable that return true if the - * graceful shutdown has already been completed. - * @throws IllegalStateException - * thrown by the callable if another callable has already started the shutdown process. - */ - public Callable createGracefulShutdownCallable() { - if (shutdownComplete()) { - return () -> true; - } - Callable startShutdown = createWorkerShutdownCallable(); - return gracefulShutdownCoordinator.createGracefulShutdownCallable(startShutdown); - } - - public boolean hasGracefulShutdownStarted() { - return gracefuleShutdownStarted; - } - - @VisibleForTesting - Callable createWorkerShutdownCallable() { - return () -> { - synchronized (this) { - if (this.gracefuleShutdownStarted) { - throw new IllegalStateException("Requested shutdown has already been started"); - } - this.gracefuleShutdownStarted = true; - } - // - // Stop accepting new leases. Once we do this we can be sure that - // no more leases will be acquired. - // - leaseCoordinator.stopLeaseTaker(); - - Collection leases = leaseCoordinator.getAssignments(); - if (leases == null || leases.isEmpty()) { - // - // If there are no leases notification is already completed, but we still need to shutdown the worker. 
- // - this.shutdown(); - return GracefulShutdownContext.SHUTDOWN_ALREADY_COMPLETED; - } - CountDownLatch shutdownCompleteLatch = new CountDownLatch(leases.size()); - CountDownLatch notificationCompleteLatch = new CountDownLatch(leases.size()); - for (Lease lease : leases) { - ShutdownNotification shutdownNotification = new ShardConsumerShutdownNotification(leaseCoordinator, - lease, notificationCompleteLatch, shutdownCompleteLatch); - ShardInfo shardInfo = DynamoDBLeaseCoordinator.convertLeaseToAssignment(lease); - ShardConsumer consumer = shardInfoShardConsumerMap.get(shardInfo); - if (consumer != null) { - consumer.gracefulShutdown(shutdownNotification); - } else { - // - // There is a race condition between retrieving the current assignments, and creating the - // notification. If the a lease is lost in between these two points, we explicitly decrement the - // notification latches to clear the shutdown. - // - notificationCompleteLatch.countDown(); - shutdownCompleteLatch.countDown(); - } - } - return new GracefulShutdownContext(shutdownCompleteLatch, notificationCompleteLatch, this); - }; - } - - /** - * Signals worker to shutdown. Worker will try initiating shutdown of all record processors. Note that if executor - * services were passed to the worker by the user, worker will not attempt to shutdown those resources. - * - *

Shutdown Process

When called this will start shutdown of the record processor, and eventually shutdown - * the worker itself. - *
    - *
  1. Call to start shutdown invoked
  2. - *
  3. Lease coordinator told to stop taking leases, and to drop existing leases.
  4. - *
  5. Worker discovers record processors that no longer have leases.
  6. - *
  7. Worker triggers shutdown with state {@link ShutdownReason#LEASE_LOST}.
  8. - *
  9. Once all record processors are shutdown, worker terminates owned resources.
  10. - *
  11. Shutdown complete.
  12. - *
- */ - public void shutdown() { - synchronized (lock) { - if (shutdown) { - log.warn("Shutdown requested a second time."); - return; - } - log.info("Worker shutdown requested."); - - // Set shutdown flag, so Worker.run can start shutdown process. - shutdown = true; - shutdownStartTimeMillis = System.currentTimeMillis(); - - // Stop lease coordinator, so leases are not renewed or stolen from other workers. - // Lost leases will force Worker to begin shutdown process for all shard consumers in - // Worker.run(). - leaseCoordinator.stop(); - workerStateChangeListener.onWorkerStateChange(WorkerStateChangeListener.WorkerState.SHUT_DOWN); - } - } - - /** - * Perform final shutdown related tasks for the worker including shutting down worker owned executor services, - * threads, etc. - */ - private void finalShutdown() { - log.info("Starting worker's final shutdown."); - - if (executorService instanceof SchedulerCoordinatorFactory.SchedulerThreadPoolExecutor) { - // This should interrupt all active record processor tasks. - executorService.shutdownNow(); - } - if (metricsFactory instanceof CloudWatchMetricsFactory) { - ((CloudWatchMetricsFactory) metricsFactory).shutdown(); - } - shutdownComplete = true; - } - - private List getShardInfoForAssignments() { - List assignedStreamShards = leaseCoordinator.getCurrentAssignments(); - List prioritizedShards = shardPrioritization.prioritize(assignedStreamShards); - - if ((prioritizedShards != null) && (!prioritizedShards.isEmpty())) { - if (slog.isInfoEnabled()) { - StringBuilder builder = new StringBuilder(); - boolean firstItem = true; - for (ShardInfo shardInfo : prioritizedShards) { - if (!firstItem) { - builder.append(", "); - } - builder.append(shardInfo.shardId()); - firstItem = false; - } - slog.info("Current stream shard assignments: " + builder.toString()); - } - } else { - slog.info("No activities assigned"); - } - - return prioritizedShards; - } - - /** - * NOTE: This method is internal/private to the Worker class. 
It has package access solely for testing. - * - * @param shardInfo - * Kinesis shard info - * @return ShardConsumer for the shard - */ - ShardConsumer createOrGetShardConsumer(@NonNull final ShardInfo shardInfo, - @NonNull final ShardRecordProcessorFactory shardRecordProcessorFactory) { - ShardConsumer consumer = shardInfoShardConsumerMap.get(shardInfo); - // Instantiate a new consumer if we don't have one, or the one we - // had was from an earlier - // lease instance (and was shutdown). Don't need to create another - // one if the shard has been - // completely processed (shutdown reason terminate). - if ((consumer == null) - || (consumer.isShutdown() && consumer.shutdownReason().equals(ShutdownReason.LEASE_LOST))) { - consumer = buildConsumer(shardInfo, shardRecordProcessorFactory); - shardInfoShardConsumerMap.put(shardInfo, consumer); - slog.infoForce("Created new shardConsumer for : " + shardInfo); - } - return consumer; - } - - protected ShardConsumer buildConsumer(@NonNull final ShardInfo shardInfo, - @NonNull final ShardRecordProcessorFactory shardRecordProcessorFactory) { - RecordsPublisher cache = retrievalConfig.retrievalFactory().createGetRecordsCache(shardInfo, metricsFactory); - ShardRecordProcessorCheckpointer checkpointer = coordinatorConfig.coordinatorFactory().createRecordProcessorCheckpointer(shardInfo, - checkpoint); - ShardConsumerArgument argument = new ShardConsumerArgument(shardInfo, - streamName, - leaseRefresher, - executorService, - cache, - shardRecordProcessorFactory.shardRecordProcessor(), - checkpoint, - checkpointer, - parentShardPollIntervalMillis, - taskBackoffTimeMillis, - skipShardSyncAtWorkerInitializationIfLeasesExist, - listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, - processorConfig.callProcessRecordsEvenForEmptyRecordList(), - shardConsumerDispatchPollIntervalMillis, - initialPosition, - cleanupLeasesUponShardCompletion, - ignoreUnexpetedChildShards, - shardDetector, - aggregatorUtil, - hierarchicalShardSyncer, - 
metricsFactory); - return new ShardConsumer(cache, executorService, shardInfo, lifecycleConfig.logWarningForTaskAfterMillis(), - argument, lifecycleConfig.taskExecutionListener()); - } - - /** - * NOTE: This method is internal/private to the Worker class. It has package access solely for testing. - * - * This method relies on ShardInfo.equals() method returning true for ShardInfo objects which may have been - * instantiated with parentShardIds in a different order (and rest of the fields being the equal). For example - * shardInfo1.equals(shardInfo2) should return true with shardInfo1 and shardInfo2 defined as follows. ShardInfo - * shardInfo1 = new ShardInfo(shardId1, concurrencyToken1, Arrays.asList("parent1", "parent2")); ShardInfo - * shardInfo2 = new ShardInfo(shardId1, concurrencyToken1, Arrays.asList("parent2", "parent1")); - */ - void cleanupShardConsumers(Set assignedShards) { - for (ShardInfo shard : shardInfoShardConsumerMap.keySet()) { - if (!assignedShards.contains(shard)) { - // Shutdown the consumer since we are no longer responsible for - // the shard. - ShardConsumer consumer = shardInfoShardConsumerMap.get(shard); - if (consumer.leaseLost()) { - shardInfoShardConsumerMap.remove(shard); - log.debug("Removed consumer for {} as lease has been lost", shard.shardId()); - } else { - consumer.executeLifecycle(); - } - } - } - } - - /** - * Logger for suppressing too much INFO logging. To avoid too much logging information Worker will output logging at - * INFO level for a single pass through the main loop every minute. At DEBUG level it will output all INFO logs on - * every pass. 
- */ - @NoArgsConstructor(access = AccessLevel.PRIVATE) - private static class SchedulerLog { - - private long reportIntervalMillis = TimeUnit.MINUTES.toMillis(1); - private long nextReportTime = System.currentTimeMillis() + reportIntervalMillis; - private boolean infoReporting; - - void info(Object message) { - if (this.isInfoEnabled()) { - log.info("{}", message); - } - } - - void infoForce(Object message) { - log.info("{}", message); - } - - private boolean isInfoEnabled() { - return infoReporting; - } - - private void resetInfoLogging() { - if (infoReporting) { - // We just logged at INFO level for a pass through worker loop - if (log.isInfoEnabled()) { - infoReporting = false; - nextReportTime = System.currentTimeMillis() + reportIntervalMillis; - } // else is DEBUG or TRACE so leave reporting true - } else if (nextReportTime <= System.currentTimeMillis()) { - infoReporting = true; - } - } - } - - @Deprecated - public Future requestShutdown() { - return null; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/SchedulerCoordinatorFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/SchedulerCoordinatorFactory.java deleted file mode 100644 index 72f830fb..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/SchedulerCoordinatorFactory.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.coordinator; - -import java.util.concurrent.ExecutorService; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import lombok.Data; -import lombok.NonNull; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.processor.Checkpointer; - -/** - * - */ -@Data -@KinesisClientInternalApi -public class SchedulerCoordinatorFactory implements CoordinatorFactory { - /** - * {@inheritDoc} - */ - @Override - public ExecutorService createExecutorService() { - return new SchedulerThreadPoolExecutor( - new ThreadFactoryBuilder().setNameFormat("ShardRecordProcessor-%04d").build()); - } - - static class SchedulerThreadPoolExecutor extends ThreadPoolExecutor { - private static final long DEFAULT_KEEP_ALIVE = 60L; - SchedulerThreadPoolExecutor(ThreadFactory threadFactory) { - super(0, Integer.MAX_VALUE, DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, new SynchronousQueue<>(), - threadFactory); - } - } - - /** - * {@inheritDoc} - */ - @Override - public ShardRecordProcessorCheckpointer createRecordProcessorCheckpointer(@NonNull final ShardInfo shardInfo, - @NonNull final Checkpointer checkpoint) { - return new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/WorkerStateChangeListener.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/WorkerStateChangeListener.java deleted file mode 100644 index 2ca08aa4..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/WorkerStateChangeListener.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * 
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.coordinator; - -/** - * A listener for callbacks on changes worker state - */ -@FunctionalInterface -public interface WorkerStateChangeListener { - enum WorkerState { - CREATED, - INITIALIZING, - STARTED, - SHUT_DOWN - } - - void onWorkerStateChange(WorkerState newState); - - default void onAllInitializationAttemptsFailed(Throwable e) { - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ShutdownException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ShutdownException.java deleted file mode 100644 index 5a57b11b..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ShutdownException.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.exceptions; - -/** - * The ShardRecordProcessor instance has been shutdown (e.g. and attempts a checkpoint). - */ -public class ShutdownException extends KinesisClientLibNonRetryableException { - - private static final long serialVersionUID = 1L; - - /** - * @param message provides more details about the cause and potential ways to debug/address. - */ - public ShutdownException(String message) { - super(message); - } - - /** - * @param message provides more details about the cause and potential ways to debug/address. - * @param e Cause of the exception - */ - public ShutdownException(String message, Exception e) { - super(message, e); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/KinesisClientLibIOException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/KinesisClientLibIOException.java deleted file mode 100644 index f15a8088..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/KinesisClientLibIOException.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.exceptions.internal; - -import software.amazon.kinesis.exceptions.KinesisClientLibRetryableException; - -/** - * Thrown when we encounter issues when reading/writing information (e.g. shard information from Kinesis may not be - * current/complete). 
- */ -public class KinesisClientLibIOException extends KinesisClientLibRetryableException { - private static final long serialVersionUID = 1L; - - /** - * Constructor. - * - * @param message Error message. - */ - public KinesisClientLibIOException(String message) { - super(message); - } - - /** - * Constructor. - * - * @param message Error message. - * @param e Cause. - */ - public KinesisClientLibIOException(String message, Exception e) { - super(message, e); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java deleted file mode 100644 index c61bf935..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java +++ /dev/null @@ -1,756 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.leases; - -import java.io.Serializable; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.function.Function; -import java.util.stream.Collectors; - -import org.apache.commons.lang3.StringUtils; - -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.kinesis.model.Shard; -import software.amazon.awssdk.utils.CollectionUtils; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.exceptions.internal.KinesisClientLibIOException; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.MetricsUtil; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Helper class to sync leases with shards of the Kinesis stream. - * It will create new leases/activities when it discovers new Kinesis shards (bootstrap/resharding). - * It deletes leases for shards that have been trimmed from Kinesis, or if we've completed processing it - * and begun processing it's child shards. - */ -@Slf4j -@KinesisClientInternalApi -public class HierarchicalShardSyncer { - - /** - * Check and create leases for any new shards (e.g. following a reshard operation). Sync leases with Kinesis shards - * (e.g. 
at startup, or when we reach end of a shard). - * - * @param shardDetector - * @param leaseRefresher - * @param initialPosition - * @param cleanupLeasesOfCompletedShards - * @param ignoreUnexpectedChildShards - * @param scope - * @throws DependencyException - * @throws InvalidStateException - * @throws ProvisionedThroughputException - * @throws KinesisClientLibIOException - */ - // CHECKSTYLE:OFF CyclomaticComplexity - public synchronized void checkAndCreateLeaseForNewShards(@NonNull final ShardDetector shardDetector, - final LeaseRefresher leaseRefresher, final InitialPositionInStreamExtended initialPosition, - final boolean cleanupLeasesOfCompletedShards, final boolean ignoreUnexpectedChildShards, - final MetricsScope scope) throws DependencyException, InvalidStateException, - ProvisionedThroughputException, KinesisClientLibIOException { - final List shards = getShardList(shardDetector); - log.debug("Num shards: {}", shards.size()); - - final Map shardIdToShardMap = constructShardIdToShardMap(shards); - final Map> shardIdToChildShardIdsMap = constructShardIdToChildShardIdsMap( - shardIdToShardMap); - final Set inconsistentShardIds = findInconsistentShardIds(shardIdToChildShardIdsMap, shardIdToShardMap); - if (!ignoreUnexpectedChildShards) { - assertAllParentShardsAreClosed(inconsistentShardIds); - } - - final List currentLeases = leaseRefresher.listLeases(); - - final List newLeasesToCreate = determineNewLeasesToCreate(shards, currentLeases, initialPosition, - inconsistentShardIds); - log.debug("Num new leases to create: {}", newLeasesToCreate.size()); - for (Lease lease : newLeasesToCreate) { - long startTime = System.currentTimeMillis(); - boolean success = false; - try { - leaseRefresher.createLeaseIfNotExists(lease); - success = true; - } finally { - MetricsUtil.addSuccessAndLatency(scope, "CreateLease", success, startTime, MetricsLevel.DETAILED); - } - } - - final List trackedLeases = new ArrayList<>(currentLeases); - trackedLeases.addAll(newLeasesToCreate); 
- cleanupGarbageLeases(shardDetector, shards, trackedLeases, leaseRefresher); - if (cleanupLeasesOfCompletedShards) { - cleanupLeasesOfFinishedShards(currentLeases, shardIdToShardMap, shardIdToChildShardIdsMap, trackedLeases, - leaseRefresher); - } - } - // CHECKSTYLE:ON CyclomaticComplexity - - /** Helper method to detect a race condition between fetching the shards via paginated DescribeStream calls - * and a reshard operation. - * @param inconsistentShardIds - * @throws KinesisClientLibIOException - */ - private static void assertAllParentShardsAreClosed(final Set inconsistentShardIds) - throws KinesisClientLibIOException { - if (!CollectionUtils.isNullOrEmpty(inconsistentShardIds)) { - final String ids = StringUtils.join(inconsistentShardIds, ' '); - throw new KinesisClientLibIOException(String.format( - "%d open child shards (%s) are inconsistent. This can happen due to a race condition between describeStream and a reshard operation.", - inconsistentShardIds.size(), ids)); - } - } - - /** - * Helper method to construct the list of inconsistent shards, which are open shards with non-closed ancestor - * parent(s). - * @param shardIdToChildShardIdsMap - * @param shardIdToShardMap - * @return Set of inconsistent open shard ids for shards having open parents. - */ - private static Set findInconsistentShardIds(final Map> shardIdToChildShardIdsMap, - final Map shardIdToShardMap) { - return shardIdToChildShardIdsMap.entrySet().stream() - .filter(entry -> entry.getKey() == null - || shardIdToShardMap.get(entry.getKey()).sequenceNumberRange().endingSequenceNumber() == null) - .flatMap(entry -> shardIdToChildShardIdsMap.get(entry.getKey()).stream()).collect(Collectors.toSet()); - } - - /** - * Note: this has package level access for testing purposes. - * Useful for asserting that we don't have an incomplete shard list following a reshard operation. 
- * We verify that if the shard is present in the shard list, it is closed and its hash key range - * is covered by its child shards. - * @param shardIdsOfClosedShards Id of the shard which is expected to be closed - * @return ShardIds of child shards (children of the expectedClosedShard) - * @throws KinesisClientLibIOException - */ - synchronized void assertClosedShardsAreCoveredOrAbsent(final Map shardIdToShardMap, - final Map> shardIdToChildShardIdsMap, final Set shardIdsOfClosedShards) - throws KinesisClientLibIOException { - final String exceptionMessageSuffix = "This can happen if we constructed the list of shards " - + " while a reshard operation was in progress."; - - for (String shardId : shardIdsOfClosedShards) { - final Shard shard = shardIdToShardMap.get(shardId); - if (shard == null) { - log.info("Shard {} is not present in Kinesis anymore.", shardId); - continue; - } - - final String endingSequenceNumber = shard.sequenceNumberRange().endingSequenceNumber(); - if (endingSequenceNumber == null) { - throw new KinesisClientLibIOException("Shard " + shardIdsOfClosedShards - + " is not closed. " + exceptionMessageSuffix); - } - - final Set childShardIds = shardIdToChildShardIdsMap.get(shardId); - if (childShardIds == null) { - throw new KinesisClientLibIOException("Incomplete shard list: Closed shard " + shardId - + " has no children." 
+ exceptionMessageSuffix); - } - - assertHashRangeOfClosedShardIsCovered(shard, shardIdToShardMap, childShardIds); - } - } - - private synchronized void assertHashRangeOfClosedShardIsCovered(final Shard closedShard, - final Map shardIdToShardMap, final Set childShardIds) - throws KinesisClientLibIOException { - BigInteger minStartingHashKeyOfChildren = null; - BigInteger maxEndingHashKeyOfChildren = null; - - final BigInteger startingHashKeyOfClosedShard = new BigInteger(closedShard.hashKeyRange().startingHashKey()); - final BigInteger endingHashKeyOfClosedShard = new BigInteger(closedShard.hashKeyRange().endingHashKey()); - - for (String childShardId : childShardIds) { - final Shard childShard = shardIdToShardMap.get(childShardId); - final BigInteger startingHashKey = new BigInteger(childShard.hashKeyRange().startingHashKey()); - if (minStartingHashKeyOfChildren == null || startingHashKey.compareTo(minStartingHashKeyOfChildren) < 0) { - minStartingHashKeyOfChildren = startingHashKey; - } - - final BigInteger endingHashKey = new BigInteger(childShard.hashKeyRange().endingHashKey()); - if (maxEndingHashKeyOfChildren == null || endingHashKey.compareTo(maxEndingHashKeyOfChildren) > 0) { - maxEndingHashKeyOfChildren = endingHashKey; - } - } - - if (minStartingHashKeyOfChildren == null || maxEndingHashKeyOfChildren == null - || minStartingHashKeyOfChildren.compareTo(startingHashKeyOfClosedShard) > 0 - || maxEndingHashKeyOfChildren.compareTo(endingHashKeyOfClosedShard) < 0) { - throw new KinesisClientLibIOException(String.format( - "Incomplete shard list: hash key range of shard %s is not covered by its child shards.", - closedShard.shardId())); - } - - } - - /** - * Helper method to construct shardId->setOfChildShardIds map. - * Note: This has package access for testing purposes only. 
- * @param shardIdToShardMap - * @return - */ - static Map> constructShardIdToChildShardIdsMap(final Map shardIdToShardMap) { - final Map> shardIdToChildShardIdsMap = new HashMap<>(); - - for (final Map.Entry entry : shardIdToShardMap.entrySet()) { - final String shardId = entry.getKey(); - final Shard shard = entry.getValue(); - final String parentShardId = shard.parentShardId(); - if (parentShardId != null && shardIdToShardMap.containsKey(parentShardId)) { - final Set childShardIds = shardIdToChildShardIdsMap.computeIfAbsent(parentShardId, - key -> new HashSet<>()); - childShardIds.add(shardId); - } - - final String adjacentParentShardId = shard.adjacentParentShardId(); - if (adjacentParentShardId != null && shardIdToShardMap.containsKey(adjacentParentShardId)) { - final Set childShardIds = shardIdToChildShardIdsMap.computeIfAbsent(adjacentParentShardId, - key -> new HashSet<>()); - childShardIds.add(shardId); - } - } - return shardIdToChildShardIdsMap; - } - - private static List getShardList(@NonNull final ShardDetector shardDetector) throws KinesisClientLibIOException { - final List shards = shardDetector.listShards(); - if (shards == null) { - throw new KinesisClientLibIOException( - "Stream is not in ACTIVE OR UPDATING state - will retry getting the shard list."); - } - return shards; - } - - /** - * Determine new leases to create and their initial checkpoint. - * Note: Package level access only for testing purposes. - * - * For each open (no ending sequence number) shard without open parents that doesn't already have a lease, - * determine if it is a descendent of any shard which is or will be processed (e.g. for which a lease exists): - * If so, set checkpoint of the shard to TrimHorizon and also create leases for ancestors if needed. - * If not, set checkpoint of the shard to the initial position specified by the client. 
- * To check if we need to create leases for ancestors, we use the following rules: - * * If we began (or will begin) processing data for a shard, then we must reach end of that shard before - * we begin processing data from any of its descendants. - * * A shard does not start processing data until data from all its parents has been processed. - * Note, if the initial position is LATEST and a shard has two parents and only one is a descendant - we'll create - * leases corresponding to both the parents - the parent shard which is not a descendant will have - * its checkpoint set to Latest. - * - * We assume that if there is an existing lease for a shard, then either: - * * we have previously created a lease for its parent (if it was needed), or - * * the parent shard has expired. - * - * For example: - * Shard structure (each level depicts a stream segment): - * 0 1 2 3 4 5 - shards till epoch 102 - * \ / \ / | | - * 6 7 4 5 - shards from epoch 103 - 205 - * \ / | / \ - * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) - * Current leases: (3, 4, 5) - * New leases to create: (2, 6, 7, 8, 9, 10) - * - * The leases returned are sorted by the starting sequence number - following the same order - * when persisting the leases in DynamoDB will ensure that we recover gracefully if we fail - * before creating all the leases. - * - * If a shard has no existing lease, is open, and is a descendant of a parent which is still open, we ignore it - * here; this happens when the list of shards is inconsistent, which could be due to pagination delay for very - * high shard count streams (i.e., dynamodb streams for tables with thousands of partitions). This can only - * currently happen here if ignoreUnexpectedChildShards was true in syncShardleases. 
- * - * - * @param shards List of all shards in Kinesis (we'll create new leases based on this set) - * @param currentLeases List of current leases - * @param initialPosition One of LATEST, TRIM_HORIZON, or AT_TIMESTAMP. We'll start fetching records from that - * location in the shard (when an application starts up for the first time - and there are no checkpoints). - * @param inconsistentShardIds Set of child shard ids having open parents. - * @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard - */ - static List determineNewLeasesToCreate(final List shards, final List currentLeases, - final InitialPositionInStreamExtended initialPosition, final Set inconsistentShardIds) { - final Map shardIdToNewLeaseMap = new HashMap<>(); - final Map shardIdToShardMapOfAllKinesisShards = constructShardIdToShardMap(shards); - - final Set shardIdsOfCurrentLeases = currentLeases.stream() - .peek(lease -> log.debug("Existing lease: {}", lease)).map(Lease::leaseKey).collect(Collectors.toSet()); - - final List openShards = getOpenShards(shards); - final Map memoizationContext = new HashMap<>(); - - // Iterate over the open shards and find those that don't have any lease entries. - for (Shard shard : openShards) { - final String shardId = shard.shardId(); - log.debug("Evaluating leases for open shard {} and its ancestors.", shardId); - if (shardIdsOfCurrentLeases.contains(shardId)) { - log.debug("Lease for shardId {} already exists. Not creating a lease", shardId); - } else if (inconsistentShardIds.contains(shardId)) { - log.info("shardId {} is an inconsistent child. 
Not creating a lease", shardId); - } else { - log.debug("Need to create a lease for shardId {}", shardId); - final Lease newLease = newKCLLease(shard); - final boolean isDescendant = checkIfDescendantAndAddNewLeasesForAncestors(shardId, initialPosition, - shardIdsOfCurrentLeases, shardIdToShardMapOfAllKinesisShards, shardIdToNewLeaseMap, - memoizationContext); - - /** - * If the shard is a descendant and the specified initial position is AT_TIMESTAMP, then the - * checkpoint should be set to AT_TIMESTAMP, else to TRIM_HORIZON. For AT_TIMESTAMP, we will add a - * lease just like we do for TRIM_HORIZON. However we will only return back records with server-side - * timestamp at or after the specified initial position timestamp. - * - * Shard structure (each level depicts a stream segment): - * 0 1 2 3 4 5 - shards till epoch 102 - * \ / \ / | | - * 6 7 4 5 - shards from epoch 103 - 205 - * \ / | /\ - * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) - * - * Current leases: empty set - * - * For the above example, suppose the initial position in stream is set to AT_TIMESTAMP with - * timestamp value 206. We will then create new leases for all the shards (with checkpoint set to - * AT_TIMESTAMP), including the ancestor shards with epoch less than 206. However as we begin - * processing the ancestor shards, their checkpoints would be updated to SHARD_END and their leases - * would then be deleted since they won't have records with server-side timestamp at/after 206. And - * after that we will begin processing the descendant shards with epoch at/after 206 and we will - * return the records that meet the timestamp requirement for these shards. 
- */ - if (isDescendant - && !initialPosition.getInitialPositionInStream().equals(InitialPositionInStream.AT_TIMESTAMP)) { - newLease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - } else { - newLease.checkpoint(convertToCheckpoint(initialPosition)); - } - log.debug("Set checkpoint of {} to {}", newLease.leaseKey(), newLease.checkpoint()); - shardIdToNewLeaseMap.put(shardId, newLease); - } - } - - final List newLeasesToCreate = new ArrayList<>(shardIdToNewLeaseMap.values()); - final Comparator startingSequenceNumberComparator = new StartingSequenceNumberAndShardIdBasedComparator( - shardIdToShardMapOfAllKinesisShards); - newLeasesToCreate.sort(startingSequenceNumberComparator); - return newLeasesToCreate; - } - - /** - * Determine new leases to create and their initial checkpoint. - * Note: Package level access only for testing purposes. - */ - static List determineNewLeasesToCreate(final List shards, final List currentLeases, - final InitialPositionInStreamExtended initialPosition) { - final Set inconsistentShardIds = new HashSet<>(); - return determineNewLeasesToCreate(shards, currentLeases, initialPosition, inconsistentShardIds); - } - - /** - * Note: Package level access for testing purposes only. - * Check if this shard is a descendant of a shard that is (or will be) processed. - * Create leases for the ancestors of this shard as required. - * See javadoc of determineNewLeasesToCreate() for rules and example. - * - * @param shardId The shardId to check. - * @param initialPosition One of LATEST, TRIM_HORIZON, or AT_TIMESTAMP. We'll start fetching records from that - * location in the shard (when an application starts up for the first time - and there are no checkpoints). - * @param shardIdsOfCurrentLeases The shardIds for the current leases. - * @param shardIdToShardMapOfAllKinesisShards ShardId->Shard map containing all shards obtained via DescribeStream. - * @param shardIdToLeaseMapOfNewShards Add lease POJOs corresponding to ancestors to this map. 
- * @param memoizationContext Memoization of shards that have been evaluated as part of the evaluation - * @return true if the shard is a descendant of any current shard (lease already exists) - */ - // CHECKSTYLE:OFF CyclomaticComplexity - static boolean checkIfDescendantAndAddNewLeasesForAncestors(final String shardId, - final InitialPositionInStreamExtended initialPosition, final Set shardIdsOfCurrentLeases, - final Map shardIdToShardMapOfAllKinesisShards, - final Map shardIdToLeaseMapOfNewShards, final Map memoizationContext) { - - final Boolean previousValue = memoizationContext.get(shardId); - if (previousValue != null) { - return previousValue; - } - - boolean isDescendant = false; - final Set descendantParentShardIds = new HashSet<>(); - - if (shardId != null && shardIdToShardMapOfAllKinesisShards.containsKey(shardId)) { - if (shardIdsOfCurrentLeases.contains(shardId)) { - // This shard is a descendant of a current shard. - isDescendant = true; - // We don't need to add leases of its ancestors, - // because we'd have done it when creating a lease for this shard. - } else { - final Shard shard = shardIdToShardMapOfAllKinesisShards.get(shardId); - final Set parentShardIds = getParentShardIds(shard, shardIdToShardMapOfAllKinesisShards); - for (String parentShardId : parentShardIds) { - // Check if the parent is a descendant, and include its ancestors. 
- if (checkIfDescendantAndAddNewLeasesForAncestors(parentShardId, initialPosition, - shardIdsOfCurrentLeases, shardIdToShardMapOfAllKinesisShards, shardIdToLeaseMapOfNewShards, - memoizationContext)) { - isDescendant = true; - descendantParentShardIds.add(parentShardId); - log.debug("Parent shard {} is a descendant.", parentShardId); - } else { - log.debug("Parent shard {} is NOT a descendant.", parentShardId); - } - } - - // If this is a descendant, create leases for its parent shards (if they don't exist) - if (isDescendant) { - for (String parentShardId : parentShardIds) { - if (!shardIdsOfCurrentLeases.contains(parentShardId)) { - log.debug("Need to create a lease for shardId {}", parentShardId); - Lease lease = shardIdToLeaseMapOfNewShards.get(parentShardId); - if (lease == null) { - lease = newKCLLease(shardIdToShardMapOfAllKinesisShards.get(parentShardId)); - shardIdToLeaseMapOfNewShards.put(parentShardId, lease); - } - - if (descendantParentShardIds.contains(parentShardId) - && !initialPosition.getInitialPositionInStream() - .equals(InitialPositionInStream.AT_TIMESTAMP)) { - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - } else { - lease.checkpoint(convertToCheckpoint(initialPosition)); - } - } - } - } else { - // This shard should be included, if the customer wants to process all records in the stream or - // if the initial position is AT_TIMESTAMP. For AT_TIMESTAMP, we will add a lease just like we do - // for TRIM_HORIZON. However we will only return back records with server-side timestamp at or - // after the specified initial position timestamp. 
- if (initialPosition.getInitialPositionInStream().equals(InitialPositionInStream.TRIM_HORIZON) - || initialPosition.getInitialPositionInStream() - .equals(InitialPositionInStream.AT_TIMESTAMP)) { - isDescendant = true; - } - } - - } - } - - memoizationContext.put(shardId, isDescendant); - return isDescendant; - } - // CHECKSTYLE:ON CyclomaticComplexity - - /** - * Helper method to get parent shardIds of the current shard - includes the parent shardIds if: - * a/ they are not null - * b/ if they exist in the current shard map (i.e. haven't expired) - * - * @param shard Will return parents of this shard - * @param shardIdToShardMapOfAllKinesisShards ShardId->Shard map containing all shards obtained via DescribeStream. - * @return Set of parentShardIds - */ - static Set getParentShardIds(final Shard shard, - final Map shardIdToShardMapOfAllKinesisShards) { - final Set parentShardIds = new HashSet<>(2); - final String parentShardId = shard.parentShardId(); - if (parentShardId != null && shardIdToShardMapOfAllKinesisShards.containsKey(parentShardId)) { - parentShardIds.add(parentShardId); - } - final String adjacentParentShardId = shard.adjacentParentShardId(); - if (adjacentParentShardId != null && shardIdToShardMapOfAllKinesisShards.containsKey(adjacentParentShardId)) { - parentShardIds.add(adjacentParentShardId); - } - return parentShardIds; - } - - /** - * Delete leases corresponding to shards that no longer exist in the stream. Current scheme: Delete a lease if: - *
    - *
  • The corresponding shard is not present in the list of Kinesis shards
  • - *
  • The parentShardIds listed in the lease are also not present in the list of Kinesis shards.
  • - *
- * - * @param shards - * List of all Kinesis shards (assumed to be a consistent snapshot - when stream is in Active state). - * @param trackedLeases - * List of - * @param leaseRefresher - * @throws KinesisClientLibIOException - * Thrown if we couldn't get a fresh shard list from Kinesis. - * @throws ProvisionedThroughputException - * @throws InvalidStateException - * @throws DependencyException - */ - private static void cleanupGarbageLeases(@NonNull final ShardDetector shardDetector, final List shards, - final List trackedLeases, final LeaseRefresher leaseRefresher) throws KinesisClientLibIOException, - DependencyException, InvalidStateException, ProvisionedThroughputException { - final Set kinesisShards = shards.stream().map(Shard::shardId).collect(Collectors.toSet()); - - // Check if there are leases for non-existent shards - final List garbageLeases = trackedLeases.stream() - .filter(lease -> isCandidateForCleanup(lease, kinesisShards)).collect(Collectors.toList()); - - if (!CollectionUtils.isNullOrEmpty(garbageLeases)) { - log.info("Found {} candidate leases for cleanup. Refreshing list of" - + " Kinesis shards to pick up recent/latest shards", garbageLeases.size()); - final Set currentKinesisShardIds = getShardList(shardDetector).stream().map(Shard::shardId) - .collect(Collectors.toSet()); - - for (Lease lease : garbageLeases) { - if (isCandidateForCleanup(lease, currentKinesisShardIds)) { - log.info("Deleting lease for shard {} as it is not present in Kinesis stream.", lease.leaseKey()); - leaseRefresher.deleteLease(lease); - } - } - } - } - - /** - * Note: This method has package level access, solely for testing purposes. - * - * @param lease Candidate shard we are considering for deletion. 
- * @param currentKinesisShardIds - * @return true if neither the shard (corresponding to the lease), nor its parents are present in - * currentKinesisShardIds - * @throws KinesisClientLibIOException Thrown if currentKinesisShardIds contains a parent shard but not the child - * shard (we are evaluating for deletion). - */ - static boolean isCandidateForCleanup(final Lease lease, final Set currentKinesisShardIds) - throws KinesisClientLibIOException { - boolean isCandidateForCleanup = true; - - if (currentKinesisShardIds.contains(lease.leaseKey())) { - isCandidateForCleanup = false; - } else { - log.info("Found lease for non-existent shard: {}. Checking its parent shards", lease.leaseKey()); - final Set parentShardIds = lease.parentShardIds(); - for (String parentShardId : parentShardIds) { - - // Throw an exception if the parent shard exists (but the child does not). - // This may be a (rare) race condition between fetching the shard list and Kinesis expiring shards. - if (currentKinesisShardIds.contains(parentShardId)) { - final String message = String.format("Parent shard %s exists but not the child shard %s", - parentShardId, lease.leaseKey()); - log.info(message); - throw new KinesisClientLibIOException(message); - } - } - } - - return isCandidateForCleanup; - } - - /** - * Private helper method. - * Clean up leases for shards that meet the following criteria: - * a/ the shard has been fully processed (checkpoint is set to SHARD_END) - * b/ we've begun processing all the child shards: we have leases for all child shards and their checkpoint is not - * TRIM_HORIZON. - * - * @param currentLeases List of leases we evaluate for clean up - * @param shardIdToShardMap Map of shardId->Shard (assumed to include all Kinesis shards) - * @param shardIdToChildShardIdsMap Map of shardId->childShardIds (assumed to include all Kinesis shards) - * @param trackedLeases List of all leases we are tracking. 
- * @param leaseRefresher Lease refresher (will be used to delete leases) - * @throws DependencyException - * @throws InvalidStateException - * @throws ProvisionedThroughputException - * @throws KinesisClientLibIOException - */ - private synchronized void cleanupLeasesOfFinishedShards(final Collection currentLeases, - final Map shardIdToShardMap, final Map> shardIdToChildShardIdsMap, - final List trackedLeases, final LeaseRefresher leaseRefresher) throws DependencyException, - InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException { - final List leasesOfClosedShards = currentLeases.stream() - .filter(lease -> lease.checkpoint().equals(ExtendedSequenceNumber.SHARD_END)) - .collect(Collectors.toList()); - final Set shardIdsOfClosedShards = leasesOfClosedShards.stream().map(Lease::leaseKey) - .collect(Collectors.toSet()); - - if (!CollectionUtils.isNullOrEmpty(leasesOfClosedShards)) { - assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, shardIdsOfClosedShards); - Comparator startingSequenceNumberComparator = new StartingSequenceNumberAndShardIdBasedComparator( - shardIdToShardMap); - leasesOfClosedShards.sort(startingSequenceNumberComparator); - final Map trackedLeaseMap = trackedLeases.stream() - .collect(Collectors.toMap(Lease::leaseKey, Function.identity())); - - for (Lease leaseOfClosedShard : leasesOfClosedShards) { - final String closedShardId = leaseOfClosedShard.leaseKey(); - final Set childShardIds = shardIdToChildShardIdsMap.get(closedShardId); - if (closedShardId != null && !CollectionUtils.isNullOrEmpty(childShardIds)) { - cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseRefresher); - } - } - } - } - - /** - * Delete lease for the closed shard. 
Rules for deletion are: - * a/ the checkpoint for the closed shard is SHARD_END, - * b/ there are leases for all the childShardIds and their checkpoint is NOT TRIM_HORIZON - * Note: This method has package level access solely for testing purposes. - * - * @param closedShardId Identifies the closed shard - * @param childShardIds ShardIds of children of the closed shard - * @param trackedLeases shardId->Lease map with all leases we are tracking (should not be null) - * @param leaseRefresher - * @throws ProvisionedThroughputException - * @throws InvalidStateException - * @throws DependencyException - */ - synchronized void cleanupLeaseForClosedShard(final String closedShardId, final Set childShardIds, - final Map trackedLeases, final LeaseRefresher leaseRefresher) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - final Lease leaseForClosedShard = trackedLeases.get(closedShardId); - final List childShardLeases = childShardIds.stream().map(trackedLeases::get).filter(Objects::nonNull) - .collect(Collectors.toList()); - - if (leaseForClosedShard != null && leaseForClosedShard.checkpoint().equals(ExtendedSequenceNumber.SHARD_END) - && childShardLeases.size() == childShardIds.size()) { - boolean okayToDelete = true; - for (Lease lease : childShardLeases) { - if (lease.checkpoint().equals(ExtendedSequenceNumber.TRIM_HORIZON)) { - okayToDelete = false; - break; - } - } - - if (okayToDelete) { - log.info("Deleting lease for shard {} as it has been completely processed and processing of child " - + "shards has begun.", leaseForClosedShard.leaseKey()); - leaseRefresher.deleteLease(leaseForClosedShard); - } - } - } - - /** - * Helper method to create a new Lease POJO for a shard. 
- * Note: Package level access only for testing purposes - * - * @param shard - * @return - */ - private static Lease newKCLLease(final Shard shard) { - Lease newLease = new Lease(); - newLease.leaseKey(shard.shardId()); - List parentShardIds = new ArrayList<>(2); - if (shard.parentShardId() != null) { - parentShardIds.add(shard.parentShardId()); - } - if (shard.adjacentParentShardId() != null) { - parentShardIds.add(shard.adjacentParentShardId()); - } - newLease.parentShardIds(parentShardIds); - newLease.ownerSwitchesSinceCheckpoint(0L); - - return newLease; - } - - /** - * Helper method to construct a shardId->Shard map for the specified list of shards. - * - * @param shards List of shards - * @return ShardId->Shard map - */ - static Map constructShardIdToShardMap(final List shards) { - return shards.stream().collect(Collectors.toMap(Shard::shardId, Function.identity())); - } - - /** - * Helper method to return all the open shards for a stream. - * Note: Package level access only for testing purposes. - * - * @param allShards All shards returved via DescribeStream. We assume this to represent a consistent shard list. - * @return List of open shards (shards at the tip of the stream) - may include shards that are not yet active. 
- */ - static List getOpenShards(final List allShards) { - return allShards.stream().filter(shard -> shard.sequenceNumberRange().endingSequenceNumber() == null) - .peek(shard -> log.debug("Found open shard: {}", shard.shardId())).collect(Collectors.toList()); - } - - private static ExtendedSequenceNumber convertToCheckpoint(final InitialPositionInStreamExtended position) { - ExtendedSequenceNumber checkpoint = null; - - if (position.getInitialPositionInStream().equals(InitialPositionInStream.TRIM_HORIZON)) { - checkpoint = ExtendedSequenceNumber.TRIM_HORIZON; - } else if (position.getInitialPositionInStream().equals(InitialPositionInStream.LATEST)) { - checkpoint = ExtendedSequenceNumber.LATEST; - } else if (position.getInitialPositionInStream().equals(InitialPositionInStream.AT_TIMESTAMP)) { - checkpoint = ExtendedSequenceNumber.AT_TIMESTAMP; - } - - return checkpoint; - } - - /** Helper class to compare leases based on starting sequence number of the corresponding shards. - * - */ - @RequiredArgsConstructor - private static class StartingSequenceNumberAndShardIdBasedComparator implements Comparator, Serializable { - private static final long serialVersionUID = 1L; - - private final Map shardIdToShardMap; - - /** - * Compares two leases based on the starting sequence number of corresponding shards. - * If shards are not found in the shardId->shard map supplied, we do a string comparison on the shardIds. 
- * We assume that lease1 and lease2 are: - * a/ not null, - * b/ shards (if found) have non-null starting sequence numbers - * - * {@inheritDoc} - */ - @Override - public int compare(final Lease lease1, final Lease lease2) { - int result = 0; - final String shardId1 = lease1.leaseKey(); - final String shardId2 = lease2.leaseKey(); - final Shard shard1 = shardIdToShardMap.get(shardId1); - final Shard shard2 = shardIdToShardMap.get(shardId2); - - // If we found shards for the two leases, use comparison of the starting sequence numbers - if (shard1 != null && shard2 != null) { - BigInteger sequenceNumber1 = new BigInteger(shard1.sequenceNumberRange().startingSequenceNumber()); - BigInteger sequenceNumber2 = new BigInteger(shard2.sequenceNumberRange().startingSequenceNumber()); - result = sequenceNumber1.compareTo(sequenceNumber2); - } - - if (result == 0) { - result = shardId1.compareTo(shardId2); - } - - return result; - } - - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java deleted file mode 100644 index b5645d9d..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java +++ /dev/null @@ -1,219 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.leases; - -import java.time.Duration; -import java.time.Instant; -import java.time.temporal.ChronoUnit; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Function; -import java.util.stream.Collectors; - -import org.apache.commons.lang3.StringUtils; - -import lombok.AccessLevel; -import lombok.Getter; -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.Synchronized; -import lombok.experimental.Accessors; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.KinesisException; -import software.amazon.awssdk.services.kinesis.model.LimitExceededException; -import software.amazon.awssdk.services.kinesis.model.ListShardsRequest; -import software.amazon.awssdk.services.kinesis.model.ListShardsResponse; -import software.amazon.awssdk.services.kinesis.model.ResourceInUseException; -import software.amazon.awssdk.services.kinesis.model.Shard; -import software.amazon.awssdk.utils.CollectionUtils; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.common.KinesisRequestsBuilder; -import software.amazon.kinesis.retrieval.AWSExceptionManager; - -/** - * - */ -@RequiredArgsConstructor -@Slf4j -@Accessors(fluent = true) -@KinesisClientInternalApi -public class KinesisShardDetector implements ShardDetector { - @NonNull - private final KinesisAsyncClient kinesisClient; - @NonNull - private final String streamName; - private final long listShardsBackoffTimeInMillis; - private final int maxListShardsRetryAttempts; - private final long listShardsCacheAllowedAgeInSeconds; - private final int maxCacheMissesBeforeReload; - private final int cacheMissWarningModulus; - - private volatile Map cachedShardMap = null; - private 
volatile Instant lastCacheUpdateTime; - @Getter(AccessLevel.PACKAGE) - private AtomicInteger cacheMisses = new AtomicInteger(0); - - @Override - public Shard shard(@NonNull final String shardId) { - if (CollectionUtils.isNullOrEmpty(this.cachedShardMap)) { - synchronized (this) { - if (CollectionUtils.isNullOrEmpty(this.cachedShardMap)) { - listShards(); - } - } - } - - Shard shard = cachedShardMap.get(shardId); - - if (shard == null) { - if (cacheMisses.incrementAndGet() > maxCacheMissesBeforeReload || shouldRefreshCache()) { - synchronized (this) { - shard = cachedShardMap.get(shardId); - - if (shard == null) { - log.info("Too many shard map cache misses or cache is out of date -- forcing a refresh"); - listShards(); - shard = cachedShardMap.get(shardId); - - if (shard == null) { - log.warn("Even after cache refresh shard '{}' wasn't found. This could indicate a bigger" - + " problem.", shardId); - } - - cacheMisses.set(0); - } else { - // - // If the shardmap got updated, go ahead and set cache misses to 0 - // - cacheMisses.set(0); - } - } - } - } - - if (shard == null) { - final String message = String.format("Cannot find the shard given the shardId %s. Cache misses: %s", - shardId, cacheMisses); - if (cacheMisses.get() % cacheMissWarningModulus == 0) { - log.warn(message); - } else { - log.debug(message); - } - } - - return shard; - } - - @Override - @Synchronized - public List listShards() { - final List shards = new ArrayList<>(); - ListShardsResponse result; - String nextToken = null; - - do { - result = listShards(nextToken); - - if (result == null) { - /* - * If listShards ever returns null, we should bail and return null. This indicates the stream is not - * in ACTIVE or UPDATING state and we may not have accurate/consistent information about the stream. 
- */ - return null; - } else { - shards.addAll(result.shards()); - nextToken = result.nextToken(); - } - } while (StringUtils.isNotEmpty(result.nextToken())); - - cachedShardMap(shards); - return shards; - } - - private ListShardsResponse listShards(final String nextToken) { - final AWSExceptionManager exceptionManager = new AWSExceptionManager(); - exceptionManager.add(LimitExceededException.class, t -> t); - exceptionManager.add(ResourceInUseException.class, t -> t); - exceptionManager.add(KinesisException.class, t -> t); - - ListShardsRequest.Builder request = KinesisRequestsBuilder.listShardsRequestBuilder(); - if (StringUtils.isEmpty(nextToken)) { - request = request.streamName(streamName); - } else { - request = request.nextToken(nextToken); - } - ListShardsResponse result = null; - LimitExceededException lastException = null; - int remainingRetries = maxListShardsRetryAttempts; - - while (result == null) { - - try { - try { - result = kinesisClient.listShards(request.build()).get(); - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: check if this is the correct behavior for Interrupted Exception - log.debug("Interrupted exception caught, shutdown initiated, returning null"); - return null; - } - } catch (ResourceInUseException e) { - log.info("Stream is not in Active/Updating status, returning null (wait until stream is in" - + " Active or Updating)"); - return null; - } catch (LimitExceededException e) { - log.info("Got LimitExceededException when listing shards {}. 
Backing off for {} millis.", streamName, - listShardsBackoffTimeInMillis); - try { - Thread.sleep(listShardsBackoffTimeInMillis); - } catch (InterruptedException ie) { - log.debug("Stream {} : Sleep was interrupted ", streamName, ie); - } - lastException = e; - } - remainingRetries--; - if (remainingRetries <= 0 && result == null) { - if (lastException != null) { - throw lastException; - } - throw new IllegalStateException("Received null from ListShards call."); - } - } - return result; - } - - void cachedShardMap(final List shards) { - cachedShardMap = shards.stream().collect(Collectors.toMap(Shard::shardId, Function.identity())); - lastCacheUpdateTime = Instant.now(); - } - - private boolean shouldRefreshCache() { - final Duration secondsSinceLastUpdate = Duration.between(lastCacheUpdateTime, Instant.now()); - final String message = String.format("Shard map cache is %d seconds old", secondsSinceLastUpdate.getSeconds()); - if (secondsSinceLastUpdate.compareTo(Duration.of(listShardsCacheAllowedAgeInSeconds, ChronoUnit.SECONDS)) > 0) { - log.info("{}. Age exceeds limit of {} seconds -- Refreshing.", message, listShardsCacheAllowedAgeInSeconds); - return true; - } - - log.debug("{}. Age doesn't exceed limit of {} seconds.", message, listShardsCacheAllowedAgeInSeconds); - return false; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java deleted file mode 100644 index 482555b3..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.leases; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.UUID; - -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseCoordinator; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; - -/** - * - */ -public interface LeaseCoordinator { - /** - * Initialize the lease coordinator (create the lease table if needed). - * @throws DependencyException - * @throws ProvisionedThroughputException - */ - void initialize() throws ProvisionedThroughputException, DependencyException, IllegalStateException; - - /** - * Start background LeaseHolder and LeaseTaker threads. - * @throws ProvisionedThroughputException If we can't talk to DynamoDB due to insufficient capacity. - * @throws InvalidStateException If the lease table doesn't exist - * @throws DependencyException If we encountered exception taking to DynamoDB - */ - void start() throws DependencyException, InvalidStateException, ProvisionedThroughputException; - - /** - * Runs a single iteration of the lease taker - used by integration tests. - * - * @throws InvalidStateException - * @throws DependencyException - */ - void runLeaseTaker() throws DependencyException, InvalidStateException; - - /** - * Runs a single iteration of the lease renewer - used by integration tests. 
- * - * @throws InvalidStateException - * @throws DependencyException - */ - void runLeaseRenewer() throws DependencyException, InvalidStateException; - - /** - * @return true if this LeaseCoordinator is running - */ - boolean isRunning(); - - /** - * @return workerIdentifier - */ - String workerIdentifier(); - - /** - * @return {@link LeaseRefresher} - */ - LeaseRefresher leaseRefresher(); - - /** - * @return currently held leases - */ - Collection getAssignments(); - - /** - * @param leaseKey lease key to fetch currently held lease for - * - * @return deep copy of currently held Lease for given key, or null if we don't hold the lease for that key - */ - Lease getCurrentlyHeldLease(String leaseKey); - - /** - * Updates application-specific lease values in DynamoDB. - * - * @param lease lease object containing updated values - * @param concurrencyToken obtained by calling Lease.concurrencyToken for a currently held lease - * - * @return true if update succeeded, false otherwise - * - * @throws InvalidStateException if lease table does not exist - * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity - * @throws DependencyException if DynamoDB update fails in an unexpected way - */ - boolean updateLease(Lease lease, UUID concurrencyToken, String operation, String shardId) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; - - /** - * Requests the cancellation of the lease taker. - */ - void stopLeaseTaker(); - - /** - * Requests that renewals for the given lease are stopped. - * - * @param lease the lease to stop renewing. - */ - void dropLease(Lease lease); - - /** - * Stops background threads and waits for specific amount of time for all background tasks to complete. - * If tasks are not completed after this time, method will shutdown thread pool forcefully and return. 
- */ - void stop(); - - /** - * @return Current shard/lease assignments - */ - List getCurrentAssignments(); - - /** - * Default implementation returns an empty list and concrete implementation is expected to return all leases - * for the application that are in the lease table. This enables application managing Kcl Scheduler to take care of - * horizontal scaling for example. - * - * @return all leases for the application that are in the lease table - */ - default List allLeases() { - return Collections.emptyList(); - } - - /** - * @param writeCapacity The DynamoDB table used for tracking leases will be provisioned with the specified initial - * write capacity - * @return LeaseCoordinator - */ - DynamoDBLeaseCoordinator initialLeaseTableWriteCapacity(long writeCapacity); - - /** - * @param readCapacity The DynamoDB table used for tracking leases will be provisioned with the specified initial - * read capacity - * @return LeaseCoordinator - */ - DynamoDBLeaseCoordinator initialLeaseTableReadCapacity(long readCapacity); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java deleted file mode 100644 index 5c98bae6..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java +++ /dev/null @@ -1,269 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.leases; - -import java.util.concurrent.ExecutorService; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import lombok.Data; -import lombok.NonNull; -import lombok.experimental.Accessors; -import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseManagementFactory; -import software.amazon.kinesis.leases.dynamodb.TableCreatorCallback; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.NullMetricsFactory; - -/** - * Used by the KCL to configure lease management. - */ -@Data -@Accessors(fluent = true) -public class LeaseManagementConfig { - /** - * Name of the table to use in DynamoDB - * - * @return String - */ - @NonNull - private final String tableName; - /** - * Client to be used to access DynamoDB service. - * - * @return {@link DynamoDbAsyncClient} - */ - @NonNull - private final DynamoDbAsyncClient dynamoDBClient; - /** - * Client to be used to access Kinesis Data Streams service. - * - * @return {@link KinesisAsyncClient} - */ - @NonNull - private final KinesisAsyncClient kinesisClient; - /** - * Name of the Kinesis Data Stream to read records from. - */ - @NonNull - private final String streamName; - /** - * Used to distinguish different workers/processes of a KCL application. - * - * @return String - */ - @NonNull - private final String workerIdentifier; - - /** - * Fail over time in milliseconds. 
A worker which does not renew it's lease within this time interval - * will be regarded as having problems and it's shards will be assigned to other workers. - * For applications that have a large number of shards, this may be set to a higher number to reduce - * the number of DynamoDB IOPS required for tracking leases. - * - *

Default value: 10000L

- */ - private long failoverTimeMillis = 10000L; - - /** - * Shard sync interval in milliseconds - e.g. wait for this long between shard sync tasks. - * - *

Default value: 60000L

- */ - private long shardSyncIntervalMillis = 60000L; - - /** - * Cleanup leases upon shards completion (don't wait until they expire in Kinesis). - * Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by default we try - * to delete the ones we don't need any longer. - * - *

Default value: true

- */ - private boolean cleanupLeasesUponShardCompletion = true; - - /** - * The max number of leases (shards) this worker should process. - * This can be useful to avoid overloading (and thrashing) a worker when a host has resource constraints - * or during deployment. - * - *

NOTE: Setting this to a low value can cause data loss if workers are not able to pick up all shards in the - * stream due to the max limit.

- * - *

Default value: {@link Integer#MAX_VALUE}

- */ - private int maxLeasesForWorker = Integer.MAX_VALUE;; - - /** - * Max leases to steal from another worker at one time (for load balancing). - * Setting this to a higher number can allow for faster load convergence (e.g. during deployments, cold starts), - * but can cause higher churn in the system. - * - *

Default value: 1

- */ - private int maxLeasesToStealAtOneTime = 1; - - /** - * The Amazon DynamoDB table used for tracking leases will be provisioned with this read capacity. - * - *

Default value: 10

- */ - private int initialLeaseTableReadCapacity = 10; - - /** - * The Amazon DynamoDB table used for tracking leases will be provisioned with this write capacity. - * - *

Default value: 10

- */ - private int initialLeaseTableWriteCapacity = 10; - - /** - * The size of the thread pool to create for the lease renewer to use. - * - *

Default value: 20

- */ - private int maxLeaseRenewalThreads = 20; - - /** - * - */ - private boolean ignoreUnexpectedChildShards = false; - - /** - * - */ - private boolean consistentReads = false; - - private long listShardsBackoffTimeInMillis = 1500L; - - private int maxListShardsRetryAttempts = 50; - - public long epsilonMillis = 25L; - - /** - * The initial position for getting records from Kinesis streams. - * - *

Default value: {@link InitialPositionInStream#TRIM_HORIZON}

- */ - private InitialPositionInStreamExtended initialPositionInStream = - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - - private int maxCacheMissesBeforeReload = 1000; - private long listShardsCacheAllowedAgeInSeconds = 30; - private int cacheMissWarningModulus = 250; - - private MetricsFactory metricsFactory = new NullMetricsFactory(); - - /** - * Returns the metrics factory. - * - *

- * NOTE: This method is deprecated and will be removed in a future release. This metrics factory is not being used - * in the KCL. - *

- * - * @return - */ - @Deprecated - public MetricsFactory metricsFactory() { - return metricsFactory; - } - - /** - * Sets the metrics factory. - * - *

- * NOTE: This method is deprecated and will be removed in a future release. This metrics factory is not being used - * in the KCL. - *

- * - * @param metricsFactory - */ - @Deprecated - public LeaseManagementConfig metricsFactory(final MetricsFactory metricsFactory) { - this.metricsFactory = metricsFactory; - return this; - } - - /** - * The {@link ExecutorService} to be used by {@link ShardSyncTaskManager}. - * - *

Default value: {@link LeaseManagementThreadPool}

- */ - private ExecutorService executorService = new LeaseManagementThreadPool( - new ThreadFactoryBuilder().setNameFormat("ShardSyncTaskManager-%04d").build()); - - static class LeaseManagementThreadPool extends ThreadPoolExecutor { - private static final long DEFAULT_KEEP_ALIVE_TIME = 60L; - - LeaseManagementThreadPool(ThreadFactory threadFactory) { - super(0, Integer.MAX_VALUE, DEFAULT_KEEP_ALIVE_TIME, TimeUnit.SECONDS, new SynchronousQueue<>(), - threadFactory); - } - } - - /** - * Callback used with DynamoDB lease management. Callback is invoked once the table is newly created and is in the - * active status. - * - *

- * Default value: {@link TableCreatorCallback#NOOP_TABLE_CREATOR_CALLBACK} - *

- */ - private TableCreatorCallback tableCreatorCallback = TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK; - - private HierarchicalShardSyncer hierarchicalShardSyncer = new HierarchicalShardSyncer(); - - private LeaseManagementFactory leaseManagementFactory; - - public LeaseManagementFactory leaseManagementFactory() { - if (leaseManagementFactory == null) { - leaseManagementFactory = new DynamoDBLeaseManagementFactory(kinesisClient(), - streamName(), - dynamoDBClient(), - tableName(), - workerIdentifier(), - executorService(), - initialPositionInStream(), - failoverTimeMillis(), - epsilonMillis(), - maxLeasesForWorker(), - maxLeasesToStealAtOneTime(), - maxLeaseRenewalThreads(), - cleanupLeasesUponShardCompletion(), - ignoreUnexpectedChildShards(), - shardSyncIntervalMillis(), - consistentReads(), - listShardsBackoffTimeInMillis(), - maxListShardsRetryAttempts(), - maxCacheMissesBeforeReload(), - listShardsCacheAllowedAgeInSeconds(), - cacheMissWarningModulus(), - initialLeaseTableReadCapacity(), - initialLeaseTableWriteCapacity(), - hierarchicalShardSyncer(), - tableCreatorCallback()); - } - return leaseManagementFactory; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementFactory.java deleted file mode 100644 index 5e685d31..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementFactory.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.leases; - -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher; -import software.amazon.kinesis.metrics.MetricsFactory; - -/** - * - */ -public interface LeaseManagementFactory { - LeaseCoordinator createLeaseCoordinator(MetricsFactory metricsFactory); - - ShardSyncTaskManager createShardSyncTaskManager(MetricsFactory metricsFactory); - - DynamoDBLeaseRefresher createLeaseRefresher(); - - ShardDetector createShardDetector(); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseSerializer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseSerializer.java deleted file mode 100644 index e4d8f6f3..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseSerializer.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.leases; - -import java.util.Collection; -import java.util.Map; - - -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.kinesis.leases.Lease; - -/** - * Utility class that manages the mapping of Lease objects/operations to records in DynamoDB. - */ -public interface LeaseSerializer { - - /** - * Construct a DynamoDB record out of a Lease object - * - * @param lease lease object to serialize - * @return an attribute value map representing the lease object - */ - Map toDynamoRecord(Lease lease); - - /** - * Construct a Lease object out of a DynamoDB record. - * - * @param dynamoRecord attribute value map from DynamoDB - * @return a deserialized lease object representing the attribute value map - */ - Lease fromDynamoRecord(Map dynamoRecord); - - /** - * @param lease - * @return the attribute value map representing a Lease's hash key given a Lease object. - */ - Map getDynamoHashKey(Lease lease); - - /** - * Special getDynamoHashKey implementation used by {@link LeaseRefresher#getLease(String)}. - * - * @param leaseKey - * @return the attribute value map representing a Lease's hash key given a string. - */ - Map getDynamoHashKey(String leaseKey); - - /** - * @param lease - * @return the attribute value map asserting that a lease counter is what we expect. - */ - Map getDynamoLeaseCounterExpectation(Lease lease); - - /** - * @param lease - * @return the attribute value map asserting that the lease owner is what we expect. - */ - Map getDynamoLeaseOwnerExpectation(Lease lease); - - /** - * @return the attribute value map asserting that a lease does not exist. 
- */ - Map getDynamoNonexistantExpectation(); - - /** - * @param lease - * @return the attribute value map that increments a lease counter - */ - Map getDynamoLeaseCounterUpdate(Lease lease); - - /** - * @param lease - * @param newOwner - * @return the attribute value map that takes a lease for a new owner - */ - Map getDynamoTakeLeaseUpdate(Lease lease, String newOwner); - - /** - * @param lease - * @return the attribute value map that voids a lease - */ - Map getDynamoEvictLeaseUpdate(Lease lease); - - /** - * @param lease - * @return the attribute value map that updates application-specific data for a lease and increments the lease - * counter - */ - Map getDynamoUpdateLeaseUpdate(Lease lease); - - /** - * @return the key schema for creating a DynamoDB table to store leases - */ - Collection getKeySchema(); - - /** - * @return attribute definitions for creating a DynamoDB table to store leases - */ - Collection getAttributeDefinitions(); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java deleted file mode 100644 index 394375b3..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.leases; - -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; - -/** - * ILeaseTaker is used by LeaseCoordinator to take new leases, or leases that other workers fail to renew. Each - * LeaseCoordinator instance corresponds to one worker and uses exactly one ILeaseTaker to take leases for that worker. - */ -public interface LeaseTaker { - - /** - * Compute the set of leases available to be taken and attempt to take them. Lease taking rules are: - * - * 1) If a lease's counter hasn't changed in long enough, try to take it. - * 2) If we see a lease we've never seen before, take it only if owner == null. If it's owned, odds are the owner is - * holding it. We can't tell until we see it more than once. - * 3) For load balancing purposes, you may violate rules 1 and 2 for EXACTLY ONE lease per call of takeLeases(). - * - * @return map of shardId to Lease object for leases we just successfully took. - * - * @throws DependencyException on unexpected DynamoDB failures - * @throws InvalidStateException if lease table does not exist - */ - Map takeLeases() throws DependencyException, InvalidStateException; - - /** - * @return workerIdentifier for this LeaseTaker - */ - String getWorkerIdentifier(); - - /** - * Default implementation returns an empty list and concrete implementaion is expected to return all leases - * for the application that are in the lease table either by reading lease table or from an internal cache. 
- * - * @return all leases for the application that are in the lease table - */ - default List allLeases() { - return Collections.emptyList(); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/NoOpShardPrioritization.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/NoOpShardPrioritization.java deleted file mode 100644 index ec93d764..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/NoOpShardPrioritization.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases; - -import java.util.List; - -/** - * Shard Prioritization that returns the same original list of shards without any modifications. - */ -public class NoOpShardPrioritization implements - ShardPrioritization { - - /** - * Empty constructor for NoOp Shard Prioritization. - */ - public NoOpShardPrioritization() { - } - - @Override - public List prioritize(List original) { - return original; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java deleted file mode 100644 index ebcb190a..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.leases; - -import software.amazon.awssdk.services.kinesis.model.Shard; - -import java.util.List; - -/** - * - */ -public interface ShardDetector { - Shard shard(String shardId); - - List listShards(); - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardPrioritization.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardPrioritization.java deleted file mode 100644 index 11b7586a..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardPrioritization.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases; - -import java.util.List; - -/** - * Provides logic to prioritize or filter shards before their execution. - */ -public interface ShardPrioritization { - - /** - * Returns new list of shards ordered based on their priority. 
- * Resulted list may have fewer shards compared to original list - * - * @param original - * list of shards needed to be prioritized - * @return new list that contains only shards that should be processed - */ - List prioritize(List original); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java deleted file mode 100644 index 046efdea..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases; - -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.lifecycle.ConsumerTask; -import software.amazon.kinesis.lifecycle.TaskResult; -import software.amazon.kinesis.lifecycle.TaskType; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.MetricsUtil; - -/** - * This task syncs leases/activies with shards of the stream. - * It will create new leases/activites when it discovers new shards (e.g. setup/resharding). 
- * It will clean up leases/activities for shards that have been completely processed (if - * cleanupLeasesUponShardCompletion is true). - */ -@RequiredArgsConstructor -@Slf4j -@KinesisClientInternalApi -public class ShardSyncTask implements ConsumerTask { - private final String SHARD_SYNC_TASK_OPERATION = "ShardSyncTask"; - - @NonNull - private final ShardDetector shardDetector; - @NonNull - private final LeaseRefresher leaseRefresher; - @NonNull - private final InitialPositionInStreamExtended initialPosition; - private final boolean cleanupLeasesUponShardCompletion; - private final boolean ignoreUnexpectedChildShards; - private final long shardSyncTaskIdleTimeMillis; - @NonNull - private final HierarchicalShardSyncer hierarchicalShardSyncer; - @NonNull - private final MetricsFactory metricsFactory; - - private final TaskType taskType = TaskType.SHARDSYNC; - - /* - * (non-Javadoc) - * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#call() - */ - @Override - public TaskResult call() { - Exception exception = null; - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, SHARD_SYNC_TASK_OPERATION); - - try { - hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, leaseRefresher, initialPosition, - cleanupLeasesUponShardCompletion, ignoreUnexpectedChildShards, scope); - if (shardSyncTaskIdleTimeMillis > 0) { - Thread.sleep(shardSyncTaskIdleTimeMillis); - } - } catch (Exception e) { - log.error("Caught exception while sync'ing Kinesis shards and leases", e); - exception = e; - } finally { - MetricsUtil.endScope(scope); - } - - return new TaskResult(exception); - } - - /* - * (non-Javadoc) - * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#taskType() - */ - @Override - public TaskType taskType() { - return taskType; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTaskManager.java 
b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTaskManager.java deleted file mode 100644 index d97c9b90..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTaskManager.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases; - -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; - -import software.amazon.kinesis.common.InitialPositionInStreamExtended; - -import lombok.Data; -import lombok.NonNull; -import lombok.experimental.Accessors; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.lifecycle.ConsumerTask; -import software.amazon.kinesis.lifecycle.TaskResult; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.MetricsCollectingTaskDecorator; - -/** - * The ShardSyncTaskManager is used to track the task to sync shards with leases (create leases for new - * Kinesis shards, remove obsolete leases). We'll have at most one outstanding sync task at any time. - * Worker will use this class to kick off a sync task when it finds shards which have been completely processed. 
- */ -@Data -@Accessors(fluent = true) -@Slf4j -public class ShardSyncTaskManager { - @NonNull - private final ShardDetector shardDetector; - @NonNull - private final LeaseRefresher leaseRefresher; - @NonNull - private final InitialPositionInStreamExtended initialPositionInStream; - private final boolean cleanupLeasesUponShardCompletion; - private final boolean ignoreUnexpectedChildShards; - private final long shardSyncIdleTimeMillis; - @NonNull - private final ExecutorService executorService; - @NonNull - private final HierarchicalShardSyncer hierarchicalShardSyncer; - @NonNull - private final MetricsFactory metricsFactory; - - /** - * Constructor. - * - *

NOTE: This constructor is deprecated and will be removed in a future release.

- * - * @param shardDetector - * @param leaseRefresher - * @param initialPositionInStream - * @param cleanupLeasesUponShardCompletion - * @param ignoreUnexpectedChildShards - * @param shardSyncIdleTimeMillis - * @param executorService - * @param metricsFactory - */ - @Deprecated - public ShardSyncTaskManager(ShardDetector shardDetector, LeaseRefresher leaseRefresher, - InitialPositionInStreamExtended initialPositionInStream, boolean cleanupLeasesUponShardCompletion, - boolean ignoreUnexpectedChildShards, long shardSyncIdleTimeMillis, ExecutorService executorService, - MetricsFactory metricsFactory) { - this.shardDetector = shardDetector; - this.leaseRefresher = leaseRefresher; - this.initialPositionInStream = initialPositionInStream; - this.cleanupLeasesUponShardCompletion = cleanupLeasesUponShardCompletion; - this.ignoreUnexpectedChildShards = ignoreUnexpectedChildShards; - this.shardSyncIdleTimeMillis = shardSyncIdleTimeMillis; - this.executorService = executorService; - this.hierarchicalShardSyncer = new HierarchicalShardSyncer(); - this.metricsFactory = metricsFactory; - } - - /** - * Constructor. 
- * - * @param shardDetector - * @param leaseRefresher - * @param initialPositionInStream - * @param cleanupLeasesUponShardCompletion - * @param ignoreUnexpectedChildShards - * @param shardSyncIdleTimeMillis - * @param executorService - * @param hierarchicalShardSyncer - * @param metricsFactory - */ - public ShardSyncTaskManager(ShardDetector shardDetector, LeaseRefresher leaseRefresher, - InitialPositionInStreamExtended initialPositionInStream, boolean cleanupLeasesUponShardCompletion, - boolean ignoreUnexpectedChildShards, long shardSyncIdleTimeMillis, ExecutorService executorService, - HierarchicalShardSyncer hierarchicalShardSyncer, MetricsFactory metricsFactory) { - this.shardDetector = shardDetector; - this.leaseRefresher = leaseRefresher; - this.initialPositionInStream = initialPositionInStream; - this.cleanupLeasesUponShardCompletion = cleanupLeasesUponShardCompletion; - this.ignoreUnexpectedChildShards = ignoreUnexpectedChildShards; - this.shardSyncIdleTimeMillis = shardSyncIdleTimeMillis; - this.executorService = executorService; - this.hierarchicalShardSyncer = hierarchicalShardSyncer; - this.metricsFactory = metricsFactory; - } - - private ConsumerTask currentTask; - private Future future; - - public synchronized boolean syncShardAndLeaseInfo() { - return checkAndSubmitNextTask(); - } - - private synchronized boolean checkAndSubmitNextTask() { - boolean submittedNewTask = false; - if ((future == null) || future.isCancelled() || future.isDone()) { - if ((future != null) && future.isDone()) { - try { - TaskResult result = future.get(); - if (result.getException() != null) { - log.error("Caught exception running {} task: ", currentTask.taskType(), - result.getException()); - } - } catch (InterruptedException | ExecutionException e) { - log.warn("{} task encountered exception.", currentTask.taskType(), e); - } - } - - currentTask = - new MetricsCollectingTaskDecorator( - new ShardSyncTask(shardDetector, - leaseRefresher, - initialPositionInStream, - 
cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, - shardSyncIdleTimeMillis, - hierarchicalShardSyncer, - metricsFactory), - metricsFactory); - future = executorService.submit(currentTask); - submittedNewTask = true; - if (log.isDebugEnabled()) { - log.debug("Submitted new {} task.", currentTask.taskType()); - } - } else { - if (log.isDebugEnabled()) { - log.debug("Previous {} task still pending. Not submitting new task.", currentTask.taskType()); - } - } - - return submittedNewTask; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java deleted file mode 100644 index 20a66e9a..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java +++ /dev/null @@ -1,414 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.leases.dynamodb; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.LinkedTransferQueue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseCoordinator; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.LeaseRenewer; -import software.amazon.kinesis.leases.LeaseTaker; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.LeasingException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.MetricsUtil; - -/** - * LeaseCoordinator abstracts away LeaseTaker and LeaseRenewer from the application code that's using leasing. It owns - * the scheduling of the two previously mentioned components as well as informing LeaseRenewer when LeaseTaker takes new - * leases. 
- * - */ -@Slf4j -@KinesisClientInternalApi -public class DynamoDBLeaseCoordinator implements LeaseCoordinator { - // Time to wait for in-flight Runnables to finish when calling .stop(); - private static final long STOP_WAIT_TIME_MILLIS = 2000L; - private static final ThreadFactory LEASE_COORDINATOR_THREAD_FACTORY = new ThreadFactoryBuilder() - .setNameFormat("LeaseCoordinator-%04d").setDaemon(true).build(); - private static final ThreadFactory LEASE_RENEWAL_THREAD_FACTORY = new ThreadFactoryBuilder() - .setNameFormat("LeaseRenewer-%04d").setDaemon(true).build(); - - private final LeaseRenewer leaseRenewer; - private final LeaseTaker leaseTaker; - private final long renewerIntervalMillis; - private final long takerIntervalMillis; - private final ExecutorService leaseRenewalThreadpool; - private final LeaseRefresher leaseRefresher; - private long initialLeaseTableReadCapacity; - private long initialLeaseTableWriteCapacity; - protected final MetricsFactory metricsFactory; - - private final Object shutdownLock = new Object(); - - private ScheduledExecutorService leaseCoordinatorThreadPool; - private ScheduledFuture takerFuture; - - private volatile boolean running = false; - - /** - * Constructor. - * - *

NOTE: This constructor is deprecated and will be removed in a future release.

- * - * @param leaseRefresher - * LeaseRefresher instance to use - * @param workerIdentifier - * Identifies the worker (e.g. useful to track lease ownership) - * @param leaseDurationMillis - * Duration of a lease - * @param epsilonMillis - * Allow for some variance when calculating lease expirations - * @param maxLeasesForWorker - * Max leases this Worker can handle at a time - * @param maxLeasesToStealAtOneTime - * Steal up to these many leases at a time (for load balancing) - * @param metricsFactory - * Used to publish metrics about lease operations - */ - @Deprecated - public DynamoDBLeaseCoordinator(final LeaseRefresher leaseRefresher, - final String workerIdentifier, - final long leaseDurationMillis, - final long epsilonMillis, - final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, - final int maxLeaseRenewerThreadCount, - final MetricsFactory metricsFactory) { - this(leaseRefresher, workerIdentifier, leaseDurationMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewerThreadCount, - TableConstants.DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, - TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory); - } - - /** - * Constructor. - * - * @param leaseRefresher - * LeaseRefresher instance to use - * @param workerIdentifier - * Identifies the worker (e.g. 
useful to track lease ownership) - * @param leaseDurationMillis - * Duration of a lease - * @param epsilonMillis - * Allow for some variance when calculating lease expirations - * @param maxLeasesForWorker - * Max leases this Worker can handle at a time - * @param maxLeasesToStealAtOneTime - * Steal up to these many leases at a time (for load balancing) - * @param initialLeaseTableReadCapacity - * Initial dynamodb lease table read iops if creating the lease table - * @param initialLeaseTableWriteCapacity - * Initial dynamodb lease table write iops if creating the lease table - * @param metricsFactory - * Used to publish metrics about lease operations - */ - public DynamoDBLeaseCoordinator(final LeaseRefresher leaseRefresher, - final String workerIdentifier, - final long leaseDurationMillis, - final long epsilonMillis, - final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, - final int maxLeaseRenewerThreadCount, - final long initialLeaseTableReadCapacity, - final long initialLeaseTableWriteCapacity, - final MetricsFactory metricsFactory) { - this.leaseRefresher = leaseRefresher; - this.leaseRenewalThreadpool = getLeaseRenewalExecutorService(maxLeaseRenewerThreadCount); - this.leaseTaker = new DynamoDBLeaseTaker(leaseRefresher, workerIdentifier, leaseDurationMillis, metricsFactory) - .withMaxLeasesForWorker(maxLeasesForWorker) - .withMaxLeasesToStealAtOneTime(maxLeasesToStealAtOneTime); - this.leaseRenewer = new DynamoDBLeaseRenewer( - leaseRefresher, workerIdentifier, leaseDurationMillis, leaseRenewalThreadpool, metricsFactory); - this.renewerIntervalMillis = leaseDurationMillis / 3 - epsilonMillis; - this.takerIntervalMillis = (leaseDurationMillis + epsilonMillis) * 2; - if (initialLeaseTableReadCapacity <= 0) { - throw new IllegalArgumentException("readCapacity should be >= 1"); - } - this.initialLeaseTableReadCapacity = initialLeaseTableReadCapacity; - if (initialLeaseTableWriteCapacity <= 0) { - throw new IllegalArgumentException("writeCapacity 
should be >= 1"); - } - this.initialLeaseTableWriteCapacity = initialLeaseTableWriteCapacity; - this.metricsFactory = metricsFactory; - - log.info("With failover time {} ms and epsilon {} ms, LeaseCoordinator will renew leases every {} ms, take" - + "leases every {} ms, process maximum of {} leases and steal {} lease(s) at a time.", - leaseDurationMillis, - epsilonMillis, - renewerIntervalMillis, - takerIntervalMillis, - maxLeasesForWorker, - maxLeasesToStealAtOneTime); - } - - private class TakerRunnable implements Runnable { - - @Override - public void run() { - try { - runLeaseTaker(); - } catch (LeasingException e) { - log.error("LeasingException encountered in lease taking thread", e); - } catch (Throwable t) { - log.error("Throwable encountered in lease taking thread", t); - } - } - - } - - private class RenewerRunnable implements Runnable { - - @Override - public void run() { - try { - runLeaseRenewer(); - } catch (LeasingException e) { - log.error("LeasingException encountered in lease renewing thread", e); - } catch (Throwable t) { - log.error("Throwable encountered in lease renewing thread", t); - } - } - - } - - @Override - public void initialize() throws ProvisionedThroughputException, DependencyException, IllegalStateException { - final boolean newTableCreated = - leaseRefresher.createLeaseTableIfNotExists(initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity); - if (newTableCreated) { - log.info("Created new lease table for coordinator with initial read capacity of {} and write capacity of {}.", - initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity); - } - // Need to wait for table in active state. 
- final long secondsBetweenPolls = 10L; - final long timeoutSeconds = 600L; - final boolean isTableActive = leaseRefresher.waitUntilLeaseTableExists(secondsBetweenPolls, timeoutSeconds); - if (!isTableActive) { - throw new DependencyException(new IllegalStateException("Creating table timeout")); - } - } - - @Override - public void start() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - leaseRenewer.initialize(); - - // 2 because we know we'll have at most 2 concurrent tasks at a time. - leaseCoordinatorThreadPool = Executors.newScheduledThreadPool(2, LEASE_COORDINATOR_THREAD_FACTORY); - - // Taker runs with fixed DELAY because we want it to run slower in the event of performance degredation. - takerFuture = leaseCoordinatorThreadPool.scheduleWithFixedDelay(new TakerRunnable(), - 0L, - takerIntervalMillis, - TimeUnit.MILLISECONDS); - // Renewer runs at fixed INTERVAL because we want it to run at the same rate in the event of degredation. - leaseCoordinatorThreadPool.scheduleAtFixedRate(new RenewerRunnable(), - 0L, - renewerIntervalMillis, - TimeUnit.MILLISECONDS); - running = true; - } - - @Override - public void runLeaseTaker() throws DependencyException, InvalidStateException { - MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, "TakeLeases"); - long startTime = System.currentTimeMillis(); - boolean success = false; - - try { - Map takenLeases = leaseTaker.takeLeases(); - - // Only add taken leases to renewer if coordinator is still running. 
- synchronized (shutdownLock) { - if (running) { - leaseRenewer.addLeasesToRenew(takenLeases.values()); - } - } - - success = true; - } finally { - MetricsUtil.addWorkerIdentifier(scope, workerIdentifier()); - MetricsUtil.addSuccessAndLatency(scope, success, startTime, MetricsLevel.SUMMARY); - MetricsUtil.endScope(scope); - } - } - - @Override - public void runLeaseRenewer() throws DependencyException, InvalidStateException { - leaseRenewer.renewLeases(); - } - - @Override - public Collection getAssignments() { - return leaseRenewer.getCurrentlyHeldLeases().values(); - } - - @Override - public List allLeases() { - return leaseTaker.allLeases(); - } - - @Override - public Lease getCurrentlyHeldLease(String leaseKey) { - return leaseRenewer.getCurrentlyHeldLease(leaseKey); - } - - @Override - public String workerIdentifier() { - return leaseTaker.getWorkerIdentifier(); - } - - @Override - public LeaseRefresher leaseRefresher() { - return leaseRefresher; - } - - @Override - public void stop() { - if (leaseCoordinatorThreadPool != null) { - leaseCoordinatorThreadPool.shutdown(); - try { - if (leaseCoordinatorThreadPool.awaitTermination(STOP_WAIT_TIME_MILLIS, TimeUnit.MILLISECONDS)) { - log.info("Worker {} has successfully stopped lease-tracking threads", - leaseTaker.getWorkerIdentifier()); - } else { - leaseCoordinatorThreadPool.shutdownNow(); - log.info("Worker {} stopped lease-tracking threads {} ms after stop", - leaseTaker.getWorkerIdentifier(), - STOP_WAIT_TIME_MILLIS); - } - } catch (InterruptedException e) { - log.debug("Encountered InterruptedException when awaiting threadpool termination"); - } - } else { - log.debug("Threadpool was null, no need to shutdown/terminate threadpool."); - } - - leaseRenewalThreadpool.shutdownNow(); - synchronized (shutdownLock) { - leaseRenewer.clearCurrentlyHeldLeases(); - running = false; - } - } - - @Override - public void stopLeaseTaker() { - takerFuture.cancel(false); - - } - - @Override - public void dropLease(final Lease 
lease) { - synchronized (shutdownLock) { - if (lease != null) { - leaseRenewer.dropLease(lease); - } - } - } - - @Override - public boolean isRunning() { - return running; - } - - @Override - public boolean updateLease(final Lease lease, final UUID concurrencyToken, final String operation, - final String shardId) throws DependencyException, InvalidStateException, ProvisionedThroughputException { - return leaseRenewer.updateLease(lease, concurrencyToken, operation, shardId); - } - - /** - * Returns executor service that should be used for lease renewal. - * @param maximumPoolSize Maximum allowed thread pool size - * @return Executor service that should be used for lease renewal. - */ - private static ExecutorService getLeaseRenewalExecutorService(int maximumPoolSize) { - int coreLeaseCount = Math.max(maximumPoolSize / 4, 2); - - return new ThreadPoolExecutor(coreLeaseCount, maximumPoolSize, 60, TimeUnit.SECONDS, - new LinkedTransferQueue<>(), LEASE_RENEWAL_THREAD_FACTORY); - } - - @Override - public List getCurrentAssignments() { - Collection leases = getAssignments(); - return convertLeasesToAssignments(leases); - } - - private static List convertLeasesToAssignments(final Collection leases) { - if (leases == null) { - return Collections.emptyList(); - } - return leases.stream().map(DynamoDBLeaseCoordinator::convertLeaseToAssignment).collect(Collectors.toList()); - } - - public static ShardInfo convertLeaseToAssignment(final Lease lease) { - return new ShardInfo(lease.leaseKey(), lease.concurrencyToken().toString(), lease.parentShardIds(), - lease.checkpoint()); - } - - /** - * {@inheritDoc} - * - *

NOTE: This method is deprecated. Please set the initial capacity through the constructor.

- */ - @Override - @Deprecated - public DynamoDBLeaseCoordinator initialLeaseTableReadCapacity(long readCapacity) { - if (readCapacity <= 0) { - throw new IllegalArgumentException("readCapacity should be >= 1"); - } - initialLeaseTableReadCapacity = readCapacity; - return this; - } - - /** - * {@inheritDoc} - * - *

NOTE: This method is deprecated. Please set the initial capacity through the constructor.

- */ - @Override - @Deprecated - public DynamoDBLeaseCoordinator initialLeaseTableWriteCapacity(long writeCapacity) { - if (writeCapacity <= 0) { - throw new IllegalArgumentException("writeCapacity should be >= 1"); - } - initialLeaseTableWriteCapacity = writeCapacity; - return this; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseManagementFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseManagementFactory.java deleted file mode 100644 index 1ec3e0b3..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseManagementFactory.java +++ /dev/null @@ -1,279 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.leases.dynamodb; - -import java.util.concurrent.ExecutorService; - -import lombok.Data; -import lombok.NonNull; -import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.leases.HierarchicalShardSyncer; -import software.amazon.kinesis.leases.KinesisShardDetector; -import software.amazon.kinesis.leases.LeaseCoordinator; -import software.amazon.kinesis.leases.LeaseManagementFactory; -import software.amazon.kinesis.leases.ShardDetector; -import software.amazon.kinesis.leases.ShardSyncTaskManager; -import software.amazon.kinesis.metrics.MetricsFactory; - -/** - * - */ -@Data -@KinesisClientInternalApi -public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { - @NonNull - private final KinesisAsyncClient kinesisClient; - @NonNull - private final String streamName; - @NonNull - private final DynamoDbAsyncClient dynamoDBClient; - @NonNull - private final String tableName; - @NonNull - private final String workerIdentifier; - @NonNull - private final ExecutorService executorService; - @NonNull - private final InitialPositionInStreamExtended initialPositionInStream; - @NonNull - private final HierarchicalShardSyncer hierarchicalShardSyncer; - - private final long failoverTimeMillis; - private final long epsilonMillis; - private final int maxLeasesForWorker; - private final int maxLeasesToStealAtOneTime; - private final int maxLeaseRenewalThreads; - private final boolean cleanupLeasesUponShardCompletion; - private final boolean ignoreUnexpectedChildShards; - private final long shardSyncIntervalMillis; - private final boolean consistentReads; - private final long listShardsBackoffTimeMillis; - private final int maxListShardsRetryAttempts; - private final int 
maxCacheMissesBeforeReload; - private final long listShardsCacheAllowedAgeInSeconds; - private final int cacheMissWarningModulus; - private final long initialLeaseTableReadCapacity; - private final long initialLeaseTableWriteCapacity; - private final TableCreatorCallback tableCreatorCallback; - - /** - * Constructor. - * - *

NOTE: This constructor is deprecated and will be removed in a future release.

- * - * @param kinesisClient - * @param streamName - * @param dynamoDBClient - * @param tableName - * @param workerIdentifier - * @param executorService - * @param initialPositionInStream - * @param failoverTimeMillis - * @param epsilonMillis - * @param maxLeasesForWorker - * @param maxLeasesToStealAtOneTime - * @param maxLeaseRenewalThreads - * @param cleanupLeasesUponShardCompletion - * @param ignoreUnexpectedChildShards - * @param shardSyncIntervalMillis - * @param consistentReads - * @param listShardsBackoffTimeMillis - * @param maxListShardsRetryAttempts - * @param maxCacheMissesBeforeReload - * @param listShardsCacheAllowedAgeInSeconds - * @param cacheMissWarningModulus - */ - @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus) { - this(kinesisClient, streamName, dynamoDBClient, tableName, workerIdentifier, executorService, - initialPositionInStream, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, 
listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, TableConstants.DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, - TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY); - } - - /** - * Constructor. - * - *

- * NOTE: This constructor is deprecated and will be removed in a future release. - *

- * - * @param kinesisClient - * @param streamName - * @param dynamoDBClient - * @param tableName - * @param workerIdentifier - * @param executorService - * @param initialPositionInStream - * @param failoverTimeMillis - * @param epsilonMillis - * @param maxLeasesForWorker - * @param maxLeasesToStealAtOneTime - * @param maxLeaseRenewalThreads - * @param cleanupLeasesUponShardCompletion - * @param ignoreUnexpectedChildShards - * @param shardSyncIntervalMillis - * @param consistentReads - * @param listShardsBackoffTimeMillis - * @param maxListShardsRetryAttempts - * @param maxCacheMissesBeforeReload - * @param listShardsCacheAllowedAgeInSeconds - * @param cacheMissWarningModulus - * @param initialLeaseTableReadCapacity - * @param initialLeaseTableWriteCapacity - */ - @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity) { - this(kinesisClient, streamName, dynamoDBClient, tableName, workerIdentifier, executorService, - initialPositionInStream, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - 
ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - new HierarchicalShardSyncer(), TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); - } - - /** - * Constructor. - * - * @param kinesisClient - * @param streamName - * @param dynamoDBClient - * @param tableName - * @param workerIdentifier - * @param executorService - * @param initialPositionInStream - * @param failoverTimeMillis - * @param epsilonMillis - * @param maxLeasesForWorker - * @param maxLeasesToStealAtOneTime - * @param maxLeaseRenewalThreads - * @param cleanupLeasesUponShardCompletion - * @param ignoreUnexpectedChildShards - * @param shardSyncIntervalMillis - * @param consistentReads - * @param listShardsBackoffTimeMillis - * @param maxListShardsRetryAttempts - * @param maxCacheMissesBeforeReload - * @param listShardsCacheAllowedAgeInSeconds - * @param cacheMissWarningModulus - * @param initialLeaseTableReadCapacity - * @param initialLeaseTableWriteCapacity - * @param hierarchicalShardSyncer - * @param tableCreatorCallback - */ - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long 
listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer hierarchicalShardSyncer, - final TableCreatorCallback tableCreatorCallback) { - this.kinesisClient = kinesisClient; - this.streamName = streamName; - this.dynamoDBClient = dynamoDBClient; - this.tableName = tableName; - this.workerIdentifier = workerIdentifier; - this.executorService = executorService; - this.initialPositionInStream = initialPositionInStream; - this.failoverTimeMillis = failoverTimeMillis; - this.epsilonMillis = epsilonMillis; - this.maxLeasesForWorker = maxLeasesForWorker; - this.maxLeasesToStealAtOneTime = maxLeasesToStealAtOneTime; - this.maxLeaseRenewalThreads = maxLeaseRenewalThreads; - this.cleanupLeasesUponShardCompletion = cleanupLeasesUponShardCompletion; - this.ignoreUnexpectedChildShards = ignoreUnexpectedChildShards; - this.shardSyncIntervalMillis = shardSyncIntervalMillis; - this.consistentReads = consistentReads; - this.listShardsBackoffTimeMillis = listShardsBackoffTimeMillis; - this.maxListShardsRetryAttempts = maxListShardsRetryAttempts; - this.maxCacheMissesBeforeReload = maxCacheMissesBeforeReload; - this.listShardsCacheAllowedAgeInSeconds = listShardsCacheAllowedAgeInSeconds; - this.cacheMissWarningModulus = cacheMissWarningModulus; - this.initialLeaseTableReadCapacity = initialLeaseTableReadCapacity; - this.initialLeaseTableWriteCapacity = initialLeaseTableWriteCapacity; - this.hierarchicalShardSyncer = hierarchicalShardSyncer; - this.tableCreatorCallback = tableCreatorCallback; - } - - @Override - public LeaseCoordinator createLeaseCoordinator(@NonNull final MetricsFactory metricsFactory) { - return new DynamoDBLeaseCoordinator(this.createLeaseRefresher(), - workerIdentifier, - failoverTimeMillis, - epsilonMillis, - maxLeasesForWorker, - maxLeasesToStealAtOneTime, - maxLeaseRenewalThreads, - initialLeaseTableReadCapacity, - 
initialLeaseTableWriteCapacity, - metricsFactory); - } - - @Override - public ShardSyncTaskManager createShardSyncTaskManager(@NonNull final MetricsFactory metricsFactory) { - return new ShardSyncTaskManager(this.createShardDetector(), - this.createLeaseRefresher(), - initialPositionInStream, - cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, - shardSyncIntervalMillis, - executorService, - hierarchicalShardSyncer, - metricsFactory); - } - - @Override - public DynamoDBLeaseRefresher createLeaseRefresher() { - return new DynamoDBLeaseRefresher(tableName, dynamoDBClient, new DynamoDBLeaseSerializer(), consistentReads, - tableCreatorCallback); - } - - @Override - public ShardDetector createShardDetector() { - return new KinesisShardDetector(kinesisClient, streamName, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, listShardsCacheAllowedAgeInSeconds, maxCacheMissesBeforeReload, - cacheMissWarningModulus); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java deleted file mode 100644 index 79a12fc3..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java +++ /dev/null @@ -1,646 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.leases.dynamodb; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; - -import lombok.NonNull; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate; -import software.amazon.awssdk.services.dynamodb.model.ConditionalCheckFailedException; -import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest; -import software.amazon.awssdk.services.dynamodb.model.DescribeTableRequest; -import software.amazon.awssdk.services.dynamodb.model.DescribeTableResponse; -import software.amazon.awssdk.services.dynamodb.model.DynamoDbException; -import software.amazon.awssdk.services.dynamodb.model.GetItemRequest; -import software.amazon.awssdk.services.dynamodb.model.GetItemResponse; -import software.amazon.awssdk.services.dynamodb.model.LimitExceededException; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughput; -import software.amazon.awssdk.services.dynamodb.model.ProvisionedThroughputExceededException; -import software.amazon.awssdk.services.dynamodb.model.PutItemRequest; -import software.amazon.awssdk.services.dynamodb.model.ResourceInUseException; -import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; -import software.amazon.awssdk.services.dynamodb.model.ScanRequest; -import software.amazon.awssdk.services.dynamodb.model.ScanResponse; -import software.amazon.awssdk.services.dynamodb.model.TableStatus; -import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; -import software.amazon.awssdk.utils.CollectionUtils; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; 
-import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.LeaseSerializer; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.retrieval.AWSExceptionManager; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * An implementation of {@link LeaseRefresher} that uses DynamoDB. - */ -@Slf4j -@KinesisClientInternalApi -public class DynamoDBLeaseRefresher implements LeaseRefresher { - protected final String table; - protected final DynamoDbAsyncClient dynamoDBClient; - protected final LeaseSerializer serializer; - protected final boolean consistentReads; - private final TableCreatorCallback tableCreatorCallback; - - private boolean newTableCreated = false; - - /** - * Constructor. - * - *

- * NOTE: This constructor is deprecated and will be removed in a future release. - *

- * - * @param table - * @param dynamoDBClient - * @param serializer - * @param consistentReads - */ - @Deprecated - public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient, - final LeaseSerializer serializer, final boolean consistentReads) { - this(table, dynamoDBClient, serializer, consistentReads, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); - } - - /** - * Constructor. - * - * @param table - * @param dynamoDBClient - * @param serializer - * @param consistentReads - * @param tableCreatorCallback - */ - public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient, - final LeaseSerializer serializer, final boolean consistentReads, - @NonNull final TableCreatorCallback tableCreatorCallback) { - this.table = table; - this.dynamoDBClient = dynamoDBClient; - this.serializer = serializer; - this.consistentReads = consistentReads; - this.tableCreatorCallback = tableCreatorCallback; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean createLeaseTableIfNotExists(@NonNull final Long readCapacity, @NonNull final Long writeCapacity) - throws ProvisionedThroughputException, DependencyException { - try { - if (tableStatus() != null) { - return newTableCreated; - } - } catch (DependencyException de) { - // - // Something went wrong with DynamoDB - // - log.error("Failed to get table status for {}", table, de); - } - ProvisionedThroughput throughput = ProvisionedThroughput.builder().readCapacityUnits(readCapacity) - .writeCapacityUnits(writeCapacity).build(); - CreateTableRequest request = CreateTableRequest.builder().tableName(table).keySchema(serializer.getKeySchema()) - .attributeDefinitions(serializer.getAttributeDefinitions()).provisionedThroughput(throughput).build(); - - final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ResourceInUseException.class, t -> t); - exceptionManager.add(LimitExceededException.class, t -> t); - - try { - try { - 
dynamoDBClient.createTable(request).get(); - newTableCreated = true; - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - throw new DependencyException(e); - } - } catch (ResourceInUseException e) { - log.info("Table {} already exists.", table); - return newTableCreated; - } catch (LimitExceededException e) { - throw new ProvisionedThroughputException("Capacity exceeded when creating table " + table, e); - } catch (DynamoDbException e) { - throw new DependencyException(e); - } - return newTableCreated; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean leaseTableExists() throws DependencyException { - return TableStatus.ACTIVE == tableStatus(); - } - - private TableStatus tableStatus() throws DependencyException { - DescribeTableRequest request = DescribeTableRequest.builder().tableName(table).build(); - - final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ResourceNotFoundException.class, t -> t); - - DescribeTableResponse result; - try { - try { - result = dynamoDBClient.describeTable(request).get(); - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: Check if this is the correct behavior - throw new DependencyException(e); - } - } catch (ResourceNotFoundException e) { - log.debug("Got ResourceNotFoundException for table {} in leaseTableExists, returning false.", table); - return null; - } catch (DynamoDbException e) { - throw new DependencyException(e); - } - - TableStatus tableStatus = result.table().tableStatus(); - log.debug("Lease table exists and is in status {}", tableStatus); - - return tableStatus; - } - - @Override - public boolean waitUntilLeaseTableExists(long secondsBetweenPolls, long timeoutSeconds) throws DependencyException { - long sleepTimeRemaining = TimeUnit.SECONDS.toMillis(timeoutSeconds); - - while (!leaseTableExists()) { - if 
(sleepTimeRemaining <= 0) { - return false; - } - - long timeToSleepMillis = Math.min(TimeUnit.SECONDS.toMillis(secondsBetweenPolls), sleepTimeRemaining); - - sleepTimeRemaining -= sleep(timeToSleepMillis); - } - - if (newTableCreated) { - log.debug("Lease table was recently created, will perform post table creation actions"); - performPostTableCreationAction(); - } - - return true; - } - - /** - * Exposed for testing purposes. - * - * @param timeToSleepMillis time to sleep in milliseconds - * - * @return actual time slept in millis - */ - long sleep(long timeToSleepMillis) { - long startTime = System.currentTimeMillis(); - - try { - Thread.sleep(timeToSleepMillis); - } catch (InterruptedException e) { - log.debug("Interrupted while sleeping"); - } - - return System.currentTimeMillis() - startTime; - } - - /** - * {@inheritDoc} - */ - @Override - public List listLeases() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - return list(null); - } - - /** - * {@inheritDoc} - */ - @Override - public boolean isLeaseTableEmpty() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - return list(1).isEmpty(); - } - - /** - * List with the given page size. Package access for integration testing. - * - * @param limit number of items to consider at a time - used by integration tests to force paging. 
- * @return list of leases - * @throws InvalidStateException if table does not exist - * @throws DependencyException if DynamoDB scan fail in an unexpected way - * @throws ProvisionedThroughputException if DynamoDB scan fail due to exceeded capacity - */ - List list(Integer limit) throws DependencyException, InvalidStateException, ProvisionedThroughputException { - log.debug("Listing leases from table {}", table); - - ScanRequest.Builder scanRequestBuilder = ScanRequest.builder().tableName(table); - if (limit != null) { - scanRequestBuilder = scanRequestBuilder.limit(limit); - } - ScanRequest scanRequest = scanRequestBuilder.build(); - - final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ResourceNotFoundException.class, t -> t); - exceptionManager.add(ProvisionedThroughputExceededException.class, t -> t); - - try { - try { - ScanResponse scanResult = dynamoDBClient.scan(scanRequest).get(); - List result = new ArrayList<>(); - - while (scanResult != null) { - for (Map item : scanResult.items()) { - log.debug("Got item {} from DynamoDB.", item.toString()); - result.add(serializer.fromDynamoRecord(item)); - } - - Map lastEvaluatedKey = scanResult.lastEvaluatedKey(); - if (CollectionUtils.isNullOrEmpty(lastEvaluatedKey)) { - // Signify that we're done. - scanResult = null; - log.debug("lastEvaluatedKey was null - scan finished."); - } else { - // Make another request, picking up where we left off. 
- scanRequest = scanRequest.toBuilder().exclusiveStartKey(lastEvaluatedKey).build(); - log.debug("lastEvaluatedKey was {}, continuing scan.", lastEvaluatedKey); - scanResult = dynamoDBClient.scan(scanRequest).get(); - } - } - log.debug("Listed {} leases from table {}", result.size(), table); - return result; - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: Check if this is the correct behavior - throw new DependencyException(e); - } - } catch (ResourceNotFoundException e) { - throw new InvalidStateException("Cannot scan lease table " + table + " because it does not exist.", e); - } catch (ProvisionedThroughputExceededException e) { - throw new ProvisionedThroughputException(e); - } catch (DynamoDbException e) { - throw new DependencyException(e); - } - } - - /** - * {@inheritDoc} - */ - @Override - public boolean createLeaseIfNotExists(@NonNull final Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - log.debug("Creating lease {}", lease); - - PutItemRequest request = PutItemRequest.builder().tableName(table).item(serializer.toDynamoRecord(lease)) - .expected(serializer.getDynamoNonexistantExpectation()).build(); - - final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ConditionalCheckFailedException.class, t -> t); - - try { - try { - dynamoDBClient.putItem(request).get(); - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: Check if this is the correct behavior - throw new DependencyException(e); - } - } catch (ConditionalCheckFailedException e) { - log.debug("Did not create lease {} because it already existed", lease); - return false; - } catch (DynamoDbException e) { - throw convertAndRethrowExceptions("create", lease.leaseKey(), e); - } - return true; - } - - /** - * {@inheritDoc} - */ - @Override - public Lease 
getLease(@NonNull final String leaseKey) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - log.debug("Getting lease with key {}", leaseKey); - - GetItemRequest request = GetItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(leaseKey)) - .consistentRead(consistentReads).build(); - final AWSExceptionManager exceptionManager = createExceptionManager(); - try { - try { - GetItemResponse result = dynamoDBClient.getItem(request).get(); - - Map dynamoRecord = result.item(); - if (CollectionUtils.isNullOrEmpty(dynamoRecord)) { - log.debug("No lease found with key {}, returning null.", leaseKey); - return null; - } else { - final Lease lease = serializer.fromDynamoRecord(dynamoRecord); - log.debug("Got lease {}", lease); - return lease; - } - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: check behavior - throw new DependencyException(e); - } - } catch (DynamoDbException e) { - throw convertAndRethrowExceptions("get", leaseKey, e); - } - } - - /** - * {@inheritDoc} - */ - @Override - public boolean renewLease(@NonNull final Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - log.debug("Renewing lease with key {}", lease.leaseKey()); - - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) - .expected(serializer.getDynamoLeaseCounterExpectation(lease)) - .attributeUpdates(serializer.getDynamoLeaseCounterUpdate(lease)).build(); - - final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ConditionalCheckFailedException.class, t -> t); - - try { - try { - dynamoDBClient.updateItem(request).get(); - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: Check if this is correct behavior - throw new DependencyException(e); - } - 
} catch (ConditionalCheckFailedException e) { - log.debug("Lease renewal failed for lease with key {} because the lease counter was not {}", - lease.leaseKey(), lease.leaseCounter()); - - // If we had a spurious retry during the Dynamo update, then this conditional PUT failure - // might be incorrect. So, we get the item straight away and check if the lease owner + lease - // counter are what we expected. - String expectedOwner = lease.leaseOwner(); - Long expectedCounter = lease.leaseCounter() + 1; - final Lease updatedLease = getLease(lease.leaseKey()); - if (updatedLease == null || !expectedOwner.equals(updatedLease.leaseOwner()) - || !expectedCounter.equals(updatedLease.leaseCounter())) { - return false; - } - - log.info("Detected spurious renewal failure for lease with key {}, but recovered", lease.leaseKey()); - } catch (DynamoDbException e) { - throw new DependencyException(e); - } - - lease.leaseCounter(lease.leaseCounter() + 1); - return true; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean takeLease(@NonNull final Lease lease, @NonNull final String owner) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - final String oldOwner = lease.leaseOwner(); - - log.debug("Taking lease with leaseKey {} from {} to {}", lease.leaseKey(), - lease.leaseOwner() == null ? 
"nobody" : lease.leaseOwner(), owner); - - final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ConditionalCheckFailedException.class, t -> t); - - Map updates = serializer.getDynamoLeaseCounterUpdate(lease); - updates.putAll(serializer.getDynamoTakeLeaseUpdate(lease, owner)); - - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) - .expected(serializer.getDynamoLeaseCounterExpectation(lease)).attributeUpdates(updates).build(); - - try { - try { - dynamoDBClient.updateItem(request).get(); - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: Check behavior - throw new DependencyException(e); - } - } catch (ConditionalCheckFailedException e) { - log.debug("Lease renewal failed for lease with key {} because the lease counter was not {}", - lease.leaseKey(), lease.leaseCounter()); - return false; - } catch (DynamoDbException e) { - throw convertAndRethrowExceptions("take", lease.leaseKey(), e); - } - - lease.leaseCounter(lease.leaseCounter() + 1); - lease.leaseOwner(owner); - - if (oldOwner != null && !oldOwner.equals(owner)) { - lease.ownerSwitchesSinceCheckpoint(lease.ownerSwitchesSinceCheckpoint() + 1); - } - - return true; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean evictLease(@NonNull final Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - log.debug("Evicting lease with leaseKey {} owned by {}", lease.leaseKey(), lease.leaseOwner()); - - final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ConditionalCheckFailedException.class, t -> t); - - Map updates = serializer.getDynamoLeaseCounterUpdate(lease); - updates.putAll(serializer.getDynamoEvictLeaseUpdate(lease)); - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) - 
.expected(serializer.getDynamoLeaseOwnerExpectation(lease)).attributeUpdates(updates).build(); - - try { - try { - dynamoDBClient.updateItem(request).get(); - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: check behavior - throw new DependencyException(e); - } - } catch (ConditionalCheckFailedException e) { - log.debug("Lease eviction failed for lease with key {} because the lease owner was not {}", - lease.leaseKey(), lease.leaseOwner()); - return false; - } catch (DynamoDbException e) { - throw convertAndRethrowExceptions("evict", lease.leaseKey(), e); - } - - lease.leaseOwner(null); - lease.leaseCounter(lease.leaseCounter() + 1); - return true; - } - - /** - * {@inheritDoc} - */ - public void deleteAll() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - List allLeases = listLeases(); - - log.warn("Deleting {} items from table {}", allLeases.size(), table); - - final AWSExceptionManager exceptionManager = createExceptionManager(); - for (final Lease lease : allLeases) { - DeleteItemRequest deleteRequest = DeleteItemRequest.builder().tableName(table) - .key(serializer.getDynamoHashKey(lease)).build(); - - try { - try { - dynamoDBClient.deleteItem(deleteRequest).get(); - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: check the behavior - throw new DependencyException(e); - } - } catch (DynamoDbException e) { - throw convertAndRethrowExceptions("deleteAll", lease.leaseKey(), e); - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public void deleteLease(@NonNull final Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - log.debug("Deleting lease with leaseKey {}", lease.leaseKey()); - - DeleteItemRequest deleteRequest = DeleteItemRequest.builder().tableName(table) - .key(serializer.getDynamoHashKey(lease)).build(); - - 
final AWSExceptionManager exceptionManager = createExceptionManager(); - try { - try { - dynamoDBClient.deleteItem(deleteRequest).get(); - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: Check if this is the correct behavior - throw new DependencyException(e); - } - } catch (DynamoDbException e) { - throw convertAndRethrowExceptions("delete", lease.leaseKey(), e); - } - } - - /** - * {@inheritDoc} - */ - @Override - public boolean updateLease(@NonNull final Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - log.debug("Updating lease {}", lease); - - final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ConditionalCheckFailedException.class, t -> t); - - Map updates = serializer.getDynamoLeaseCounterUpdate(lease); - updates.putAll(serializer.getDynamoUpdateLeaseUpdate(lease)); - - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) - .expected(serializer.getDynamoLeaseCounterExpectation(lease)).attributeUpdates(updates).build(); - - try { - try { - dynamoDBClient.updateItem(request).get(); - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - throw new DependencyException(e); - } - } catch (ConditionalCheckFailedException e) { - log.debug("Lease update failed for lease with key {} because the lease counter was not {}", - lease.leaseKey(), lease.leaseCounter()); - return false; - } catch (DynamoDbException e) { - throw convertAndRethrowExceptions("update", lease.leaseKey(), e); - } - - lease.leaseCounter(lease.leaseCounter() + 1); - return true; - } - - /** - * {@inheritDoc} - */ - @Override - public ExtendedSequenceNumber getCheckpoint(String shardId) - throws ProvisionedThroughputException, InvalidStateException, DependencyException { - ExtendedSequenceNumber checkpoint = 
null; - Lease lease = getLease(shardId); - if (lease != null) { - checkpoint = lease.checkpoint(); - } - return checkpoint; - } - - /* - * This method contains boilerplate exception handling - it throws or returns something to be thrown. The - * inconsistency there exists to satisfy the compiler when this method is used at the end of non-void methods. - */ - protected DependencyException convertAndRethrowExceptions(String operation, String leaseKey, Exception e) - throws ProvisionedThroughputException, InvalidStateException { - if (e instanceof ProvisionedThroughputExceededException) { - log.warn("Provisioned Throughput on the lease table has been exceeded. It's recommended that you increase" - + " the IOPs on the table. Failure to increase the IOPs may cause the application to not make" - + " progress."); - throw new ProvisionedThroughputException(e); - } else if (e instanceof ResourceNotFoundException) { - throw new InvalidStateException( - String.format("Cannot %s lease with key %s because table %s does not exist.", - operation, leaseKey, table), - e); - } else { - return new DependencyException(e); - } - } - - private AWSExceptionManager createExceptionManager() { - final AWSExceptionManager exceptionManager = new AWSExceptionManager(); - exceptionManager.add(DynamoDbException.class, t -> t); - return exceptionManager; - } - - void performPostTableCreationAction() { - tableCreatorCallback.performAction( - TableCreatorCallbackInput.builder().dynamoDbClient(dynamoDBClient).tableName(table).build()); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewer.java deleted file mode 100644 index 35251e99..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewer.java +++ /dev/null @@ -1,421 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases.dynamodb; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentNavigableMap; -import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.lang3.StringUtils; - -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.LeaseRenewer; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.MetricsUtil; - -/** - * An 
implementation of {@link LeaseRenewer} that uses DynamoDB via {@link LeaseRefresher}. - */ -@Slf4j -@KinesisClientInternalApi -public class DynamoDBLeaseRenewer implements LeaseRenewer { - private static final int RENEWAL_RETRIES = 2; - private static final String RENEW_ALL_LEASES_DIMENSION = "RenewAllLeases"; - - private final LeaseRefresher leaseRefresher; - private final String workerIdentifier; - private final long leaseDurationNanos; - private final ExecutorService executorService; - private final MetricsFactory metricsFactory; - - private final ConcurrentNavigableMap ownedLeases = new ConcurrentSkipListMap<>(); - - /** - * Constructor. - * - * @param leaseRefresher - * LeaseRefresher to use - * @param workerIdentifier - * identifier of this worker - * @param leaseDurationMillis - * duration of a lease in milliseconds - * @param executorService - * ExecutorService to use for renewing leases in parallel - */ - public DynamoDBLeaseRenewer(final LeaseRefresher leaseRefresher, final String workerIdentifier, - final long leaseDurationMillis, final ExecutorService executorService, - final MetricsFactory metricsFactory) { - this.leaseRefresher = leaseRefresher; - this.workerIdentifier = workerIdentifier; - this.leaseDurationNanos = TimeUnit.MILLISECONDS.toNanos(leaseDurationMillis); - this.executorService = executorService; - this.metricsFactory = metricsFactory; - } - - /** - * {@inheritDoc} - */ - @Override - public void renewLeases() throws DependencyException, InvalidStateException { - // Due to the eventually consistent nature of ConcurrentNavigableMap iterators, this log entry may become - // inaccurate during iteration. - log.debug("Worker {} holding {} leases: {}", workerIdentifier, ownedLeases.size(), ownedLeases); - - /* - * Lease renewals are done in parallel so many leases can be renewed for short lease fail over time - * configuration. In this case, metrics scope is also shared across different threads, so scope must be thread - * safe. 
- */ - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, RENEW_ALL_LEASES_DIMENSION); - - long startTime = System.currentTimeMillis(); - boolean success = false; - - try { - /* - * We iterate in descending order here so that the synchronized(lease) inside renewLease doesn't "lead" calls - * to getCurrentlyHeldLeases. They'll still cross paths, but they won't interleave their executions. - */ - int lostLeases = 0; - List> renewLeaseTasks = new ArrayList<>(); - for (Lease lease : ownedLeases.descendingMap().values()) { - renewLeaseTasks.add(executorService.submit(new RenewLeaseTask(lease))); - } - int leasesInUnknownState = 0; - Exception lastException = null; - for (Future renewLeaseTask : renewLeaseTasks) { - try { - if (!renewLeaseTask.get()) { - lostLeases++; - } - } catch (InterruptedException e) { - log.info("Interrupted while waiting for a lease to renew."); - leasesInUnknownState += 1; - Thread.currentThread().interrupt(); - } catch (ExecutionException e) { - log.error("Encountered an exception while renewing a lease.", e.getCause()); - leasesInUnknownState += 1; - lastException = e; - } - } - - scope.addData("LostLeases", lostLeases, StandardUnit.COUNT, MetricsLevel.SUMMARY); - scope.addData("CurrentLeases", ownedLeases.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY); - if (leasesInUnknownState > 0) { - throw new DependencyException( - String.format("Encountered an exception while renewing leases. 
The number" - + " of leases which might not have been renewed is %d", leasesInUnknownState), - lastException); - } - success = true; - } finally { - MetricsUtil.addWorkerIdentifier(scope, workerIdentifier); - MetricsUtil.addSuccessAndLatency(scope, success, startTime, MetricsLevel.SUMMARY); - MetricsUtil.endScope(scope); - } - } - - @RequiredArgsConstructor - private class RenewLeaseTask implements Callable { - private final Lease lease; - - @Override - public Boolean call() throws Exception { - return renewLease(lease); - } - } - - private boolean renewLease(Lease lease) throws DependencyException, InvalidStateException { - return renewLease(lease, false); - } - - private boolean renewLease(Lease lease, boolean renewEvenIfExpired) throws DependencyException, InvalidStateException { - String leaseKey = lease.leaseKey(); - - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, RENEW_ALL_LEASES_DIMENSION); - - boolean success = false; - boolean renewedLease = false; - long startTime = System.currentTimeMillis(); - try { - for (int i = 1; i <= RENEWAL_RETRIES; i++) { - try { - synchronized (lease) { - // Don't renew expired lease during regular renewals. getCopyOfHeldLease may have returned null - // triggering the application processing to treat this as a lost lease (fail checkpoint with - // ShutdownException). 
- boolean isLeaseExpired = lease.isExpired(leaseDurationNanos, System.nanoTime()); - if (renewEvenIfExpired || !isLeaseExpired) { - renewedLease = leaseRefresher.renewLease(lease); - } - if (renewedLease) { - lease.lastCounterIncrementNanos(System.nanoTime()); - } - } - - if (renewedLease) { - if (log.isDebugEnabled()) { - log.debug("Worker {} successfully renewed lease with key {}", workerIdentifier, leaseKey); - } - } else { - log.info("Worker {} lost lease with key {}", workerIdentifier, leaseKey); - ownedLeases.remove(leaseKey); - } - - success = true; - break; - } catch (ProvisionedThroughputException e) { - log.info("Worker {} could not renew lease with key {} on try {} out of {} due to capacity", - workerIdentifier, leaseKey, i, RENEWAL_RETRIES); - } - } - } finally { - MetricsUtil.addWorkerIdentifier(scope, workerIdentifier); - MetricsUtil.addSuccessAndLatency(scope, "RenewLease", success, startTime, MetricsLevel.DETAILED); - MetricsUtil.endScope(scope); - } - - return renewedLease; - } - - /** - * {@inheritDoc} - */ - @Override - public Map getCurrentlyHeldLeases() { - Map result = new HashMap<>(); - long now = System.nanoTime(); - - for (String leaseKey : ownedLeases.keySet()) { - Lease copy = getCopyOfHeldLease(leaseKey, now); - if (copy != null) { - result.put(copy.leaseKey(), copy); - } - } - - return result; - } - - /** - * {@inheritDoc} - */ - @Override - public Lease getCurrentlyHeldLease(String leaseKey) { - return getCopyOfHeldLease(leaseKey, System.nanoTime()); - } - - /** - * Internal method to return a lease with a specific lease key only if we currently hold it. 
- * - * @param leaseKey key of lease to return - * @param now current timestamp for old-ness checking - * @return non-authoritative copy of the held lease, or null if we don't currently hold it - */ - private Lease getCopyOfHeldLease(String leaseKey, long now) { - Lease authoritativeLease = ownedLeases.get(leaseKey); - if (authoritativeLease == null) { - return null; - } else { - Lease copy = null; - synchronized (authoritativeLease) { - copy = authoritativeLease.copy(); - } - - if (copy.isExpired(leaseDurationNanos, now)) { - log.info("getCurrentlyHeldLease not returning lease with key {} because it is expired", - copy.leaseKey()); - return null; - } else { - return copy; - } - } - } - - /** - * {@inheritDoc} - */ - @Override - public boolean updateLease(Lease lease, UUID concurrencyToken, @NonNull String operation, String shardId) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - verifyNotNull(lease, "lease cannot be null"); - verifyNotNull(lease.leaseKey(), "leaseKey cannot be null"); - verifyNotNull(concurrencyToken, "concurrencyToken cannot be null"); - - String leaseKey = lease.leaseKey(); - Lease authoritativeLease = ownedLeases.get(leaseKey); - - if (authoritativeLease == null) { - log.info("Worker {} could not update lease with key {} because it does not hold it", workerIdentifier, - leaseKey); - return false; - } - - /* - * If the passed-in concurrency token doesn't match the concurrency token of the authoritative lease, it means - * the lease was lost and regained between when the caller acquired his concurrency token and when the caller - * called update. 
- */ - if (!authoritativeLease.concurrencyToken().equals(concurrencyToken)) { - log.info("Worker {} refusing to update lease with key {} because concurrency tokens don't match", - workerIdentifier, leaseKey); - return false; - } - - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, operation); - if (StringUtils.isNotEmpty(shardId)) { - MetricsUtil.addShardId(scope, shardId); - } - - long startTime = System.currentTimeMillis(); - boolean success = false; - try { - synchronized (authoritativeLease) { - authoritativeLease.update(lease); - boolean updatedLease = leaseRefresher.updateLease(authoritativeLease); - if (updatedLease) { - // Updates increment the counter - authoritativeLease.lastCounterIncrementNanos(System.nanoTime()); - } else { - /* - * If updateLease returns false, it means someone took the lease from us. Remove the lease - * from our set of owned leases pro-actively rather than waiting for a run of renewLeases(). - */ - log.info("Worker {} lost lease with key {} - discovered during update", workerIdentifier, leaseKey); - - /* - * Remove only if the value currently in the map is the same as the authoritative lease. We're - * guarding against a pause after the concurrency token check above. It plays out like so: - * - * 1) Concurrency token check passes - * 2) Pause. Lose lease, re-acquire lease. This requires at least one lease counter update. - * 3) Unpause. leaseRefresher.updateLease fails conditional write due to counter updates, returns - * false. - * 4) ownedLeases.remove(key, value) doesn't do anything because authoritativeLease does not - * .equals() the re-acquired version in the map on the basis of lease counter. This is what we want. - * If we just used ownedLease.remove(key), we would have pro-actively removed a lease incorrectly. - * - * Note that there is a subtlety here - Lease.equals() deliberately does not check the concurrency - * token, but it does check the lease counter, so this scheme works. 
- */ - ownedLeases.remove(leaseKey, authoritativeLease); - } - - success = true; - return updatedLease; - } - } finally { - MetricsUtil.addSuccessAndLatency(scope, "UpdateLease", success, startTime, MetricsLevel.DETAILED); - MetricsUtil.endScope(scope); - } - } - - /** - * {@inheritDoc} - */ - @Override - public void addLeasesToRenew(Collection newLeases) { - verifyNotNull(newLeases, "newLeases cannot be null"); - - for (Lease lease : newLeases) { - if (lease.lastCounterIncrementNanos() == null) { - log.info("addLeasesToRenew ignoring lease with key {} because it does not have lastRenewalNanos set", - lease.leaseKey()); - continue; - } - - Lease authoritativeLease = lease.copy(); - - /* - * Assign a concurrency token when we add this to the set of currently owned leases. This ensures that - * every time we acquire a lease, it gets a new concurrency token. - */ - authoritativeLease.concurrencyToken(UUID.randomUUID()); - ownedLeases.put(authoritativeLease.leaseKey(), authoritativeLease); - } - } - - /** - * {@inheritDoc} - */ - @Override - public void clearCurrentlyHeldLeases() { - ownedLeases.clear(); - } - - /** - * {@inheritDoc} - * @param lease the lease to drop. - */ - @Override - public void dropLease(Lease lease) { - ownedLeases.remove(lease.leaseKey()); - } - - /** - * {@inheritDoc} - */ - @Override - public void initialize() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - Collection leases = leaseRefresher.listLeases(); - List myLeases = new LinkedList<>(); - boolean renewEvenIfExpired = true; - - for (Lease lease : leases) { - if (workerIdentifier.equals(lease.leaseOwner())) { - log.info(" Worker {} found lease {}", workerIdentifier, lease); - // Okay to renew even if lease is expired, because we start with an empty list and we add the lease to - // our list only after a successful renew. 
So we don't need to worry about the edge case where we could - // continue renewing a lease after signaling a lease loss to the application. - - if (renewLease(lease, renewEvenIfExpired)) { - myLeases.add(lease); - } - } else { - log.debug("Worker {} ignoring lease {} ", workerIdentifier, lease); - } - } - - addLeasesToRenew(myLeases); - } - - private void verifyNotNull(Object object, String message) { - if (object == null) { - throw new IllegalArgumentException(message); - } - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseSerializer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseSerializer.java deleted file mode 100644 index 4b58a429..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseSerializer.java +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.leases.dynamodb; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - - -import com.google.common.base.Strings; -import software.amazon.awssdk.services.dynamodb.model.AttributeAction; -import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate; -import software.amazon.awssdk.services.dynamodb.model.ExpectedAttributeValue; -import software.amazon.awssdk.services.dynamodb.model.KeySchemaElement; -import software.amazon.awssdk.services.dynamodb.model.KeyType; -import software.amazon.awssdk.services.dynamodb.model.ScalarAttributeType; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.leases.DynamoUtils; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseSerializer; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * An implementation of ILeaseSerializer for basic Lease objects. Can also instantiate subclasses of Lease so that - * LeaseSerializer can be decorated by other classes if you need to add fields to leases. 
- */ -@KinesisClientInternalApi -public class DynamoDBLeaseSerializer implements LeaseSerializer { - private static final String LEASE_KEY_KEY = "leaseKey"; - private static final String LEASE_OWNER_KEY = "leaseOwner"; - private static final String LEASE_COUNTER_KEY = "leaseCounter"; - private static final String OWNER_SWITCHES_KEY = "ownerSwitchesSinceCheckpoint"; - private static final String CHECKPOINT_SEQUENCE_NUMBER_KEY = "checkpoint"; - private static final String CHECKPOINT_SUBSEQUENCE_NUMBER_KEY = "checkpointSubSequenceNumber"; - private static final String PENDING_CHECKPOINT_SEQUENCE_KEY = "pendingCheckpoint"; - private static final String PENDING_CHECKPOINT_SUBSEQUENCE_KEY = "pendingCheckpointSubSequenceNumber"; - private static final String PARENT_SHARD_ID_KEY = "parentShardId"; - - @Override - public Map toDynamoRecord(final Lease lease) { - Map result = new HashMap<>(); - - result.put(LEASE_KEY_KEY, DynamoUtils.createAttributeValue(lease.leaseKey())); - result.put(LEASE_COUNTER_KEY, DynamoUtils.createAttributeValue(lease.leaseCounter())); - - if (lease.leaseOwner() != null) { - result.put(LEASE_OWNER_KEY, DynamoUtils.createAttributeValue(lease.leaseOwner())); - } - - result.put(OWNER_SWITCHES_KEY, DynamoUtils.createAttributeValue(lease.ownerSwitchesSinceCheckpoint())); - result.put(CHECKPOINT_SEQUENCE_NUMBER_KEY, DynamoUtils.createAttributeValue(lease.checkpoint().sequenceNumber())); - result.put(CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber())); - if (lease.parentShardIds() != null && !lease.parentShardIds().isEmpty()) { - result.put(PARENT_SHARD_ID_KEY, DynamoUtils.createAttributeValue(lease.parentShardIds())); - } - - if (lease.pendingCheckpoint() != null && !lease.pendingCheckpoint().sequenceNumber().isEmpty()) { - result.put(PENDING_CHECKPOINT_SEQUENCE_KEY, DynamoUtils.createAttributeValue(lease.pendingCheckpoint().sequenceNumber())); - result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, 
DynamoUtils.createAttributeValue(lease.pendingCheckpoint().subSequenceNumber())); - } - - return result; - } - - @Override - public Lease fromDynamoRecord(final Map dynamoRecord) { - Lease result = new Lease(); - result.leaseKey(DynamoUtils.safeGetString(dynamoRecord, LEASE_KEY_KEY)); - result.leaseOwner(DynamoUtils.safeGetString(dynamoRecord, LEASE_OWNER_KEY)); - result.leaseCounter(DynamoUtils.safeGetLong(dynamoRecord, LEASE_COUNTER_KEY)); - - result.ownerSwitchesSinceCheckpoint(DynamoUtils.safeGetLong(dynamoRecord, OWNER_SWITCHES_KEY)); - result.checkpoint( - new ExtendedSequenceNumber( - DynamoUtils.safeGetString(dynamoRecord, CHECKPOINT_SEQUENCE_NUMBER_KEY), - DynamoUtils.safeGetLong(dynamoRecord, CHECKPOINT_SUBSEQUENCE_NUMBER_KEY)) - ); - result.parentShardIds(DynamoUtils.safeGetSS(dynamoRecord, PARENT_SHARD_ID_KEY)); - - if (!Strings.isNullOrEmpty(DynamoUtils.safeGetString(dynamoRecord, PENDING_CHECKPOINT_SEQUENCE_KEY))) { - result.pendingCheckpoint( - new ExtendedSequenceNumber( - DynamoUtils.safeGetString(dynamoRecord, PENDING_CHECKPOINT_SEQUENCE_KEY), - DynamoUtils.safeGetLong(dynamoRecord, PENDING_CHECKPOINT_SUBSEQUENCE_KEY)) - ); - } - - return result; - } - - @Override - public Map getDynamoHashKey(final String leaseKey) { - Map result = new HashMap<>(); - - result.put(LEASE_KEY_KEY, DynamoUtils.createAttributeValue(leaseKey)); - - return result; - } - - @Override - public Map getDynamoHashKey(final Lease lease) { - return getDynamoHashKey(lease.leaseKey()); - } - - @Override - public Map getDynamoLeaseCounterExpectation(final Lease lease) { - return getDynamoLeaseCounterExpectation(lease.leaseCounter()); - } - - public Map getDynamoLeaseCounterExpectation(final Long leaseCounter) { - Map result = new HashMap<>(); - - ExpectedAttributeValue eav = ExpectedAttributeValue.builder().value(DynamoUtils.createAttributeValue(leaseCounter)).build(); - result.put(LEASE_COUNTER_KEY, eav); - - return result; - } - - @Override - public Map 
getDynamoLeaseOwnerExpectation(final Lease lease) { - Map result = new HashMap<>(); - - ExpectedAttributeValue.Builder eavBuilder = ExpectedAttributeValue.builder(); - - if (lease.leaseOwner() == null) { - eavBuilder = eavBuilder.exists(false); - } else { - eavBuilder = eavBuilder.value(DynamoUtils.createAttributeValue(lease.leaseOwner())); - } - - result.put(LEASE_OWNER_KEY, eavBuilder.build()); - - return result; - } - - @Override - public Map getDynamoNonexistantExpectation() { - Map result = new HashMap<>(); - - ExpectedAttributeValue expectedAV = ExpectedAttributeValue.builder().exists(false).build(); - result.put(LEASE_KEY_KEY, expectedAV); - - return result; - } - - @Override - public Map getDynamoLeaseCounterUpdate(final Lease lease) { - return getDynamoLeaseCounterUpdate(lease.leaseCounter()); - } - - public Map getDynamoLeaseCounterUpdate(Long leaseCounter) { - Map result = new HashMap<>(); - - AttributeValueUpdate avu = - AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(leaseCounter + 1)).action(AttributeAction.PUT).build(); - result.put(LEASE_COUNTER_KEY, avu); - - return result; - } - - @Override - public Map getDynamoTakeLeaseUpdate(final Lease lease, String owner) { - Map result = new HashMap<>(); - - result.put(LEASE_OWNER_KEY, AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(owner)).action(AttributeAction.PUT).build()); - - String oldOwner = lease.leaseOwner(); - if (oldOwner != null && !oldOwner.equals(owner)) { - result.put(OWNER_SWITCHES_KEY, AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(1L)).action(AttributeAction.ADD).build()); - } - - return result; - } - - @Override - public Map getDynamoEvictLeaseUpdate(final Lease lease) { - Map result = new HashMap<>(); - AttributeValue value = null; - - result.put(LEASE_OWNER_KEY, AttributeValueUpdate.builder().value(value).action(AttributeAction.DELETE).build()); - - return result; - } - - private AttributeValueUpdate 
putUpdate(AttributeValue attributeValue) { - return AttributeValueUpdate.builder().value(attributeValue).action(AttributeAction.PUT).build(); - } - - @Override - public Map getDynamoUpdateLeaseUpdate(final Lease lease) { - Map result = new HashMap<>(); - result.put(CHECKPOINT_SEQUENCE_NUMBER_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.checkpoint().sequenceNumber()))); - result.put(CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber()))); - result.put(OWNER_SWITCHES_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.ownerSwitchesSinceCheckpoint()))); - - if (lease.pendingCheckpoint() != null && !lease.pendingCheckpoint().sequenceNumber().isEmpty()) { - result.put(PENDING_CHECKPOINT_SEQUENCE_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.pendingCheckpoint().sequenceNumber()))); - result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.pendingCheckpoint().subSequenceNumber()))); - } else { - result.put(PENDING_CHECKPOINT_SEQUENCE_KEY, AttributeValueUpdate.builder().action(AttributeAction.DELETE).build()); - result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, AttributeValueUpdate.builder().action(AttributeAction.DELETE).build()); - } - return result; - } - - @Override - public Collection getKeySchema() { - List keySchema = new ArrayList<>(); - keySchema.add(KeySchemaElement.builder().attributeName(LEASE_KEY_KEY).keyType(KeyType.HASH).build()); - - return keySchema; - } - - @Override - public Collection getAttributeDefinitions() { - List definitions = new ArrayList<>(); - definitions.add(AttributeDefinition.builder().attributeName(LEASE_KEY_KEY) - .attributeType(ScalarAttributeType.S).build()); - - return definitions; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java deleted file mode 
100644 index 165ad01d..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java +++ /dev/null @@ -1,541 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases.dynamodb; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.TimeUnit; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.LeaseTaker; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.metrics.MetricsUtil; - -/** - * An implementation of {@link LeaseTaker} that uses DynamoDB via {@link LeaseRefresher}. 
- */ -@Slf4j -@KinesisClientInternalApi -public class DynamoDBLeaseTaker implements LeaseTaker { - private static final int TAKE_RETRIES = 3; - private static final int SCAN_RETRIES = 1; - - // See note on TAKE_LEASES_DIMENSION(Callable) for why we have this callable. - private static final Callable SYSTEM_CLOCK_CALLABLE = System::nanoTime; - - private static final String TAKE_LEASES_DIMENSION = "TakeLeases"; - - private final LeaseRefresher leaseRefresher; - private final String workerIdentifier; - private final long leaseDurationNanos; - private final MetricsFactory metricsFactory; - - private final Map allLeases = new HashMap<>(); - // TODO: Remove these defaults and use the defaults in the config - private int maxLeasesForWorker = Integer.MAX_VALUE; - private int maxLeasesToStealAtOneTime = 1; - - private long lastScanTimeNanos = 0L; - - public DynamoDBLeaseTaker(LeaseRefresher leaseRefresher, String workerIdentifier, long leaseDurationMillis, - final MetricsFactory metricsFactory) { - this.leaseRefresher = leaseRefresher; - this.workerIdentifier = workerIdentifier; - this.leaseDurationNanos = TimeUnit.MILLISECONDS.toNanos(leaseDurationMillis); - this.metricsFactory = metricsFactory; - } - - /** - * Worker will not acquire more than the specified max number of leases even if there are more - * shards that need to be processed. This can be used in scenarios where a worker is resource constrained or - * to prevent lease thrashing when small number of workers pick up all leases for small amount of time during - * deployment. - * Note that setting a low value may cause data loss (e.g. if there aren't enough Workers to make progress on all - * shards). When setting the value for this property, one must ensure enough workers are present to process - * shards and should consider future resharding, child shards that may be blocked on parent shards, some workers - * becoming unhealthy, etc. 
- * - * @param maxLeasesForWorker Max leases this Worker can handle at a time - * @return LeaseTaker - */ - public DynamoDBLeaseTaker withMaxLeasesForWorker(int maxLeasesForWorker) { - if (maxLeasesForWorker <= 0) { - throw new IllegalArgumentException("maxLeasesForWorker should be >= 1"); - } - this.maxLeasesForWorker = maxLeasesForWorker; - return this; - } - - /** - * Max leases to steal from a more loaded Worker at one time (for load balancing). - * Setting this to a higher number can allow for faster load convergence (e.g. during deployments, cold starts), - * but can cause higher churn in the system. - * - * @param maxLeasesToStealAtOneTime Steal up to this many leases at one time (for load balancing) - * @return LeaseTaker - */ - public DynamoDBLeaseTaker withMaxLeasesToStealAtOneTime(int maxLeasesToStealAtOneTime) { - if (maxLeasesToStealAtOneTime <= 0) { - throw new IllegalArgumentException("maxLeasesToStealAtOneTime should be >= 1"); - } - this.maxLeasesToStealAtOneTime = maxLeasesToStealAtOneTime; - return this; - } - - /** - * {@inheritDoc} - */ - @Override - public Map takeLeases() throws DependencyException, InvalidStateException { - return takeLeases(SYSTEM_CLOCK_CALLABLE); - } - - /** - * Internal implementation of TAKE_LEASES_DIMENSION. Takes a callable that can provide the time to enable test cases - * without Thread.sleep. Takes a callable instead of a raw time value because the time needs to be computed as-of - * immediately after the scan. 
- * - * @param timeProvider - * Callable that will supply the time - * - * @return map of lease key to taken lease - * - * @throws DependencyException - * @throws InvalidStateException - */ - synchronized Map takeLeases(Callable timeProvider) - throws DependencyException, InvalidStateException { - // Key is leaseKey - Map takenLeases = new HashMap<>(); - - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, TAKE_LEASES_DIMENSION); - - long startTime = System.currentTimeMillis(); - boolean success = false; - - ProvisionedThroughputException lastException = null; - - try { - try { - for (int i = 1; i <= SCAN_RETRIES; i++) { - try { - updateAllLeases(timeProvider); - success = true; - } catch (ProvisionedThroughputException e) { - log.info("Worker {} could not find expired leases on try {} out of {}", workerIdentifier, i, - TAKE_RETRIES); - lastException = e; - } - } - } finally { - MetricsUtil.addWorkerIdentifier(scope, workerIdentifier); - MetricsUtil.addSuccessAndLatency(scope, "ListLeases", success, startTime, MetricsLevel.DETAILED); - } - - if (lastException != null) { - log.error("Worker {} could not scan leases table, aborting TAKE_LEASES_DIMENSION. 
Exception caught by" - + " last retry:", workerIdentifier, lastException); - return takenLeases; - } - - List expiredLeases = getExpiredLeases(); - - Set leasesToTake = computeLeasesToTake(expiredLeases); - Set untakenLeaseKeys = new HashSet<>(); - - for (Lease lease : leasesToTake) { - String leaseKey = lease.leaseKey(); - - startTime = System.currentTimeMillis(); - success = false; - try { - for (int i = 1; i <= TAKE_RETRIES; i++) { - try { - if (leaseRefresher.takeLease(lease, workerIdentifier)) { - lease.lastCounterIncrementNanos(System.nanoTime()); - takenLeases.put(leaseKey, lease); - } else { - untakenLeaseKeys.add(leaseKey); - } - - success = true; - break; - } catch (ProvisionedThroughputException e) { - log.info("Could not take lease with key {} for worker {} on try {} out of {} due to" - + " capacity", leaseKey, workerIdentifier, i, TAKE_RETRIES); - } - } - } finally { - MetricsUtil.addSuccessAndLatency(scope, "TakeLease", success, startTime, MetricsLevel.DETAILED); - } - } - - if (takenLeases.size() > 0) { - log.info("Worker {} successfully took {} leases: {}", workerIdentifier, takenLeases.size(), - stringJoin(takenLeases.keySet(), ", ")); - } - - if (untakenLeaseKeys.size() > 0) { - log.info("Worker {} failed to take {} leases: {}", workerIdentifier, untakenLeaseKeys.size(), - stringJoin(untakenLeaseKeys, ", ")); - } - - scope.addData("TakenLeases", takenLeases.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY); - } finally { - MetricsUtil.endScope(scope); - } - - return takenLeases; - } - - /** Package access for testing purposes. - * - * @param strings - * @param delimiter - * @return Joined string. 
- */ - static String stringJoin(Collection strings, String delimiter) { - StringBuilder builder = new StringBuilder(); - boolean needDelimiter = false; - for (String string : strings) { - if (needDelimiter) { - builder.append(delimiter); - } - builder.append(string); - needDelimiter = true; - } - - return builder.toString(); - } - - /** - * Scan all leases and update lastRenewalTime. Add new leases and delete old leases. - * - * @param timeProvider callable that supplies the current time - * - * @return list of expired leases, possibly empty, never null. - * - * @throws ProvisionedThroughputException if listLeases fails due to lack of provisioned throughput - * @throws InvalidStateException if the lease table does not exist - * @throws DependencyException if listLeases fails in an unexpected way - */ - private void updateAllLeases(Callable timeProvider) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - List freshList = leaseRefresher.listLeases(); - try { - lastScanTimeNanos = timeProvider.call(); - } catch (Exception e) { - throw new DependencyException("Exception caught from timeProvider", e); - } - - // This set will hold the lease keys not updated by the previous listLeases call. - Set notUpdated = new HashSet<>(allLeases.keySet()); - - // Iterate over all leases, finding ones to try to acquire that haven't changed since the last iteration - for (Lease lease : freshList) { - String leaseKey = lease.leaseKey(); - - Lease oldLease = allLeases.get(leaseKey); - allLeases.put(leaseKey, lease); - notUpdated.remove(leaseKey); - - if (oldLease != null) { - // If we've seen this lease before... - if (oldLease.leaseCounter().equals(lease.leaseCounter())) { - // ...and the counter hasn't changed, propagate the lastRenewalNanos time from the old lease - lease.lastCounterIncrementNanos(oldLease.lastCounterIncrementNanos()); - } else { - // ...and the counter has changed, set lastRenewalNanos to the time of the scan. 
- lease.lastCounterIncrementNanos(lastScanTimeNanos); - } - } else { - if (lease.leaseOwner() == null) { - // if this new lease is unowned, it's never been renewed. - lease.lastCounterIncrementNanos(0L); - - if (log.isDebugEnabled()) { - log.debug("Treating new lease with key {} as never renewed because it is new and unowned.", - leaseKey); - } - } else { - // if this new lease is owned, treat it as renewed as of the scan - lease.lastCounterIncrementNanos(lastScanTimeNanos); - if (log.isDebugEnabled()) { - log.debug("Treating new lease with key {} as recently renewed because it is new and owned.", - leaseKey); - } - } - } - } - - // Remove dead leases from allLeases - for (String key : notUpdated) { - allLeases.remove(key); - } - } - - /** - * @return list of leases that were expired as of our last scan. - */ - private List getExpiredLeases() { - List expiredLeases = new ArrayList<>(); - - for (Lease lease : allLeases.values()) { - if (lease.isExpired(leaseDurationNanos, lastScanTimeNanos)) { - expiredLeases.add(lease); - } - } - - return expiredLeases; - } - - /** - * Compute the number of leases I should try to take based on the state of the system. - * - * @param expiredLeases list of leases we determined to be expired - * @return set of leases to take. - */ - private Set computeLeasesToTake(List expiredLeases) { - Map leaseCounts = computeLeaseCounts(expiredLeases); - Set leasesToTake = new HashSet<>(); - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, TAKE_LEASES_DIMENSION); - MetricsUtil.addWorkerIdentifier(scope, workerIdentifier); - - try { - int numLeases = allLeases.size(); - int numWorkers = leaseCounts.size(); - - if (numLeases == 0) { - // If there are no leases, I shouldn't try to take any. - return leasesToTake; - } - - int target; - if (numWorkers >= numLeases) { - // If we have n leases and n or more workers, each worker can have up to 1 lease, including myself. 
- target = 1; - } else { - /* - * numWorkers must be < numLeases. - * - * Our target for each worker is numLeases / numWorkers (+1 if numWorkers doesn't evenly divide numLeases) - */ - target = numLeases / numWorkers + (numLeases % numWorkers == 0 ? 0 : 1); - - // Spill over is the number of leases this worker should have claimed, but did not because it would - // exceed the max allowed for this worker. - int leaseSpillover = Math.max(0, target - maxLeasesForWorker); - if (target > maxLeasesForWorker) { - log.warn( - "Worker {} target is {} leases and maxLeasesForWorker is {}. Resetting target to {}," - + " lease spillover is {}. Note that some shards may not be processed if no other " - + "workers are able to pick them up.", - workerIdentifier, target, maxLeasesForWorker, maxLeasesForWorker, leaseSpillover); - target = maxLeasesForWorker; - } - scope.addData("LeaseSpillover", leaseSpillover, StandardUnit.COUNT, MetricsLevel.SUMMARY); - } - - int myCount = leaseCounts.get(workerIdentifier); - int numLeasesToReachTarget = target - myCount; - - if (numLeasesToReachTarget <= 0) { - // If we don't need anything, return the empty set. - return leasesToTake; - } - - // Shuffle expiredLeases so workers don't all try to contend for the same leases. - Collections.shuffle(expiredLeases); - - int originalExpiredLeasesSize = expiredLeases.size(); - if (expiredLeases.size() > 0) { - // If we have expired leases, get up to leases from expiredLeases - for (; numLeasesToReachTarget > 0 && expiredLeases.size() > 0; numLeasesToReachTarget--) { - leasesToTake.add(expiredLeases.remove(0)); - } - } else { - // If there are no expired leases and we need a lease, consider stealing. 
- List leasesToSteal = chooseLeasesToSteal(leaseCounts, numLeasesToReachTarget, target); - for (Lease leaseToSteal : leasesToSteal) { - log.info("Worker {} needed {} leases but none were expired, so it will steal lease {} from {}", - workerIdentifier, numLeasesToReachTarget, leaseToSteal.leaseKey(), - leaseToSteal.leaseOwner()); - leasesToTake.add(leaseToSteal); - } - } - - if (!leasesToTake.isEmpty()) { - log.info( - "Worker {} saw {} total leases, {} available leases, {} " - + "workers. Target is {} leases, I have {} leases, I will take {} leases", - workerIdentifier, numLeases, originalExpiredLeasesSize, numWorkers, target, myCount, - leasesToTake.size()); - } - - scope.addData("TotalLeases", numLeases, StandardUnit.COUNT, MetricsLevel.DETAILED); - scope.addData("ExpiredLeases", originalExpiredLeasesSize, StandardUnit.COUNT, MetricsLevel.SUMMARY); - scope.addData("NumWorkers", numWorkers, StandardUnit.COUNT, MetricsLevel.SUMMARY); - scope.addData("NeededLeases", numLeasesToReachTarget, StandardUnit.COUNT, MetricsLevel.DETAILED); - scope.addData("LeasesToTake", leasesToTake.size(), StandardUnit.COUNT, MetricsLevel.DETAILED); - } finally { - MetricsUtil.endScope(scope); - } - - return leasesToTake; - } - - /** - * Choose leases to steal by randomly selecting one or more (up to max) from the most loaded worker. 
- * Stealing rules: - * - * Steal up to maxLeasesToStealAtOneTime leases from the most loaded worker if - * a) he has > target leases and I need >= 1 leases : steal min(leases needed, maxLeasesToStealAtOneTime) - * b) he has == target leases and I need > 1 leases : steal 1 - * - * @param leaseCounts map of workerIdentifier to lease count - * @param needed # of leases needed to reach the target leases for the worker - * @param target target # of leases per worker - * @return Leases to steal, or empty list if we should not steal - */ - private List chooseLeasesToSteal(Map leaseCounts, int needed, int target) { - List leasesToSteal = new ArrayList<>(); - - Entry mostLoadedWorker = null; - // Find the most loaded worker - for (Entry worker : leaseCounts.entrySet()) { - if (mostLoadedWorker == null || mostLoadedWorker.getValue() < worker.getValue()) { - mostLoadedWorker = worker; - } - } - - int numLeasesToSteal = 0; - if ((mostLoadedWorker.getValue() >= target) && (needed > 0)) { - int leasesOverTarget = mostLoadedWorker.getValue() - target; - numLeasesToSteal = Math.min(needed, leasesOverTarget); - // steal 1 if we need > 1 and max loaded worker has target leases. - if ((needed > 1) && (numLeasesToSteal == 0)) { - numLeasesToSteal = 1; - } - numLeasesToSteal = Math.min(numLeasesToSteal, maxLeasesToStealAtOneTime); - } - - if (numLeasesToSteal <= 0) { - if (log.isDebugEnabled()) { - log.debug(String.format("Worker %s not stealing from most loaded worker %s. He has %d," - + " target is %d, and I need %d", - workerIdentifier, - mostLoadedWorker.getKey(), - mostLoadedWorker.getValue(), - target, - needed)); - } - return leasesToSteal; - } else { - if (log.isDebugEnabled()) { - log.debug("Worker {} will attempt to steal {} leases from most loaded worker {}. 
" - + " He has {} leases, target is {}, I need {}, maxLeasesToSteatAtOneTime is {}.", - workerIdentifier, - numLeasesToSteal, - mostLoadedWorker.getKey(), - mostLoadedWorker.getValue(), - target, - needed, - maxLeasesToStealAtOneTime); - } - } - - String mostLoadedWorkerIdentifier = mostLoadedWorker.getKey(); - List candidates = new ArrayList<>(); - // Collect leases belonging to that worker - for (Lease lease : allLeases.values()) { - if (mostLoadedWorkerIdentifier.equals(lease.leaseOwner())) { - candidates.add(lease); - } - } - - // Return random ones - Collections.shuffle(candidates); - int toIndex = Math.min(candidates.size(), numLeasesToSteal); - leasesToSteal.addAll(candidates.subList(0, toIndex)); - - return leasesToSteal; - } - - /** - * Count leases by host. Always includes myself, but otherwise only includes hosts that are currently holding - * leases. - * - * @param expiredLeases list of leases that are currently expired - * @return map of workerIdentifier to lease count - */ - private Map computeLeaseCounts(List expiredLeases) { - Map leaseCounts = new HashMap<>(); - - // Compute the number of leases per worker by looking through allLeases and ignoring leases that have expired. - for (Lease lease : allLeases.values()) { - if (!expiredLeases.contains(lease)) { - String leaseOwner = lease.leaseOwner(); - Integer oldCount = leaseCounts.get(leaseOwner); - if (oldCount == null) { - leaseCounts.put(leaseOwner, 1); - } else { - leaseCounts.put(leaseOwner, oldCount + 1); - } - } - } - - // If I have no leases, I wasn't represented in leaseCounts. Let's fix that. 
- Integer myCount = leaseCounts.get(workerIdentifier); - if (myCount == null) { - myCount = 0; - leaseCounts.put(workerIdentifier, myCount); - } - - return leaseCounts; - } - - /** - * {@inheritDoc} - */ - @Override - public String getWorkerIdentifier() { - return workerIdentifier; - } - - /** - * {@inheritDoc} - */ - @Override - public synchronized List allLeases() { - return new ArrayList<>(allLeases.values()); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableConstants.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableConstants.java deleted file mode 100644 index 3848c2f0..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableConstants.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.leases.dynamodb; - -import lombok.AccessLevel; -import lombok.NoArgsConstructor; - -/** - * This class is just a holder for initial lease table IOPs units. This class will be removed in a future release. 
- */ -@Deprecated -@NoArgsConstructor(access = AccessLevel.PRIVATE) -public class TableConstants { - public static final long DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY = 10L; - public static final long DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY = 10L; -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallback.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallback.java deleted file mode 100644 index 088ba924..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallback.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.leases.dynamodb; - -/** - * Callback interface for interacting with the DynamoDB lease table post creation. - */ -@FunctionalInterface -public interface TableCreatorCallback { - /** - * NoOp implemetation for TableCreatorCallback - */ - TableCreatorCallback NOOP_TABLE_CREATOR_CALLBACK = (TableCreatorCallbackInput tableCreatorCallbackInput) -> { - // Do nothing - }; - - /** - * Actions needed to be performed on the DynamoDB lease table once the table has been created and is in the ACTIVE - * status. Will not be called if the table previously exists. 
- * - * @param tableCreatorCallbackInput - * Input object for table creator - */ - void performAction(TableCreatorCallbackInput tableCreatorCallbackInput); - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallbackInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallbackInput.java deleted file mode 100644 index 4c4d6f12..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallbackInput.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.leases.dynamodb; - -import lombok.Builder; -import lombok.Data; -import lombok.EqualsAndHashCode; -import lombok.NonNull; -import lombok.ToString; -import lombok.experimental.Accessors; -import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; - -/** - * - */ -@Builder(toBuilder = true) -@Data -@Accessors(fluent = true) -@ToString -@EqualsAndHashCode -public class TableCreatorCallbackInput { - @NonNull - private final DynamoDbAsyncClient dynamoDbClient; - @NonNull - private final String tableName; -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/DependencyException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/DependencyException.java deleted file mode 100644 index efaa1ad9..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/DependencyException.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases.exceptions; - -/** - * Indicates that a lease operation has failed because a dependency of the leasing system has failed. This will happen - * if DynamoDB throws an InternalServerException or a generic AmazonClientException (the specific subclasses of - * AmazonClientException are all handled more gracefully). 
- */ -public class DependencyException extends LeasingException { - - private static final long serialVersionUID = 1L; - - public DependencyException(Throwable e) { - super(e); - } - - public DependencyException(String message, Throwable e) { - super(message, e); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/InvalidStateException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/InvalidStateException.java deleted file mode 100644 index 0929fee2..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/InvalidStateException.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases.exceptions; - -/** - * Indicates that a lease operation has failed because DynamoDB is an invalid state. The most common example is failing - * to create the DynamoDB table before doing any lease operations. 
- */ -public class InvalidStateException extends LeasingException { - - private static final long serialVersionUID = 1L; - - public InvalidStateException(Throwable e) { - super(e); - } - - public InvalidStateException(String message, Throwable e) { - super(message, e); - } - - public InvalidStateException(String message) { - super(message); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasingException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasingException.java deleted file mode 100644 index a59e69c1..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasingException.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases.exceptions; - -/** - * Top-level exception type for all exceptions thrown by the leasing code. 
- */ -public class LeasingException extends Exception { - - public LeasingException(Throwable e) { - super(e); - } - - public LeasingException(String message, Throwable e) { - super(message, e); - } - - public LeasingException(String message) { - super(message); - } - - private static final long serialVersionUID = 1L; - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ProvisionedThroughputException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ProvisionedThroughputException.java deleted file mode 100644 index 9409d3db..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ProvisionedThroughputException.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases.exceptions; - -/** - * Indicates that a lease operation has failed due to lack of provisioned throughput for a DynamoDB table. 
- */ -public class ProvisionedThroughputException extends LeasingException { - - private static final long serialVersionUID = 1L; - - public ProvisionedThroughputException(Throwable e) { - super(e); - } - - public ProvisionedThroughputException(String message, Throwable e) { - super(message, e); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ShardSyncer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ShardSyncer.java deleted file mode 100644 index 4e9245f6..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ShardSyncer.java +++ /dev/null @@ -1,46 +0,0 @@ -package software.amazon.kinesis.leases.exceptions; - -import lombok.NonNull; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.exceptions.internal.KinesisClientLibIOException; -import software.amazon.kinesis.leases.HierarchicalShardSyncer; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.ShardDetector; -import software.amazon.kinesis.metrics.MetricsScope; - -/** - * Helper class to sync leases with shards of the Kinesis stream. - * It will create new leases/activities when it discovers new Kinesis shards (bootstrap/resharding). - * It deletes leases for shards that have been trimmed from Kinesis, or if we've completed processing it - * and begun processing it's child shards. - * - *

NOTE: This class is deprecated and will be removed in a future release.

- */ -@Deprecated -public class ShardSyncer { - private static final HierarchicalShardSyncer HIERARCHICAL_SHARD_SYNCER = new HierarchicalShardSyncer(); - - /** - *

NOTE: This method is deprecated and will be removed in a future release.

- * - * @param shardDetector - * @param leaseRefresher - * @param initialPosition - * @param cleanupLeasesOfCompletedShards - * @param ignoreUnexpectedChildShards - * @param scope - * @throws DependencyException - * @throws InvalidStateException - * @throws ProvisionedThroughputException - * @throws KinesisClientLibIOException - */ - @Deprecated - public static synchronized void checkAndCreateLeasesForNewShards(@NonNull final ShardDetector shardDetector, - final LeaseRefresher leaseRefresher, final InitialPositionInStreamExtended initialPosition, - final boolean cleanupLeasesOfCompletedShards, final boolean ignoreUnexpectedChildShards, - final MetricsScope scope) throws DependencyException, InvalidStateException, ProvisionedThroughputException, - KinesisClientLibIOException { - HIERARCHICAL_SHARD_SYNCER.checkAndCreateLeaseForNewShards(shardDetector, leaseRefresher, initialPosition, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, scope); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTask.java deleted file mode 100644 index e14b111a..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTask.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.lifecycle; - -import lombok.AccessLevel; -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.exceptions.internal.BlockedOnParentShardException; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Task to block until processing of all data records in the parent shard(s) is completed. - * We check if we have checkpoint(s) for the parent shard(s). - * If a checkpoint for a parent shard is found, we poll and wait until the checkpoint value is SHARD_END - * (application has checkpointed after processing all records in the shard). - * If we don't find a checkpoint for the parent shard(s), we assume they have been trimmed and directly - * proceed with processing data from the shard. - */ -@RequiredArgsConstructor(access = AccessLevel.PACKAGE) -@Slf4j -@KinesisClientInternalApi -// TODO: Check for non null values -public class BlockOnParentShardTask implements ConsumerTask { - @NonNull - private final ShardInfo shardInfo; - private final LeaseRefresher leaseRefresher; - // Sleep for this duration if the parent shards have not completed processing, or we encounter an exception. 
- private final long parentShardPollIntervalMillis; - - private final TaskType taskType = TaskType.BLOCK_ON_PARENT_SHARDS; - - /* - * (non-Javadoc) - * - * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#call() - */ - @Override - public TaskResult call() { - Exception exception = null; - - try { - boolean blockedOnParentShard = false; - for (String shardId : shardInfo.parentShardIds()) { - Lease lease = leaseRefresher.getLease(shardId); - if (lease != null) { - ExtendedSequenceNumber checkpoint = lease.checkpoint(); - if ((checkpoint == null) || (!checkpoint.equals(ExtendedSequenceNumber.SHARD_END))) { - log.debug("Shard {} is not yet done. Its current checkpoint is {}", shardId, checkpoint); - blockedOnParentShard = true; - exception = new BlockedOnParentShardException("Parent shard not yet done"); - break; - } else { - log.debug("Shard {} has been completely processed.", shardId); - } - } else { - log.info("No lease found for shard {}. Not blocking on completion of this shard.", shardId); - } - } - - if (!blockedOnParentShard) { - log.info("No need to block on parents {} of shard {}", shardInfo.parentShardIds(), shardInfo.shardId()); - return new TaskResult(null); - } - } catch (Exception e) { - log.error("Caught exception when checking for parent shard checkpoint", e); - exception = e; - } - try { - Thread.sleep(parentShardPollIntervalMillis); - } catch (InterruptedException e) { - log.error("Sleep interrupted when waiting on parent shard(s) of {}", shardInfo.shardId(), e); - } - - return new TaskResult(exception); - } - - /* - * (non-Javadoc) - * - * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#taskType() - */ - @Override - public TaskType taskType() { - return taskType; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerState.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerState.java deleted file mode 100644 index 
cf246b28..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerState.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.lifecycle; - -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; - -/** - * Represents a the current state of the consumer. This handles the creation of tasks for the consumer, and what to - * do when a transition occurs. - * - */ -interface ConsumerState { - /** - * Creates a new task for this state using the passed in consumer to build the task. If there is no task - * required for this state it may return a null value. {@link ConsumerState}'s are allowed to modify the - * consumer during the execution of this method. - * - * @param consumerArgument - * configuration specific to the task being created - * @param consumer - * the consumer to use build the task, or execute state. - * @param input - * the process input received, this may be null if it's a control message - * @return a valid task for this state or null if there is no task required. - */ - ConsumerTask createTask(ShardConsumerArgument consumerArgument, ShardConsumer consumer, ProcessRecordsInput input); - - /** - * Provides the next state of the consumer upon success of the task return by - * {@link ConsumerState#createTask(ShardConsumerArgument, ShardConsumer, ProcessRecordsInput)}. 
- * - * @return the next state that the consumer should transition to, this may be the same object as the current - * state. - */ - ConsumerState successTransition(); - - /** - * Provides the next state of the consumer if the task failed. This defaults to no state change. - * - * @return the state to change to upon a task failure - */ - default ConsumerState failureTransition() { - return this; - } - - /** - * Provides the next state of the consumer when a shutdown has been requested. The returned state is dependent - * on the current state, and the shutdown reason. - * - * @param shutdownReason - * the reason that a shutdown was requested - * @return the next state that the consumer should transition to, this may be the same object as the current - * state. - */ - ConsumerState shutdownTransition(ShutdownReason shutdownReason); - - /** - * The type of task that {@link ConsumerState#createTask(ShardConsumerArgument, ShardConsumer, ProcessRecordsInput)} - * would return. This is always a valid state - * even if createTask would return a null value. - * - * @return the type of task that this state represents. - */ - TaskType taskType(); - - /** - * An enumeration represent the type of this state. Different consumer states may return the same - * {@link ConsumerStates.ShardConsumerState}. - * - * @return the type of consumer state this represents. - */ - ConsumerStates.ShardConsumerState state(); - - boolean isTerminal(); - - /** - * Whether this state requires data to be available before the task can be created - * - * @return true if the task requires data to be available before creation, false otherwise - */ - default boolean requiresDataAvailability() { - return false; - } - - /** - * Indicates whether a state requires an external event to re-awaken for processing. - * - * @return true if the state is some external event to restart processing, false if events can be immediately - * dispatched. 
- */ - default boolean requiresAwake() { - return false; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/InitializeTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/InitializeTask.java deleted file mode 100644 index 4d151574..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/InitializeTask.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.lifecycle; - -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.checkpoint.Checkpoint; -import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.MetricsUtil; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Task for initializing shard position and invoking the ShardRecordProcessor initialize() API. 
- */ -@RequiredArgsConstructor -@Slf4j -@KinesisClientInternalApi -public class InitializeTask implements ConsumerTask { - private static final String INITIALIZE_TASK_OPERATION = "InitializeTask"; - private static final String RECORD_PROCESSOR_INITIALIZE_METRIC = "RecordProcessor.initialize"; - - @NonNull - private final ShardInfo shardInfo; - @NonNull - private final ShardRecordProcessor shardRecordProcessor; - @NonNull - private final Checkpointer checkpoint; - @NonNull - private final ShardRecordProcessorCheckpointer recordProcessorCheckpointer; - @NonNull - private final InitialPositionInStreamExtended initialPositionInStream; - @NonNull - private final RecordsPublisher cache; - - // Back off for this interval if we encounter a problem (exception) - private final long backoffTimeMillis; - @NonNull - private final MetricsFactory metricsFactory; - - private final TaskType taskType = TaskType.INITIALIZE; - - /* - * Initializes the data fetcher (position in shard) and invokes the ShardRecordProcessor initialize() API. 
- * (non-Javadoc) - * - * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#call() - */ - @Override - public TaskResult call() { - boolean applicationException = false; - Exception exception = null; - - try { - log.debug("Initializing ShardId {}", shardInfo); - Checkpoint initialCheckpointObject = checkpoint.getCheckpointObject(shardInfo.shardId()); - ExtendedSequenceNumber initialCheckpoint = initialCheckpointObject.checkpoint(); - log.debug("[{}]: Checkpoint: {} -- Initial Position: {}", shardInfo.shardId(), initialCheckpoint, - initialPositionInStream); - - cache.start(initialCheckpoint, initialPositionInStream); - - recordProcessorCheckpointer.largestPermittedCheckpointValue(initialCheckpoint); - recordProcessorCheckpointer.setInitialCheckpointValue(initialCheckpoint); - - log.debug("Calling the record processor initialize()."); - final InitializationInput initializationInput = InitializationInput.builder() - .shardId(shardInfo.shardId()) - .extendedSequenceNumber(initialCheckpoint) - .pendingCheckpointSequenceNumber(initialCheckpointObject.pendingCheckpoint()) - .build(); - - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, - INITIALIZE_TASK_OPERATION); - - final long startTime = System.currentTimeMillis(); - try { - shardRecordProcessor.initialize(initializationInput); - log.debug("Record processor initialize() completed."); - } catch (Exception e) { - applicationException = true; - throw e; - } finally { - MetricsUtil.addLatency(scope, RECORD_PROCESSOR_INITIALIZE_METRIC, startTime, MetricsLevel.SUMMARY); - MetricsUtil.endScope(scope); - } - - return new TaskResult(null); - } catch (Exception e) { - if (applicationException) { - log.error("Application initialize() threw exception: ", e); - } else { - log.error("Caught exception: ", e); - } - exception = e; - // backoff if we encounter an exception. 
- try { - Thread.sleep(this.backoffTimeMillis); - } catch (InterruptedException ie) { - log.debug("Interrupted sleep", ie); - } - } - - return new TaskResult(exception); - } - - /* - * (non-Javadoc) - * - * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#taskType() - */ - @Override - public TaskType taskType() { - return taskType; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/LifecycleConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/LifecycleConfig.java deleted file mode 100644 index b04d75ce..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/LifecycleConfig.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.lifecycle; - -import java.util.Optional; - -import lombok.Data; -import lombok.experimental.Accessors; -import software.amazon.kinesis.retrieval.AggregatorUtil; - -/** - * Used by the KCL to configure the lifecycle. - */ -@Data -@Accessors(fluent = true) -public class LifecycleConfig { - /** - * Logs warn message if as task is held in a task for more than the set time. - * - *

Default value: {@link Optional#empty()}

- */ - private Optional logWarningForTaskAfterMillis = Optional.empty(); - - /** - * Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures). - * - *

Default value: 500L

- */ - private long taskBackoffTimeMillis = 500L; - - /** - * AggregatorUtil is responsible for deaggregating KPL records. - */ - private AggregatorUtil aggregatorUtil = new AggregatorUtil(); - - /** - * TaskExecutionListener to be used to handle events during task execution lifecycle for a shard. - * - *

Default value: {@link NoOpTaskExecutionListener}

- */ - private TaskExecutionListener taskExecutionListener = new NoOpTaskExecutionListener(); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NoOpTaskExecutionListener.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NoOpTaskExecutionListener.java deleted file mode 100644 index 95d225fa..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NoOpTaskExecutionListener.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle; - -import software.amazon.kinesis.lifecycle.events.TaskExecutionListenerInput; - -/** - * NoOp implementation of {@link TaskExecutionListener} interface that takes no action on task execution. - */ -public class NoOpTaskExecutionListener implements TaskExecutionListener { - @Override - public void beforeTaskExecution(TaskExecutionListenerInput input) { - } - - @Override - public void afterTaskExecution(TaskExecutionListenerInput input) { - } -} - diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ProcessTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ProcessTask.java deleted file mode 100644 index f9cdd2ac..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ProcessTask.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle; - -import java.util.List; -import java.util.ListIterator; - -import lombok.NonNull; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; -import software.amazon.awssdk.services.kinesis.model.Shard; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; -import software.amazon.kinesis.leases.ShardDetector; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.metrics.MetricsUtil; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.retrieval.AggregatorUtil; -import software.amazon.kinesis.retrieval.KinesisClientRecord; -import software.amazon.kinesis.retrieval.ThrottlingReporter; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Task for fetching data records and invoking processRecords() on the record processor instance. 
- */ -@Slf4j -@KinesisClientInternalApi -public class ProcessTask implements ConsumerTask { - private static final String PROCESS_TASK_OPERATION = "ProcessTask"; - private static final String DATA_BYTES_PROCESSED_METRIC = "DataBytesProcessed"; - private static final String RECORDS_PROCESSED_METRIC = "RecordsProcessed"; - private static final String RECORD_PROCESSOR_PROCESS_RECORDS_METRIC = "RecordProcessor.processRecords"; - private static final String MILLIS_BEHIND_LATEST_METRIC = "MillisBehindLatest"; - - private final ShardInfo shardInfo; - private final ShardRecordProcessor shardRecordProcessor; - private final ShardRecordProcessorCheckpointer recordProcessorCheckpointer; - private final TaskType taskType = TaskType.PROCESS; - private final long backoffTimeMillis; - private final Shard shard; - private final ThrottlingReporter throttlingReporter; - private final boolean shouldCallProcessRecordsEvenForEmptyRecordList; - private final long idleTimeInMilliseconds; - private final ProcessRecordsInput processRecordsInput; - private final MetricsFactory metricsFactory; - private final AggregatorUtil aggregatorUtil; - - public ProcessTask(@NonNull ShardInfo shardInfo, - @NonNull ShardRecordProcessor shardRecordProcessor, - @NonNull ShardRecordProcessorCheckpointer recordProcessorCheckpointer, - long backoffTimeMillis, - boolean skipShardSyncAtWorkerInitializationIfLeasesExist, - ShardDetector shardDetector, - @NonNull ThrottlingReporter throttlingReporter, - ProcessRecordsInput processRecordsInput, - boolean shouldCallProcessRecordsEvenForEmptyRecordList, - long idleTimeInMilliseconds, - @NonNull AggregatorUtil aggregatorUtil, - @NonNull MetricsFactory metricsFactory) { - this.shardInfo = shardInfo; - this.shardRecordProcessor = shardRecordProcessor; - this.recordProcessorCheckpointer = recordProcessorCheckpointer; - this.backoffTimeMillis = backoffTimeMillis; - this.throttlingReporter = throttlingReporter; - this.processRecordsInput = processRecordsInput; - 
this.shouldCallProcessRecordsEvenForEmptyRecordList = shouldCallProcessRecordsEvenForEmptyRecordList; - this.idleTimeInMilliseconds = idleTimeInMilliseconds; - this.metricsFactory = metricsFactory; - - if (!skipShardSyncAtWorkerInitializationIfLeasesExist) { - this.shard = shardDetector.shard(shardInfo.shardId()); - } else { - this.shard = null; - } - - if (this.shard == null && !skipShardSyncAtWorkerInitializationIfLeasesExist) { - log.warn("Cannot get the shard for this ProcessTask, so duplicate KPL user records " - + "in the event of resharding will not be dropped during deaggregation of Amazon " - + "Kinesis records."); - } - this.aggregatorUtil = aggregatorUtil; - - this.recordProcessorCheckpointer.checkpointer().operation(PROCESS_TASK_OPERATION); - } - - /* - * (non-Javadoc) - * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#call() - */ - @Override - public TaskResult call() { - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, PROCESS_TASK_OPERATION); - MetricsUtil.addShardId(scope, shardInfo.shardId()); - long startTimeMillis = System.currentTimeMillis(); - boolean success = false; - try { - scope.addData(RECORDS_PROCESSED_METRIC, 0, StandardUnit.COUNT, MetricsLevel.SUMMARY); - scope.addData(DATA_BYTES_PROCESSED_METRIC, 0, StandardUnit.BYTES, MetricsLevel.SUMMARY); - Exception exception = null; - - try { - if (processRecordsInput.millisBehindLatest() != null) { - scope.addData(MILLIS_BEHIND_LATEST_METRIC, processRecordsInput.millisBehindLatest(), - StandardUnit.MILLISECONDS, MetricsLevel.SUMMARY); - } - - if (processRecordsInput.isAtShardEnd() && processRecordsInput.records().isEmpty()) { - log.info("Reached end of shard {} and have no records to process", shardInfo.shardId()); - return new TaskResult(null, true); - } - - throttlingReporter.success(); - List records = deaggregateAnyKplRecords(processRecordsInput.records()); - - - if (!records.isEmpty()) { - scope.addData(RECORDS_PROCESSED_METRIC, 
records.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY); - } - - recordProcessorCheckpointer.largestPermittedCheckpointValue(filterAndGetMaxExtendedSequenceNumber( - scope, records, recordProcessorCheckpointer.lastCheckpointValue(), - recordProcessorCheckpointer.largestPermittedCheckpointValue())); - - if (shouldCallProcessRecords(records)) { - callProcessRecords(processRecordsInput, records); - } - success = true; - } catch (RuntimeException e) { - log.error("ShardId {}: Caught exception: ", shardInfo.shardId(), e); - exception = e; - backoff(); - } - - if (processRecordsInput.isAtShardEnd()) { - log.info("Reached end of shard {}, and processed {} records", shardInfo.shardId(), processRecordsInput.records().size()); - return new TaskResult(null, true); - } - return new TaskResult(exception); - } finally { - MetricsUtil.addSuccessAndLatency(scope, success, startTimeMillis, MetricsLevel.SUMMARY); - MetricsUtil.endScope(scope); - } - } - - private List deaggregateAnyKplRecords(List records) { - if (shard == null) { - return aggregatorUtil.deaggregate(records); - } else { - return aggregatorUtil.deaggregate(records, shard.hashKeyRange().startingHashKey(), shard.hashKeyRange().endingHashKey()); - } - } - - /** - * Sleeps for the configured backoff period. This is usually only called when an exception occurs. - */ - private void backoff() { - // backoff if we encounter an exception. - try { - Thread.sleep(this.backoffTimeMillis); - } catch (InterruptedException ie) { - log.debug("{}: Sleep was interrupted", shardInfo.shardId(), ie); - } - } - - /** - * Dispatches a batch of records to the record processor, and handles any fallout from that. - * - * @param input - * the result of the last call to Kinesis - * @param records - * the records to be dispatched. It's possible the records have been adjusted by KPL deaggregation. 
- */ - private void callProcessRecords(ProcessRecordsInput input, List records) { - log.debug("Calling application processRecords() with {} records from {}", records.size(), - shardInfo.shardId()); - - final ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder().records(records).cacheExitTime(input.cacheExitTime()).cacheEntryTime(input.cacheEntryTime()) - .checkpointer(recordProcessorCheckpointer).millisBehindLatest(input.millisBehindLatest()).build(); - - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, PROCESS_TASK_OPERATION); - MetricsUtil.addShardId(scope, shardInfo.shardId()); - final long startTime = System.currentTimeMillis(); - try { - shardRecordProcessor.processRecords(processRecordsInput); - } catch (Exception e) { - log.error("ShardId {}: Application processRecords() threw an exception when processing shard ", - shardInfo.shardId(), e); - log.error("ShardId {}: Skipping over the following data records: {}", shardInfo.shardId(), records); - } finally { - MetricsUtil.addLatency(scope, RECORD_PROCESSOR_PROCESS_RECORDS_METRIC, startTime, MetricsLevel.SUMMARY); - MetricsUtil.endScope(scope); - } - } - - /** - * Whether we should call process records or not - * - * @param records - * the records returned from the call to Kinesis, and/or deaggregation - * @return true if the set of records should be dispatched to the record process, false if they should not. 
- */ - private boolean shouldCallProcessRecords(List records) { - return (!records.isEmpty()) || shouldCallProcessRecordsEvenForEmptyRecordList; - } - - /** - * Emits metrics, and sleeps if there are no records available - * - * @param startTimeMillis - * the time when the task started - */ - private void handleNoRecords(long startTimeMillis) { - log.debug("Kinesis didn't return any records for shard {}", shardInfo.shardId()); - - long sleepTimeMillis = idleTimeInMilliseconds - (System.currentTimeMillis() - startTimeMillis); - if (sleepTimeMillis > 0) { - sleepTimeMillis = Math.max(sleepTimeMillis, idleTimeInMilliseconds); - try { - log.debug("Sleeping for {} ms since there were no new records in shard {}", sleepTimeMillis, - shardInfo.shardId()); - Thread.sleep(sleepTimeMillis); - } catch (InterruptedException e) { - log.debug("ShardId {}: Sleep was interrupted", shardInfo.shardId()); - } - } - } - - @Override - public TaskType taskType() { - return taskType; - } - - /** - * Scans a list of records to filter out records up to and including the most recent checkpoint value and to get the - * greatest extended sequence number from the retained records. Also emits metrics about the records. 
- * - * @param scope - * metrics scope to emit metrics into - * @param records - * list of records to scan and change in-place as needed - * @param lastCheckpointValue - * the most recent checkpoint value - * @param lastLargestPermittedCheckpointValue - * previous largest permitted checkpoint value - * @return the largest extended sequence number among the retained records - */ - private ExtendedSequenceNumber filterAndGetMaxExtendedSequenceNumber(final MetricsScope scope, - final List records, - final ExtendedSequenceNumber lastCheckpointValue, - final ExtendedSequenceNumber lastLargestPermittedCheckpointValue) { - ExtendedSequenceNumber largestExtendedSequenceNumber = lastLargestPermittedCheckpointValue; - ListIterator recordIterator = records.listIterator(); - while (recordIterator.hasNext()) { - KinesisClientRecord record = recordIterator.next(); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(record.sequenceNumber(), - record.subSequenceNumber()); - - if (extendedSequenceNumber.compareTo(lastCheckpointValue) <= 0) { - recordIterator.remove(); - log.debug("removing record with ESN {} because the ESN is <= checkpoint ({})", extendedSequenceNumber, - lastCheckpointValue); - continue; - } - - if (largestExtendedSequenceNumber == null - || largestExtendedSequenceNumber.compareTo(extendedSequenceNumber) < 0) { - largestExtendedSequenceNumber = extendedSequenceNumber; - } - - scope.addData(DATA_BYTES_PROCESSED_METRIC, record.data().limit(), StandardUnit.BYTES, - MetricsLevel.SUMMARY); - } - return largestExtendedSequenceNumber; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumer.java deleted file mode 100644 index f386d48c..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumer.java +++ /dev/null @@ -1,535 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle; - -import java.time.Duration; -import java.time.Instant; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; - -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -import com.google.common.annotations.VisibleForTesting; - -import io.reactivex.Flowable; -import io.reactivex.Scheduler; -import io.reactivex.schedulers.Schedulers; -import lombok.AccessLevel; -import lombok.Getter; -import lombok.NonNull; -import lombok.experimental.Accessors; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.exceptions.internal.BlockedOnParentShardException; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.events.TaskExecutionListenerInput; -import software.amazon.kinesis.metrics.MetricsCollectingTaskDecorator; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RetryableRetrievalException; - -/** - * Responsible for consuming data records of a (specified) shard. 
- * The instance should be shutdown when we lose the primary responsibility for a shard. - * A new instance should be created if the primary responsibility is reassigned back to this process. - */ -@Getter(AccessLevel.PACKAGE) -@Accessors(fluent = true) -@Slf4j -@KinesisClientInternalApi -public class ShardConsumer { - - public static final int MAX_TIME_BETWEEN_REQUEST_RESPONSE = 35000; - private final RecordsPublisher recordsPublisher; - private final ExecutorService executorService; - private final Scheduler scheduler; - private final ShardInfo shardInfo; - private final ShardConsumerArgument shardConsumerArgument; - @NonNull - private final Optional logWarningForTaskAfterMillis; - private final Function taskMetricsDecorator; - private final int bufferSize; - private final TaskExecutionListener taskExecutionListener; - - private ConsumerTask currentTask; - private TaskOutcome taskOutcome; - - private final AtomicReference processFailure = new AtomicReference<>(null); - private final AtomicReference dispatchFailure = new AtomicReference<>(null); - - private CompletableFuture stateChangeFuture; - private boolean needsInitialization = true; - - private volatile Instant taskDispatchedAt; - private volatile boolean taskIsRunning = false; - - /* - * Tracks current state. It is only updated via the consumeStream/shutdown APIs. Therefore we don't do - * much coordination/synchronization to handle concurrent reads/updates. - */ - private ConsumerState currentState; - /* - * Used to track if we lost the primary responsibility. Once set to true, we will start shutting down. - * If we regain primary responsibility before shutdown is complete, Worker should create a new ShardConsumer object. 
- */ - @Getter(AccessLevel.PUBLIC) - private volatile ShutdownReason shutdownReason; - private volatile ShutdownNotification shutdownNotification; - - private final InternalSubscriber subscriber; - - public ShardConsumer(RecordsPublisher recordsPublisher, ExecutorService executorService, ShardInfo shardInfo, - Optional logWarningForTaskAfterMillis, ShardConsumerArgument shardConsumerArgument, - TaskExecutionListener taskExecutionListener) { - this(recordsPublisher, executorService, shardInfo, logWarningForTaskAfterMillis, shardConsumerArgument, - ConsumerStates.INITIAL_STATE, - ShardConsumer.metricsWrappingFunction(shardConsumerArgument.metricsFactory()), 8, taskExecutionListener); - } - - // - // TODO: Make bufferSize configurable - // - public ShardConsumer(RecordsPublisher recordsPublisher, ExecutorService executorService, ShardInfo shardInfo, - Optional logWarningForTaskAfterMillis, ShardConsumerArgument shardConsumerArgument, - ConsumerState initialState, Function taskMetricsDecorator, - int bufferSize, TaskExecutionListener taskExecutionListener) { - this.recordsPublisher = recordsPublisher; - this.executorService = executorService; - this.shardInfo = shardInfo; - this.shardConsumerArgument = shardConsumerArgument; - this.logWarningForTaskAfterMillis = logWarningForTaskAfterMillis; - this.taskExecutionListener = taskExecutionListener; - this.currentState = initialState; - this.taskMetricsDecorator = taskMetricsDecorator; - scheduler = Schedulers.from(executorService); - subscriber = new InternalSubscriber(); - this.bufferSize = bufferSize; - - if (this.shardInfo.isCompleted()) { - markForShutdown(ShutdownReason.SHARD_END); - } - } - - private void startSubscriptions() { - Flowable.fromPublisher(recordsPublisher).subscribeOn(scheduler).observeOn(scheduler, true, bufferSize) - .subscribe(subscriber); - } - - private final Object lockObject = new Object(); - private Instant lastRequestTime = null; - - private class InternalSubscriber implements Subscriber { - - 
private Subscription subscription; - private volatile Instant lastDataArrival; - - @Override - public void onSubscribe(Subscription s) { - subscription = s; - subscription.request(1); - } - - @Override - public void onNext(ProcessRecordsInput input) { - try { - synchronized (lockObject) { - lastRequestTime = null; - } - lastDataArrival = Instant.now(); - handleInput(input.toBuilder().cacheExitTime(Instant.now()).build(), subscription); - } catch (Throwable t) { - log.warn("{}: Caught exception from handleInput", shardInfo.shardId(), t); - dispatchFailure.set(t); - } finally { - subscription.request(1); - synchronized (lockObject) { - lastRequestTime = Instant.now(); - } - } - } - - @Override - public void onError(Throwable t) { - log.warn("{}: onError(). Cancelling subscription, and marking self as failed.", shardInfo.shardId(), t); - subscription.cancel(); - processFailure.set(t); - } - - @Override - public void onComplete() { - log.debug("{}: onComplete(): Received onComplete. Activity should be triggered externally", shardInfo.shardId()); - } - - public void cancel() { - if (subscription != null) { - subscription.cancel(); - } - } - } - - private synchronized void handleInput(ProcessRecordsInput input, Subscription subscription) { - if (isShutdownRequested()) { - subscription.cancel(); - return; - } - processData(input); - if (taskOutcome == TaskOutcome.END_OF_SHARD) { - markForShutdown(ShutdownReason.SHARD_END); - subscription.cancel(); - return; - } - subscription.request(1); - } - - public void executeLifecycle() { - if (isShutdown()) { - return; - } - if (stateChangeFuture != null && !stateChangeFuture.isDone()) { - return; - } - try { - if (isShutdownRequested()) { - stateChangeFuture = shutdownComplete(); - } else if (needsInitialization) { - if (stateChangeFuture != null) { - if (stateChangeFuture.get()) { - subscribe(); - needsInitialization = false; - } - } - stateChangeFuture = initializeComplete(); - } - - } catch (InterruptedException e) { - // - // 
Ignored should be handled by scheduler - // - } catch (ExecutionException e) { - throw new RuntimeException(e); - } - - if (ConsumerStates.ShardConsumerState.PROCESSING.equals(currentState.state())) { - Throwable t = healthCheck(); - if (t instanceof Error) { - throw (Error) t; - } - } - - } - - @VisibleForTesting - Throwable healthCheck() { - logNoDataRetrievedAfterTime(); - logLongRunningTask(); - Throwable failure = processFailure.get(); - if (!processFailure.compareAndSet(failure, null) && failure != null) { - log.error("{}: processFailure was updated while resetting, this shouldn't happen. " + - "Will retry on next health check", shardInfo.shardId()); - return null; - } - if (failure != null) { - String logMessage = String.format("%s: Failure occurred in retrieval. Restarting data requests", shardInfo.shardId()); - if (failure instanceof RetryableRetrievalException) { - log.debug(logMessage, failure.getCause()); - } else { - log.warn(logMessage, failure); - } - startSubscriptions(); - return failure; - } - Throwable expectedDispatchFailure = dispatchFailure.get(); - if (expectedDispatchFailure != null) { - if (!dispatchFailure.compareAndSet(expectedDispatchFailure, null)) { - log.info("{}: Unable to reset the dispatch failure, this can happen if the record processor is failing aggressively.", shardInfo.shardId()); - return null; - } - log.warn("Exception occurred while dispatching incoming data. The incoming data has been skipped", expectedDispatchFailure); - return expectedDispatchFailure; - } - synchronized (lockObject) { - if (lastRequestTime != null) { - Instant now = Instant.now(); - Duration timeSinceLastResponse = Duration.between(lastRequestTime, now); - if (timeSinceLastResponse.toMillis() > MAX_TIME_BETWEEN_REQUEST_RESPONSE) { - log.error( - "{}: Last request was dispatched at {}, but no response as of {} ({}). 
Cancelling subscription, and restarting.", - shardInfo.shardId(), lastRequestTime, now, timeSinceLastResponse); - if (subscriber != null) { - subscriber.cancel(); - } - // - // Set the last request time to now, we specifically don't null it out since we want it to trigger a - // restart if the subscription still doesn't start producing. - // - lastRequestTime = Instant.now(); - startSubscriptions(); - } - } - } - - return null; - } - - Duration taskRunningTime() { - if (taskDispatchedAt != null && taskIsRunning) { - return Duration.between(taskDispatchedAt, Instant.now()); - } - return null; - } - - String longRunningTaskMessage(Duration taken) { - if (taken != null) { - return String.format("Previous %s task still pending for shard %s since %s ago. ", currentTask.taskType(), - shardInfo.shardId(), taken); - } - return null; - } - - private void logNoDataRetrievedAfterTime() { - logWarningForTaskAfterMillis.ifPresent(value -> { - Instant lastDataArrival = subscriber.lastDataArrival; - if (lastDataArrival != null) { - Instant now = Instant.now(); - Duration timeSince = Duration.between(subscriber.lastDataArrival, now); - if (timeSince.toMillis() > value) { - log.warn("Last time data arrived: {} ({})", lastDataArrival, timeSince); - } - } - }); - } - - private void logLongRunningTask() { - Duration taken = taskRunningTime(); - - if (taken != null) { - String message = longRunningTaskMessage(taken); - if (log.isDebugEnabled()) { - log.debug("{} Not submitting new task.", message); - } - logWarningForTaskAfterMillis.ifPresent(value -> { - if (taken.toMillis() > value) { - log.warn(message); - } - }); - } - } - - @VisibleForTesting - void subscribe() { - startSubscriptions(); - } - - @VisibleForTesting - synchronized CompletableFuture initializeComplete() { - if (taskOutcome != null) { - updateState(taskOutcome); - } - if (currentState.state() == ConsumerStates.ShardConsumerState.PROCESSING) { - return CompletableFuture.completedFuture(true); - } - return 
CompletableFuture.supplyAsync(() -> { - if (isShutdownRequested()) { - throw new IllegalStateException("Shutdown requested while initializing"); - } - executeTask(null); - if (isShutdownRequested()) { - throw new IllegalStateException("Shutdown requested while initializing"); - } - return false; - }, executorService); - } - - @VisibleForTesting - synchronized CompletableFuture shutdownComplete() { - if (taskOutcome != null) { - updateState(taskOutcome); - } else { - // - // ShardConsumer has been asked to shutdown before the first task even had a chance to run. - // In this case generate a successful task outcome, and allow the shutdown to continue. This should only - // happen if the lease was lost before the initial state had a chance to run. - // - updateState(TaskOutcome.SUCCESSFUL); - } - if (isShutdown()) { - return CompletableFuture.completedFuture(true); - } - return CompletableFuture.supplyAsync(() -> { - executeTask(null); - return false; - }); - } - - private synchronized void processData(ProcessRecordsInput input) { - executeTask(input); - } - - private synchronized void executeTask(ProcessRecordsInput input) { - TaskExecutionListenerInput taskExecutionListenerInput = TaskExecutionListenerInput.builder() - .shardInfo(shardInfo) - .taskType(currentState.taskType()) - .build(); - taskExecutionListener.beforeTaskExecution(taskExecutionListenerInput); - ConsumerTask task = currentState.createTask(shardConsumerArgument, ShardConsumer.this, input); - if (task != null) { - taskDispatchedAt = Instant.now(); - currentTask = task; - taskIsRunning = true; - TaskResult result; - try { - result = task.call(); - } finally { - taskIsRunning = false; - } - taskOutcome = resultToOutcome(result); - taskExecutionListenerInput = taskExecutionListenerInput.toBuilder().taskOutcome(taskOutcome).build(); - } - taskExecutionListener.afterTaskExecution(taskExecutionListenerInput); - } - - private TaskOutcome resultToOutcome(TaskResult result) { - if (result.getException() == 
null) { - if (result.isShardEndReached()) { - return TaskOutcome.END_OF_SHARD; - } - return TaskOutcome.SUCCESSFUL; - } - logTaskException(result); - return TaskOutcome.FAILURE; - } - - private synchronized void updateState(TaskOutcome outcome) { - ConsumerState nextState = currentState; - switch (outcome) { - case SUCCESSFUL: - nextState = currentState.successTransition(); - break; - case END_OF_SHARD: - markForShutdown(ShutdownReason.SHARD_END); - break; - case FAILURE: - nextState = currentState.failureTransition(); - break; - default: - log.error("No handler for outcome of {}", outcome.name()); - nextState = currentState.failureTransition(); - break; - } - - nextState = handleShutdownTransition(outcome, nextState); - - currentState = nextState; - } - - private ConsumerState handleShutdownTransition(TaskOutcome outcome, ConsumerState nextState) { - if (isShutdownRequested() && outcome != TaskOutcome.FAILURE) { - return currentState.shutdownTransition(shutdownReason); - } - return nextState; - } - - private void logTaskException(TaskResult taskResult) { - if (log.isDebugEnabled()) { - Exception taskException = taskResult.getException(); - if (taskException instanceof BlockedOnParentShardException) { - // No need to log the stack trace for this exception (it is very specific). - log.debug("Shard {} is blocked on completion of parent shard.", shardInfo.shardId()); - } else { - log.debug("Caught exception running {} task: ", currentTask.taskType(), taskResult.getException()); - } - } - } - - /** - * Requests the shutdown of the this ShardConsumer. This should give the record processor a chance to checkpoint - * before being shutdown. - * - * @param shutdownNotification - * used to signal that the record processor has been given the chance to shutdown. 
- */ - public void gracefulShutdown(ShutdownNotification shutdownNotification) { - if (subscriber != null) { - subscriber.cancel(); - } - this.shutdownNotification = shutdownNotification; - markForShutdown(ShutdownReason.REQUESTED); - } - - /** - * Shutdown this ShardConsumer (including invoking the ShardRecordProcessor shutdown API). - * This is called by Worker when it loses responsibility for a shard. - * - * @return true if shutdown is complete (false if shutdown is still in progress) - */ - public boolean leaseLost() { - log.debug("Shutdown({}): Lease lost triggered.", shardInfo.shardId()); - if (subscriber != null) { - subscriber.cancel(); - log.debug("Shutdown({}): Subscriber cancelled.", shardInfo.shardId()); - } - markForShutdown(ShutdownReason.LEASE_LOST); - return isShutdown(); - } - - synchronized void markForShutdown(ShutdownReason reason) { - // - // ShutdownReason.LEASE_LOST takes precedence over SHARD_END - // (we won't be able to save checkpoint at end of shard) - // - if (shutdownReason == null || shutdownReason.canTransitionTo(reason)) { - shutdownReason = reason; - } - } - - /** - * Used (by Worker) to check if this ShardConsumer instance has been shutdown - * ShardRecordProcessor shutdown() has been invoked, as appropriate. 
- * - * @return true if shutdown is complete - */ - public boolean isShutdown() { - return currentState.isTerminal(); - } - - @VisibleForTesting - public boolean isShutdownRequested() { - return shutdownReason != null; - } - - /** - * Default task wrapping function for metrics - * - * @param metricsFactory - * the factory used for reporting metrics - * @return a function that will wrap the task with a metrics reporter - */ - private static Function metricsWrappingFunction(MetricsFactory metricsFactory) { - return (task) -> { - if (task == null) { - return null; - } else { - return new MetricsCollectingTaskDecorator(task, metricsFactory); - } - }; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerArgument.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerArgument.java deleted file mode 100644 index d5ec57fe..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerArgument.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.lifecycle; - -import lombok.Data; -import lombok.NonNull; -import lombok.experimental.Accessors; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.ShardDetector; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.leases.HierarchicalShardSyncer; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.retrieval.AggregatorUtil; -import software.amazon.kinesis.retrieval.RecordsPublisher; - -import java.util.concurrent.ExecutorService; - -@Data -@Accessors(fluent = true) -@KinesisClientInternalApi -public class ShardConsumerArgument { - @NonNull - private final ShardInfo shardInfo; - @NonNull - private final String streamName; - @NonNull - private final LeaseRefresher leaseRefresher; - @NonNull - private final ExecutorService executorService; - @NonNull - private final RecordsPublisher recordsPublisher; - @NonNull - private final ShardRecordProcessor shardRecordProcessor; - @NonNull - private final Checkpointer checkpoint; - @NonNull - private final ShardRecordProcessorCheckpointer recordProcessorCheckpointer; - private final long parentShardPollIntervalMillis; - private final long taskBackoffTimeMillis; - private final boolean skipShardSyncAtWorkerInitializationIfLeasesExist; - private final long listShardsBackoffTimeInMillis; - private final int maxListShardsRetryAttempts; - private final boolean shouldCallProcessRecordsEvenForEmptyRecordList; - private final long idleTimeInMilliseconds; - @NonNull - private final InitialPositionInStreamExtended initialPositionInStream; - private final 
boolean cleanupLeasesOfCompletedShards; - private final boolean ignoreUnexpectedChildShards; - @NonNull - private final ShardDetector shardDetector; - private final AggregatorUtil aggregatorUtil; - private final HierarchicalShardSyncer hierarchicalShardSyncer; - @NonNull - private final MetricsFactory metricsFactory; -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownInput.java deleted file mode 100644 index 69c1176d..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownInput.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle; - -import lombok.Builder; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.ToString; -import lombok.experimental.Accessors; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; - -/** - * Container for the parameters to the IRecordProcessor's - * {@link ShardRecordProcessor#shutdown(ShutdownInput - * shutdownInput) shutdown} method. - */ -@Builder -@Getter -@Accessors(fluent = true) -@EqualsAndHashCode -@ToString -public class ShutdownInput { - - /** - * Get shutdown reason. 
- * - * -- GETTER -- - * @return Reason for the shutdown (ShutdownReason.SHARD_END indicates the shard is closed and there are no - * more records to process. Shutdown.LEASE_LOST indicates a fail over has occurred). - */ - private final ShutdownReason shutdownReason; - - /** - * Get Checkpointer. - * - * -- GETTER -- - * @return The checkpointer object that the record processor should use to checkpoint - */ - private final RecordProcessorCheckpointer checkpointer; - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotification.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotification.java deleted file mode 100644 index 669e805e..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotification.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle; - -import software.amazon.kinesis.processor.ShardRecordProcessor; - -/** - * A shutdown request to the ShardConsumer - */ -public interface ShutdownNotification { - /** - * Used to indicate that the record processor has been notified of a requested shutdown, and given the chance to - * checkpoint. - * - */ - void shutdownNotificationComplete(); - - /** - * Used to indicate that the record processor has completed the call to - * {@link ShardRecordProcessor#shutdown(ShutdownInput)} has - * completed. 
- */ - void shutdownComplete(); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotificationTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotificationTask.java deleted file mode 100644 index a0d8061e..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotificationTask.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle; - -import lombok.AccessLevel; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.processor.ShardRecordProcessor; - -/** - * Notifies record processor of incoming shutdown request, and gives them a chance to checkpoint. 
- */ -@RequiredArgsConstructor(access = AccessLevel.PACKAGE) -@Slf4j -@KinesisClientInternalApi -public class ShutdownNotificationTask implements ConsumerTask { - private final ShardRecordProcessor shardRecordProcessor; - private final RecordProcessorCheckpointer recordProcessorCheckpointer; - private final ShutdownNotification shutdownNotification; -// TODO: remove if not used - private final ShardInfo shardInfo; - - @Override - public TaskResult call() { - try { - try { - shardRecordProcessor.shutdownRequested(ShutdownRequestedInput.builder().checkpointer(recordProcessorCheckpointer).build()); - } catch (Exception ex) { - return new TaskResult(ex); - } - - return new TaskResult(null); - } finally { - shutdownNotification.shutdownNotificationComplete(); - } - } - - @Override - public TaskType taskType() { - return TaskType.SHUTDOWN_NOTIFICATION; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownTask.java deleted file mode 100644 index 1466dd02..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownTask.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.lifecycle; - -import com.google.common.annotations.VisibleForTesting; - -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.ShardDetector; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.leases.HierarchicalShardSyncer; -import software.amazon.kinesis.lifecycle.events.LeaseLostInput; -import software.amazon.kinesis.lifecycle.events.ShardEndedInput; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.metrics.MetricsUtil; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Task for invoking the ShardRecordProcessor shutdown() callback. 
- */ -@RequiredArgsConstructor -@Slf4j -@KinesisClientInternalApi -public class ShutdownTask implements ConsumerTask { - private static final String SHUTDOWN_TASK_OPERATION = "ShutdownTask"; - private static final String RECORD_PROCESSOR_SHUTDOWN_METRIC = "RecordProcessor.shutdown"; - - @NonNull - private final ShardInfo shardInfo; - @NonNull - private final ShardDetector shardDetector; - @NonNull - private final ShardRecordProcessor shardRecordProcessor; - @NonNull - private final ShardRecordProcessorCheckpointer recordProcessorCheckpointer; - @NonNull - private final ShutdownReason reason; - @NonNull - private final InitialPositionInStreamExtended initialPositionInStream; - private final boolean cleanupLeasesOfCompletedShards; - private final boolean ignoreUnexpectedChildShards; - @NonNull - private final LeaseRefresher leaseRefresher; - private final long backoffTimeMillis; - @NonNull - private final RecordsPublisher recordsPublisher; - @NonNull - private final HierarchicalShardSyncer hierarchicalShardSyncer; - @NonNull - private final MetricsFactory metricsFactory; - - private final TaskType taskType = TaskType.SHUTDOWN; - - /* - * Invokes ShardRecordProcessor shutdown() API. - * (non-Javadoc) - * - * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#call() - */ - @Override - public TaskResult call() { - recordProcessorCheckpointer.checkpointer().operation(SHUTDOWN_TASK_OPERATION); - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, SHUTDOWN_TASK_OPERATION); - - Exception exception; - boolean applicationException = false; - - try { - try { - // If we reached end of the shard, set sequence number to SHARD_END. 
- if (reason == ShutdownReason.SHARD_END) { - recordProcessorCheckpointer - .sequenceNumberAtShardEnd(recordProcessorCheckpointer.largestPermittedCheckpointValue()); - recordProcessorCheckpointer.largestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END); - } - - log.debug("Invoking shutdown() for shard {}, concurrencyToken {}. Shutdown reason: {}", - shardInfo.shardId(), shardInfo.concurrencyToken(), reason); - final ShutdownInput shutdownInput = ShutdownInput.builder().shutdownReason(reason) - .checkpointer(recordProcessorCheckpointer).build(); - final long startTime = System.currentTimeMillis(); - try { - if (reason == ShutdownReason.SHARD_END) { - shardRecordProcessor.shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build()); - ExtendedSequenceNumber lastCheckpointValue = recordProcessorCheckpointer.lastCheckpointValue(); - if (lastCheckpointValue == null - || !lastCheckpointValue.equals(ExtendedSequenceNumber.SHARD_END)) { - throw new IllegalArgumentException( - "Application didn't checkpoint at end of shard " + shardInfo.shardId()); - } - } else { - shardRecordProcessor.leaseLost(LeaseLostInput.builder().build()); - } - log.debug("Shutting down retrieval strategy."); - recordsPublisher.shutdown(); - log.debug("Record processor completed shutdown() for shard {}", shardInfo.shardId()); - } catch (Exception e) { - applicationException = true; - throw e; - } finally { - MetricsUtil.addLatency(scope, RECORD_PROCESSOR_SHUTDOWN_METRIC, startTime, MetricsLevel.SUMMARY); - } - - if (reason == ShutdownReason.SHARD_END) { - log.debug("Looking for child shards of shard {}", shardInfo.shardId()); - // create leases for the child shards - hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, leaseRefresher, - initialPositionInStream, cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, scope); - log.debug("Finished checking for child shards of shard {}", shardInfo.shardId()); - } - - return new TaskResult(null); 
- } catch (Exception e) { - if (applicationException) { - log.error("Application exception. ", e); - } else { - log.error("Caught exception: ", e); - } - exception = e; - // backoff if we encounter an exception. - try { - Thread.sleep(this.backoffTimeMillis); - } catch (InterruptedException ie) { - log.debug("Interrupted sleep", ie); - } - } - } finally { - MetricsUtil.endScope(scope); - } - - return new TaskResult(exception); - - } - - /* - * (non-Javadoc) - * - * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#taskType() - */ - @Override - public TaskType taskType() { - return taskType; - } - - @VisibleForTesting - public ShutdownReason getReason() { - return reason; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskExecutionListener.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskExecutionListener.java deleted file mode 100644 index b70a6103..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskExecutionListener.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle; - -import software.amazon.kinesis.lifecycle.events.TaskExecutionListenerInput; - -/** - * A listener for callbacks on task execution lifecycle for for a shard. 
- * - * Note: Recommended not to have a blocking implementation since these methods are - * called around the ShardRecordProcessor. A blocking call would result in slowing - * down the ShardConsumer. - */ -public interface TaskExecutionListener { - - void beforeTaskExecution(TaskExecutionListenerInput input); - - void afterTaskExecution(TaskExecutionListenerInput input); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskOutcome.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskOutcome.java deleted file mode 100644 index 832137fc..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskOutcome.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle; - -/** - * Enumerates types of outcome of tasks executed as part of processing a shard. - */ -public enum TaskOutcome { - /** - * Denotes a successful task outcome. - */ - SUCCESSFUL, - /** - * Denotes that the last record from the shard has been read/consumed. - */ - END_OF_SHARD, - /** - * Denotes a failure or exception during processing of the shard. 
- */ - FAILURE -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskType.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskType.java deleted file mode 100644 index 76f58bd3..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskType.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle; - -/** - * Enumerates types of tasks executed as part of processing a shard. - */ -public enum TaskType { - /** - * Polls and waits until parent shard(s) have been fully processed. - */ - BLOCK_ON_PARENT_SHARDS, - /** - * Initialization of ShardRecordProcessor (and Amazon Kinesis Client Library internal state for a shard). - */ - INITIALIZE, - /** - * Fetching and processing of records. - */ - PROCESS, - /** - * Shutdown of ShardRecordProcessor. - */ - SHUTDOWN, - /** - * Graceful shutdown has been requested, and notification of the record processor will occur. - */ - SHUTDOWN_NOTIFICATION, - /** - * Occurs once the shutdown has been completed - */ - SHUTDOWN_COMPLETE, - /** - * Sync leases/activities corresponding to Kinesis shards. 
- */ - SHARDSYNC -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/InitializationInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/InitializationInput.java deleted file mode 100644 index 79f70fa4..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/InitializationInput.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle.events; - -import lombok.Builder; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.ToString; -import lombok.experimental.Accessors; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Container for the parameters to the ShardRecordProcessor - * {@link ShardRecordProcessor#initialize(InitializationInput initializationInput) initialize} method. - */ -@Builder -@Getter -@Accessors(fluent = true) -@EqualsAndHashCode -@ToString -public class InitializationInput { - /** - * The shardId that the record processor is being initialized for. - */ - private final String shardId; - /** - * The last extended sequence number that was successfully checkpointed by the previous record processor. 
- */ - private final ExtendedSequenceNumber extendedSequenceNumber; - /** - * The pending extended sequence number that may have been started by the previous record processor. - * - * This will only be set if the previous record processor had prepared a checkpoint, but lost its lease before - * completing the checkpoint. - */ - private final ExtendedSequenceNumber pendingCheckpointSequenceNumber; -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/LeaseLostInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/LeaseLostInput.java deleted file mode 100644 index 84423ed1..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/LeaseLostInput.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.lifecycle.events; - -import lombok.Builder; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.ToString; -import lombok.experimental.Accessors; -import software.amazon.kinesis.processor.ShardRecordProcessor; - -/** - * Provides data, and interaction about the loss of a lease to a - * {@link ShardRecordProcessor}. - * - * This currently has no members, but exists for forward compatibility reasons. 
- */ -@Accessors(fluent = true) -@Getter -@Builder -@EqualsAndHashCode -@ToString -public class LeaseLostInput { -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ProcessRecordsInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ProcessRecordsInput.java deleted file mode 100644 index 86d56192..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ProcessRecordsInput.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle.events; - -import java.time.Duration; -import java.time.Instant; -import java.util.List; - -import lombok.Builder; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.ToString; -import lombok.experimental.Accessors; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.retrieval.KinesisClientRecord; - -/** - * Container for the parameters to the ShardRecordProcessor's - * {@link ShardRecordProcessor#processRecords(ProcessRecordsInput processRecordsInput) processRecords} method. - */ -@Builder(toBuilder = true) -@Getter -@Accessors(fluent = true) -@EqualsAndHashCode -@ToString -public class ProcessRecordsInput { - /** - * The time that this batch of records was received by the KCL. 
- */ - private Instant cacheEntryTime; - /** - * The time that this batch of records was prepared to be provided to the {@link ShardRecordProcessor} - */ - private Instant cacheExitTime; - /** - * Whether this batch of records is at the end of the shard. - * - * {@link ShardRecordProcessor}'s do not need to check this. If this is set the Scheduler will trigger a call to - * {@link ShardRecordProcessor#shardEnded(ShardEndedInput)} after the completion of the current processing call. - */ - private boolean isAtShardEnd; - /** - * The records received from Kinesis. These records may have been de-aggregated if they were published by the KPL. - */ - private List records; - /** - * A checkpointer that the {@link ShardRecordProcessor} can use to checkpoint its progress. - */ - private RecordProcessorCheckpointer checkpointer; - /** - * How far behind this batch of records was when received from Kinesis. - * - * This value does not include the {@link #timeSpentInCache()}. - */ - private Long millisBehindLatest; - - /** - * How long the records spent waiting to be dispatched to the {@link ShardRecordProcessor} - * - * @return the amount of time that records spent waiting before processing. - */ - public Duration timeSpentInCache() { - if (cacheEntryTime == null || cacheExitTime == null) { - return Duration.ZERO; - } - return Duration.between(cacheEntryTime, cacheExitTime); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShardEndedInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShardEndedInput.java deleted file mode 100644 index d85f93e4..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShardEndedInput.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.lifecycle.events; - -import lombok.Builder; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.ToString; -import lombok.experimental.Accessors; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; - -/** - * Provides a checkpointer that must be used to signal the completion of the shard to the Scheduler. - */ -@Builder -@Accessors(fluent = true) -@Getter -@EqualsAndHashCode -@ToString -public class ShardEndedInput { - - /** - * The checkpointer used to record that the record processor has completed the shard. - * - * The record processor must call {@link RecordProcessorCheckpointer#checkpoint()} before returning from - * {@link ShardRecordProcessor#shardEnded(ShardEndedInput)}. Failing to do so will trigger the Scheduler to retry - * shutdown until a successful checkpoint occurs. - */ - private final RecordProcessorCheckpointer checkpointer; - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShutdownRequestedInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShutdownRequestedInput.java deleted file mode 100644 index e2347be1..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShutdownRequestedInput.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.lifecycle.events; - -import lombok.Builder; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.ToString; -import lombok.experimental.Accessors; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.processor.ShardRecordProcessor; - -/** - * Provides access to a checkpointer so that {@link ShardRecordProcessor}'s can checkpoint - * before the lease is released during shutdown. - */ -@Builder -@Accessors(fluent = true) -@Getter -@EqualsAndHashCode -@ToString -public class ShutdownRequestedInput { - /** - * Checkpointer used to record the current progress of the - * {@link ShardRecordProcessor}. - */ - private final RecordProcessorCheckpointer checkpointer; -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/TaskExecutionListenerInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/TaskExecutionListenerInput.java deleted file mode 100644 index b64addb5..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/TaskExecutionListenerInput.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle.events; - -import lombok.Builder; -import lombok.Data; -import lombok.experimental.Accessors; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.lifecycle.TaskOutcome; -import software.amazon.kinesis.lifecycle.TaskType; -import software.amazon.kinesis.lifecycle.TaskExecutionListener; - -/** - * Container for the parameters to the TaskExecutionListener's - * {@link TaskExecutionListener#beforeTaskExecution(TaskExecutionListenerInput)} method. - * {@link TaskExecutionListener#afterTaskExecution(TaskExecutionListenerInput)} method. - */ -@Data -@Builder(toBuilder = true) -@Accessors(fluent = true) -public class TaskExecutionListenerInput { - /** - * Detailed information about the shard whose progress is monitored by TaskExecutionListener. - */ - private final ShardInfo shardInfo; - /** - * The type of task being executed for the shard. - * - * This corresponds to the state the shard is in. - */ - private final TaskType taskType; - /** - * Outcome of the task execution for the shard. - */ - private final TaskOutcome taskOutcome; -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulateByNameMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulateByNameMetricsScope.java deleted file mode 100644 index e8df50ec..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulateByNameMetricsScope.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.metrics; - -/** - * This is a MetricScope with a KeyType of String. It provides the implementation of - * getting the key based off of the String KeyType. - */ - -public abstract class AccumulateByNameMetricsScope extends AccumulatingMetricsScope { - - @Override - protected String getKey(String name) { - return name; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricKey.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricKey.java deleted file mode 100644 index d2f4e6dd..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricKey.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.metrics; - -import java.util.List; -import java.util.Objects; - -import software.amazon.awssdk.services.cloudwatch.model.Dimension; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; - - - -/* - * A representation of a key of a MetricDatum. 
This class is useful when wanting to compare - * whether 2 keys have the same MetricDatum. This feature will be used in MetricAccumulatingQueue - * where we aggregate metrics across multiple MetricScopes. - */ -public class CloudWatchMetricKey { - - private List dimensions; - private String metricName; - - /** - * @param datum data point - */ - - public CloudWatchMetricKey(MetricDatum datum) { - this.dimensions = datum.dimensions(); - this.metricName = datum.metricName(); - } - - @Override - public int hashCode() { - return Objects.hash(dimensions, metricName); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - CloudWatchMetricKey other = (CloudWatchMetricKey) obj; - return Objects.equals(other.dimensions, dimensions) && Objects.equals(other.metricName, metricName); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsFactory.java deleted file mode 100644 index 0419ad8e..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsFactory.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.metrics; - -import java.util.Set; - -import com.google.common.collect.ImmutableSet; - -import lombok.NonNull; -import software.amazon.awssdk.core.exception.AbortedException; -import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; - -/** - * An IMetricsFactory that creates IMetricsScopes that output themselves via CloudWatch. Batches IMetricsScopes together - * to reduce API calls. - */ -public class CloudWatchMetricsFactory implements MetricsFactory { - - /** - * If the CloudWatchPublisherRunnable accumulates more than FLUSH_SIZE distinct metrics, it will call CloudWatch - * immediately instead of waiting for the next scheduled call. - */ - private final CloudWatchPublisherRunnable runnable; - private final Thread publicationThread; - - /** - * Enabled metrics level. All metrics below this level will be dropped. - */ - private final MetricsLevel metricsLevel; - /** - * List of enabled dimensions for metrics. - */ - private final Set metricsEnabledDimensions; - - /** - * Constructor. - * - * @param cloudWatchClient - * Client used to make CloudWatch requests - * @param namespace - * the namespace under which the metrics will appear in the CloudWatch console - * @param bufferTimeMillis - * time to buffer metrics before publishing to CloudWatch - * @param maxQueueSize - * maximum number of metrics that we can have in a queue - * @param metricsLevel - * metrics level to enable - * @param metricsEnabledDimensions - * metrics dimensions to allow - * @param flushSize - * size of batch that can be published - */ - public CloudWatchMetricsFactory(@NonNull final CloudWatchAsyncClient cloudWatchClient, - @NonNull final String namespace, final long bufferTimeMillis, final int maxQueueSize, - @NonNull final MetricsLevel metricsLevel, @NonNull final Set metricsEnabledDimensions, - final int flushSize) { - this.metricsLevel = metricsLevel; - this.metricsEnabledDimensions = (metricsEnabledDimensions == null ? 
ImmutableSet.of() - : ImmutableSet.copyOf(metricsEnabledDimensions)); - - runnable = new CloudWatchPublisherRunnable(new CloudWatchMetricsPublisher(cloudWatchClient, namespace), - bufferTimeMillis, maxQueueSize, flushSize); - publicationThread = new Thread(runnable); - publicationThread.setName("cw-metrics-publisher"); - publicationThread.start(); - } - - @Override - public MetricsScope createMetrics() { - return new CloudWatchMetricsScope(runnable, metricsLevel, metricsEnabledDimensions); - } - - public void shutdown() { - runnable.shutdown(); - try { - publicationThread.join(); - } catch (InterruptedException e) { - throw AbortedException.builder().message(e.getMessage()).cause(e).build(); - } - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisher.java deleted file mode 100644 index 24137187..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisher.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.metrics; - -import java.util.ArrayList; -import java.util.List; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; -import software.amazon.awssdk.services.cloudwatch.model.CloudWatchException; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; -import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; - -/** - * Publisher that contains the logic to publish metrics. - */ -@Slf4j -public class CloudWatchMetricsPublisher { - // CloudWatch API has a limit of 20 MetricDatums per request - private static final int BATCH_SIZE = 20; - - private final String namespace; - private final CloudWatchAsyncClient cloudWatchClient; - - public CloudWatchMetricsPublisher(CloudWatchAsyncClient cloudWatchClient, String namespace) { - this.cloudWatchClient = cloudWatchClient; - this.namespace = namespace; - } - - /** - * Given a list of MetricDatumWithKey, this method extracts the MetricDatum from each - * MetricDatumWithKey and publishes those datums. 
- * - * @param dataToPublish a list containing all the MetricDatums to publish - */ - public void publishMetrics(List> dataToPublish) { - for (int startIndex = 0; startIndex < dataToPublish.size(); startIndex += BATCH_SIZE) { - int endIndex = Math.min(dataToPublish.size(), startIndex + BATCH_SIZE); - - PutMetricDataRequest.Builder request = PutMetricDataRequest.builder(); - request = request.namespace(namespace); - - List metricData = new ArrayList<>(); - for (int i = startIndex; i < endIndex; i++) { - metricData.add(dataToPublish.get(i).datum); - } - - request = request.metricData(metricData); - - try { - cloudWatchClient.putMetricData(request.build()); - - log.debug("Successfully published {} datums.", endIndex - startIndex); - } catch (CloudWatchException e) { - log.warn("Could not publish {} datums to CloudWatch", endIndex - startIndex, e); - } - } - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsScope.java deleted file mode 100644 index e81d2308..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsScope.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.metrics; - -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; - - -/** - * Metrics scope for CloudWatch metrics. - */ -public class CloudWatchMetricsScope extends FilteringMetricsScope implements MetricsScope { - - private CloudWatchPublisherRunnable publisher; - - /** - * Creates a CloudWatch metrics scope with given metrics level and enabled dimensions. - * @param publisher Publisher that emits CloudWatch metrics periodically. - * @param metricsLevel Metrics level to enable. All data with level below this will be dropped. - * @param metricsEnabledDimensions Enabled dimensions for CloudWatch metrics. - */ - public CloudWatchMetricsScope(CloudWatchPublisherRunnable publisher, - MetricsLevel metricsLevel, Set metricsEnabledDimensions) { - super(metricsLevel, metricsEnabledDimensions); - this.publisher = publisher; - } - - /** - * Once we call this method, all MetricDatums added to the scope will be enqueued to the publisher runnable. - * We enqueue MetricDatumWithKey because the publisher will aggregate similar metrics (i.e. MetricDatum with the - * same metricName) in the background thread. Hence aggregation using MetricDatumWithKey will be especially useful - * when aggregating across multiple MetricScopes. 
- */ - @Override - public void end() { - super.end(); - - final List> dataWithKeys = data.values().stream() - .map(metricDatum -> metricDatum.toBuilder().dimensions(getDimensions()).build()) - .map(metricDatum -> new MetricDatumWithKey<>(new CloudWatchMetricKey(metricDatum), metricDatum)) - .collect(Collectors.toList()); - - publisher.enqueue(dataWithKeys); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/DimensionTrackingMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/DimensionTrackingMetricsScope.java deleted file mode 100644 index fa8a9733..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/DimensionTrackingMetricsScope.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.metrics; - -import software.amazon.awssdk.services.cloudwatch.model.Dimension; - -import java.util.HashSet; -import java.util.Set; - - -/** - * DimensionTrackingMetricsScope is where we provide functionality for dimensions. - * Dimensions allow the user to be able view their metrics based off of the parameters they specify. - * - * The following examples show how to add dimensions if they would like to view their all metrics - * pertaining to a particular stream or for a specific date. 
- * - * myScope.addDimension("StreamName", "myStreamName"); - * myScope.addDimension("Date", "Dec012013"); - * - * - */ - -public abstract class DimensionTrackingMetricsScope implements MetricsScope { - - private Set dimensions = new HashSet<>(); - - @Override - public void addDimension(String name, String value) { - dimensions.add(Dimension.builder().name(name).value(value).build()); - } - - /** - * @return a set of dimensions for an IMetricsScope - */ - - protected Set getDimensions() { - return dimensions; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/InterceptingMetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/InterceptingMetricsFactory.java deleted file mode 100644 index 3c762578..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/InterceptingMetricsFactory.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.metrics; - - -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - -public abstract class InterceptingMetricsFactory implements MetricsFactory { - - private final MetricsFactory other; - - public InterceptingMetricsFactory(MetricsFactory other) { - this.other = other; - } - - @Override - public MetricsScope createMetrics() { - MetricsScope otherScope = other.createMetrics(); - interceptCreateMetrics(otherScope); - return new InterceptingMetricsScope(otherScope); - } - - protected void interceptCreateMetrics(MetricsScope scope) { - // Default implementation does nothing; - } - - protected void interceptAddData(String name, double value, StandardUnit unit, MetricsScope scope) { - scope.addData(name, value, unit); - } - - protected void interceptAddData(String name, double value, StandardUnit unit, MetricsLevel level, MetricsScope scope) { - scope.addData(name, value, unit, level); - } - - protected void interceptAddDimension(String name, String value, MetricsScope scope) { - scope.addDimension(name, value); - } - - protected void interceptEnd(MetricsScope scope) { - scope.end(); - } - - private class InterceptingMetricsScope implements MetricsScope { - - private MetricsScope other; - - public InterceptingMetricsScope(MetricsScope other) { - this.other = other; - } - - @Override - public void addData(String name, double value, StandardUnit unit) { - interceptAddData(name, value, unit, other); - } - - @Override - public void addData(String name, double value, StandardUnit unit, MetricsLevel level) { - interceptAddData(name, value, unit, level, other); - } - - @Override - public void addDimension(String name, String value) { - interceptAddDimension(name, value, other); - } - - @Override - public void end() { - interceptEnd(other); - } - - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsFactory.java 
b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsFactory.java deleted file mode 100644 index 2262de80..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsFactory.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.metrics; - -/** - * An IMetricsFactory that creates IMetricsScopes that output themselves via log4j. - */ -public class LogMetricsFactory implements MetricsFactory { - - @Override - public LogMetricsScope createMetrics() { - return new LogMetricsScope(); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsScope.java deleted file mode 100644 index cf85af6b..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsScope.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.metrics; - - - -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.cloudwatch.model.Dimension; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; -import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; - -/** - * An AccumulatingMetricsScope that outputs via log4j. - */ -@Slf4j -public class LogMetricsScope extends AccumulateByNameMetricsScope { - @Override - public void end() { - StringBuilder output = new StringBuilder(); - output.append("Metrics:\n"); - - output.append("Dimensions: "); - boolean needsComma = false; - for (Dimension dimension : getDimensions()) { - output.append(String.format("%s[%s: %s]", needsComma ? ", " : "", dimension.name(), dimension.value())); - needsComma = true; - } - output.append("\n"); - - for (MetricDatum datum : data.values()) { - StatisticSet statistics = datum.statisticValues(); - output.append(String.format("Name=%25s\tMin=%.2f\tMax=%.2f\tCount=%.2f\tSum=%.2f\tAvg=%.2f\tUnit=%s\n", - datum.metricName(), - statistics.minimum(), - statistics.maximum(), - statistics.sampleCount(), - statistics.sum(), - statistics.sum() / statistics.sampleCount(), - datum.unit())); - } - - log.info(output.toString()); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsCollectingTaskDecorator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsCollectingTaskDecorator.java deleted file mode 100644 index 24c3a3c5..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsCollectingTaskDecorator.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.metrics; - -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.lifecycle.ConsumerTask; -import software.amazon.kinesis.lifecycle.TaskResult; -import software.amazon.kinesis.lifecycle.TaskType; - -/** - * Decorates an ConsumerTask and reports metrics about its timing and success/failure. - */ -@KinesisClientInternalApi -public class MetricsCollectingTaskDecorator implements ConsumerTask { - - private final ConsumerTask other; - private final MetricsFactory factory; - - /** - * Constructor. - * - * @param other - * task to report metrics on - * @param factory - * IMetricsFactory to use - */ - public MetricsCollectingTaskDecorator(ConsumerTask other, MetricsFactory factory) { - this.other = other; - this.factory = factory; - } - - /** - * {@inheritDoc} - */ - @Override - public TaskResult call() { - MetricsScope scope = MetricsUtil.createMetricsWithOperation(factory, other.getClass().getSimpleName()); - TaskResult result = null; - final long startTimeMillis = System.currentTimeMillis(); - try { - result = other.call(); - } finally { - MetricsUtil.addSuccessAndLatency(scope, result != null && result.getException() == null, startTimeMillis, - MetricsLevel.SUMMARY); - MetricsUtil.endScope(scope); - } - return result; - } - - /** - * {@inheritDoc} - */ - @Override - public TaskType taskType() { - return other.taskType(); - } - - @Override - public String toString() { - return this.getClass().getName() + "<" + other.taskType() + ">(" + other + ")"; - } - - public ConsumerTask getOther() { - return other; - } -} diff --git 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsConfig.java deleted file mode 100644 index 8a57e454..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsConfig.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.metrics; - -import java.util.Set; - -import com.google.common.collect.ImmutableSet; - -import lombok.Data; -import lombok.experimental.Accessors; -import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; - -/** - * Used by KCL to configure the metrics reported by the application. - */ -@Data -@Accessors(fluent = true) -public class MetricsConfig { - /** - * Metrics dimensions that always will be enabled regardless of the config provided by user. - */ - public static final Set METRICS_ALWAYS_ENABLED_DIMENSIONS = ImmutableSet - .of(MetricsUtil.OPERATION_DIMENSION_NAME); - - /** - * Allowed dimensions for CloudWatch metrics. By default, worker ID dimension will be disabled. - */ - public static final Set METRICS_ALWAYS_ENABLED_DIMENSIONS_WITH_SHARD_ID = ImmutableSet. builder() - .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS).add(MetricsUtil.SHARD_ID_DIMENSION_NAME).build(); - - /** - * Metrics dimensions that signify all possible dimensions. 
- */ - public static final Set METRICS_DIMENSIONS_ALL = ImmutableSet.of(MetricsScope.METRICS_DIMENSIONS_ALL); - - /** - * Client used by the KCL to access the CloudWatch service for reporting metrics. - * - * @return {@link CloudWatchAsyncClient} - */ - private final CloudWatchAsyncClient cloudWatchClient; - - /** - * Namespace for KCL metrics. - * - * @return String - */ - private final String namespace; - - /** - * Buffer metrics for at most this long before publishing to CloudWatch. - * - *

- * Default value: 10000L - *

- */ - private long metricsBufferTimeMillis = 10000L; - - /** - * Buffer at most this many metrics before publishing to CloudWatch. - * - *

- * Default value: 10000 - *

- */ - private int metricsMaxQueueSize = 10000; - - /** - * Metrics level for which to enable CloudWatch metrics. - * - *

- * Default value: {@link MetricsLevel#DETAILED} - *

- */ - private MetricsLevel metricsLevel = MetricsLevel.DETAILED; - - /** - * Allowed dimensions for CloudWatchMetrics. - * - *

- * Default value: {@link MetricsConfig#METRICS_DIMENSIONS_ALL} - *

- */ - private Set metricsEnabledDimensions = METRICS_DIMENSIONS_ALL; - - /** - * Buffer size for MetricDatums before publishing. - * - *

- * Default value: 200 - *

- */ - private int publisherFlushBuffer = 200; - - private MetricsFactory metricsFactory; - - public MetricsFactory metricsFactory() { - if (metricsFactory == null) { - metricsFactory = new CloudWatchMetricsFactory(cloudWatchClient(), namespace(), metricsBufferTimeMillis(), - metricsMaxQueueSize(), metricsLevel(), metricsEnabledDimensions(), publisherFlushBuffer()); - } - return metricsFactory; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsFactory.java deleted file mode 100644 index 870c16d0..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsFactory.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.metrics; - -/** - * Factory for MetricsScope objects. - */ -public interface MetricsFactory { - /** - * @return a new IMetricsScope object of the type constructed by this factory. 
- */ - MetricsScope createMetrics(); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsUtil.java deleted file mode 100644 index 0f859bb0..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsUtil.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.metrics; - -import org.apache.commons.lang3.StringUtils; - -import lombok.NonNull; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - -/** - * - */ -public class MetricsUtil { - public static final String OPERATION_DIMENSION_NAME = "Operation"; - public static final String SHARD_ID_DIMENSION_NAME = "ShardId"; - private static final String WORKER_IDENTIFIER_DIMENSION = "WorkerIdentifier"; - private static final String TIME_METRIC = "Time"; - private static final String SUCCESS_METRIC = "Success"; - - public static MetricsScope createMetrics(@NonNull final MetricsFactory metricsFactory) { - return createMetricScope(metricsFactory, null); - } - - public static MetricsScope createMetricsWithOperation(@NonNull final MetricsFactory metricsFactory, - @NonNull final String operation) { - return createMetricScope(metricsFactory, operation); - } - - private static MetricsScope createMetricScope(final MetricsFactory metricsFactory, final String operation) { 
- final MetricsScope metricsScope = metricsFactory.createMetrics(); - if (StringUtils.isNotEmpty(operation)) { - metricsScope.addDimension(OPERATION_DIMENSION_NAME, operation); - } - return metricsScope; - } - - public static void addShardId(@NonNull final MetricsScope metricsScope, @NonNull final String shardId) { - addOperation(metricsScope, SHARD_ID_DIMENSION_NAME, shardId); - } - - public static void addWorkerIdentifier(@NonNull final MetricsScope metricsScope, - @NonNull final String workerIdentifier) { - addOperation(metricsScope, WORKER_IDENTIFIER_DIMENSION, workerIdentifier); - } - - public static void addOperation(@NonNull final MetricsScope metricsScope, @NonNull final String dimension, - @NonNull final String value) { - metricsScope.addDimension(dimension, value); - } - - public static void addSuccessAndLatency(@NonNull final MetricsScope metricsScope, final boolean success, - final long startTime, @NonNull final MetricsLevel metricsLevel) { - addSuccessAndLatency(metricsScope, null, success, startTime, metricsLevel); - } - - public static void addSuccessAndLatency(@NonNull final MetricsScope metricsScope, final String dimension, - final boolean success, final long startTime, @NonNull final MetricsLevel metricsLevel) { - addSuccess(metricsScope, dimension, success, metricsLevel); - addLatency(metricsScope, dimension, startTime, metricsLevel); - } - - public static void addLatency(@NonNull final MetricsScope metricsScope, final String dimension, - final long startTime, @NonNull final MetricsLevel metricsLevel) { - final String metricName = StringUtils.isEmpty(dimension) ? 
TIME_METRIC - : String.format("%s.%s", dimension, TIME_METRIC); - metricsScope.addData(metricName, System.currentTimeMillis() - startTime, StandardUnit.MILLISECONDS, - metricsLevel); - } - - public static void addSuccess(@NonNull final MetricsScope metricsScope, final String dimension, - final boolean success, @NonNull final MetricsLevel metricsLevel) { - final String metricName = StringUtils.isEmpty(dimension) ? SUCCESS_METRIC - : String.format("%s.%s", dimension, SUCCESS_METRIC); - metricsScope.addData(metricName, success ? 1 : 0, StandardUnit.COUNT, metricsLevel); - } - - public static void endScope(@NonNull final MetricsScope metricsScope) { - metricsScope.end(); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsFactory.java deleted file mode 100644 index 1518b681..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsFactory.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.metrics; - -public class NullMetricsFactory implements MetricsFactory { - - private static final NullMetricsScope SCOPE = new NullMetricsScope(); - - @Override - public MetricsScope createMetrics() { - return SCOPE; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsScope.java deleted file mode 100644 index eab7bf47..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsScope.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.metrics; - -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - -public class NullMetricsScope implements MetricsScope { - - @Override - public void addData(String name, double value, StandardUnit unit) { - - } - - @Override - public void addData(String name, double value, StandardUnit unit, MetricsLevel level) { - - } - - @Override - public void addDimension(String name, String value) { - - } - - @Override - public void end() { - - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingFactory.java deleted file mode 100644 index 3213628b..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingFactory.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.metrics; - -/** - * Metrics scope factory that delegates metrics scope creation to another factory, but - * returns metrics scope that is thread safe. - */ -public class ThreadSafeMetricsDelegatingFactory implements MetricsFactory { - - /** Metrics factory to delegate to. */ - private final MetricsFactory delegate; - - /** - * Creates an instance of the metrics factory. 
- * @param delegate metrics factory to delegate to - */ - public ThreadSafeMetricsDelegatingFactory(MetricsFactory delegate) { - this.delegate = delegate; - } - - /** - * {@inheritDoc} - */ - @Override - public MetricsScope createMetrics() { - return new ThreadSafeMetricsDelegatingScope(delegate.createMetrics()); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/Checkpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/Checkpointer.java deleted file mode 100644 index e28cad1b..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/Checkpointer.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.processor; - -import software.amazon.kinesis.exceptions.KinesisClientLibException; -import software.amazon.kinesis.checkpoint.Checkpoint; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Interface for checkpoint trackers. - */ -public interface Checkpointer { - - /** - * Record a checkpoint for a shard (e.g. sequence and subsequence numbers of last record processed - * by application). Upon failover, record processing is resumed from this point. - * - * @param shardId Checkpoint is specified for this shard. - * @param checkpointValue Value of the checkpoint (e.g. 
Kinesis sequence number and subsequence number) - * @param concurrencyToken Used with conditional writes to prevent stale updates - * (e.g. if there was a fail over to a different record processor, we don't want to - * overwrite it's checkpoint) - * @throws KinesisClientLibException Thrown if we were unable to save the checkpoint - */ - void setCheckpoint(String shardId, ExtendedSequenceNumber checkpointValue, String concurrencyToken) - throws KinesisClientLibException; - - /** - * Get the current checkpoint stored for the specified shard. Useful for checking that the parent shard - * has been completely processed before we start processing the child shard. - * - * @param shardId Current checkpoint for this shard is fetched - * @return Current checkpoint for this shard, null if there is no record for this shard. - * @throws KinesisClientLibException Thrown if we are unable to fetch the checkpoint - */ - ExtendedSequenceNumber getCheckpoint(String shardId) throws KinesisClientLibException; - - /** - * Get the current checkpoint stored for the specified shard, which holds the sequence numbers for the checkpoint - * and pending checkpoint. Useful for checking that the parent shard has been completely processed before we start - * processing the child shard. - * - * @param shardId Current checkpoint for this shard is fetched - * @return Current checkpoint object for this shard, null if there is no record for this shard. - * @throws KinesisClientLibException Thrown if we are unable to fetch the checkpoint - */ - Checkpoint getCheckpointObject(String shardId) throws KinesisClientLibException; - - - /** - * Record intent to checkpoint for a shard. Upon failover, the pendingCheckpointValue will be passed to the new - * ShardRecordProcessor's initialize() method. - * - * @param shardId Checkpoint is specified for this shard. - * @param pendingCheckpoint Value of the pending checkpoint (e.g. 
Kinesis sequence number and subsequence number) - * @param concurrencyToken Used with conditional writes to prevent stale updates - * (e.g. if there was a fail over to a different record processor, we don't want to - * overwrite it's checkpoint) - * @throws KinesisClientLibException Thrown if we were unable to save the checkpoint - */ - void prepareCheckpoint(String shardId, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken) - throws KinesisClientLibException; - - void operation(String operation); - - String operation(); - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/PreparedCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/PreparedCheckpointer.java deleted file mode 100644 index a7cf19d1..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/PreparedCheckpointer.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.processor; - -import software.amazon.kinesis.exceptions.InvalidStateException; -import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.exceptions.ThrottlingException; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Objects of this class are prepared to checkpoint at a specific sequence number. They use an - * RecordProcessorCheckpointer to do the actual checkpointing, so their checkpoint is subject to the same 'didn't go - * backwards' validation as a normal checkpoint. - */ -public interface PreparedCheckpointer { - - /** - * @return sequence number of pending checkpoint - */ - ExtendedSequenceNumber pendingCheckpoint(); - - /** - * This method will record a pending checkpoint. - * - * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @throws ShutdownException The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. - * The application should abort processing via this ShardRecordProcessor instance. - * @throws InvalidStateException Can't store checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @throws KinesisClientLibDependencyException Encountered an issue when storing the checkpoint. The application can - * backoff and retry. - * @throws IllegalArgumentException The sequence number being checkpointed is invalid because it is out of range, - * i.e. it is smaller than the last check point value (prepared or committed), or larger than the greatest - * sequence number seen by the associated record processor. 
- */ - void checkpoint() - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; - -} \ No newline at end of file diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ProcessorConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ProcessorConfig.java deleted file mode 100644 index 04ac8735..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ProcessorConfig.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.processor; - - import lombok.Data; - import lombok.NonNull; - import lombok.experimental.Accessors; - -/** - * Used by the KCL to configure the processor for processing the records. - */ -@Data -@Accessors(fluent = true) -public class ProcessorConfig { - /** - * - */ - @NonNull - private final ShardRecordProcessorFactory shardRecordProcessorFactory; - - /** - * Don't call processRecords() on the record processor for empty record lists. - * - *

Default value: false

- */ - private boolean callProcessRecordsEvenForEmptyRecordList = false; - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/RecordProcessorCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/RecordProcessorCheckpointer.java deleted file mode 100644 index e9db304a..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/RecordProcessorCheckpointer.java +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.processor; - -import software.amazon.awssdk.services.kinesis.model.Record; -import software.amazon.kinesis.exceptions.InvalidStateException; -import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.exceptions.ThrottlingException; - -/** - * Used by RecordProcessors when they want to checkpoint their progress. - * The Amazon Kinesis Client Library will pass an object implementing this interface to RecordProcessors, so they can - * checkpoint their progress. - */ -public interface RecordProcessorCheckpointer { - - /** - * This method will checkpoint the progress at the last data record that was delivered to the record processor. 
- * Upon fail over (after a successful checkpoint() call), the new/replacement ShardRecordProcessor instance - * will receive data records whose sequenceNumber > checkpoint position (for each partition key). - * In steady state, applications should checkpoint periodically (e.g. once every 5 minutes). - * Calling this API too frequently can slow down the application (because it puts pressure on the underlying - * checkpoint storage layer). - * - * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @throws ShutdownException The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. - * The application should abort processing via this ShardRecordProcessor instance. - * @throws InvalidStateException Can't store checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @throws KinesisClientLibDependencyException Encountered an issue when storing the checkpoint. The application can - * backoff and retry. - */ - void checkpoint() - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; - - /** - * This method will checkpoint the progress at the provided record. This method is analogous to - * {@link #checkpoint()} but provides the ability to specify the record at which to - * checkpoint. - * - * @param record A record at which to checkpoint in this shard. Upon failover, - * the Kinesis Client Library will start fetching records after this record's sequence number. - * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @throws ShutdownException The record processor instance has been shutdown. 
Another instance may have - * started processing some of these records already. - * The application should abort processing via this ShardRecordProcessor instance. - * @throws InvalidStateException Can't store checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @throws KinesisClientLibDependencyException Encountered an issue when storing the checkpoint. The application can - * backoff and retry. - */ - void checkpoint(Record record) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; - - /** - * This method will checkpoint the progress at the provided sequenceNumber. This method is analogous to - * {@link #checkpoint()} but provides the ability to specify the sequence number at which to - * checkpoint. - * - * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover, - * the Kinesis Client Library will start fetching records after this sequence number. - * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @throws ShutdownException The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. - * The application should abort processing via this ShardRecordProcessor instance. - * @throws InvalidStateException Can't store checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @throws KinesisClientLibDependencyException Encountered an issue when storing the checkpoint. The application can - * backoff and retry. - * @throws IllegalArgumentException The sequence number is invalid for one of the following reasons: - * 1.) It appears to be out of range, i.e. 
it is smaller than the last check point value, or larger than the - * greatest sequence number seen by the associated record processor. - * 2.) It is not a valid sequence number for a record in this shard. - */ - void checkpoint(String sequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; - - - /** - * This method will checkpoint the progress at the provided sequenceNumber and subSequenceNumber, the latter for - * aggregated records produced with the Producer Library. This method is analogous to {@link #checkpoint()} - * but provides the ability to specify the sequence and subsequence numbers at which to checkpoint. - * - * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover, the Kinesis - * Client Library will start fetching records after the given sequence and subsequence numbers. - * @param subSequenceNumber A subsequence number at which to checkpoint within this shard. Upon failover, the - * Kinesis Client Library will start fetching records after the given sequence and subsequence numbers. - * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @throws ShutdownException The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. - * The application should abort processing via this ShardRecordProcessor instance. - * @throws InvalidStateException Can't store checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @throws KinesisClientLibDependencyException Encountered an issue when storing the checkpoint. The application can - * backoff and retry. - * @throws IllegalArgumentException The sequence number is invalid for one of the following reasons: - * 1.) 
It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the - * greatest sequence number seen by the associated record processor. - * 2.) It is not a valid sequence number for a record in this shard. - */ - void checkpoint(String sequenceNumber, long subSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; - - /** - * This method will record a pending checkpoint at the last data record that was delivered to the record processor. - * If the application fails over between calling prepareCheckpoint() and checkpoint(), the init() method of the next - * IRecordProcessor for this shard will be informed of the prepared sequence number - * - * Application should use this to assist with idempotency across failover by calling prepareCheckpoint before having - * side effects, then by calling checkpoint on the returned PreparedCheckpointer after side effects are complete. - * Use the sequence number passed in to init() to behave idempotently. - * - * @return an PreparedCheckpointer object that can be called later to persist the checkpoint. - * - * @throws ThrottlingException Can't store pending checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @throws ShutdownException The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. - * The application should abort processing via this ShardRecordProcessor instance. - * @throws InvalidStateException Can't store pending checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @throws KinesisClientLibDependencyException Encountered an issue when storing the pending checkpoint. The - * application can backoff and retry. 
- */ - PreparedCheckpointer prepareCheckpoint() - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; - - /** - * This method will record a pending checkpoint at the at the provided record. This method is analogous to - * {@link #prepareCheckpoint()} but provides the ability to specify the record at which to prepare the checkpoint. - * - * @param record A record at which to prepare checkpoint in this shard. - * - * Application should use this to assist with idempotency across failover by calling prepareCheckpoint before having - * side effects, then by calling checkpoint on the returned PreparedCheckpointer after side effects are complete. - * Use the sequence number and application state passed in to init() to behave idempotently. - * - * @return an PreparedCheckpointer object that can be called later to persist the checkpoint. - * - * @throws ThrottlingException Can't store pending checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @throws ShutdownException The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. - * The application should abort processing via this ShardRecordProcessor instance. - * @throws InvalidStateException Can't store pending checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @throws KinesisClientLibDependencyException Encountered an issue when storing the pending checkpoint. The - * application can backoff and retry. - * @throws IllegalArgumentException The sequence number is invalid for one of the following reasons: - * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the - * greatest sequence number seen by the associated record processor. - * 2.) 
It is not a valid sequence number for a record in this shard. - */ - PreparedCheckpointer prepareCheckpoint(Record record) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; - - /** - * This method will record a pending checkpoint at the provided sequenceNumber. This method is analogous to - * {@link #prepareCheckpoint()} but provides the ability to specify the sequence number at which to checkpoint. - * - * @param sequenceNumber A sequence number at which to prepare checkpoint in this shard. - - * @return an PreparedCheckpointer object that can be called later to persist the checkpoint. - * - * @throws ThrottlingException Can't store pending checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @throws ShutdownException The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. - * The application should abort processing via this ShardRecordProcessor instance. - * @throws InvalidStateException Can't store pending checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @throws KinesisClientLibDependencyException Encountered an issue when storing the pending checkpoint. The - * application can backoff and retry. - * @throws IllegalArgumentException The sequence number is invalid for one of the following reasons: - * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the - * greatest sequence number seen by the associated record processor. - * 2.) It is not a valid sequence number for a record in this shard. 
- */ - PreparedCheckpointer prepareCheckpoint(String sequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; - - /** - * This method will record a pending checkpoint at the provided sequenceNumber and subSequenceNumber, the latter for - * aggregated records produced with the Producer Library. This method is analogous to {@link #prepareCheckpoint()} - * but provides the ability to specify the sequence number at which to checkpoint - * - * @param sequenceNumber A sequence number at which to prepare checkpoint in this shard. - * @param subSequenceNumber A subsequence number at which to prepare checkpoint within this shard. - * - * @return an PreparedCheckpointer object that can be called later to persist the checkpoint. - * - * @throws ThrottlingException Can't store pending checkpoint. Can be caused by checkpointing too frequently. - * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. - * @throws ShutdownException The record processor instance has been shutdown. Another instance may have - * started processing some of these records already. - * The application should abort processing via this ShardRecordProcessor instance. - * @throws InvalidStateException Can't store pending checkpoint. - * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). - * @throws KinesisClientLibDependencyException Encountered an issue when storing the pending checkpoint. The - * application can backoff and retry. - * @throws IllegalArgumentException The sequence number is invalid for one of the following reasons: - * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the - * greatest sequence number seen by the associated record processor. - * 2.) It is not a valid sequence number for a record in this shard. 
- */ - PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; - - Checkpointer checkpointer(); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessor.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessor.java deleted file mode 100644 index 96012754..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessor.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.processor; - -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.LeaseLostInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.events.ShardEndedInput; -import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput; - -/** - * The Amazon Kinesis Client Library will instantiate record processors to process data records fetched from Amazon - * Kinesis. - */ -public interface ShardRecordProcessor { - - /** - * Invoked by the Amazon Kinesis Client Library before data records are delivered to the ShardRecordProcessor instance - * (via processRecords). 
- * - * @param initializationInput Provides information related to initialization - */ - void initialize(InitializationInput initializationInput); - - /** - * Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the - * application. - * Upon fail over, the new instance will get records with sequence number > checkpoint position - * for each partition key. - * - * @param processRecordsInput Provides the records to be processed as well as information and capabilities related - * to them (eg checkpointing). - */ - void processRecords(ProcessRecordsInput processRecordsInput); - - /** - * Called when the lease that tied to this record processor has been lost. Once the lease has been lost the record - * processor can no longer checkpoint. - * - * @param leaseLostInput - * access to functions and data related to the loss of the lease. Currently this has no functionality. - */ - void leaseLost(LeaseLostInput leaseLostInput); - - /** - * Called when the shard that this record process is handling has been completed. Once a shard has been completed no - * further records will ever arrive on that shard. - * - * When this is called the record processor must call {@link RecordProcessorCheckpointer#checkpoint()}, - * otherwise an exception will be thrown and the all child shards of this shard will not make progress. - * - * @param shardEndedInput - * provides access to a checkpointer method for completing processing of the shard. - */ - void shardEnded(ShardEndedInput shardEndedInput); - - /** - * Called when the Scheduler has been requested to shutdown. This is called while the record processor still holds - * the lease so checkpointing is possible. Once this method has completed the lease for the record processor is - * released, and {@link #leaseLost(LeaseLostInput)} will be called at a later time. 
- * - * @param shutdownRequestedInput - * provides access to a checkpointer allowing a record processor to checkpoint before the shutdown is - * completed. - */ - void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput); - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessorFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessorFactory.java deleted file mode 100644 index 3b87d676..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessorFactory.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.processor; - -/** - * - */ -public interface ShardRecordProcessorFactory { - /** - * Returns a new instance of the ShardRecordProcessor - * - * @return - */ - ShardRecordProcessor shardRecordProcessor(); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShutdownNotificationAware.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShutdownNotificationAware.java deleted file mode 100644 index d9b0f5b9..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShutdownNotificationAware.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.processor; - -/** - * Allows a record processor to indicate it's aware of requested shutdowns, and handle the request. - */ -public interface ShutdownNotificationAware { - - /** - * Called when the worker has been requested to shutdown, and gives the record processor a chance to checkpoint. - * - * The record processor will still have shutdown called. - * - * @param checkpointer the checkpointer that can be used to save progress. - */ - void shutdownRequested(RecordProcessorCheckpointer checkpointer); - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AWSExceptionManager.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AWSExceptionManager.java deleted file mode 100644 index 2bc18032..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AWSExceptionManager.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.retrieval; - -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.function.Function; - -import lombok.NonNull; -import lombok.Setter; -import lombok.experimental.Accessors; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; - -/** - * - */ -@KinesisClientInternalApi -public class AWSExceptionManager { - private final Map, Function> map = new HashMap<>(); - - @Setter - @Accessors(fluent = true) - private Function defaultFunction = RuntimeException::new; - - public void add(@NonNull final Class clazz, - @NonNull final Function function) { - map.put(clazz, function); - } - - @SuppressWarnings("unchecked") - private Function handleFor(@NonNull final Throwable t) { - Class clazz = t.getClass(); - Optional> toApply = Optional.ofNullable(map.get(clazz)); - while (!toApply.isPresent() && clazz.getSuperclass() != null) { - clazz = (Class) clazz.getSuperclass(); - toApply = Optional.ofNullable(map.get(clazz)); - } - - return toApply.orElse(defaultFunction); - } - - @SuppressWarnings("unchecked") - public RuntimeException apply(Throwable t) { - // - // We know this is safe as the handler guarantees that the function we get will be able to accept the actual - // type of the throwable. handlerFor walks up the inheritance chain so we can't get a function more specific - // than the actual type of the throwable only. 
- // - Function f = - (Function) handleFor(t); - return f.apply(t); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AggregatorUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AggregatorUtil.java deleted file mode 100644 index 4b1e92f3..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AggregatorUtil.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.retrieval; - -import java.io.UnsupportedEncodingException; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import org.apache.commons.lang3.StringUtils; - -import com.google.protobuf.InvalidProtocolBufferException; - -import lombok.NonNull; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.retrieval.kpl.Messages; - -/** - * - */ -@Slf4j -public class AggregatorUtil { - public static final byte[] AGGREGATED_RECORD_MAGIC = new byte[]{-13, -119, -102, -62}; - private static final int DIGEST_SIZE = 16; - private static final BigInteger STARTING_HASH_KEY = new BigInteger("0"); - // largest hash key = 2^128-1 - private static final BigInteger ENDING_HASH_KEY = new BigInteger(StringUtils.repeat("FF", 16), 16); - - /** - * This method deaggregates the given list of Amazon Kinesis records into a - * list of KPL user records. This method will then return the resulting list - * of KPL user records. - * - * @param records A list of Amazon Kinesis records, each possibly aggregated. - * @return A resulting list of deaggregated KPL user records. - */ - public List deaggregate(List records) { - return deaggregate(records, STARTING_HASH_KEY, ENDING_HASH_KEY); - } - - /** - * Deaggregate any KPL records found. 
This method converts the starting and ending hash keys to {@link BigInteger}s - * before passing them on to {@link #deaggregate(List, BigInteger, BigInteger)} - * - * @param records - * the records to potentially deaggreate - * @param startingHashKey - * the starting hash key of the shard - * @param endingHashKey - * the ending hash key of the shard - * @return A list of records with any aggregate records deaggregated - */ - public List deaggregate(List records, String startingHashKey, - String endingHashKey) { - return deaggregate(records, new BigInteger(startingHashKey), new BigInteger(endingHashKey)); - } - - /** - * This method deaggregates the given list of Amazon Kinesis records into a - * list of KPL user records. Any KPL user records whose explicit hash key or - * partition key falls outside the range of the startingHashKey and the - * endingHashKey are discarded from the resulting list. This method will - * then return the resulting list of KPL user records. - * - * @param records A list of Amazon Kinesis records, each possibly aggregated. - * @param startingHashKey A BigInteger representing the starting hash key that the - * explicit hash keys or partition keys of retained resulting KPL - * user records must be greater than or equal to. - * @param endingHashKey A BigInteger representing the ending hash key that the the - * explicit hash keys or partition keys of retained resulting KPL - * user records must be smaller than or equal to. - * @return A resulting list of KPL user records whose explicit hash keys or - * partition keys fall within the range of the startingHashKey and - * the endingHashKey. 
- */ - // CHECKSTYLE:OFF NPathComplexity - public List deaggregate(List records, - BigInteger startingHashKey, - BigInteger endingHashKey) { - List result = new ArrayList<>(); - byte[] magic = new byte[AGGREGATED_RECORD_MAGIC.length]; - byte[] digest = new byte[DIGEST_SIZE]; - - for (KinesisClientRecord r : records) { - boolean isAggregated = true; - long subSeqNum = 0; - ByteBuffer bb = r.data(); - - if (bb.remaining() >= magic.length) { - bb.get(magic); - } else { - isAggregated = false; - } - - if (!Arrays.equals(AGGREGATED_RECORD_MAGIC, magic) || bb.remaining() <= DIGEST_SIZE) { - isAggregated = false; - } - - if (isAggregated) { - int oldLimit = bb.limit(); - bb.limit(oldLimit - DIGEST_SIZE); - byte[] messageData = new byte[bb.remaining()]; - bb.get(messageData); - bb.limit(oldLimit); - bb.get(digest); - byte[] calculatedDigest = calculateTailCheck(messageData); - - if (!Arrays.equals(digest, calculatedDigest)) { - isAggregated = false; - } else { - try { - Messages.AggregatedRecord ar = Messages.AggregatedRecord.parseFrom(messageData); - List pks = ar.getPartitionKeyTableList(); - List ehks = ar.getExplicitHashKeyTableList(); - long aat = r.approximateArrivalTimestamp() == null - ? 
-1 : r.approximateArrivalTimestamp().toEpochMilli(); - try { - int recordsInCurrRecord = 0; - for (Messages.Record mr : ar.getRecordsList()) { - String explicitHashKey = null; - String partitionKey = pks.get((int) mr.getPartitionKeyIndex()); - if (mr.hasExplicitHashKeyIndex()) { - explicitHashKey = ehks.get((int) mr.getExplicitHashKeyIndex()); - } - - BigInteger effectiveHashKey = effectiveHashKey(partitionKey, explicitHashKey); - - if (effectiveHashKey.compareTo(startingHashKey) < 0 - || effectiveHashKey.compareTo(endingHashKey) > 0) { - for (int toRemove = 0; toRemove < recordsInCurrRecord; ++toRemove) { - result.remove(result.size() - 1); - } - break; - } - - ++recordsInCurrRecord; - - KinesisClientRecord record = r.toBuilder() - .data(ByteBuffer.wrap(mr.getData().toByteArray())) - .partitionKey(partitionKey) - .explicitHashKey(explicitHashKey) - .build(); - result.add(convertRecordToKinesisClientRecord(record, true, subSeqNum++, explicitHashKey)); - } - } catch (Exception e) { - StringBuilder sb = new StringBuilder(); - sb.append("Unexpected exception during deaggregation, record was:\n"); - sb.append("PKS:\n"); - for (String s : pks) { - sb.append(s).append("\n"); - } - sb.append("EHKS: \n"); - for (String s : ehks) { - sb.append(s).append("\n"); - } - for (Messages.Record mr : ar.getRecordsList()) { - sb.append("Record: [hasEhk=").append(mr.hasExplicitHashKeyIndex()).append(", ") - .append("ehkIdx=").append(mr.getExplicitHashKeyIndex()).append(", ") - .append("pkIdx=").append(mr.getPartitionKeyIndex()).append(", ") - .append("dataLen=").append(mr.getData().toByteArray().length).append("]\n"); - } - sb.append("Sequence number: ").append(r.sequenceNumber()).append("\n") - .append("Raw data: ") - .append(javax.xml.bind.DatatypeConverter.printBase64Binary(messageData)).append("\n"); - log.error(sb.toString(), e); - } - } catch (InvalidProtocolBufferException e) { - isAggregated = false; - } - } - } - - if (!isAggregated) { - bb.rewind(); - result.add(r); - } - } 
- return result; - } - - protected byte[] calculateTailCheck(byte[] data) { - return md5(data); - } - - protected BigInteger effectiveHashKey(String partitionKey, String explicitHashKey) throws UnsupportedEncodingException { - if (explicitHashKey == null) { - return new BigInteger(1, md5(partitionKey.getBytes("UTF-8"))); - } - return new BigInteger(explicitHashKey); - } - - private byte[] md5(byte[] data) { - try { - MessageDigest d = MessageDigest.getInstance("MD5"); - return d.digest(data); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException(e); - } - } - - public KinesisClientRecord convertRecordToKinesisClientRecord(@NonNull final KinesisClientRecord record, - final boolean aggregated, - final long subSequenceNumber, - final String explicitHashKey) { - return KinesisClientRecord.builder() - .data(record.data()) - .partitionKey(record.partitionKey()) - .approximateArrivalTimestamp(record.approximateArrivalTimestamp()) - .encryptionType(record.encryptionType()) - .sequenceNumber(record.sequenceNumber()) - .aggregated(aggregated) - .subSequenceNumber(subSequenceNumber) - .explicitHashKey(explicitHashKey) - .build(); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ConsumerRegistration.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ConsumerRegistration.java deleted file mode 100644 index 344d879b..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ConsumerRegistration.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Amazon Software License - * (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at - * http://aws.amazon.com/asl/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific - * language governing permissions and limitations under the License. - */ - -package software.amazon.kinesis.retrieval; - -import software.amazon.kinesis.leases.exceptions.DependencyException; - -/** - * - */ -public interface ConsumerRegistration { - /** - * This method is used to get or create StreamConsumer information from Kinesis. It returns the StreamConsumer ARN - * after retrieving it. - * - * @return StreamConsumer ARN - * @throws DependencyException - */ - String getOrCreateStreamConsumerArn() throws DependencyException; -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherResult.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherResult.java deleted file mode 100644 index ff12755d..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherResult.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Amazon Software License - * (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at - * http://aws.amazon.com/asl/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ -package software.amazon.kinesis.retrieval; - -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; - -/** - * Represents the result from the DataFetcher, and allows the receiver to accept a result - */ -public interface DataFetcherResult { - /** - * The result of the request to Kinesis - * - * @return The result of the request, this can be null if the request failed. 
- */ - GetRecordsResponse getResult(); - - /** - * Accepts the result, and advances the shard iterator. A result from the data fetcher must be accepted before any - * further progress can be made. - * - * @return the result of the request, this can be null if the request failed. - */ - GetRecordsResponse accept(); - - /** - * Indicates whether this result is at the end of the shard or not - * - * @return true if the result is at the end of a shard, false otherwise - */ - boolean isShardEnd(); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetchingStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetchingStrategy.java deleted file mode 100644 index 05547db2..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetchingStrategy.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.retrieval; - -/** - * - */ -public enum DataFetchingStrategy { - DEFAULT, PREFETCH_CACHED; -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetrievalStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetrievalStrategy.java deleted file mode 100644 index b638d909..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetrievalStrategy.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.retrieval; - -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.kinesis.retrieval.polling.KinesisDataFetcher; - -/** - * Represents a strategy to retrieve records from Kinesis. Allows for variations on how records are retrieved from - * Kinesis. - */ -public interface GetRecordsRetrievalStrategy { - /** - * Gets a set of records from Kinesis. - * - * @param maxRecords - * passed to Kinesis, and can be used to restrict the number of records returned from Kinesis. - * @return the resulting records. - * @throws IllegalStateException - * if the strategy has been shutdown. - */ - GetRecordsResponse getRecords(int maxRecords); - - /** - * Releases any resources used by the strategy. Once the strategy is shutdown it is no longer safe to call - * {@link #getRecords(int)}. 
- */ - void shutdown(); - - /** - * Returns whether this strategy has been shutdown. - * - * @return true if the strategy has been shutdown, false otherwise. - */ - boolean isShutdown(); - - /** - * Returns the KinesisDataFetcher used to records from Kinesis. - * - * @return KinesisDataFetcher - */ - KinesisDataFetcher getDataFetcher(); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetriever.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetriever.java deleted file mode 100644 index 662bd670..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetriever.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.retrieval; - -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; - -/** - * This class uses the GetRecordsRetrievalStrategy class to retrieve the next set of records and update the cache. 
- */ -public interface GetRecordsRetriever { - GetRecordsResponse getNextRecords(int maxRecords); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/IteratorBuilder.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/IteratorBuilder.java deleted file mode 100644 index 2b49e031..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/IteratorBuilder.java +++ /dev/null @@ -1,85 +0,0 @@ -package software.amazon.kinesis.retrieval; - -import java.time.Instant; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; -import software.amazon.awssdk.services.kinesis.model.ShardIteratorType; -import software.amazon.awssdk.services.kinesis.model.StartingPosition; -import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.checkpoint.SentinelCheckpoint; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; - -@KinesisClientInternalApi -public class IteratorBuilder { - - public static SubscribeToShardRequest.Builder request(SubscribeToShardRequest.Builder builder, - String sequenceNumber, InitialPositionInStreamExtended initialPosition) { - return builder.startingPosition(request(StartingPosition.builder(), sequenceNumber, initialPosition).build()); - } - - public static SubscribeToShardRequest.Builder reconnectRequest(SubscribeToShardRequest.Builder builder, - String sequenceNumber, InitialPositionInStreamExtended initialPosition) { - return builder.startingPosition( - reconnectRequest(StartingPosition.builder(), sequenceNumber, initialPosition).build()); - } - - public static StartingPosition.Builder request(StartingPosition.Builder builder, String sequenceNumber, - InitialPositionInStreamExtended initialPosition) { - return apply(builder, 
StartingPosition.Builder::type, StartingPosition.Builder::timestamp, - StartingPosition.Builder::sequenceNumber, initialPosition, sequenceNumber, - ShardIteratorType.AT_SEQUENCE_NUMBER); - } - - public static StartingPosition.Builder reconnectRequest(StartingPosition.Builder builder, String sequenceNumber, - InitialPositionInStreamExtended initialPosition) { - return apply(builder, StartingPosition.Builder::type, StartingPosition.Builder::timestamp, - StartingPosition.Builder::sequenceNumber, initialPosition, sequenceNumber, - ShardIteratorType.AFTER_SEQUENCE_NUMBER); - } - - public static GetShardIteratorRequest.Builder request(GetShardIteratorRequest.Builder builder, - String sequenceNumber, InitialPositionInStreamExtended initialPosition) { - return apply(builder, GetShardIteratorRequest.Builder::shardIteratorType, GetShardIteratorRequest.Builder::timestamp, - GetShardIteratorRequest.Builder::startingSequenceNumber, initialPosition, sequenceNumber, - ShardIteratorType.AT_SEQUENCE_NUMBER); - } - - private final static Map SHARD_ITERATOR_MAPPING; - - static { - Map map = new HashMap<>(); - map.put(SentinelCheckpoint.LATEST.name(), ShardIteratorType.LATEST); - map.put(SentinelCheckpoint.TRIM_HORIZON.name(), ShardIteratorType.TRIM_HORIZON); - map.put(SentinelCheckpoint.AT_TIMESTAMP.name(), ShardIteratorType.AT_TIMESTAMP); - - SHARD_ITERATOR_MAPPING = Collections.unmodifiableMap(map); - } - - @FunctionalInterface - private interface UpdatingFunction { - R apply(R updated, T value); - } - - private static R apply(R initial, UpdatingFunction shardIterFunc, - UpdatingFunction dateFunc, UpdatingFunction sequenceFunction, - InitialPositionInStreamExtended initialPositionInStreamExtended, String sequenceNumber, - ShardIteratorType defaultIteratorType) { - ShardIteratorType iteratorType = SHARD_ITERATOR_MAPPING.getOrDefault( - sequenceNumber, defaultIteratorType); - R result = shardIterFunc.apply(initial, iteratorType); - switch (iteratorType) { - case AT_TIMESTAMP: - 
return dateFunc.apply(result, initialPositionInStreamExtended.getTimestamp().toInstant()); - case AT_SEQUENCE_NUMBER: - case AFTER_SEQUENCE_NUMBER: - return sequenceFunction.apply(result, sequenceNumber); - default: - return result; - } - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisClientRecord.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisClientRecord.java deleted file mode 100644 index 7e81bafb..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisClientRecord.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.retrieval; - -import java.nio.ByteBuffer; -import java.time.Instant; - -import lombok.Builder; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.ToString; -import lombok.experimental.Accessors; -import software.amazon.awssdk.services.kinesis.model.EncryptionType; -import software.amazon.awssdk.services.kinesis.model.Record; - -/** - * A converted record from Kinesis, maybe an aggregate record. 
- */ -@Builder(toBuilder = true) -@EqualsAndHashCode -@ToString -@Getter -@Accessors(fluent = true) -public class KinesisClientRecord { - private final String sequenceNumber; - private final Instant approximateArrivalTimestamp; - private final ByteBuffer data; - private final String partitionKey; - private final EncryptionType encryptionType; - private final long subSequenceNumber; - private final String explicitHashKey; - private final boolean aggregated; - - public static KinesisClientRecord fromRecord(Record record) { - return KinesisClientRecord.builder().sequenceNumber(record.sequenceNumber()) - .approximateArrivalTimestamp(record.approximateArrivalTimestamp()).data(record.data().asByteBuffer()) - .partitionKey(record.partitionKey()).encryptionType(record.encryptionType()).build(); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsFetcherFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsFetcherFactory.java deleted file mode 100644 index 830f9be9..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsFetcherFactory.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.retrieval; - -import software.amazon.kinesis.metrics.MetricsFactory; - -/** - * This factory is used to create the records fetcher to retrieve data from Kinesis for a given shard. 
- */ -public interface RecordsFetcherFactory { - /** - * Returns a RecordsPublisher to be used for retrieving records for a given shard. - * - * @param getRecordsRetrievalStrategy GetRecordsRetrievalStrategy to be used with the RecordsPublisher - * @param shardId ShardId of the shard that the fetcher will retrieve records for - * @param metricsFactory MetricsFactory used to create metricScope - * @param maxRecords Max number of records to be returned in a single get call - * - * @return RecordsPublisher used to get records from Kinesis. - */ - RecordsPublisher createRecordsFetcher(GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, String shardId, - MetricsFactory metricsFactory, int maxRecords); - - /** - * Sets the maximum number of ProcessRecordsInput objects the RecordsPublisher can hold, before further requests are - * blocked. - * - * @param maxPendingProcessRecordsInput The maximum number of ProcessRecordsInput objects that the cache will accept - * before blocking. - */ - void maxPendingProcessRecordsInput(int maxPendingProcessRecordsInput); - - int maxPendingProcessRecordsInput(); - - /** - * Sets the max byte size for the RecordsPublisher, before further requests are blocked. The byte size of the cache - * is the sum of byte size of all the ProcessRecordsInput objects in the cache at any point of time. - * - * @param maxByteSize The maximum byte size for the cache before blocking. - */ - void maxByteSize(int maxByteSize); - - int maxByteSize(); - - /** - * Sets the max number of records for the RecordsPublisher can hold, before further requests are blocked. The records - * count is the sum of all records present in across all the ProcessRecordsInput objects in the cache at any point - * of time. - * - * @param maxRecordsCount The mximum number of records in the cache before blocking. 
- */ - void maxRecordsCount(int maxRecordsCount); - - int maxRecordsCount(); - - /** - * Sets the dataFetchingStrategy to determine the type of RecordsPublisher to be used. - * - * @param dataFetchingStrategy Fetching strategy to be used - */ - void dataFetchingStrategy(DataFetchingStrategy dataFetchingStrategy); - - DataFetchingStrategy dataFetchingStrategy(); - - /** - * Sets the maximum idle time between two get calls. - * - * @param idleMillisBetweenCalls Sleep millis between calls. - */ - void idleMillisBetweenCalls(long idleMillisBetweenCalls); - - long idleMillisBetweenCalls(); - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsPublisher.java deleted file mode 100644 index 87e881a4..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsPublisher.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.retrieval; - -import org.reactivestreams.Publisher; - -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Provides a record publisher that will retrieve records from Kinesis for processing - */ -public interface RecordsPublisher extends Publisher { - /** - * Initializes the publisher with where to start processing. If there is a stored sequence number the publisher will - * begin from that sequence number, otherwise it will use the initial position. - * - * @param extendedSequenceNumber - * the sequence number to start processing from - * @param initialPositionInStreamExtended - * if there is no sequence number the initial position to use - */ - void start(ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended); - - - /** - * Shutdowns the publisher. Once this method returns the publisher should no longer provide any records. - */ - void shutdown(); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalConfig.java deleted file mode 100644 index 2f45c5b6..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalConfig.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.retrieval; - -import lombok.Data; -import lombok.NonNull; -import lombok.experimental.Accessors; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.retrieval.fanout.FanOutConfig; - -/** - * Used by the KCL to configure the retrieval of records from Kinesis. - */ -@Data -@Accessors(fluent = true) -public class RetrievalConfig { - /** - * User agent set when Amazon Kinesis Client Library makes AWS requests. - */ - public static final String KINESIS_CLIENT_LIB_USER_AGENT = "amazon-kinesis-client-library-java"; - - public static final String KINESIS_CLIENT_LIB_USER_AGENT_VERSION = "2.0.5"; - - /** - * Client used to make calls to Kinesis for records retrieval - */ - @NonNull - private final KinesisAsyncClient kinesisClient; - - /** - * The name of the stream to process records from. - */ - @NonNull - private final String streamName; - - @NonNull - private final String applicationName; - - /** - * Backoff time between consecutive ListShards calls. - * - *

- * Default value: 1500L - *

- */ - private long listShardsBackoffTimeInMillis = 1500L; - - /** - * Max number of retries for ListShards when throttled/exception is thrown. - * - *

- * Default value: 50 - *

- */ - private int maxListShardsRetryAttempts = 50; - - /** - * The location in the shard from which the KinesisClientLibrary will start fetching records from - * when the application starts for the first time and there is no checkpoint for the shard. - * - *

- * Default value: {@link InitialPositionInStream#LATEST} - *

- */ - private InitialPositionInStreamExtended initialPositionInStreamExtended = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.LATEST); - - private RetrievalSpecificConfig retrievalSpecificConfig; - - private RetrievalFactory retrievalFactory; - - public RetrievalFactory retrievalFactory() { - - if (retrievalFactory == null) { - if (retrievalSpecificConfig == null) { - retrievalSpecificConfig = new FanOutConfig(kinesisClient()).streamName(streamName()) - .applicationName(applicationName()); - } - retrievalFactory = retrievalSpecificConfig.retrievalFactory(); - } - return retrievalFactory; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalFactory.java deleted file mode 100644 index bcaf9e52..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalFactory.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.retrieval; - -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.metrics.MetricsFactory; - -/** - * - */ -public interface RetrievalFactory { - GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy(ShardInfo shardInfo, MetricsFactory metricsFactory); - - RecordsPublisher createGetRecordsCache(ShardInfo shardInfo, MetricsFactory metricsFactory); -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetryableRetrievalException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetryableRetrievalException.java deleted file mode 100644 index cd36aa33..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetryableRetrievalException.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.retrieval; - -import software.amazon.kinesis.exceptions.KinesisClientLibRetryableException; - -/** - * RetryableException for SubscribeToShard APIs. 
- */ -public class RetryableRetrievalException extends KinesisClientLibRetryableException { - public RetryableRetrievalException(final String message) { - super(message); - } - - public RetryableRetrievalException(final String message, final Exception e) { - super(message, e); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ThrottlingReporter.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ThrottlingReporter.java deleted file mode 100644 index 8a679c93..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ThrottlingReporter.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.retrieval; - -import org.slf4j.Logger; - -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; - -@RequiredArgsConstructor -@Slf4j -public class ThrottlingReporter { - - private final int maxConsecutiveWarnThrottles; - private final String shardId; - - private int consecutiveThrottles = 0; - - public void throttled() { - consecutiveThrottles++; - String message = "Shard '" + shardId + "' has been throttled " - + consecutiveThrottles + " consecutively"; - - if (consecutiveThrottles > maxConsecutiveWarnThrottles) { - getLog().error(message); - } else { - getLog().warn(message); - } - - } - - public void success() { - consecutiveThrottles = 0; - } - - protected Logger getLog() { - return log; - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConfig.java deleted file mode 100644 index 55ad1012..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConfig.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.retrieval.fanout; - -import org.apache.commons.lang3.ObjectUtils; - -import com.google.common.base.Preconditions; - -import lombok.Data; -import lombok.NonNull; -import lombok.experimental.Accessors; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.retrieval.RetrievalFactory; -import software.amazon.kinesis.retrieval.RetrievalSpecificConfig; - -@Data -@Accessors(fluent = true) -public class FanOutConfig implements RetrievalSpecificConfig { - - /** - * Client used for retrieval, and optional consumer creation - */ - @NonNull - private final KinesisAsyncClient kinesisClient; - - /** - * The ARN of an already created consumer, if this is set no automatic consumer creation will be attempted. - */ - private String consumerArn; - - /** - * The name of the stream to create a consumer for. - */ - private String streamName; - - /** - * The name of the consumer to create. If this isn't set the {@link #applicationName} will be used. - */ - private String consumerName; - - /** - * The name of this application. Used as the name of the consumer unless {@link #consumerName} is set - */ - private String applicationName; - - /** - * The maximum number of retries for calling describe stream summary. Once exhausted the consumer creation/retrieval - * will fail. - */ - private int maxDescribeStreamSummaryRetries = 10; - - /** - * The maximum number of retries for calling DescribeStreamConsumer. Once exhausted the consumer creation/retrieval - * will fail. - */ - private int maxDescribeStreamConsumerRetries = 10; - - /** - * The maximum number of retries for calling RegisterStreamConsumer. Once exhausted the consumer creation/retrieval - * will fail. - */ - private int registerStreamConsumerRetries = 10; - - /** - * The maximum amount of time that will be made between failed calls. 
- */ - private long retryBackoffMillis = 1000; - - @Override - public RetrievalFactory retrievalFactory() { - return new FanOutRetrievalFactory(kinesisClient, getOrCreateConsumerArn()); - } - - private String getOrCreateConsumerArn() { - if (consumerArn != null) { - return consumerArn; - } - - FanOutConsumerRegistration registration = createConsumerRegistration(); - try { - return registration.getOrCreateStreamConsumerArn(); - } catch (DependencyException e) { - throw new RuntimeException(e); - } - } - - private FanOutConsumerRegistration createConsumerRegistration() { - String consumerToCreate = ObjectUtils.firstNonNull(consumerName(), applicationName()); - return createConsumerRegistration(kinesisClient(), - Preconditions.checkNotNull(streamName(), "streamName must be set for consumer creation"), - Preconditions.checkNotNull(consumerToCreate, - "applicationName or consumerName must be set for consumer creation")); - - } - - protected FanOutConsumerRegistration createConsumerRegistration(KinesisAsyncClient client, String stream, - String consumerToCreate) { - return new FanOutConsumerRegistration(client, stream, consumerToCreate, maxDescribeStreamSummaryRetries(), - maxDescribeStreamConsumerRetries(), registerStreamConsumerRetries(), retryBackoffMillis()); - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistration.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistration.java deleted file mode 100644 index 9baf5863..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistration.java +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Amazon Software License - * (the "License"). You may not use this file except in compliance with the License. 
A copy of the License is located at - * http://aws.amazon.com/asl/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. - */ - -package software.amazon.kinesis.retrieval.fanout; - -import java.util.concurrent.ExecutionException; - -import org.apache.commons.lang3.StringUtils; - -import lombok.AccessLevel; -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.Setter; -import lombok.experimental.Accessors; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.ConsumerStatus; -import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest; -import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse; -import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest; -import software.amazon.awssdk.services.kinesis.model.KinesisException; -import software.amazon.awssdk.services.kinesis.model.LimitExceededException; -import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest; -import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse; -import software.amazon.awssdk.services.kinesis.model.ResourceInUseException; -import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.common.KinesisRequestsBuilder; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.retrieval.AWSExceptionManager; -import software.amazon.kinesis.retrieval.ConsumerRegistration; - -/** - * - */ -@RequiredArgsConstructor -@Slf4j -@Accessors(fluent = true) -@KinesisClientInternalApi 
-public class FanOutConsumerRegistration implements ConsumerRegistration { - @NonNull - private final KinesisAsyncClient kinesisClient; - private final String streamName; - @NonNull - private final String streamConsumerName; - private final int maxDescribeStreamSummaryRetries; - private final int maxDescribeStreamConsumerRetries; - private final int registerStreamConsumerRetries; - private final long retryBackoffMillis; - - private String streamArn; - @Setter(AccessLevel.PRIVATE) - private String streamConsumerArn; - - /** - * @inheritDoc - */ - @Override - public String getOrCreateStreamConsumerArn() throws DependencyException { - if (StringUtils.isEmpty(streamConsumerArn)) { - DescribeStreamConsumerResponse response = null; - - // 1. Check if consumer exists - try { - response = describeStreamConsumer(); - } catch (ResourceNotFoundException e) { - log.info("StreamConsumer not found, need to create it."); - } - - // 2. If not, register consumer - if (response == null) { - LimitExceededException finalException = null; - int retries = registerStreamConsumerRetries; - try { - while (retries > 0) { - finalException = null; - try { - final RegisterStreamConsumerResponse registerResponse = registerStreamConsumer(); - streamConsumerArn(registerResponse.consumer().consumerARN()); - break; - } catch (LimitExceededException e) { - // TODO: Figure out internal service exceptions - log.debug("RegisterStreamConsumer call got throttled will retry."); - finalException = e; - } - retries--; - } - - // All calls got throttled, returning. - if (finalException != null) { - throw new DependencyException(finalException); - } - } catch (ResourceInUseException e) { - // Consumer is present, call DescribeStreamConsumer - log.debug("Got ResourceInUseException consumer exists, will call DescribeStreamConsumer again."); - response = describeStreamConsumer(); - } - } - - // Update consumer arn, if describe was successful. 
- if (response != null) { - streamConsumerArn(response.consumerDescription().consumerARN()); - } - - // Check if consumer is active before proceeding - waitForActive(); - } - return streamConsumerArn; - } - - private RegisterStreamConsumerResponse registerStreamConsumer() throws DependencyException { - final AWSExceptionManager exceptionManager = createExceptionManager(); - try { - final RegisterStreamConsumerRequest request = KinesisRequestsBuilder - .registerStreamConsumerRequestBuilder().streamARN(streamArn()) - .consumerName(streamConsumerName).build(); - return kinesisClient.registerStreamConsumer(request).get(); - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - throw new DependencyException(e); - } - } - - private DescribeStreamConsumerResponse describeStreamConsumer() throws DependencyException { - final DescribeStreamConsumerRequest.Builder requestBuilder = KinesisRequestsBuilder - .describeStreamConsumerRequestBuilder(); - final DescribeStreamConsumerRequest request; - - if (StringUtils.isEmpty(streamConsumerArn)) { - request = requestBuilder.streamARN(streamArn()).consumerName(streamConsumerName).build(); - } else { - request = requestBuilder.consumerARN(streamConsumerArn).build(); - } - - final ServiceCallerSupplier dsc = () -> kinesisClient - .describeStreamConsumer(request).get(); - - return retryWhenThrottled(dsc, maxDescribeStreamConsumerRetries, "DescribeStreamConsumer"); - } - - private void waitForActive() throws DependencyException { - ConsumerStatus status = null; - - int retries = maxDescribeStreamConsumerRetries; - - while (!ConsumerStatus.ACTIVE.equals(status) && retries > 0) { - status = describeStreamConsumer().consumerDescription().consumerStatus(); - retries--; - } - - if (!ConsumerStatus.ACTIVE.equals(status)) { - final String message = String.format( - "Status of StreamConsumer %s, was not ACTIVE after all retries. 
Was instead %s.", - streamConsumerName, status); - log.error(message); - throw new IllegalStateException(message); - } - } - - private String streamArn() throws DependencyException { - if (StringUtils.isEmpty(streamArn)) { - final DescribeStreamSummaryRequest request = KinesisRequestsBuilder - .describeStreamSummaryRequestBuilder().streamName(streamName).build(); - final ServiceCallerSupplier dss = () -> kinesisClient.describeStreamSummary(request).get() - .streamDescriptionSummary().streamARN(); - - streamArn = retryWhenThrottled(dss, maxDescribeStreamSummaryRetries, "DescribeStreamSummary"); - } - - return streamArn; - } - - @FunctionalInterface - private interface ServiceCallerSupplier { - T get() throws ExecutionException, InterruptedException; - } - - private T retryWhenThrottled(@NonNull final ServiceCallerSupplier retriever, final int maxRetries, - @NonNull final String apiName) throws DependencyException { - final AWSExceptionManager exceptionManager = createExceptionManager(); - - LimitExceededException finalException = null; - - int retries = maxRetries; - while (retries > 0) { - try { - try { - return retriever.get(); - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - throw new DependencyException(e); - } - } catch (LimitExceededException e) { - log.info("Throttled while calling {} API, will backoff.", apiName); - try { - Thread.sleep(retryBackoffMillis + (long) (Math.random() * 100)); - } catch (InterruptedException ie) { - log.debug("Sleep interrupted, shutdown invoked."); - } - finalException = e; - } - retries--; - } - - if (finalException == null) { - throw new IllegalStateException( - String.format("Finished all retries and no exception was caught while calling %s", apiName)); - } - - throw finalException; - } - - private AWSExceptionManager createExceptionManager() { - final AWSExceptionManager exceptionManager = new AWSExceptionManager(); - 
exceptionManager.add(LimitExceededException.class, t -> t); - exceptionManager.add(ResourceInUseException.class, t -> t); - exceptionManager.add(ResourceNotFoundException.class, t -> t); - exceptionManager.add(KinesisException.class, t -> t); - - return exceptionManager; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisher.java deleted file mode 100644 index c199eeca..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisher.java +++ /dev/null @@ -1,686 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.retrieval.fanout; - -import java.time.Instant; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; - -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.core.async.SdkPublisher; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; -import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent; -import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEventStream; -import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest; -import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponse; -import software.amazon.awssdk.services.kinesis.model.SubscribeToShardResponseHandler; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.common.KinesisRequestsBuilder; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.retrieval.IteratorBuilder; -import software.amazon.kinesis.retrieval.KinesisClientRecord; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RetryableRetrievalException; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -@RequiredArgsConstructor -@Slf4j -@KinesisClientInternalApi -public class FanOutRecordsPublisher implements RecordsPublisher { - private static final ThrowableCategory ACQUIRE_TIMEOUT_CATEGORY = new ThrowableCategory( - ThrowableType.ACQUIRE_TIMEOUT); - private static final ThrowableCategory READ_TIMEOUT_CATEGORY = new ThrowableCategory(ThrowableType.READ_TIMEOUT); - - 
private final KinesisAsyncClient kinesis; - private final String shardId; - private final String consumerArn; - - private final Object lockObject = new Object(); - - private final AtomicInteger subscribeToShardId = new AtomicInteger(0); - - private RecordFlow flow; - - private String currentSequenceNumber; - private InitialPositionInStreamExtended initialPositionInStreamExtended; - private boolean isFirstConnection = true; - - private Subscriber subscriber; - private long availableQueueSpace = 0; - - @Override - public void start(ExtendedSequenceNumber extendedSequenceNumber, - InitialPositionInStreamExtended initialPositionInStreamExtended) { - synchronized (lockObject) { - log.debug("[{}] Initializing Publisher @ Sequence: {} -- Initial Position: {}", shardId, - extendedSequenceNumber, initialPositionInStreamExtended); - this.initialPositionInStreamExtended = initialPositionInStreamExtended; - this.currentSequenceNumber = extendedSequenceNumber.sequenceNumber(); - this.isFirstConnection = true; - } - - } - - @Override - public void shutdown() { - synchronized (lockObject) { - if (flow != null) { - flow.cancel(); - } - flow = null; - } - } - - private boolean hasValidSubscriber() { - return subscriber != null; - } - - private void subscribeToShard(String sequenceNumber) { - synchronized (lockObject) { - SubscribeToShardRequest.Builder builder = KinesisRequestsBuilder.subscribeToShardRequestBuilder() - .shardId(shardId).consumerARN(consumerArn); - SubscribeToShardRequest request; - if (isFirstConnection) { - request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStreamExtended).build(); - } else { - request = IteratorBuilder.reconnectRequest(builder, sequenceNumber, initialPositionInStreamExtended) - .build(); - } - - Instant connectionStart = Instant.now(); - int subscribeInvocationId = subscribeToShardId.incrementAndGet(); - String instanceId = shardId + "-" + subscribeInvocationId; - log.debug( - "{}: [SubscriptionLifetime]: 
(FanOutRecordsPublisher#subscribeToShard) @ {} id: {} -- Starting subscribe to shard", - shardId, connectionStart, instanceId); - flow = new RecordFlow(this, connectionStart, instanceId); - kinesis.subscribeToShard(request, flow); - } - } - - private void errorOccurred(RecordFlow triggeringFlow, Throwable t) { - synchronized (lockObject) { - if (!hasValidSubscriber()) { - log.warn( - "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- Subscriber is null", - shardId, flow.connectionStartedAt, flow.subscribeToShardId); - return; - } - Throwable propagationThrowable = t; - ThrowableCategory category = throwableCategory(propagationThrowable); - - if (isActiveFlow(triggeringFlow)) { - if (flow != null) { - String logMessage = String.format( - "%s: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ %s id: %s -- %s", - shardId, flow.connectionStartedAt, flow.subscribeToShardId, category.throwableTypeString); - if (category.throwableType.equals(ThrowableType.READ_TIMEOUT)) { - log.debug(logMessage, propagationThrowable); - propagationThrowable = new RetryableRetrievalException(category.throwableTypeString, - (Exception) propagationThrowable.getCause()); - } else { - log.warn(logMessage, propagationThrowable); - } - - flow.cancel(); - } - log.debug("{}: availableQueueSpace zeroing from {}", shardId, availableQueueSpace); - availableQueueSpace = 0; - - try { - handleFlowError(propagationThrowable); - } catch (Throwable innerThrowable) { - log.warn("{}: Exception while calling subscriber.onError", shardId, innerThrowable); - } - subscriber = null; - flow = null; - } else { - if (triggeringFlow != null) { - log.debug( - "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- {} -> triggeringFlow wasn't the active flow. 
Didn't dispatch error", - shardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId, - category.throwableTypeString); - triggeringFlow.cancel(); - } - } - - } - } - - private void handleFlowError(Throwable t) { - if (t.getCause() instanceof ResourceNotFoundException) { - log.debug( - "{}: Could not call SubscribeToShard successfully because shard no longer exists. Marking shard for completion.", - shardId); - subscriber - .onNext(ProcessRecordsInput.builder().records(Collections.emptyList()).isAtShardEnd(true).build()); - subscriber.onComplete(); - } else { - subscriber.onError(t); - } - } - - private enum ThrowableType { - ACQUIRE_TIMEOUT("AcquireTimeout"), READ_TIMEOUT("ReadTimeout"), OTHER("Other"); - - String value; - - ThrowableType(final String value) { - this.value = value; - } - } - - private static class ThrowableCategory { - @NonNull - final ThrowableType throwableType; - @NonNull - final String throwableTypeString; - - ThrowableCategory(final ThrowableType throwableType) { - this(throwableType, throwableType.value); - } - - ThrowableCategory(final ThrowableType throwableType, final String throwableTypeString) { - this.throwableType = throwableType; - this.throwableTypeString = throwableTypeString; - } - } - - private ThrowableCategory throwableCategory(Throwable t) { - Throwable current = t; - StringBuilder builder = new StringBuilder(); - do { - if (current.getMessage() != null && current.getMessage().startsWith("Acquire operation")) { - return ACQUIRE_TIMEOUT_CATEGORY; - } - if (current.getClass().getName().equals("io.netty.handler.timeout.ReadTimeoutException")) { - return READ_TIMEOUT_CATEGORY; - } - - if (current.getCause() == null) { - // - // At the bottom - // - builder.append(current.getClass().getName()).append(": ").append(current.getMessage()); - } else { - builder.append(current.getClass().getSimpleName()); - builder.append("/"); - } - current = current.getCause(); - } while (current != null); - return new 
ThrowableCategory(ThrowableType.OTHER, builder.toString()); - } - - private void recordsReceived(RecordFlow triggeringFlow, SubscribeToShardEvent recordBatchEvent) { - synchronized (lockObject) { - if (!hasValidSubscriber()) { - log.debug( - "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Subscriber is null.", - shardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); - triggeringFlow.cancel(); - if (flow != null) { - flow.cancel(); - } - return; - } - if (!isActiveFlow(triggeringFlow)) { - log.debug( - "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Received records for an inactive flow.", - shardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); - return; - } - - List records = recordBatchEvent.records().stream().map(KinesisClientRecord::fromRecord) - .collect(Collectors.toList()); - ProcessRecordsInput input = ProcessRecordsInput.builder().cacheEntryTime(Instant.now()) - .millisBehindLatest(recordBatchEvent.millisBehindLatest()) - .isAtShardEnd(recordBatchEvent.continuationSequenceNumber() == null).records(records).build(); - - try { - subscriber.onNext(input); - // - // Only advance the currentSequenceNumber if we successfully dispatch the last received input - // - currentSequenceNumber = recordBatchEvent.continuationSequenceNumber(); - } catch (Throwable t) { - log.warn("{}: Unable to call onNext for subscriber. 
Failing publisher.", shardId); - errorOccurred(triggeringFlow, t); - } - - if (availableQueueSpace <= 0) { - log.debug( - "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Attempted to decrement availableQueueSpace to below 0", - shardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); - } else { - availableQueueSpace--; - if (availableQueueSpace > 0) { - triggeringFlow.request(1); - } - } - } - } - - private void onComplete(RecordFlow triggeringFlow) { - synchronized (lockObject) { - log.debug("{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {}", shardId, - triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); - triggeringFlow.cancel(); - if (!hasValidSubscriber()) { - log.debug("{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {}", shardId, - triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); - return; - } - - if (!isActiveFlow(triggeringFlow)) { - log.debug( - "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {} -- Received spurious onComplete from unexpected flow. Ignoring.", - shardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); - return; - } - - if (currentSequenceNumber != null) { - log.debug("{}: Shard hasn't ended resubscribing.", shardId); - subscribeToShard(currentSequenceNumber); - } else { - log.debug("{}: Shard has ended completing subscriber.", shardId); - subscriber.onComplete(); - } - } - } - - @Override - public void subscribe(Subscriber s) { - synchronized (lockObject) { - if (subscriber != null) { - log.error( - "{}: A subscribe occurred while there was an active subscriber. 
Sending error to current subscriber", - shardId); - MultipleSubscriberException multipleSubscriberException = new MultipleSubscriberException(); - - // - // Notify current subscriber - // - subscriber.onError(multipleSubscriberException); - subscriber = null; - - // - // Notify attempted subscriber - // - s.onError(multipleSubscriberException); - terminateExistingFlow(); - return; - } - terminateExistingFlow(); - - subscriber = s; - try { - subscribeToShard(currentSequenceNumber); - } catch (Throwable t) { - errorOccurred(flow, t); - return; - } - if (flow == null) { - // - // Failed to subscribe to a flow - // - errorOccurred(flow, new IllegalStateException("SubscribeToShard failed")); - return; - } - subscriber.onSubscribe(new Subscription() { - @Override - public void request(long n) { - synchronized (lockObject) { - if (subscriber != s) { - log.warn( - "{}: (FanOutRecordsPublisher/Subscription#request) - Rejected an attempt to request({}), because subscribers don't match.", - shardId, n); - return; - } - if (flow == null) { - // - // Flow has been terminated, so we can't make any requests on it anymore. 
- // - log.debug( - "{}: (FanOutRecordsPublisher/Subscription#request) - Request called for a null flow.", - shardId); - errorOccurred(flow, new IllegalStateException("Attempted to request on a null flow.")); - return; - } - long previous = availableQueueSpace; - availableQueueSpace += n; - if (previous <= 0) { - flow.request(1); - } - } - } - - @Override - public void cancel() { - synchronized (lockObject) { - if (subscriber != s) { - log.warn( - "{}: (FanOutRecordsPublisher/Subscription#cancel) - Rejected attempt to cancel subscription, because subscribers don't match.", - shardId); - return; - } - if (!hasValidSubscriber()) { - log.warn( - "{}: (FanOutRecordsPublisher/Subscription#cancel) - Cancelled called even with an invalid subscriber", - shardId); - } - subscriber = null; - if (flow != null) { - log.debug("{}: [SubscriptionLifetime]: (FanOutRecordsPublisher/Subscription#cancel) @ {} id: {}", - shardId, flow.connectionStartedAt, flow.subscribeToShardId); - flow.cancel(); - availableQueueSpace = 0; - } - } - } - }); - } - } - - private void terminateExistingFlow() { - if (flow != null) { - RecordFlow current = flow; - flow = null; - current.cancel(); - } - } - - private boolean isActiveFlow(RecordFlow requester) { - synchronized (lockObject) { - return requester == flow; - } - } - - private void rejectSubscription(SdkPublisher publisher) { - publisher.subscribe(new Subscriber() { - Subscription localSub; - - @Override - public void onSubscribe(Subscription s) { - localSub = s; - localSub.cancel(); - } - - @Override - public void onNext(SubscribeToShardEventStream subscribeToShardEventStream) { - localSub.cancel(); - } - - @Override - public void onError(Throwable t) { - localSub.cancel(); - } - - @Override - public void onComplete() { - localSub.cancel(); - } - }); - } - - @RequiredArgsConstructor - @Slf4j - static class RecordFlow implements SubscribeToShardResponseHandler { - - private final FanOutRecordsPublisher parent; - private final Instant 
connectionStartedAt; - private final String subscribeToShardId; - - private RecordSubscription subscription; - private boolean isDisposed = false; - private boolean isErrorDispatched = false; - private boolean isCancelled = false; - - @Override - public void onEventStream(SdkPublisher publisher) { - synchronized (parent.lockObject) { - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- Subscribe", - parent.shardId, connectionStartedAt, subscribeToShardId); - if (!parent.isActiveFlow(this)) { - this.isDisposed = true; - log.debug( - "{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- parent is disposed", - parent.shardId, connectionStartedAt, subscribeToShardId); - parent.rejectSubscription(publisher); - return; - } - - try { - log.debug( - "{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- creating record subscription", - parent.shardId, connectionStartedAt, subscribeToShardId); - subscription = new RecordSubscription(parent, this, connectionStartedAt, subscribeToShardId); - publisher.subscribe(subscription); - - // - // Only flip this once we succeed - // - parent.isFirstConnection = false; - } catch (Throwable t) { - log.debug( - "{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- throwable during record subscription: {}", - parent.shardId, connectionStartedAt, subscribeToShardId, t.getMessage()); - parent.errorOccurred(this, t); - } - } - } - - @Override - public void responseReceived(SubscribeToShardResponse response) { - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#responseReceived) @ {} id: {} -- Response received", - parent.shardId, connectionStartedAt, subscribeToShardId); - } - - @Override - public void exceptionOccurred(Throwable throwable) { - synchronized (parent.lockObject) { - - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- {}: {}", - parent.shardId, connectionStartedAt, subscribeToShardId, 
throwable.getClass().getName(), - throwable.getMessage()); - if (this.isDisposed) { - log.debug( - "{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- This flow has been disposed, not dispatching error. {}: {}", - parent.shardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(), - throwable.getMessage()); - this.isErrorDispatched = true; - } - this.isDisposed = true; - if (!isErrorDispatched) { - parent.errorOccurred(this, throwable); - isErrorDispatched = true; - } else { - log.debug( - "{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- An error has previously been dispatched, not dispatching this error {}: {}", - parent.shardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(), - throwable.getMessage()); - } - } - } - - @Override - public void complete() { - synchronized (parent.lockObject) { - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- Connection completed", - parent.shardId, connectionStartedAt, subscribeToShardId); - - if (isCancelled) { - // - // The SDK currently calls onComplete when the subscription is cancelled, which we really don't - // want to do. When that happens we don't want to call the parent onComplete since that will restart - // the - // subscription, which was cancelled for a reason (usually queue overflow). - // - log.warn("{}: complete called on a cancelled subscription. 
Ignoring completion", parent.shardId); - return; - } - if (this.isDisposed) { - log.warn( - "{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- This flow has been disposed not dispatching completion", - parent.shardId, connectionStartedAt, subscribeToShardId); - return; - } - - parent.onComplete(this); - } - } - - public void cancel() { - synchronized (parent.lockObject) { - this.isDisposed = true; - this.isCancelled = true; - if (subscription != null) { - try { - subscription.cancel(); - } catch (Throwable t) { - log.error( - "{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- Exception while trying to cancel failed subscription: {}", - parent.shardId, connectionStartedAt, subscribeToShardId, t.getMessage(), t); - } - } - } - } - - private boolean shouldSubscriptionCancel() { - return this.isDisposed || this.isCancelled || !parent.isActiveFlow(this); - } - - public void request(long n) { - if (subscription != null && !shouldSubscriptionCancel()) { - subscription.request(n); - } - } - - private void recordsReceived(SubscribeToShardEvent event) { - parent.recordsReceived(this, event); - } - } - - @RequiredArgsConstructor - @Slf4j - static class RecordSubscription implements Subscriber { - - private final FanOutRecordsPublisher parent; - private final RecordFlow flow; - private final Instant connectionStartedAt; - private final String subscribeToShardId; - - private Subscription subscription; - - public void request(long n) { - synchronized (parent.lockObject) { - subscription.request(n); - } - } - - public void cancel() { - synchronized (parent.lockObject) { - log.debug("{}: [SubscriptionLifetime]: (RecordSubscription#cancel) @ {} id: {} -- Cancel called", - parent.shardId, connectionStartedAt, subscribeToShardId); - flow.isCancelled = true; - if (subscription != null) { - subscription.cancel(); - } else { - log.debug( - "{}: [SubscriptionLifetime]: (RecordSubscription#cancel) @ {} id: {} -- SDK subscription is null", - parent.shardId, 
connectionStartedAt, subscribeToShardId); - } - } - } - - @Override - public void onSubscribe(Subscription s) { - synchronized (parent.lockObject) { - subscription = s; - - if (flow.shouldSubscriptionCancel()) { - if (flow.isCancelled) { - log.debug( - "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- Subscription was cancelled before onSubscribe", - parent.shardId, connectionStartedAt, subscribeToShardId); - } - if (flow.isDisposed) { - log.debug( - "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- RecordFlow has been disposed cancelling subscribe", - parent.shardId, connectionStartedAt, subscribeToShardId); - } - log.debug( - "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- RecordFlow requires cancelling", - parent.shardId, connectionStartedAt, subscribeToShardId); - cancel(); - } - log.debug( - "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- Outstanding: {} items so requesting an item", - parent.shardId, connectionStartedAt, subscribeToShardId, parent.availableQueueSpace); - if (parent.availableQueueSpace > 0) { - request(1); - } - } - } - - @Override - public void onNext(SubscribeToShardEventStream recordBatchEvent) { - synchronized (parent.lockObject) { - if (flow.shouldSubscriptionCancel()) { - log.debug( - "{}: [SubscriptionLifetime]: (RecordSubscription#onNext) @ {} id: {} -- RecordFlow requires cancelling", - parent.shardId, connectionStartedAt, subscribeToShardId); - cancel(); - return; - } - recordBatchEvent.accept(new SubscribeToShardResponseHandler.Visitor() { - @Override - public void visit(SubscribeToShardEvent event) { - flow.recordsReceived(event); - } - }); - } - } - - @Override - public void onError(Throwable t) { - log.debug("{}: [SubscriptionLifetime]: (RecordSubscription#onError) @ {} id: {} -- {}: {}", parent.shardId, - connectionStartedAt, subscribeToShardId, t.getClass().getName(), t.getMessage()); - - // - // We don't propagate 
the throwable, as the SDK will call - // SubscribeToShardResponseHandler#exceptionOccurred() - // - } - - @Override - public void onComplete() { - log.debug( - "{}: [SubscriptionLifetime]: (RecordSubscription#onComplete) @ {} id: {} -- Allowing RecordFlow to call onComplete", - parent.shardId, connectionStartedAt, subscribeToShardId); - - } - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRetrievalFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRetrievalFactory.java deleted file mode 100644 index eea61250..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRetrievalFactory.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.retrieval.fanout; - -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RetrievalFactory; - -@RequiredArgsConstructor -@KinesisClientInternalApi -public class FanOutRetrievalFactory implements RetrievalFactory { - - private final KinesisAsyncClient kinesisClient; - private final String consumerArn; - - @Override - public GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy(final ShardInfo shardInfo, - final MetricsFactory metricsFactory) { - return null; - } - - @Override - public RecordsPublisher createGetRecordsCache(@NonNull final ShardInfo shardInfo, - final MetricsFactory metricsFactory) { - return new FanOutRecordsPublisher(kinesisClient, shardInfo.shardId(), consumerArn); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategy.java deleted file mode 100644 index bb975652..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategy.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.retrieval.polling; - -import java.util.HashSet; -import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.CompletionService; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorCompletionService; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import lombok.NonNull; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.retrieval.DataFetcherResult; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; - -/** - * - */ -@Slf4j -@KinesisClientInternalApi -public class AsynchronousGetRecordsRetrievalStrategy implements GetRecordsRetrievalStrategy { - private static final int TIME_TO_KEEP_ALIVE = 5; - private static final int CORE_THREAD_POOL_COUNT = 1; - - private final KinesisDataFetcher dataFetcher; - private final ExecutorService executorService; - private final int retryGetRecordsInSeconds; - private final String shardId; - final Supplier> completionServiceSupplier; - - public AsynchronousGetRecordsRetrievalStrategy(@NonNull final KinesisDataFetcher dataFetcher, - final int retryGetRecordsInSeconds, final int maxGetRecordsThreadPool, String shardId) { - 
this(dataFetcher, buildExector(maxGetRecordsThreadPool, shardId), retryGetRecordsInSeconds, shardId); - } - - public AsynchronousGetRecordsRetrievalStrategy(final KinesisDataFetcher dataFetcher, - final ExecutorService executorService, final int retryGetRecordsInSeconds, String shardId) { - this(dataFetcher, executorService, retryGetRecordsInSeconds, () -> new ExecutorCompletionService<>(executorService), - shardId); - } - - AsynchronousGetRecordsRetrievalStrategy(KinesisDataFetcher dataFetcher, ExecutorService executorService, - int retryGetRecordsInSeconds, Supplier> completionServiceSupplier, - String shardId) { - this.dataFetcher = dataFetcher; - this.executorService = executorService; - this.retryGetRecordsInSeconds = retryGetRecordsInSeconds; - this.completionServiceSupplier = completionServiceSupplier; - this.shardId = shardId; - } - - @Override - public GetRecordsResponse getRecords(final int maxRecords) { - if (executorService.isShutdown()) { - throw new IllegalStateException("Strategy has been shutdown"); - } - GetRecordsResponse result = null; - CompletionService completionService = completionServiceSupplier.get(); - Set> futures = new HashSet<>(); - Callable retrieverCall = createRetrieverCallable(); - try { - while (true) { - try { - futures.add(completionService.submit(retrieverCall)); - } catch (RejectedExecutionException e) { - log.warn("Out of resources, unable to start additional requests."); - } - - try { - Future resultFuture = completionService.poll(retryGetRecordsInSeconds, - TimeUnit.SECONDS); - if (resultFuture != null) { - // - // Fix to ensure that we only let the shard iterator advance when we intend to return the result - // to the caller. This ensures that the shard iterator is consistently advance in step with - // what the caller sees. 
- // - result = resultFuture.get().accept(); - break; - } - } catch (ExecutionException e) { - if (e.getCause() instanceof ExpiredIteratorException) { - throw (ExpiredIteratorException) e.getCause(); - } - log.error("ExecutionException thrown while trying to get records", e); - } catch (InterruptedException e) { - log.error("Thread was interrupted", e); - break; - } - } - } finally { - futures.forEach(f -> f.cancel(true)); - } - return result; - } - - private Callable createRetrieverCallable() { - return dataFetcher::getRecords; - } - - @Override - public void shutdown() { - executorService.shutdownNow(); - } - - @Override - public boolean isShutdown() { - return executorService.isShutdown(); - } - - private static ExecutorService buildExector(int maxGetRecordsThreadPool, String shardId) { - String threadNameFormat = "get-records-worker-" + shardId + "-%d"; - return new ThreadPoolExecutor(CORE_THREAD_POOL_COUNT, maxGetRecordsThreadPool, TIME_TO_KEEP_ALIVE, - TimeUnit.SECONDS, new LinkedBlockingQueue<>(1), - new ThreadFactoryBuilder().setDaemon(true).setNameFormat(threadNameFormat).build(), - new ThreadPoolExecutor.AbortPolicy()); - } - - @Override - public KinesisDataFetcher getDataFetcher() { - return dataFetcher; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/BlockingRecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/BlockingRecordsPublisher.java deleted file mode 100644 index 8fd68b80..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/BlockingRecordsPublisher.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.retrieval.polling; - -import java.util.List; -import java.util.stream.Collectors; - -import org.reactivestreams.Subscriber; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; - -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.KinesisClientRecord; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * This is the BlockingRecordsPublisher class. This class blocks any calls to the records on the - * GetRecordsRetrievalStrategy class. 
- */ -@KinesisClientInternalApi -public class BlockingRecordsPublisher implements RecordsPublisher { - private final int maxRecordsPerCall; - private final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy; - - private Subscriber subscriber; - - public BlockingRecordsPublisher(final int maxRecordsPerCall, - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy) { - this.maxRecordsPerCall = maxRecordsPerCall; - this.getRecordsRetrievalStrategy = getRecordsRetrievalStrategy; - } - - @Override - public void start(ExtendedSequenceNumber extendedSequenceNumber, - InitialPositionInStreamExtended initialPositionInStreamExtended) { - // - // Nothing to do here - // - } - - public ProcessRecordsInput getNextResult() { - GetRecordsResponse getRecordsResult = getRecordsRetrievalStrategy.getRecords(maxRecordsPerCall); - List records = getRecordsResult.records().stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); - return ProcessRecordsInput.builder() - .records(records) - .millisBehindLatest(getRecordsResult.millisBehindLatest()) - .build(); - } - - @Override - public void shutdown() { - getRecordsRetrievalStrategy.shutdown(); - } - - @Override - public void subscribe(Subscriber s) { - subscriber = s; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcher.java deleted file mode 100644 index 06843cc6..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcher.java +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.retrieval.polling; - -import java.util.Collections; -import java.util.concurrent.ExecutionException; - -import org.apache.commons.lang3.StringUtils; - -import com.google.common.collect.Iterables; - -import lombok.AccessLevel; -import lombok.Data; -import lombok.Getter; -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; -import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse; -import software.amazon.awssdk.services.kinesis.model.KinesisException; -import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.common.KinesisRequestsBuilder; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.MetricsUtil; -import software.amazon.kinesis.retrieval.AWSExceptionManager; -import software.amazon.kinesis.retrieval.DataFetcherResult; -import software.amazon.kinesis.retrieval.IteratorBuilder; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Used to get data from Amazon Kinesis. Tracks iterator state internally. 
- */ -@RequiredArgsConstructor -@Slf4j -public class KinesisDataFetcher { - private static final String METRICS_PREFIX = "KinesisDataFetcher"; - private static final String OPERATION = "ProcessTask"; - - @NonNull - private final KinesisAsyncClient kinesisClient; - @NonNull - private final String streamName; - @NonNull - private final String shardId; - private final int maxRecords; - @NonNull - private final MetricsFactory metricsFactory; - - /** Note: This method has package level access for testing purposes. - * @return nextIterator - */ - @Getter(AccessLevel.PACKAGE) - private String nextIterator; - @Getter - private boolean isShardEndReached; - private boolean isInitialized; - private String lastKnownSequenceNumber; - private InitialPositionInStreamExtended initialPositionInStream; - - /** - * Get records from the current position in the stream (up to maxRecords). - * - * @return list of records of up to maxRecords size - */ - public DataFetcherResult getRecords() { - if (!isInitialized) { - throw new IllegalArgumentException("KinesisDataFetcher.records called before initialization."); - } - - if (nextIterator != null) { - try { - return new AdvancingResult(getRecords(nextIterator)); - } catch (ResourceNotFoundException e) { - log.info("Caught ResourceNotFoundException when fetching records for shard {}", shardId); - return TERMINAL_RESULT; - } - } else { - return TERMINAL_RESULT; - } - } - - final DataFetcherResult TERMINAL_RESULT = new DataFetcherResult() { - @Override - public GetRecordsResponse getResult() { - return GetRecordsResponse.builder().millisBehindLatest(null).records(Collections.emptyList()) - .nextShardIterator(null).build(); - } - - @Override - public GetRecordsResponse accept() { - isShardEndReached = true; - return getResult(); - } - - @Override - public boolean isShardEnd() { - return isShardEndReached; - } - }; - - @Data - class AdvancingResult implements DataFetcherResult { - - final GetRecordsResponse result; - - @Override - public 
GetRecordsResponse getResult() { - return result; - } - - @Override - public GetRecordsResponse accept() { - nextIterator = result.nextShardIterator(); - if (result.records() != null && !result.records().isEmpty()) { - lastKnownSequenceNumber = Iterables.getLast(result.records()).sequenceNumber(); - } - if (nextIterator == null) { - isShardEndReached = true; - } - return getResult(); - } - - @Override - public boolean isShardEnd() { - return isShardEndReached; - } - } - - /** - * Initializes this KinesisDataFetcher's iterator based on the checkpointed sequence number. - * @param initialCheckpoint Current checkpoint sequence number for this shard. - * @param initialPositionInStream The initialPositionInStream. - */ - public void initialize(final String initialCheckpoint, - final InitialPositionInStreamExtended initialPositionInStream) { - log.info("Initializing shard {} with {}", shardId, initialCheckpoint); - advanceIteratorTo(initialCheckpoint, initialPositionInStream); - isInitialized = true; - } - - public void initialize(final ExtendedSequenceNumber initialCheckpoint, - final InitialPositionInStreamExtended initialPositionInStream) { - log.info("Initializing shard {} with {}", shardId, initialCheckpoint.sequenceNumber()); - advanceIteratorTo(initialCheckpoint.sequenceNumber(), initialPositionInStream); - isInitialized = true; - } - - /** - * Advances this KinesisDataFetcher's internal iterator to be at the passed-in sequence number. - * - * @param sequenceNumber advance the iterator to the record at this sequence number. - * @param initialPositionInStream The initialPositionInStream. 
- */ - public void advanceIteratorTo(final String sequenceNumber, - final InitialPositionInStreamExtended initialPositionInStream) { - if (sequenceNumber == null) { - throw new IllegalArgumentException("SequenceNumber should not be null: shardId " + shardId); - } - - final AWSExceptionManager exceptionManager = createExceptionManager(); - - GetShardIteratorRequest.Builder builder = KinesisRequestsBuilder.getShardIteratorRequestBuilder() - .streamName(streamName).shardId(shardId); - GetShardIteratorRequest request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStream) - .build(); - - // TODO: Check if this metric is fine to be added - final MetricsScope metricsScope = MetricsUtil.createMetricsWithOperation(metricsFactory, OPERATION); - MetricsUtil.addShardId(metricsScope, shardId); - boolean success = false; - long startTime = System.currentTimeMillis(); - - try { - try { - final GetShardIteratorResponse result = kinesisClient.getShardIterator(request).get(); - nextIterator = result.shardIterator(); - success = true; - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: Check behavior - throw new RuntimeException(e); - } - } catch (ResourceNotFoundException e) { - log.info("Caught ResourceNotFoundException when getting an iterator for shard {}", shardId, e); - nextIterator = null; - } finally { - MetricsUtil.addSuccessAndLatency(metricsScope, String.format("%s.%s", METRICS_PREFIX, "getShardIterator"), - success, startTime, MetricsLevel.DETAILED); - MetricsUtil.endScope(metricsScope); - } - - if (nextIterator == null) { - isShardEndReached = true; - } - this.lastKnownSequenceNumber = sequenceNumber; - this.initialPositionInStream = initialPositionInStream; - } - - /** - * Gets a new iterator from the last known sequence number i.e. the sequence number of the last record from the last - * records call. 
- */ - public void restartIterator() { - if (StringUtils.isEmpty(lastKnownSequenceNumber) || initialPositionInStream == null) { - throw new IllegalStateException( - "Make sure to initialize the KinesisDataFetcher before restarting the iterator."); - } - advanceIteratorTo(lastKnownSequenceNumber, initialPositionInStream); - } - - private GetRecordsResponse getRecords(@NonNull final String nextIterator) { - final AWSExceptionManager exceptionManager = createExceptionManager(); - GetRecordsRequest request = KinesisRequestsBuilder.getRecordsRequestBuilder().shardIterator(nextIterator) - .limit(maxRecords).build(); - - final MetricsScope metricsScope = MetricsUtil.createMetricsWithOperation(metricsFactory, OPERATION); - MetricsUtil.addShardId(metricsScope, shardId); - boolean success = false; - long startTime = System.currentTimeMillis(); - try { - final GetRecordsResponse response = kinesisClient.getRecords(request).get(); - success = true; - return response; - } catch (ExecutionException e) { - throw exceptionManager.apply(e.getCause()); - } catch (InterruptedException e) { - // TODO: Check behavior - log.debug("Interrupt called on metod, shutdown initiated"); - throw new RuntimeException(e); - } finally { - MetricsUtil.addSuccessAndLatency(metricsScope, String.format("%s.%s", METRICS_PREFIX, "getRecords"), - success, startTime, MetricsLevel.DETAILED); - MetricsUtil.endScope(metricsScope); - } - } - - private AWSExceptionManager createExceptionManager() { - final AWSExceptionManager exceptionManager = new AWSExceptionManager(); - exceptionManager.add(ResourceNotFoundException.class, t -> t); - exceptionManager.add(KinesisException.class, t -> t); - return exceptionManager; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PollingConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PollingConfig.java deleted file mode 100644 index e9b4e6a2..00000000 --- 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PollingConfig.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.retrieval.polling; - -import java.util.Optional; - -import lombok.Data; -import lombok.Getter; -import lombok.NonNull; -import lombok.experimental.Accessors; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; -import software.amazon.kinesis.retrieval.RecordsFetcherFactory; -import software.amazon.kinesis.retrieval.RetrievalFactory; -import software.amazon.kinesis.retrieval.RetrievalSpecificConfig; - -@Accessors(fluent = true) -@Data -@Getter -public class PollingConfig implements RetrievalSpecificConfig { - - /** - * Name of the Kinesis stream. - * - * @return String - */ - @NonNull - private final String streamName; - - /** - * Client used to access to Kinesis service. - * - * @return {@link KinesisAsyncClient} - */ - @NonNull - private final KinesisAsyncClient kinesisClient; - - /** - * Max records to fetch from Kinesis in a single GetRecords call. - * - *

- * Default value: 10000 - *

- */ - private int maxRecords = 10000; - - /** - * The value for how long the ShardConsumer should sleep if no records are returned from the call to - * {@link KinesisAsyncClient#getRecords(GetRecordsRequest)}. - * - *

- * Default value: 1000L - *

- */ - private long idleTimeBetweenReadsInMillis = 1000L; - - /** - * Time to wait in seconds before the worker retries to get a record. - * - *

- * Default value: {@link Optional#empty()} - *

- */ - private Optional retryGetRecordsInSeconds = Optional.empty(); - - /** - * The max number of threads in the records thread pool. - * - *

- * Default value: {@link Optional#empty()} - *

- */ - private Optional maxGetRecordsThreadPool = Optional.empty(); - - /** - * The factory that creates the RecordsPublisher used to records from Kinesis. - * - *

- * Default value: {@link SimpleRecordsFetcherFactory} - *

- */ - private RecordsFetcherFactory recordsFetcherFactory = new SimpleRecordsFetcherFactory(); - - @Override - public RetrievalFactory retrievalFactory() { - return new SynchronousBlockingRetrievalFactory(streamName(), kinesisClient(), recordsFetcherFactory, - maxRecords()); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java deleted file mode 100644 index 15a564df..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java +++ /dev/null @@ -1,312 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.retrieval.polling; - -import java.time.Duration; -import java.time.Instant; -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; - -import org.apache.commons.lang3.Validate; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -import lombok.NonNull; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.core.exception.SdkClientException; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; -import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.MetricsUtil; -import software.amazon.kinesis.metrics.ThreadSafeMetricsDelegatingFactory; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.KinesisClientRecord; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * This is the prefetch caching class, this class spins up a thread if prefetching is enabled. That thread fetches the - * next set of records and stores it in the cache. The size of the cache is limited by setting - * maxPendingProcessRecordsInput i.e. the maximum number of GetRecordsResult that the cache can store, maxByteSize - * i.e. the byte size of the records stored in the cache and maxRecordsCount i.e. 
the max number of records that should - * be present in the cache across multiple GetRecordsResult object. If no data is available in the cache, the call from - * the record processor is blocked till records are retrieved from Kinesis. - */ -@Slf4j -@KinesisClientInternalApi -public class PrefetchRecordsPublisher implements RecordsPublisher { - private static final String EXPIRED_ITERATOR_METRIC = "ExpiredIterator"; - LinkedBlockingQueue getRecordsResultQueue; - private int maxPendingProcessRecordsInput; - private int maxByteSize; - private int maxRecordsCount; - private final int maxRecordsPerCall; - private final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy; - private final ExecutorService executorService; - private final MetricsFactory metricsFactory; - private final long idleMillisBetweenCalls; - private Instant lastSuccessfulCall; - private final DefaultGetRecordsCacheDaemon defaultGetRecordsCacheDaemon; - private PrefetchCounters prefetchCounters; - private boolean started = false; - private final String operation; - private final KinesisDataFetcher dataFetcher; - private final String shardId; - - private Subscriber subscriber; - private final AtomicLong requestedResponses = new AtomicLong(0); - - /** - * Constructor for the PrefetchRecordsPublisher. This cache prefetches records from Kinesis and stores them in a - * LinkedBlockingQueue. 
- * - * @see PrefetchRecordsPublisher - * - * @param maxPendingProcessRecordsInput Max number of ProcessRecordsInput that can be held in the cache before - * blocking - * @param maxByteSize Max byte size of the queue before blocking next get records call - * @param maxRecordsCount Max number of records in the queue across all ProcessRecordInput objects - * @param maxRecordsPerCall Max records to be returned per call - * @param getRecordsRetrievalStrategy Retrieval strategy for the get records call - * @param executorService Executor service for the cache - * @param idleMillisBetweenCalls maximum time to wait before dispatching the next get records call - */ - public PrefetchRecordsPublisher(final int maxPendingProcessRecordsInput, final int maxByteSize, final int maxRecordsCount, - final int maxRecordsPerCall, - @NonNull final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, - @NonNull final ExecutorService executorService, - final long idleMillisBetweenCalls, - @NonNull final MetricsFactory metricsFactory, - @NonNull final String operation, - @NonNull final String shardId) { - this.getRecordsRetrievalStrategy = getRecordsRetrievalStrategy; - this.maxRecordsPerCall = maxRecordsPerCall; - this.maxPendingProcessRecordsInput = maxPendingProcessRecordsInput; - this.maxByteSize = maxByteSize; - this.maxRecordsCount = maxRecordsCount; - this.getRecordsResultQueue = new LinkedBlockingQueue<>(this.maxPendingProcessRecordsInput); - this.prefetchCounters = new PrefetchCounters(); - this.executorService = executorService; - this.metricsFactory = new ThreadSafeMetricsDelegatingFactory(metricsFactory); - this.idleMillisBetweenCalls = idleMillisBetweenCalls; - this.defaultGetRecordsCacheDaemon = new DefaultGetRecordsCacheDaemon(); - Validate.notEmpty(operation, "Operation cannot be empty"); - this.operation = operation; - this.dataFetcher = this.getRecordsRetrievalStrategy.getDataFetcher(); - this.shardId = shardId; - } - - @Override - public void 
start(ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended) { - if (executorService.isShutdown()) { - throw new IllegalStateException("ExecutorService has been shutdown."); - } - - dataFetcher.initialize(extendedSequenceNumber, initialPositionInStreamExtended); - - if (!started) { - log.info("Starting prefetching thread."); - executorService.execute(defaultGetRecordsCacheDaemon); - } - started = true; - } - - ProcessRecordsInput getNextResult() { - if (executorService.isShutdown()) { - throw new IllegalStateException("Shutdown has been called on the cache, can't accept new requests."); - } - - if (!started) { - throw new IllegalStateException("Cache has not been initialized, make sure to call start."); - } - ProcessRecordsInput result = null; - try { - result = getRecordsResultQueue.take().toBuilder().cacheExitTime(Instant.now()).build(); - prefetchCounters.removed(result); - requestedResponses.decrementAndGet(); - } catch (InterruptedException e) { - log.error("Interrupted while getting records from the cache", e); - } - return result; - } - - @Override - public void shutdown() { - defaultGetRecordsCacheDaemon.isShutdown = true; - executorService.shutdownNow(); - started = false; - } - - @Override - public void subscribe(Subscriber s) { - subscriber = s; - subscriber.onSubscribe(new Subscription() { - @Override - public void request(long n) { - requestedResponses.addAndGet(n); - drainQueueForRequests(); - } - - @Override - public void cancel() { - requestedResponses.set(0); - } - }); - } - - private void addArrivedRecordsInput(ProcessRecordsInput processRecordsInput) throws InterruptedException { - getRecordsResultQueue.put(processRecordsInput); - prefetchCounters.added(processRecordsInput); - } - - private synchronized void drainQueueForRequests() { - while (requestedResponses.get() > 0 && !getRecordsResultQueue.isEmpty()) { - subscriber.onNext(getNextResult()); - } - } - - private class 
DefaultGetRecordsCacheDaemon implements Runnable { - volatile boolean isShutdown = false; - - @Override - public void run() { - while (!isShutdown) { - if (Thread.currentThread().isInterrupted()) { - log.warn("Prefetch thread was interrupted."); - break; - } - MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, operation); - if (prefetchCounters.shouldGetNewRecords()) { - try { - sleepBeforeNextCall(); - GetRecordsResponse getRecordsResult = getRecordsRetrievalStrategy.getRecords(maxRecordsPerCall); - lastSuccessfulCall = Instant.now(); - - final List records = getRecordsResult.records().stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); - ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder() - .records(records) - .millisBehindLatest(getRecordsResult.millisBehindLatest()) - .cacheEntryTime(lastSuccessfulCall) - .isAtShardEnd(getRecordsRetrievalStrategy.getDataFetcher().isShardEndReached()) - .build(); - addArrivedRecordsInput(processRecordsInput); - drainQueueForRequests(); - } catch (InterruptedException e) { - log.info("Thread was interrupted, indicating shutdown was called on the cache."); - } catch (ExpiredIteratorException e) { - log.info("ShardId {}: records threw ExpiredIteratorException - restarting" - + " after greatest seqNum passed to customer", shardId, e); - - scope.addData(EXPIRED_ITERATOR_METRIC, 1, StandardUnit.COUNT, MetricsLevel.SUMMARY); - - dataFetcher.restartIterator(); - } catch (SdkClientException e) { - log.error("Exception thrown while fetching records from Kinesis", e); - } catch (Throwable e) { - log.error("Unexpected exception was thrown. This could probably be an issue or a bug." + - " Please search for the exception/error online to check what is going on. 
If the " + - "issue persists or is a recurring problem, feel free to open an issue on, " + - "https://github.com/awslabs/amazon-kinesis-client.", e); - } finally { - MetricsUtil.endScope(scope); - } - } else { - // - // Consumer isn't ready to receive new records will allow prefetch counters to pause - // - try { - prefetchCounters.waitForConsumer(); - } catch (InterruptedException ie) { - log.info("Thread was interrupted while waiting for the consumer. " + - "Shutdown has probably been started"); - } - } - } - callShutdownOnStrategy(); - } - - private void callShutdownOnStrategy() { - if (!getRecordsRetrievalStrategy.isShutdown()) { - getRecordsRetrievalStrategy.shutdown(); - } - } - - private void sleepBeforeNextCall() throws InterruptedException { - if (lastSuccessfulCall == null) { - return; - } - long timeSinceLastCall = Duration.between(lastSuccessfulCall, Instant.now()).abs().toMillis(); - if (timeSinceLastCall < idleMillisBetweenCalls) { - Thread.sleep(idleMillisBetweenCalls - timeSinceLastCall); - } - } - } - - private class PrefetchCounters { - private long size = 0; - private long byteSize = 0; - - public synchronized void added(final ProcessRecordsInput result) { - size += getSize(result); - byteSize += getByteSize(result); - } - - public synchronized void removed(final ProcessRecordsInput result) { - size -= getSize(result); - byteSize -= getByteSize(result); - this.notifyAll(); - } - - private long getSize(final ProcessRecordsInput result) { - return result.records().size(); - } - - private long getByteSize(final ProcessRecordsInput result) { - return result.records().stream().mapToLong(record -> record.data().limit()).sum(); - } - - public synchronized void waitForConsumer() throws InterruptedException { - if (!shouldGetNewRecords()) { - log.debug("Queue is full waiting for consumer for {} ms", idleMillisBetweenCalls); - this.wait(idleMillisBetweenCalls); - } - } - - public synchronized boolean shouldGetNewRecords() { - if (log.isDebugEnabled()) { - 
log.debug("Current Prefetch Counter States: {}", this.toString()); - } - return size < maxRecordsCount && byteSize < maxByteSize; - } - - @Override - public String toString() { - return String.format("{ Requests: %d, Records: %d, Bytes: %d }", getRecordsResultQueue.size(), size, - byteSize); - } - } - -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SimpleRecordsFetcherFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SimpleRecordsFetcherFactory.java deleted file mode 100644 index dcf1367c..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SimpleRecordsFetcherFactory.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.retrieval.polling; - -import java.util.concurrent.Executors; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.retrieval.DataFetchingStrategy; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.RecordsFetcherFactory; -import software.amazon.kinesis.retrieval.RecordsPublisher; - -@Slf4j -@KinesisClientInternalApi -public class SimpleRecordsFetcherFactory implements RecordsFetcherFactory { - private int maxPendingProcessRecordsInput = 3; - private int maxByteSize = 8 * 1024 * 1024; - private int maxRecordsCount = 30000; - private long idleMillisBetweenCalls = 1500L; - private DataFetchingStrategy dataFetchingStrategy = DataFetchingStrategy.DEFAULT; - - @Override - public RecordsPublisher createRecordsFetcher(GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, String shardId, - MetricsFactory metricsFactory, int maxRecords) { - - return new PrefetchRecordsPublisher(maxPendingProcessRecordsInput, maxByteSize, maxRecordsCount, maxRecords, - getRecordsRetrievalStrategy, - Executors - .newFixedThreadPool(1, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("prefetch-cache-" + shardId + "-%04d").build()), - idleMillisBetweenCalls, metricsFactory, "ProcessTask", shardId); - - } - - @Override - public void maxPendingProcessRecordsInput(int maxPendingProcessRecordsInput){ - this.maxPendingProcessRecordsInput = maxPendingProcessRecordsInput; - } - - @Override - public void maxByteSize(int maxByteSize){ - this.maxByteSize = maxByteSize; - } - - @Override - public void maxRecordsCount(int maxRecordsCount) { - this.maxRecordsCount = maxRecordsCount; - } - - @Override - public void dataFetchingStrategy(DataFetchingStrategy dataFetchingStrategy){ - 
this.dataFetchingStrategy = dataFetchingStrategy; - } - - @Override - public void idleMillisBetweenCalls(final long idleMillisBetweenCalls) { - this.idleMillisBetweenCalls = idleMillisBetweenCalls; - } - - @Override - public int maxPendingProcessRecordsInput() { - return maxPendingProcessRecordsInput; - } - - @Override - public int maxByteSize() { - return maxByteSize; - } - - @Override - public int maxRecordsCount() { - return maxRecordsCount; - } - - @Override - public DataFetchingStrategy dataFetchingStrategy() { - return dataFetchingStrategy; - } - - @Override - public long idleMillisBetweenCalls() { - return idleMillisBetweenCalls; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousBlockingRetrievalFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousBlockingRetrievalFactory.java deleted file mode 100644 index 1e827fc9..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousBlockingRetrievalFactory.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Amazon Software License - * (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at - * http://aws.amazon.com/asl/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.retrieval.polling; - -import lombok.Data; -import lombok.NonNull; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.RecordsFetcherFactory; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RetrievalFactory; - -/** - * - */ -@Data -@KinesisClientInternalApi -public class SynchronousBlockingRetrievalFactory implements RetrievalFactory { - @NonNull - private final String streamName; - @NonNull - private final KinesisAsyncClient kinesisClient; - @NonNull - private final RecordsFetcherFactory recordsFetcherFactory; - // private final long listShardsBackoffTimeInMillis; - // private final int maxListShardsRetryAttempts; - private final int maxRecords; - - @Override - public GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy(@NonNull final ShardInfo shardInfo, - @NonNull final MetricsFactory metricsFactory) { - return new SynchronousGetRecordsRetrievalStrategy( - new KinesisDataFetcher(kinesisClient, streamName, shardInfo.shardId(), maxRecords, metricsFactory)); - } - - @Override - public RecordsPublisher createGetRecordsCache(@NonNull final ShardInfo shardInfo, - @NonNull final MetricsFactory metricsFactory) { - return recordsFetcherFactory.createRecordsFetcher(createGetRecordsRetrievalStrategy(shardInfo, metricsFactory), - shardInfo.shardId(), metricsFactory, maxRecords); - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousGetRecordsRetrievalStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousGetRecordsRetrievalStrategy.java deleted file mode 100644 index 
933e9318..00000000 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousGetRecordsRetrievalStrategy.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.retrieval.polling; - -import lombok.Data; -import lombok.NonNull; -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; - -/** - * - */ -@Data -@KinesisClientInternalApi -public class SynchronousGetRecordsRetrievalStrategy implements GetRecordsRetrievalStrategy { - @NonNull - private final KinesisDataFetcher dataFetcher; - - @Override - public GetRecordsResponse getRecords(final int maxRecords) { - return dataFetcher.getRecords().accept(); - } - - @Override - public void shutdown() { - // - // Does nothing as this retriever doesn't manage any resources - // - } - - @Override - public boolean isShutdown() { - return false; - } - - @Override - public KinesisDataFetcher getDataFetcher() { - return dataFetcher; - } -} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousPrefetchingRetrievalFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousPrefetchingRetrievalFactory.java deleted file mode 100644 index be69618d..00000000 --- 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousPrefetchingRetrievalFactory.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.retrieval.polling; - -import java.util.concurrent.ExecutorService; - -import lombok.NonNull; -import lombok.RequiredArgsConstructor; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.RecordsFetcherFactory; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RetrievalFactory; - -/** - * - */ -@RequiredArgsConstructor -@KinesisClientInternalApi -public class SynchronousPrefetchingRetrievalFactory implements RetrievalFactory { - @NonNull - private final String streamName; - @NonNull - private final KinesisAsyncClient kinesisClient; - @NonNull - private final RecordsFetcherFactory recordsFetcherFactory; - private final int maxRecords; - @NonNull - private final ExecutorService executorService; - private final long idleMillisBetweenCalls; - - @Override - public GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy(@NonNull final ShardInfo shardInfo, - 
@NonNull final MetricsFactory metricsFactory) { - return new SynchronousGetRecordsRetrievalStrategy( - new KinesisDataFetcher(kinesisClient, streamName, shardInfo.shardId(), maxRecords, metricsFactory)); - } - - @Override - public RecordsPublisher createGetRecordsCache(@NonNull final ShardInfo shardInfo, - @NonNull final MetricsFactory metricsFactory) { - return new PrefetchRecordsPublisher(recordsFetcherFactory.maxPendingProcessRecordsInput(), - recordsFetcherFactory.maxByteSize(), - recordsFetcherFactory.maxRecordsCount(), - maxRecords, - createGetRecordsRetrievalStrategy(shardInfo, metricsFactory), - executorService, - idleMillisBetweenCalls, - metricsFactory, - "Prefetching", - shardInfo.shardId()); - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/CheckpointerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/CheckpointerTest.java deleted file mode 100644 index bfa3978f..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/CheckpointerTest.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.checkpoint; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Base class for unit testing checkpoint implementations. - * This class has tests common to InMemory and FileBased implementations. - */ -public class CheckpointerTest { - - private final String testConcurrencyToken = "testToken"; - private Checkpointer checkpoint; - - @Before - public void setup() { - checkpoint = new InMemoryCheckpointer(); - } - - @Test - public final void testInitialSetCheckpoint() throws Exception { - String sequenceNumber = "1"; - String shardId = "myShardId"; - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); - checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); - ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId); - Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint); - } - - @Test - public final void testAdvancingSetCheckpoint() throws Exception { - String shardId = "myShardId"; - for (Integer i = 0; i < 10; i++) { - String sequenceNumber = i.toString(); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); - checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); - ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId); - Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint); - } - } - - /** - * Test method to verify checkpoint and checkpoint methods. 
- * - * @throws Exception - */ - @Test - public final void testSetAndGetCheckpoint() throws Exception { - String checkpointValue = "12345"; - String shardId = "testShardId-1"; - String concurrencyToken = "token-1"; - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(checkpointValue); - checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken); - Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - } - - @Test - public final void testInitialPrepareCheckpoint() throws Exception { - String sequenceNumber = "1"; - String pendingCheckpointValue = "99999"; - String shardId = "myShardId"; - ExtendedSequenceNumber extendedCheckpointNumber = new ExtendedSequenceNumber(sequenceNumber); - checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); - - ExtendedSequenceNumber extendedPendingCheckpointNumber = new ExtendedSequenceNumber(pendingCheckpointValue); - checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), testConcurrencyToken); - - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - } - - @Test - public final void testAdvancingPrepareCheckpoint() throws Exception { - String shardId = "myShardId"; - String checkpointValue = "12345"; - ExtendedSequenceNumber extendedCheckpointNumber = new ExtendedSequenceNumber(checkpointValue); - checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), testConcurrencyToken); - - for (Integer i = 0; i < 10; i++) { 
- String sequenceNumber = i.toString(); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); - checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - } - } - - @Test - public final void testPrepareAndSetCheckpoint() throws Exception { - String checkpointValue = "12345"; - String shardId = "testShardId-1"; - String concurrencyToken = "token-1"; - String pendingCheckpointValue = "99999"; - - // set initial checkpoint - ExtendedSequenceNumber extendedCheckpointNumber = new ExtendedSequenceNumber(checkpointValue); - checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - - // prepare checkpoint - ExtendedSequenceNumber extendedPendingCheckpointNumber = new ExtendedSequenceNumber(pendingCheckpointValue); - checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - - // do checkpoint - checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken); - 
Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/InMemoryCheckpointer.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/InMemoryCheckpointer.java deleted file mode 100644 index 85e30ebe..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/InMemoryCheckpointer.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.checkpoint; - -import java.util.HashMap; -import java.util.Map; - -import software.amazon.kinesis.exceptions.KinesisClientLibException; -import software.amazon.kinesis.checkpoint.Checkpoint; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -import lombok.extern.slf4j.Slf4j; - -/** - * Everything is stored in memory and there is no fault-tolerance. 
- */ -@Slf4j -public class InMemoryCheckpointer implements Checkpointer { - private Map checkpoints = new HashMap<>(); - private Map flushpoints = new HashMap<>(); - private Map pendingCheckpoints = new HashMap<>(); - - private String operation; - - /** - * {@inheritDoc} - */ - @Override - public void setCheckpoint(String shardId, ExtendedSequenceNumber checkpointValue, String concurrencyToken) - throws KinesisClientLibException { - checkpoints.put(shardId, checkpointValue); - flushpoints.put(shardId, checkpointValue); - pendingCheckpoints.remove(shardId); - - if (log.isDebugEnabled()) { - log.debug("shardId: {} checkpoint: {}", shardId, checkpointValue); - } - - } - - /** - * {@inheritDoc} - */ - @Override - public ExtendedSequenceNumber getCheckpoint(String shardId) throws KinesisClientLibException { - ExtendedSequenceNumber checkpoint = flushpoints.get(shardId); - log.debug("checkpoint shardId: {} checkpoint: {}", shardId, checkpoint); - return checkpoint; - } - - @Override - public void prepareCheckpoint(String shardId, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken) - throws KinesisClientLibException { - pendingCheckpoints.put(shardId, pendingCheckpoint); - } - - @Override - public Checkpoint getCheckpointObject(String shardId) throws KinesisClientLibException { - ExtendedSequenceNumber checkpoint = flushpoints.get(shardId); - ExtendedSequenceNumber pendingCheckpoint = pendingCheckpoints.get(shardId); - - Checkpoint checkpointObj = new Checkpoint(checkpoint, pendingCheckpoint); - log.debug("getCheckpointObject shardId: {}, {}", shardId, checkpointObj); - return checkpointObj; - } - - @Override - public void operation(final String operation) { - this.operation = operation; - } - - @Override - public String operation() { - return operation; - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/SequenceNumberValidatorTest.java 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/SequenceNumberValidatorTest.java deleted file mode 100644 index a2ed3208..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/SequenceNumberValidatorTest.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.checkpoint; - -import org.junit.Before; -import org.junit.Test; - -import java.util.Optional; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.not; -import static org.junit.Assert.assertThat; - -public class SequenceNumberValidatorTest { - - private SequenceNumberValidator validator; - - @Before - public void begin() { - validator = new SequenceNumberValidator(); - } - - - @Test - public void matchingSequenceNumberTest() { - String sequenceNumber = "49587497311274533994574834252742144236107130636007899138"; - String expectedShardId = "shardId-000000000000"; - - Optional version = validator.versionFor(sequenceNumber); - assertThat(version, equalTo(Optional.of(2))); - - Optional shardId = validator.shardIdFor(sequenceNumber); - assertThat(shardId, equalTo(Optional.of(expectedShardId))); - - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.of(true))); - } - - @Test - public void shardMismatchTest() { - String sequenceNumber = 
"49585389983312162443796657944872008114154899568972529698"; - String invalidShardId = "shardId-000000000001"; - - Optional version = validator.versionFor(sequenceNumber); - assertThat(version, equalTo(Optional.of(2))); - - Optional shardId = validator.shardIdFor(sequenceNumber); - assertThat(shardId, not(equalTo(invalidShardId))); - - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, invalidShardId), equalTo(Optional.of(false))); - } - - @Test - public void versionMismatchTest() { - String sequenceNumber = "74107425965128755728308386687147091174006956590945533954"; - String expectedShardId = "shardId-000000000000"; - - Optional version = validator.versionFor(sequenceNumber); - assertThat(version, equalTo(Optional.empty())); - - Optional shardId = validator.shardIdFor(sequenceNumber); - assertThat(shardId, equalTo(Optional.empty())); - - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); - } - - @Test - public void sequenceNumberToShortTest() { - String sequenceNumber = "4958538998331216244379665794487200811415489956897252969"; - String expectedShardId = "shardId-000000000000"; - - assertThat(validator.versionFor(sequenceNumber), equalTo(Optional.empty())); - assertThat(validator.shardIdFor(sequenceNumber), equalTo(Optional.empty())); - - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); - } - - @Test - public void sequenceNumberToLongTest() { - String sequenceNumber = "495874973112745339945748342527421442361071306360078991381"; - String expectedShardId = "shardId-000000000000"; - - assertThat(validator.versionFor(sequenceNumber), equalTo(Optional.empty())); - assertThat(validator.shardIdFor(sequenceNumber), equalTo(Optional.empty())); - - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); - } - - -} diff --git 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointerTest.java deleted file mode 100644 index e263a14d..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointerTest.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.checkpoint; - -import software.amazon.kinesis.processor.PreparedCheckpointer; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -public class ShardPreparedCheckpointerTest { - - /** - * This test verifies the relationship between the constructor and pendingCheckpoint. - */ - @Test - public void testGetSequenceNumber() { - ExtendedSequenceNumber sn = new ExtendedSequenceNumber("sn"); - PreparedCheckpointer checkpointer = new ShardPreparedCheckpointer(sn, null); - Assert.assertEquals(sn, checkpointer.pendingCheckpoint()); - } - - /** - * This test makes sure the PreparedCheckpointer calls the RecordProcessorCheckpointer properly. 
- * - * @throws Exception - */ - @Test - public void testCheckpoint() throws Exception { - ExtendedSequenceNumber sn = new ExtendedSequenceNumber("sn"); - RecordProcessorCheckpointer mockRecordProcessorCheckpointer = Mockito.mock(RecordProcessorCheckpointer.class); - PreparedCheckpointer checkpointer = new ShardPreparedCheckpointer(sn, mockRecordProcessorCheckpointer); - checkpointer.checkpoint(); - Mockito.verify(mockRecordProcessorCheckpointer).checkpoint(sn.sequenceNumber(), sn.subSequenceNumber()); - } - - /** - * This test makes sure the PreparedCheckpointer calls the RecordProcessorCheckpointer properly. - * - * @throws Exception - */ - @Test - public void testDoesNothingPreparedCheckpoint() throws Exception { - ExtendedSequenceNumber sn = new ExtendedSequenceNumber("sn"); - PreparedCheckpointer checkpointer = new DoesNothingPreparedCheckpointer(sn); - Assert.assertEquals(sn, checkpointer.pendingCheckpoint()); - // nothing happens here - checkpointer.checkpoint(); - } -} \ No newline at end of file diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardShardRecordProcessorCheckpointerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardShardRecordProcessorCheckpointerTest.java deleted file mode 100644 index c46a8572..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardShardRecordProcessorCheckpointerTest.java +++ /dev/null @@ -1,810 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.checkpoint; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map.Entry; - -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.awssdk.services.kinesis.model.Record; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.processor.PreparedCheckpointer; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * - */ -@RunWith(MockitoJUnitRunner.class) -public class ShardShardRecordProcessorCheckpointerTest { - private String startingSequenceNumber = "13"; - private ExtendedSequenceNumber startingExtendedSequenceNumber = new ExtendedSequenceNumber(startingSequenceNumber); - private String testConcurrencyToken = "testToken"; - private Checkpointer checkpoint; - private ShardInfo shardInfo; - private String shardId = "shardId-123"; - - /** - * @throws java.lang.Exception - */ - @Before - public void setup() throws Exception { - checkpoint = new InMemoryCheckpointer(); - // A real checkpoint will return a checkpoint value after it is initialized. - checkpoint.setCheckpoint(shardId, startingExtendedSequenceNumber, testConcurrencyToken); - assertThat(this.startingExtendedSequenceNumber, equalTo(checkpoint.getCheckpoint(shardId))); - - shardInfo = new ShardInfo(shardId, testConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - } - - /** - * Test method for - * {@link ShardRecordProcessorCheckpointer#checkpoint()}. 
- */ - @Test - public final void testCheckpoint() throws Exception { - // First call to checkpoint - ShardRecordProcessorCheckpointer processingCheckpointer = - new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.largestPermittedCheckpointValue(startingExtendedSequenceNumber); - processingCheckpointer.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(startingExtendedSequenceNumber)); - - // Advance checkpoint - ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("5019"); - - processingCheckpointer.largestPermittedCheckpointValue(sequenceNumber); - processingCheckpointer.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(sequenceNumber)); - } - - private Record makeRecord(String seqNum) { - return Record.builder().sequenceNumber(seqNum).build(); - } - - /** - * Test method for - * {@link ShardRecordProcessorCheckpointer#checkpoint(Record record)}. - */ - @Test - public final void testCheckpointRecord() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = - new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5025"); - Record record = makeRecord("5025"); - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - processingCheckpointer.checkpoint(record); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); - } - - /** - * Test method for - * {@link ShardRecordProcessorCheckpointer#checkpoint(Record record)}. 
- */ - @Test - public final void testCheckpointSubRecord() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = - new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5030"); - Record record = makeRecord("5030"); - //UserRecord subRecord = new UserRecord(record); - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - processingCheckpointer.checkpoint(record); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); - } - - /** - * Test method for - * {@link ShardRecordProcessorCheckpointer#checkpoint(String sequenceNumber)}. - */ - @Test - public final void testCheckpointSequenceNumber() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = - new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5035"); - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - processingCheckpointer.checkpoint("5035"); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); - } - - /** - * Test method for - * {@link ShardRecordProcessorCheckpointer#checkpoint(String sequenceNumber, long subSequenceNumber)}. 
- */ - @Test - public final void testCheckpointExtendedSequenceNumber() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = - new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5040"); - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - processingCheckpointer.checkpoint("5040", 0); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); - } - - /** - * Test method for {@link ShardRecordProcessorCheckpointer#checkpoint(String SHARD_END)}. - */ - @Test - public final void testCheckpointAtShardEnd() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = - new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = ExtendedSequenceNumber.SHARD_END; - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - processingCheckpointer.checkpoint(ExtendedSequenceNumber.SHARD_END.sequenceNumber()); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); - } - - - /** - * Test method for - * {@link ShardRecordProcessorCheckpointer#prepareCheckpoint()}. 
- */ - @Test - public final void testPrepareCheckpoint() throws Exception { - // First call to checkpoint - ShardRecordProcessorCheckpointer processingCheckpointer = - new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - - ExtendedSequenceNumber sequenceNumber1 = new ExtendedSequenceNumber("5001"); - processingCheckpointer.largestPermittedCheckpointValue(sequenceNumber1); - PreparedCheckpointer preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); - assertThat(preparedCheckpoint.pendingCheckpoint(), equalTo(sequenceNumber1)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(sequenceNumber1)); - - // Advance checkpoint - ExtendedSequenceNumber sequenceNumber2 = new ExtendedSequenceNumber("5019"); - - processingCheckpointer.largestPermittedCheckpointValue(sequenceNumber2); - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); - assertThat(preparedCheckpoint.pendingCheckpoint(), equalTo(sequenceNumber2)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(sequenceNumber2)); - - // Checkpoint using preparedCheckpoint - preparedCheckpoint.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(sequenceNumber2)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(sequenceNumber2)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - } - - /** - * Test method for - * {@link ShardRecordProcessorCheckpointer#prepareCheckpoint(Record record)}. 
- */ - @Test - public final void testPrepareCheckpointRecord() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = - new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5025"); - Record record = makeRecord("5025"); - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - PreparedCheckpointer preparedCheckpoint = processingCheckpointer.prepareCheckpoint(record); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(startingExtendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(startingExtendedSequenceNumber)); - assertThat(preparedCheckpoint.pendingCheckpoint(), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(extendedSequenceNumber)); - - // Checkpoint using preparedCheckpoint - preparedCheckpoint.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - } - - /** - * Test method for - * {@link ShardRecordProcessorCheckpointer#prepareCheckpoint(Record record)}. 
- */ - @Test - public final void testPrepareCheckpointSubRecord() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = - new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5030"); - Record record = makeRecord("5030"); - //UserRecord subRecord = new UserRecord(record); - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - PreparedCheckpointer preparedCheckpoint = processingCheckpointer.prepareCheckpoint(record); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(startingExtendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(startingExtendedSequenceNumber)); - assertThat(preparedCheckpoint.pendingCheckpoint(), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(extendedSequenceNumber)); - - // Checkpoint using preparedCheckpoint - preparedCheckpoint.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - } - - /** - * Test method for - * {@link ShardRecordProcessorCheckpointer#checkpoint(String sequenceNumber)}. 
- */ - @Test - public final void testPrepareCheckpointSequenceNumber() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5035"); - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - PreparedCheckpointer preparedCheckpoint = processingCheckpointer.prepareCheckpoint("5035"); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(startingExtendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(startingExtendedSequenceNumber)); - assertThat(preparedCheckpoint.pendingCheckpoint(), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(extendedSequenceNumber)); - - // Checkpoint using preparedCheckpoint - preparedCheckpoint.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - } - - /** - * Test method for - * {@link ShardRecordProcessorCheckpointer#checkpoint(String sequenceNumber, long subSequenceNumber)}. 
- */ - @Test - public final void testPrepareCheckpointExtendedSequenceNumber() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5040"); - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - PreparedCheckpointer preparedCheckpoint = processingCheckpointer.prepareCheckpoint("5040", 0); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(startingExtendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(startingExtendedSequenceNumber)); - assertThat(preparedCheckpoint.pendingCheckpoint(), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(extendedSequenceNumber)); - - // Checkpoint using preparedCheckpoint - preparedCheckpoint.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - } - - /** - * Test method for {@link ShardRecordProcessorCheckpointer#checkpoint(String SHARD_END)}. 
- */ - @Test - public final void testPrepareCheckpointAtShardEnd() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - ExtendedSequenceNumber extendedSequenceNumber = ExtendedSequenceNumber.SHARD_END; - processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - PreparedCheckpointer preparedCheckpoint = processingCheckpointer.prepareCheckpoint(ExtendedSequenceNumber.SHARD_END.sequenceNumber()); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(startingExtendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(startingExtendedSequenceNumber)); - assertThat(preparedCheckpoint.pendingCheckpoint(), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(extendedSequenceNumber)); - - // Checkpoint using preparedCheckpoint - preparedCheckpoint.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(extendedSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - } - - - /** - * Test that having multiple outstanding prepared checkpointers works if they are redeemed in the right order. 
- */ - @Test - public final void testMultipleOutstandingCheckpointersHappyCase() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber("6040")); - - ExtendedSequenceNumber sn1 = new ExtendedSequenceNumber("6010"); - PreparedCheckpointer firstPreparedCheckpoint = processingCheckpointer.prepareCheckpoint("6010", 0); - assertThat(firstPreparedCheckpoint.pendingCheckpoint(), equalTo(sn1)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(sn1)); - - ExtendedSequenceNumber sn2 = new ExtendedSequenceNumber("6020"); - PreparedCheckpointer secondPreparedCheckpoint = processingCheckpointer.prepareCheckpoint("6020", 0); - assertThat(secondPreparedCheckpoint.pendingCheckpoint(), equalTo(sn2)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(sn2)); - - // checkpoint in order - firstPreparedCheckpoint.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(sn1)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(sn1)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - - secondPreparedCheckpoint.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(sn2)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(sn2)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - } - - /** - * Test that having multiple outstanding prepared checkpointers works if they are redeemed in the right order. 
- */ - @Test - public final void testMultipleOutstandingCheckpointersOutOfOrder() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); - processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber("7040")); - - ExtendedSequenceNumber sn1 = new ExtendedSequenceNumber("7010"); - PreparedCheckpointer firstPreparedCheckpoint = processingCheckpointer.prepareCheckpoint("7010", 0); - assertThat(firstPreparedCheckpoint.pendingCheckpoint(), equalTo(sn1)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(sn1)); - - ExtendedSequenceNumber sn2 = new ExtendedSequenceNumber("7020"); - PreparedCheckpointer secondPreparedCheckpoint = processingCheckpointer.prepareCheckpoint("7020", 0); - assertThat(secondPreparedCheckpoint.pendingCheckpoint(), equalTo(sn2)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(sn2)); - - // checkpoint out of order - secondPreparedCheckpoint.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(sn2)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(sn2)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - - try { - firstPreparedCheckpoint.checkpoint(); - fail("checkpoint() should have failed because the sequence number was too low"); - } catch (IllegalArgumentException e) { - } catch (Exception e) { - fail("checkpoint() should have thrown an IllegalArgumentException but instead threw " + e); - } - } - - /** - * Test method for update() - * - */ - @Test - public final void testUpdate() throws Exception { - ShardRecordProcessorCheckpointer checkpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - - ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("10"); - 
checkpointer.largestPermittedCheckpointValue(sequenceNumber); - assertThat(checkpointer.largestPermittedCheckpointValue(), equalTo(sequenceNumber)); - - sequenceNumber = new ExtendedSequenceNumber("90259185948592875928375908214918273491783097"); - checkpointer.largestPermittedCheckpointValue(sequenceNumber); - assertThat(checkpointer.largestPermittedCheckpointValue(), equalTo(sequenceNumber)); - } - - /* - * This test is a mixed test of checking some basic functionality of checkpointing at a sequence number and making - * sure certain bounds checks and validations are being performed inside the checkpointer to prevent clients from - * checkpointing out of order/too big/non-numeric values that aren't valid strings for them to be checkpointing - */ - @Test - public final void testClientSpecifiedCheckpoint() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - - // Several checkpoints we're gonna hit - ExtendedSequenceNumber tooSmall = new ExtendedSequenceNumber("2"); - ExtendedSequenceNumber firstSequenceNumber = checkpoint.getCheckpoint(shardId); // 13 - ExtendedSequenceNumber secondSequenceNumber = new ExtendedSequenceNumber("127"); - ExtendedSequenceNumber thirdSequenceNumber = new ExtendedSequenceNumber("5019"); - ExtendedSequenceNumber lastSequenceNumberOfShard = new ExtendedSequenceNumber("6789"); - ExtendedSequenceNumber tooBigSequenceNumber = new ExtendedSequenceNumber("9000"); - - processingCheckpointer.setInitialCheckpointValue(firstSequenceNumber); - processingCheckpointer.largestPermittedCheckpointValue(thirdSequenceNumber); - - // confirm that we cannot move backward - try { - processingCheckpointer.checkpoint(tooSmall.sequenceNumber(), tooSmall.subSequenceNumber()); - fail("You shouldn't be able to checkpoint earlier than the initial checkpoint."); - } catch (IllegalArgumentException e) { - // yay! 
- } - - // advance to first - processingCheckpointer.checkpoint(firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); - processingCheckpointer.checkpoint(firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); - - // advance to second - processingCheckpointer.checkpoint(secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(secondSequenceNumber)); - - ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt = - { tooSmall, // Shouldn't be able to move before the first value we ever checkpointed - firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number - tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer - lastSequenceNumberOfShard, // Just another big value that we will use later - null, // Not a valid sequence number - new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string - ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max - ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value - ExtendedSequenceNumber.LATEST // Can't go back to an initial sentinel value - }; - for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) { - try { - processingCheckpointer.checkpoint(badCheckpointValue.sequenceNumber(), badCheckpointValue.subSequenceNumber()); - fail("checkpointing at bad or out of order sequence didn't throw exception"); - } catch (IllegalArgumentException e) { - - } catch (NullPointerException e) { - - } - assertThat("Checkpoint value should not have changed", checkpoint.getCheckpoint(shardId), - equalTo(secondSequenceNumber)); - assertThat("Last checkpoint value should not 
have changed", processingCheckpointer.lastCheckpointValue(), - equalTo(secondSequenceNumber)); - assertThat("Largest sequence number should not have changed", - processingCheckpointer.largestPermittedCheckpointValue(), equalTo(thirdSequenceNumber)); - } - - // advance to third number - processingCheckpointer.checkpoint(thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(thirdSequenceNumber)); - - // Testing a feature that prevents checkpointing at SHARD_END twice - processingCheckpointer.largestPermittedCheckpointValue(lastSequenceNumberOfShard); - processingCheckpointer.sequenceNumberAtShardEnd(processingCheckpointer.largestPermittedCheckpointValue()); - processingCheckpointer.largestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END); - processingCheckpointer.checkpoint(lastSequenceNumberOfShard.sequenceNumber(), lastSequenceNumberOfShard.subSequenceNumber()); - assertThat( - "Checkpoing at the sequence number at the end of a shard should be the same as checkpointing at SHARD_END", - processingCheckpointer.lastCheckpointValue(), equalTo(ExtendedSequenceNumber.SHARD_END)); - } - - /* - * This test is a mixed test of checking some basic functionality of two phase checkpointing at a sequence number - * and making sure certain bounds checks and validations are being performed inside the checkpointer to prevent - * clients from checkpointing out of order/too big/non-numeric values that aren't valid strings for them to be - * checkpointing - */ - @Test - public final void testClientSpecifiedTwoPhaseCheckpoint() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - - // Several checkpoints we're gonna hit - ExtendedSequenceNumber tooSmall = new ExtendedSequenceNumber("2"); - ExtendedSequenceNumber firstSequenceNumber = checkpoint.getCheckpoint(shardId); // 13 - ExtendedSequenceNumber 
secondSequenceNumber = new ExtendedSequenceNumber("127"); - ExtendedSequenceNumber thirdSequenceNumber = new ExtendedSequenceNumber("5019"); - ExtendedSequenceNumber lastSequenceNumberOfShard = new ExtendedSequenceNumber("6789"); - ExtendedSequenceNumber tooBigSequenceNumber = new ExtendedSequenceNumber("9000"); - - processingCheckpointer.setInitialCheckpointValue(firstSequenceNumber); - processingCheckpointer.largestPermittedCheckpointValue(thirdSequenceNumber); - - // confirm that we cannot move backward - try { - processingCheckpointer.prepareCheckpoint(tooSmall.sequenceNumber(), tooSmall.subSequenceNumber()); - fail("You shouldn't be able to prepare a checkpoint earlier than the initial checkpoint."); - } catch (IllegalArgumentException e) { - // yay! - } - - try { - processingCheckpointer.checkpoint(tooSmall.sequenceNumber(), tooSmall.subSequenceNumber()); - fail("You shouldn't be able to checkpoint earlier than the initial checkpoint."); - } catch (IllegalArgumentException e) { - // yay! 
- } - - // advance to first - processingCheckpointer.checkpoint(firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); - - // prepare checkpoint at initial checkpoint value - PreparedCheckpointer doesNothingPreparedCheckpoint = - processingCheckpointer.prepareCheckpoint(firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); - assertThat(doesNothingPreparedCheckpoint instanceof DoesNothingPreparedCheckpointer, equalTo(true)); - assertThat(doesNothingPreparedCheckpoint.pendingCheckpoint(), equalTo(firstSequenceNumber)); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(firstSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - - // nothing happens after checkpointing a doesNothingPreparedCheckpoint - doesNothingPreparedCheckpoint.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(firstSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - - // advance to second - processingCheckpointer.prepareCheckpoint(secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(secondSequenceNumber)); - processingCheckpointer.checkpoint(secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(secondSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - - ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt = - { tooSmall, // Shouldn't be able to move before the first value we ever 
checkpointed - firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number - tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer - lastSequenceNumberOfShard, // Just another big value that we will use later - null, // Not a valid sequence number - new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string - ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max - ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value - ExtendedSequenceNumber.LATEST // Can't go back to an initial sentinel value - }; - for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) { - try { - processingCheckpointer.prepareCheckpoint(badCheckpointValue.sequenceNumber(), badCheckpointValue.subSequenceNumber()); - fail("checkpointing at bad or out of order sequence didn't throw exception"); - } catch (IllegalArgumentException e) { - - } catch (NullPointerException e) { - - } - assertThat("Checkpoint value should not have changed", checkpoint.getCheckpoint(shardId), - equalTo(secondSequenceNumber)); - assertThat("Last checkpoint value should not have changed", processingCheckpointer.lastCheckpointValue(), - equalTo(secondSequenceNumber)); - assertThat("Largest sequence number should not have changed", - processingCheckpointer.largestPermittedCheckpointValue(), equalTo(thirdSequenceNumber)); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - - } - - // advance to third number - processingCheckpointer.prepareCheckpoint(thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(thirdSequenceNumber)); - processingCheckpointer.checkpoint(thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); - assertThat(checkpoint.getCheckpoint(shardId), 
equalTo(thirdSequenceNumber)); - - // Testing a feature that prevents checkpointing at SHARD_END twice - processingCheckpointer.largestPermittedCheckpointValue(lastSequenceNumberOfShard); - processingCheckpointer.sequenceNumberAtShardEnd(processingCheckpointer.largestPermittedCheckpointValue()); - processingCheckpointer.largestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END); - processingCheckpointer.prepareCheckpoint(lastSequenceNumberOfShard.sequenceNumber(), lastSequenceNumberOfShard.subSequenceNumber()); - assertThat( - "Preparing a checkpoing at the sequence number at the end of a shard should be the same as preparing a checkpoint at SHARD_END", - checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(ExtendedSequenceNumber.SHARD_END)); - } - - private enum CheckpointAction { - NONE, NO_SEQUENCE_NUMBER, WITH_SEQUENCE_NUMBER; - } - - private enum CheckpointerType { - CHECKPOINTER, PREPARED_CHECKPOINTER, PREPARE_THEN_CHECKPOINTER; - } - - /** - * Tests a bunch of mixed calls between checkpoint() and checkpoint(sequenceNumber) using a helper function. 
- * - * Also covers an edge case scenario where a shard consumer is started on a shard that never receives any records - * and is then shutdown - * - * @throws Exception - */ - @SuppressWarnings("serial") - @Test - public final void testMixedCheckpointCalls() throws Exception { - for (LinkedHashMap testPlan : getMixedCallsTestPlan()) { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - testMixedCheckpointCalls(processingCheckpointer, testPlan, CheckpointerType.CHECKPOINTER); - } - } - - /** - * similar to - * {@link ShardShardRecordProcessorCheckpointerTest#testMixedCheckpointCalls()} , - * but executes in two phase commit mode, where we prepare a checkpoint and then commit the prepared checkpoint - * - * @throws Exception - */ - @SuppressWarnings("serial") - @Test - public final void testMixedTwoPhaseCheckpointCalls() throws Exception { - for (LinkedHashMap testPlan : getMixedCallsTestPlan()) { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - testMixedCheckpointCalls(processingCheckpointer, testPlan, CheckpointerType.PREPARED_CHECKPOINTER); - } - } - - /** - * similar to - * {@link ShardShardRecordProcessorCheckpointerTest#testMixedCheckpointCalls()} , - * but executes in two phase commit mode, where we prepare a checkpoint, but we checkpoint using the - * RecordProcessorCheckpointer instead of the returned PreparedCheckpointer - * - * @throws Exception - */ - @SuppressWarnings("serial") - @Test - public final void testMixedTwoPhaseCheckpointCalls2() throws Exception { - for (LinkedHashMap testPlan : getMixedCallsTestPlan()) { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - testMixedCheckpointCalls(processingCheckpointer, testPlan, CheckpointerType.PREPARE_THEN_CHECKPOINTER); - } - } - - private List> getMixedCallsTestPlan() { - List> testPlans = 
new ArrayList>(); - - /* - * Simulate a scenario where the checkpointer is created at "latest". - * - * Then the processor is called with no records (e.g. no more records are added, but the processor might be - * called just to allow checkpointing). - * - * Then the processor is shutdown. - */ - testPlans.add(new LinkedHashMap() { - { - put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); - put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); - } - }); - // Nearly the same as the previous test, but we don't call checkpoint after LATEST - testPlans.add(new LinkedHashMap() { - { - put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NONE); - put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); - } - }); - - // Start with TRIM_HORIZON - testPlans.add(new LinkedHashMap() { - { - put(SentinelCheckpoint.TRIM_HORIZON.toString(), CheckpointAction.NONE); - put("1", CheckpointAction.NONE); - put("2", CheckpointAction.NO_SEQUENCE_NUMBER); - put("3", CheckpointAction.NONE); - put("4", CheckpointAction.WITH_SEQUENCE_NUMBER); - put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); - } - }); - - // Start with LATEST and a bit more complexity - testPlans.add(new LinkedHashMap() { - { - put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); - put("30", CheckpointAction.NONE); - put("332", CheckpointAction.WITH_SEQUENCE_NUMBER); - put("349", CheckpointAction.NONE); - put("4332", CheckpointAction.NO_SEQUENCE_NUMBER); - put("4338", CheckpointAction.NONE); - put("5349", CheckpointAction.WITH_SEQUENCE_NUMBER); - put("5358", CheckpointAction.NONE); - put("64332", CheckpointAction.NO_SEQUENCE_NUMBER); - put("64338", CheckpointAction.NO_SEQUENCE_NUMBER); - put("65358", CheckpointAction.WITH_SEQUENCE_NUMBER); - put("764338", CheckpointAction.WITH_SEQUENCE_NUMBER); - put("765349", CheckpointAction.NO_SEQUENCE_NUMBER); - put("765358", 
CheckpointAction.NONE); - put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); - } - }); - - return testPlans; - } - - /** - * A utility function to simplify various sequences of intermixed updates to the checkpointer, and calls to - * checpoint() and checkpoint(sequenceNumber). Takes a map where the key is a new sequence number to set in the - * checkpointer and the value is a CheckpointAction indicating an action to take: NONE -> Set the sequence number, - * don't do anything else NO_SEQUENCE_NUMBER -> Set the sequence number and call checkpoint() WITH_SEQUENCE_NUMBER - * -> Set the sequence number and call checkpoint(sequenceNumber) with that sequence number - * - * @param processingCheckpointer - * @param checkpointValueAndAction - * A map describing which checkpoint value to set in the checkpointer, and what action to take - * @throws Exception - */ - private void testMixedCheckpointCalls(ShardRecordProcessorCheckpointer processingCheckpointer, - LinkedHashMap checkpointValueAndAction, - CheckpointerType checkpointerType) throws Exception { - - for (Entry entry : checkpointValueAndAction.entrySet()) { - PreparedCheckpointer preparedCheckpoint = null; - ExtendedSequenceNumber lastCheckpointValue = processingCheckpointer.lastCheckpointValue(); - - if (SentinelCheckpoint.SHARD_END.toString().equals(entry.getKey())) { - // Before shard end, we will pretend to do what we expect the shutdown task to do - processingCheckpointer.sequenceNumberAtShardEnd(processingCheckpointer - .largestPermittedCheckpointValue()); - } - // Advance the largest checkpoint and check that it is updated. 
- processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber(entry.getKey())); - assertThat("Expected the largest checkpoint value to be updated after setting it", - processingCheckpointer.largestPermittedCheckpointValue(), - equalTo(new ExtendedSequenceNumber(entry.getKey()))); - switch (entry.getValue()) { - case NONE: - // We were told to not checkpoint, so lets just make sure the last checkpoint value is the same as - // when this block started then continue to the next instruction - assertThat("Expected the last checkpoint value to stay the same if we didn't checkpoint", - processingCheckpointer.lastCheckpointValue(), equalTo(lastCheckpointValue)); - continue; - case NO_SEQUENCE_NUMBER: - switch (checkpointerType) { - case CHECKPOINTER: - processingCheckpointer.checkpoint(); - break; - case PREPARED_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); - preparedCheckpoint.checkpoint(); - case PREPARE_THEN_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); - processingCheckpointer.checkpoint( - preparedCheckpoint.pendingCheckpoint().sequenceNumber(), - preparedCheckpoint.pendingCheckpoint().subSequenceNumber()); - } - break; - case WITH_SEQUENCE_NUMBER: - switch (checkpointerType) { - case CHECKPOINTER: - processingCheckpointer.checkpoint(entry.getKey()); - break; - case PREPARED_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey()); - preparedCheckpoint.checkpoint(); - case PREPARE_THEN_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey()); - processingCheckpointer.checkpoint( - preparedCheckpoint.pendingCheckpoint().sequenceNumber(), - preparedCheckpoint.pendingCheckpoint().subSequenceNumber()); - } - break; - } - // We must have checkpointed to get here, so let's make sure our last checkpoint value is up to date - assertThat("Expected the last checkpoint value to change after checkpointing", - 
processingCheckpointer.lastCheckpointValue(), equalTo(new ExtendedSequenceNumber(entry.getKey()))); - assertThat("Expected the largest checkpoint value to remain the same since the last set", - processingCheckpointer.largestPermittedCheckpointValue(), - equalTo(new ExtendedSequenceNumber(entry.getKey()))); - - assertThat(checkpoint.getCheckpoint(shardId), equalTo(new ExtendedSequenceNumber(entry.getKey()))); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), - equalTo(new ExtendedSequenceNumber(entry.getKey()))); - assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - } - } - - @Test - public final void testUnsetMetricsScopeDuringCheckpointing() throws Exception { - // First call to checkpoint - ShardRecordProcessorCheckpointer processingCheckpointer = - new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("5019"); - processingCheckpointer.largestPermittedCheckpointValue(sequenceNumber); - processingCheckpointer.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(sequenceNumber)); - } - - @Test - public final void testSetMetricsScopeDuringCheckpointing() throws Exception { - // First call to checkpoint - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); - - ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("5019"); - processingCheckpointer.largestPermittedCheckpointValue(sequenceNumber); - processingCheckpointer.checkpoint(); - assertThat(checkpoint.getCheckpoint(shardId), equalTo(sequenceNumber)); - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinatorTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinatorTest.java deleted file mode 100644 index e4dc7499..00000000 --- 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinatorTest.java +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.coordinator; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.not; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; -import org.mockito.verification.VerificationMode; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.lifecycle.ShardConsumer; - -@RunWith(MockitoJUnitRunner.class) -public class GracefulShutdownCoordinatorTest { - - @Mock - private CountDownLatch shutdownCompleteLatch; - @Mock - private CountDownLatch notificationCompleteLatch; - @Mock - private Scheduler scheduler; - @Mock - private Callable 
contextCallable; - @Mock - private ConcurrentMap shardInfoConsumerMap; - - @Test - public void testAllShutdownCompletedAlready() throws Exception { - Callable requestedShutdownCallable = buildRequestedShutdownCallable(); - - when(shutdownCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); - when(notificationCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); - - assertThat(requestedShutdownCallable.call(), equalTo(true)); - verify(shutdownCompleteLatch).await(anyLong(), any(TimeUnit.class)); - verify(notificationCompleteLatch).await(anyLong(), any(TimeUnit.class)); - verify(scheduler).shutdown(); - } - - @Test - public void testNotificationNotCompletedYet() throws Exception { - Callable requestedShutdownCallable = buildRequestedShutdownCallable(); - - mockLatchAwait(notificationCompleteLatch, false, true); - when(notificationCompleteLatch.getCount()).thenReturn(1L, 0L); - mockLatchAwait(shutdownCompleteLatch, true); - when(shutdownCompleteLatch.getCount()).thenReturn(1L, 1L, 0L); - - when(scheduler.shutdownComplete()).thenReturn(false, true); - mockShardInfoConsumerMap(1, 0); - - assertThat(requestedShutdownCallable.call(), equalTo(true)); - verify(notificationCompleteLatch, times(2)).await(anyLong(), any(TimeUnit.class)); - verify(notificationCompleteLatch).getCount(); - - verify(shutdownCompleteLatch).await(anyLong(), any(TimeUnit.class)); - verify(shutdownCompleteLatch, times(2)).getCount(); - - verify(scheduler).shutdown(); - } - - @Test - public void testShutdownNotCompletedYet() throws Exception { - Callable requestedShutdownCallable = buildRequestedShutdownCallable(); - - mockLatchAwait(notificationCompleteLatch, true); - mockLatchAwait(shutdownCompleteLatch, false, true); - when(shutdownCompleteLatch.getCount()).thenReturn(1L, 0L); - - when(scheduler.shutdownComplete()).thenReturn(false, true); - mockShardInfoConsumerMap(1, 0); - - assertThat(requestedShutdownCallable.call(), equalTo(true)); - 
verify(notificationCompleteLatch).await(anyLong(), any(TimeUnit.class)); - verify(notificationCompleteLatch, never()).getCount(); - - verify(shutdownCompleteLatch, times(2)).await(anyLong(), any(TimeUnit.class)); - verify(shutdownCompleteLatch, times(2)).getCount(); - - verify(scheduler).shutdown(); - } - - @Test - public void testMultipleAttemptsForNotification() throws Exception { - Callable requestedShutdownCallable = buildRequestedShutdownCallable(); - - mockLatchAwait(notificationCompleteLatch, false, false, true); - when(notificationCompleteLatch.getCount()).thenReturn(2L, 1L, 0L); - - mockLatchAwait(shutdownCompleteLatch, true); - when(shutdownCompleteLatch.getCount()).thenReturn(2L, 2L, 1L, 1L, 0L); - - when(scheduler.shutdownComplete()).thenReturn(false, false, false, true); - mockShardInfoConsumerMap(2, 1, 0); - - assertThat(requestedShutdownCallable.call(), equalTo(true)); - - verifyLatchAwait(notificationCompleteLatch, 3); - verify(notificationCompleteLatch, times(2)).getCount(); - - verifyLatchAwait(shutdownCompleteLatch, 1); - verify(shutdownCompleteLatch, times(4)).getCount(); - } - - @Test - public void testWorkerAlreadyShutdownAtNotification() throws Exception { - Callable requestedShutdownCallable = buildRequestedShutdownCallable(); - - mockLatchAwait(notificationCompleteLatch, false, true); - when(notificationCompleteLatch.getCount()).thenReturn(1L, 0L); - - mockLatchAwait(shutdownCompleteLatch, true); - when(shutdownCompleteLatch.getCount()).thenReturn(1L, 1L, 0L); - - when(scheduler.shutdownComplete()).thenReturn(true); - mockShardInfoConsumerMap(0); - - assertThat(requestedShutdownCallable.call(), equalTo(false)); - - verifyLatchAwait(notificationCompleteLatch); - verify(notificationCompleteLatch).getCount(); - - verifyLatchAwait(shutdownCompleteLatch, never()); - verify(shutdownCompleteLatch, times(3)).getCount(); - } - - @Test - public void testWorkerAlreadyShutdownAtComplete() throws Exception { - Callable requestedShutdownCallable = 
buildRequestedShutdownCallable(); - - mockLatchAwait(notificationCompleteLatch, true); - - mockLatchAwait(shutdownCompleteLatch, false, true); - when(shutdownCompleteLatch.getCount()).thenReturn(1L, 1L, 1L); - - when(scheduler.shutdownComplete()).thenReturn(true); - mockShardInfoConsumerMap(0); - - assertThat(requestedShutdownCallable.call(), equalTo(false)); - - verifyLatchAwait(notificationCompleteLatch); - verify(notificationCompleteLatch, never()).getCount(); - - verifyLatchAwait(shutdownCompleteLatch); - verify(shutdownCompleteLatch, times(3)).getCount(); - } - - @Test - public void testNotificationInterrupted() throws Exception { - Callable requestedShutdownCallable = buildRequestedShutdownCallable(); - - when(notificationCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenThrow(new InterruptedException()); - when(notificationCompleteLatch.getCount()).thenReturn(1L); - - when(shutdownCompleteLatch.getCount()).thenReturn(1L); - - assertThat(requestedShutdownCallable.call(), equalTo(false)); - verifyLatchAwait(notificationCompleteLatch); - verifyLatchAwait(shutdownCompleteLatch, never()); - verify(scheduler, never()).shutdown(); - } - - @Test - public void testShutdownInterrupted() throws Exception { - Callable requestedShutdownCallable = buildRequestedShutdownCallable(); - - when(notificationCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); - - when(shutdownCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenThrow(new InterruptedException()); - when(shutdownCompleteLatch.getCount()).thenReturn(1L); - - assertThat(requestedShutdownCallable.call(), equalTo(false)); - verifyLatchAwait(notificationCompleteLatch); - verifyLatchAwait(shutdownCompleteLatch); - verify(scheduler).shutdown(); - } - - @Test - public void testInterruptedAfterNotification() throws Exception { - Callable requestedShutdownCallable = buildRequestedShutdownCallable(); - - when(notificationCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenAnswer(invocation 
-> { - Thread.currentThread().interrupt(); - return true; - }); - - assertThat(requestedShutdownCallable.call(), equalTo(false)); - verifyLatchAwait(notificationCompleteLatch); - verifyLatchAwait(shutdownCompleteLatch, never()); - verify(scheduler, never()).shutdown(); - } - - @Test - public void testInterruptedAfterWorkerShutdown() throws Exception { - Callable requestedShutdownCallable = buildRequestedShutdownCallable(); - - when(notificationCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); - - doAnswer(invocation -> { - Thread.currentThread().interrupt(); - return true; - }).when(scheduler).shutdown(); - - assertThat(requestedShutdownCallable.call(), equalTo(false)); - verifyLatchAwait(notificationCompleteLatch); - verifyLatchAwait(shutdownCompleteLatch, never()); - verify(scheduler).shutdown(); - } - - @Test - public void testInterruptedDuringNotification() throws Exception { - Callable requestedShutdownCallable = buildRequestedShutdownCallable(); - - when(notificationCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenAnswer(invocation -> { - Thread.currentThread().interrupt(); - return false; - }); - when(notificationCompleteLatch.getCount()).thenReturn(1L); - - when(shutdownCompleteLatch.getCount()).thenReturn(1L); - - assertThat(requestedShutdownCallable.call(), equalTo(false)); - verifyLatchAwait(notificationCompleteLatch); - verify(notificationCompleteLatch).getCount(); - - verifyLatchAwait(shutdownCompleteLatch, never()); - verify(shutdownCompleteLatch).getCount(); - - verify(scheduler, never()).shutdown(); - } - - @Test - public void testInterruptedDuringShutdown() throws Exception { - Callable requestedShutdownCallable = buildRequestedShutdownCallable(); - - when(notificationCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); - - when(shutdownCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenAnswer(invocation -> { - Thread.currentThread().interrupt(); - return false; - }); - 
when(shutdownCompleteLatch.getCount()).thenReturn(1L); - - assertThat(requestedShutdownCallable.call(), equalTo(false)); - verifyLatchAwait(notificationCompleteLatch); - verify(notificationCompleteLatch, never()).getCount(); - - verifyLatchAwait(shutdownCompleteLatch); - verify(shutdownCompleteLatch).getCount(); - - verify(scheduler).shutdown(); - } - - @Test(expected = IllegalStateException.class) - public void testWorkerShutdownCallableThrows() throws Exception { - Callable requestedShutdownCallable = new GracefulShutdownCoordinator().createGracefulShutdownCallable(contextCallable); - when(contextCallable.call()).thenThrow(new IllegalStateException("Bad Shutdown")); - - requestedShutdownCallable.call(); - } - - private void verifyLatchAwait(CountDownLatch latch) throws Exception { - verifyLatchAwait(latch, times(1)); - } - - private void verifyLatchAwait(CountDownLatch latch, int times) throws Exception { - verifyLatchAwait(latch, times(times)); - } - - private void verifyLatchAwait(CountDownLatch latch, VerificationMode verificationMode) throws Exception { - verify(latch, verificationMode).await(anyLong(), any(TimeUnit.class)); - } - - private void mockLatchAwait(CountDownLatch latch, Boolean initial, Boolean... remaining) throws Exception { - when(latch.await(anyLong(), any(TimeUnit.class))).thenReturn(initial, remaining); - } - - private Callable buildRequestedShutdownCallable() throws Exception { - GracefulShutdownContext context = new GracefulShutdownContext(shutdownCompleteLatch, - notificationCompleteLatch, scheduler); - when(contextCallable.call()).thenReturn(context); - return new GracefulShutdownCoordinator().createGracefulShutdownCallable(contextCallable); - } - - private void mockShardInfoConsumerMap(Integer initialItemCount, Integer... 
additionalItemCounts) { - when(scheduler.shardInfoShardConsumerMap()).thenReturn(shardInfoConsumerMap); - Boolean additionalEmptyStates[] = new Boolean[additionalItemCounts.length]; - for (int i = 0; i < additionalItemCounts.length; ++i) { - additionalEmptyStates[i] = additionalItemCounts[i] == 0; - } - when(shardInfoConsumerMap.size()).thenReturn(initialItemCount, additionalItemCounts); - when(shardInfoConsumerMap.isEmpty()).thenReturn(initialItemCount == 0, additionalEmptyStates); - } - -} \ No newline at end of file diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/SchedulerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/SchedulerTest.java deleted file mode 100644 index 9a10f6b3..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/SchedulerTest.java +++ /dev/null @@ -1,487 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.coordinator; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; -import static org.mockito.Matchers.same; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; -import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.checkpoint.Checkpoint; -import software.amazon.kinesis.checkpoint.CheckpointConfig; -import software.amazon.kinesis.checkpoint.CheckpointFactory; -import software.amazon.kinesis.exceptions.KinesisClientLibNonRetryableException; -import software.amazon.kinesis.leases.LeaseCoordinator; -import software.amazon.kinesis.leases.LeaseManagementConfig; -import software.amazon.kinesis.leases.LeaseManagementFactory; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.ShardDetector; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.leases.ShardSyncTaskManager; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher; -import software.amazon.kinesis.lifecycle.LifecycleConfig; -import 
software.amazon.kinesis.lifecycle.ShardConsumer; -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.LeaseLostInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.events.ShardEndedInput; -import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.MetricsConfig; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.processor.ProcessorConfig; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RetrievalConfig; -import software.amazon.kinesis.retrieval.RetrievalFactory; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * - */ -@RunWith(MockitoJUnitRunner.class) -public class SchedulerTest { - private final String tableName = "tableName"; - private final String workerIdentifier = "workerIdentifier"; - private final String applicationName = "applicationName"; - private final String streamName = "streamName"; - private final String namespace = "testNamespace"; - - private Scheduler scheduler; - private ShardRecordProcessorFactory shardRecordProcessorFactory; - private CheckpointConfig checkpointConfig; - private CoordinatorConfig coordinatorConfig; - private LeaseManagementConfig leaseManagementConfig; - private LifecycleConfig lifecycleConfig; - private MetricsConfig metricsConfig; - private ProcessorConfig processorConfig; - private RetrievalConfig retrievalConfig; - - @Mock - private KinesisAsyncClient kinesisClient; - @Mock - private DynamoDbAsyncClient dynamoDBClient; - @Mock - private CloudWatchAsyncClient cloudWatchClient; - @Mock - private RetrievalFactory retrievalFactory; - @Mock 
- private RecordsPublisher recordsPublisher; - @Mock - private LeaseCoordinator leaseCoordinator; - @Mock - private ShardSyncTaskManager shardSyncTaskManager; - @Mock - private DynamoDBLeaseRefresher dynamoDBLeaseRefresher; - @Mock - private ShardDetector shardDetector; - @Mock - private Checkpointer checkpoint; - - @Before - public void setup() { - shardRecordProcessorFactory = new TestShardRecordProcessorFactory(); - - checkpointConfig = new CheckpointConfig().checkpointFactory(new TestKinesisCheckpointFactory()); - coordinatorConfig = new CoordinatorConfig(applicationName).parentShardPollIntervalMillis(100L); - leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, streamName, - workerIdentifier).leaseManagementFactory(new TestKinesisLeaseManagementFactory()); - lifecycleConfig = new LifecycleConfig(); - metricsConfig = new MetricsConfig(cloudWatchClient, namespace); - processorConfig = new ProcessorConfig(shardRecordProcessorFactory); - retrievalConfig = new RetrievalConfig(kinesisClient, streamName, applicationName) - .retrievalFactory(retrievalFactory); - - when(leaseCoordinator.leaseRefresher()).thenReturn(dynamoDBLeaseRefresher); - when(shardSyncTaskManager.shardDetector()).thenReturn(shardDetector); - when(retrievalFactory.createGetRecordsCache(any(ShardInfo.class), any(MetricsFactory.class))).thenReturn(recordsPublisher); - - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); - } - - /** - * Test method for {@link Scheduler#applicationName()}. 
- */ - @Test - public void testGetStageName() { - final String stageName = "testStageName"; - coordinatorConfig = new CoordinatorConfig(stageName); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); - assertEquals(stageName, scheduler.applicationName()); - } - - @Test - public final void testCreateOrGetShardConsumer() { - final String shardId = "shardId-000000000000"; - final String concurrencyToken = "concurrencyToken"; - final ShardInfo shardInfo = new ShardInfo(shardId, concurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - final ShardConsumer shardConsumer1 = scheduler.createOrGetShardConsumer(shardInfo, shardRecordProcessorFactory); - assertNotNull(shardConsumer1); - final ShardConsumer shardConsumer2 = scheduler.createOrGetShardConsumer(shardInfo, shardRecordProcessorFactory); - assertNotNull(shardConsumer2); - - assertSame(shardConsumer1, shardConsumer2); - - final String anotherConcurrencyToken = "anotherConcurrencyToken"; - final ShardInfo shardInfo2 = new ShardInfo(shardId, anotherConcurrencyToken, null, - ExtendedSequenceNumber.TRIM_HORIZON); - final ShardConsumer shardConsumer3 = scheduler.createOrGetShardConsumer(shardInfo2, shardRecordProcessorFactory); - assertNotNull(shardConsumer3); - - assertNotSame(shardConsumer1, shardConsumer3); - } - - // TODO: figure out the behavior of the test. 
- @Test - public void testWorkerLoopWithCheckpoint() throws Exception { - final String shardId = "shardId-000000000000"; - final String concurrencyToken = "concurrencyToken"; - final ExtendedSequenceNumber firstSequenceNumber = ExtendedSequenceNumber.TRIM_HORIZON; - final ExtendedSequenceNumber secondSequenceNumber = new ExtendedSequenceNumber("1000"); - final ExtendedSequenceNumber finalSequenceNumber = new ExtendedSequenceNumber("2000"); - - final List initialShardInfo = Collections.singletonList( - new ShardInfo(shardId, concurrencyToken, null, firstSequenceNumber)); - final List firstShardInfo = Collections.singletonList( - new ShardInfo(shardId, concurrencyToken, null, secondSequenceNumber)); - final List secondShardInfo = Collections.singletonList( - new ShardInfo(shardId, concurrencyToken, null, finalSequenceNumber)); - - final Checkpoint firstCheckpoint = new Checkpoint(firstSequenceNumber, null); - - when(leaseCoordinator.getCurrentAssignments()).thenReturn(initialShardInfo, firstShardInfo, secondShardInfo); - when(checkpoint.getCheckpointObject(eq(shardId))).thenReturn(firstCheckpoint); - - Scheduler schedulerSpy = spy(scheduler); - schedulerSpy.runProcessLoop(); - schedulerSpy.runProcessLoop(); - schedulerSpy.runProcessLoop(); - - verify(schedulerSpy).buildConsumer(same(initialShardInfo.get(0)), eq(shardRecordProcessorFactory)); - verify(schedulerSpy, never()).buildConsumer(same(firstShardInfo.get(0)), eq(shardRecordProcessorFactory)); - verify(schedulerSpy, never()).buildConsumer(same(secondShardInfo.get(0)), eq(shardRecordProcessorFactory)); - verify(checkpoint).getCheckpointObject(eq(shardId)); - } - - @Test - public final void testCleanupShardConsumers() { - final String shard0 = "shardId-000000000000"; - final String shard1 = "shardId-000000000001"; - final String concurrencyToken = "concurrencyToken"; - final String anotherConcurrencyToken = "anotherConcurrencyToken"; - - final ShardInfo shardInfo0 = new ShardInfo(shard0, concurrencyToken, null, 
ExtendedSequenceNumber.TRIM_HORIZON); - final ShardInfo shardInfo0WithAnotherConcurrencyToken = new ShardInfo(shard0, anotherConcurrencyToken, null, - ExtendedSequenceNumber.TRIM_HORIZON); - final ShardInfo shardInfo1 = new ShardInfo(shard1, concurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - - final ShardConsumer shardConsumer0 = scheduler.createOrGetShardConsumer(shardInfo0, shardRecordProcessorFactory); - final ShardConsumer shardConsumer0WithAnotherConcurrencyToken = - scheduler.createOrGetShardConsumer(shardInfo0WithAnotherConcurrencyToken, shardRecordProcessorFactory); - final ShardConsumer shardConsumer1 = scheduler.createOrGetShardConsumer(shardInfo1, shardRecordProcessorFactory); - - Set shards = new HashSet<>(); - shards.add(shardInfo0); - shards.add(shardInfo1); - scheduler.cleanupShardConsumers(shards); - - // verify shard consumer not present in assignedShards is shut down - assertTrue(shardConsumer0WithAnotherConcurrencyToken.isShutdownRequested()); - // verify shard consumers present in assignedShards aren't shut down - assertFalse(shardConsumer0.isShutdownRequested()); - assertFalse(shardConsumer1.isShutdownRequested()); - } - - @Test - public final void testInitializationFailureWithRetries() throws Exception { - doNothing().when(leaseCoordinator).initialize(); - when(shardDetector.listShards()).thenThrow(new RuntimeException()); - - scheduler.run(); - - verify(shardDetector, times(coordinatorConfig.maxInitializationAttempts())).listShards(); - } - - @Test - public final void testInitializationFailureWithRetriesWithConfiguredMaxInitializationAttempts() throws Exception { - final int maxInitializationAttempts = 5; - coordinatorConfig.maxInitializationAttempts(maxInitializationAttempts); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); - - doNothing().when(leaseCoordinator).initialize(); - 
when(shardDetector.listShards()).thenThrow(new RuntimeException()); - - scheduler.run(); - - // verify initialization was retried for maxInitializationAttempts times - verify(shardDetector, times(maxInitializationAttempts)).listShards(); - } - - - /*private void runAndTestWorker(int numShards, int threadPoolSize) throws Exception { - final int numberOfRecordsPerShard = 10; - final String kinesisShardPrefix = "kinesis-0-"; - final BigInteger startSeqNum = BigInteger.ONE; - List shardList = KinesisLocalFileDataCreator.createShardList(numShards, kinesisShardPrefix, startSeqNum); - Assert.assertEquals(numShards, shardList.size()); - List initialLeases = new ArrayList(); - for (Shard shard : shardList) { - Lease lease = ShardSyncer.newKCLLease(shard); - lease.setCheckpoint(ExtendedSequenceNumber.AT_TIMESTAMP); - initialLeases.add(lease); - } - runAndTestWorker(shardList, threadPoolSize, initialLeases, numberOfRecordsPerShard); - } - - private void runAndTestWorker(List shardList, - int threadPoolSize, - List initialLeases, - int numberOfRecordsPerShard) throws Exception { - File file = KinesisLocalFileDataCreator.generateTempDataFile(shardList, numberOfRecordsPerShard, "unitTestWT001"); - IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath()); - - Semaphore recordCounter = new Semaphore(0); - ShardSequenceVerifier shardSequenceVerifier = new ShardSequenceVerifier(shardList); - TestStreamletFactory recordProcessorFactory = new TestStreamletFactory(recordCounter, shardSequenceVerifier); - - ExecutorService executorService = Executors.newFixedThreadPool(threadPoolSize); - - SchedulerThread schedulerThread = runWorker(initialLeases); - - // TestStreamlet will release the semaphore once for every record it processes - recordCounter.acquire(numberOfRecordsPerShard * shardList.size()); - - // Wait a bit to allow the worker to spin against the end of the stream. 
- Thread.sleep(500L); - - testWorker(shardList, threadPoolSize, initialLeases, - numberOfRecordsPerShard, fileBasedProxy, recordProcessorFactory); - - schedulerThread.schedulerForThread().shutdown(); - executorService.shutdownNow(); - file.delete(); - } - - private SchedulerThread runWorker(final List initialLeases) throws Exception { - final int maxRecords = 2; - - final long leaseDurationMillis = 10000L; - final long epsilonMillis = 1000L; - final long idleTimeInMilliseconds = 2L; - - AmazonDynamoDB ddbClient = DynamoDBEmbedded.create().dynamoDBClient(); - LeaseManager leaseRefresher = new LeaseManager("foo", ddbClient); - leaseRefresher.createLeaseTableIfNotExists(1L, 1L); - for (Lease initialLease : initialLeases) { - leaseRefresher.createLeaseIfNotExists(initialLease); - } - - checkpointConfig = new CheckpointConfig("foo", ddbClient, workerIdentifier) - .failoverTimeMillis(leaseDurationMillis) - .epsilonMillis(epsilonMillis) - .leaseRefresher(leaseRefresher); - leaseManagementConfig = new LeaseManagementConfig("foo", ddbClient, kinesisClient, streamName, workerIdentifier) - .failoverTimeMillis(leaseDurationMillis) - .epsilonMillis(epsilonMillis); - retrievalConfig.initialPositionInStreamExtended(InitialPositionInStreamExtended.newInitialPositionAtTimestamp( - new Date(KinesisLocalFileDataCreator.STARTING_TIMESTAMP))) - .maxRecords(maxRecords) - .idleTimeBetweenReadsInMillis(idleTimeInMilliseconds); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); - - SchedulerThread schedulerThread = new SchedulerThread(scheduler); - schedulerThread.start(); - return schedulerThread; - } - - private void testWorker(List shardList, - int threadPoolSize, - List initialLeases, - int numberOfRecordsPerShard, - IKinesisProxy kinesisProxy, - TestStreamletFactory recordProcessorFactory) throws Exception { - recordProcessorFactory.getShardSequenceVerifier().verify(); - - // 
Gather values to compare across all processors of a given shard. - Map> shardStreamletsRecords = new HashMap>(); - Map shardsLastProcessorShutdownReason = new HashMap(); - Map shardsNumProcessRecordsCallsWithEmptyRecordList = new HashMap(); - for (TestStreamlet processor : recordProcessorFactory.getTestStreamlets()) { - String shardId = processor.shardId(); - if (shardStreamletsRecords.get(shardId) == null) { - shardStreamletsRecords.put(shardId, processor.getProcessedRecords()); - } else { - List records = shardStreamletsRecords.get(shardId); - records.addAll(processor.getProcessedRecords()); - shardStreamletsRecords.put(shardId, records); - } - if (shardsNumProcessRecordsCallsWithEmptyRecordList.get(shardId) == null) { - shardsNumProcessRecordsCallsWithEmptyRecordList.put(shardId, - processor.getNumProcessRecordsCallsWithEmptyRecordList()); - } else { - long totalShardsNumProcessRecordsCallsWithEmptyRecordList = - shardsNumProcessRecordsCallsWithEmptyRecordList.get(shardId) - + processor.getNumProcessRecordsCallsWithEmptyRecordList(); - shardsNumProcessRecordsCallsWithEmptyRecordList.put(shardId, - totalShardsNumProcessRecordsCallsWithEmptyRecordList); - } - shardsLastProcessorShutdownReason.put(processor.shardId(), processor.getShutdownReason()); - } - - // verify that all records were processed at least once - verifyAllRecordsOfEachShardWereConsumedAtLeastOnce(shardList, kinesisProxy, numberOfRecordsPerShard, shardStreamletsRecords); - shardList.forEach(shard -> { - final String iterator = kinesisProxy.getIterator(shard.shardId(), new Date(KinesisLocalFileDataCreator.STARTING_TIMESTAMP)); - final List records = kinesisProxy.get(iterator, numberOfRecordsPerShard).records(); - assertEquals(); - }); - for (Shard shard : shardList) { - String shardId = shard.shardId(); - String iterator = - fileBasedProxy.getIterator(shardId, new Date(KinesisLocalFileDataCreator.STARTING_TIMESTAMP)); - List expectedRecords = fileBasedProxy.get(iterator, numRecs).records(); - 
verifyAllRecordsWereConsumedAtLeastOnce(expectedRecords, shardStreamletsRecords.get(shardId)); - } - - // within a record processor all the incoming records should be ordered - verifyRecordsProcessedByEachProcessorWereOrdered(recordProcessorFactory); - - // for shards for which only one record processor was created, we verify that each record should be - // processed exactly once - verifyAllRecordsOfEachShardWithOnlyOneProcessorWereConsumedExactlyOnce(shardList, - kinesisProxy, - numberOfRecordsPerShard, - shardStreamletsRecords, - recordProcessorFactory); - - // if callProcessRecordsForEmptyRecordList flag is set then processors must have been invoked with empty record - // sets else they shouldn't have seen invoked with empty record sets - verifyNumProcessRecordsCallsWithEmptyRecordList(shardList, - shardsNumProcessRecordsCallsWithEmptyRecordList, - callProcessRecordsForEmptyRecordList); - - // verify that worker shutdown last processor of shards that were terminated - verifyLastProcessorOfClosedShardsWasShutdownWithTerminate(shardList, shardsLastProcessorShutdownReason); - } - - @Data - @EqualsAndHashCode(callSuper = true) - @Accessors(fluent = true) - private static class SchedulerThread extends Thread { - private final Scheduler schedulerForThread; - }*/ - - private static class TestShardRecordProcessorFactory implements ShardRecordProcessorFactory { - @Override - public ShardRecordProcessor shardRecordProcessor() { - return new ShardRecordProcessor() { - @Override - public void initialize(final InitializationInput initializationInput) { - // Do nothing. 
- } - - @Override - public void processRecords(final ProcessRecordsInput processRecordsInput) { - try { - processRecordsInput.checkpointer().checkpoint(); - } catch (KinesisClientLibNonRetryableException e) { - throw new RuntimeException(e); - } - } - - @Override - public void leaseLost(LeaseLostInput leaseLostInput) { - - } - - @Override - public void shardEnded(ShardEndedInput shardEndedInput) { - try { - shardEndedInput.checkpointer().checkpoint(); - } catch (KinesisClientLibNonRetryableException e) { - throw new RuntimeException(e); - } - } - - @Override - public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) { - - } - }; - } - } - - private class TestKinesisLeaseManagementFactory implements LeaseManagementFactory { - @Override - public LeaseCoordinator createLeaseCoordinator(MetricsFactory metricsFactory) { - return leaseCoordinator; - } - - @Override - public ShardSyncTaskManager createShardSyncTaskManager(MetricsFactory metricsFactory) { - return shardSyncTaskManager; - } - - @Override - public DynamoDBLeaseRefresher createLeaseRefresher() { - return dynamoDBLeaseRefresher; - } - - @Override - public ShardDetector createShardDetector() { - return shardDetector; - } - } - - private class TestKinesisCheckpointFactory implements CheckpointFactory { - @Override - public Checkpointer createCheckpointer(final LeaseCoordinator leaseCoordinator, - final LeaseRefresher leaseRefresher) { - return checkpoint; - } - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ExceptionThrowingLeaseRefresher.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ExceptionThrowingLeaseRefresher.java deleted file mode 100644 index 1df3f14e..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ExceptionThrowingLeaseRefresher.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases; - -import java.util.Arrays; -import java.util.List; - -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Mock LeaseRefresher by randomly throwing Leasing Exceptions. - * - */ -@RequiredArgsConstructor -@Slf4j -public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { - private static final Throwable EXCEPTION_MSG = new Throwable("Test Exception"); - - // The real local lease refresher which would do the real implementations. - private final LeaseRefresher leaseRefresher; - // Use array below to control in what situations we want to throw exceptions. - private int[] leaseRefresherMethodCallingCount = new int[ExceptionThrowingLeaseRefresherMethods.values().length]; - - /** - * Methods which we support (simulate exceptions). 
- */ - public enum ExceptionThrowingLeaseRefresherMethods { - CREATELEASETABLEIFNOTEXISTS(0), - LEASETABLEEXISTS(1), - WAITUNTILLEASETABLEEXISTS(2), - LISTLEASES(3), - CREATELEASEIFNOTEXISTS(4), - GETLEASE(5), - RENEWLEASE(6), - TAKELEASE(7), - EVICTLEASE(8), - DELETELEASE(9), - DELETEALL(10), - UPDATELEASE(11), - NONE(Integer.MIN_VALUE); - - private Integer index; - - ExceptionThrowingLeaseRefresherMethods(Integer index) { - this.index = index; - } - - Integer index() { - return this.index; - } - } - - // Define which method should throw exception and when it should throw exception. - private ExceptionThrowingLeaseRefresherMethods methodThrowingException = ExceptionThrowingLeaseRefresherMethods.NONE; - private int timeThrowingException = Integer.MAX_VALUE; - - /** - * Set parameters used for throwing exception. - * - * @param method which would throw exception - * @param throwingTime defines what time to throw exception - */ - void leaseRefresherThrowingExceptionScenario(ExceptionThrowingLeaseRefresherMethods method, int throwingTime) { - this.methodThrowingException = method; - this.timeThrowingException = throwingTime; - } - - /** - * Reset all parameters used for throwing exception. - */ - void clearLeaseRefresherThrowingExceptionScenario() { - Arrays.fill(leaseRefresherMethodCallingCount, 0); - this.methodThrowingException = ExceptionThrowingLeaseRefresherMethods.NONE; - this.timeThrowingException = Integer.MAX_VALUE; - } - - // Throw exception when the conditions are satisfied : - // 1). method equals to methodThrowingException - // 2). 
method calling count equals to what we want - private void throwExceptions(String methodName, ExceptionThrowingLeaseRefresherMethods method) - throws DependencyException { - // Increase calling count for this method - leaseRefresherMethodCallingCount[method.index()]++; - if (method.equals(methodThrowingException) - && (leaseRefresherMethodCallingCount[method.index()] == timeThrowingException)) { - // Throw Dependency Exception if all conditions are satisfied. - log.debug("Throwing DependencyException in {}", methodName); - throw new DependencyException(EXCEPTION_MSG); - } - } - - @Override - public boolean createLeaseTableIfNotExists(Long readCapacity, Long writeCapacity) - throws ProvisionedThroughputException, DependencyException { - throwExceptions("createLeaseTableIfNotExists", - ExceptionThrowingLeaseRefresherMethods.CREATELEASETABLEIFNOTEXISTS); - - return leaseRefresher.createLeaseTableIfNotExists(readCapacity, writeCapacity); - } - - @Override - public boolean leaseTableExists() throws DependencyException { - throwExceptions("leaseTableExists", ExceptionThrowingLeaseRefresherMethods.LEASETABLEEXISTS); - - return leaseRefresher.leaseTableExists(); - } - - @Override - public boolean waitUntilLeaseTableExists(long secondsBetweenPolls, long timeoutSeconds) throws DependencyException { - throwExceptions("waitUntilLeaseTableExists", ExceptionThrowingLeaseRefresherMethods.WAITUNTILLEASETABLEEXISTS); - - return leaseRefresher.waitUntilLeaseTableExists(secondsBetweenPolls, timeoutSeconds); - } - - @Override - public List listLeases() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - throwExceptions("listLeases", ExceptionThrowingLeaseRefresherMethods.LISTLEASES); - - return leaseRefresher.listLeases(); - } - - @Override - public boolean createLeaseIfNotExists(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - throwExceptions("createLeaseIfNotExists", 
ExceptionThrowingLeaseRefresherMethods.CREATELEASEIFNOTEXISTS); - - return leaseRefresher.createLeaseIfNotExists(lease); - } - - @Override - public boolean renewLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - throwExceptions("renewLease", ExceptionThrowingLeaseRefresherMethods.RENEWLEASE); - - return leaseRefresher.renewLease(lease); - } - - @Override - public boolean takeLease(Lease lease, String owner) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - throwExceptions("takeLease", ExceptionThrowingLeaseRefresherMethods.TAKELEASE); - - return leaseRefresher.takeLease(lease, owner); - } - - @Override - public boolean evictLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - throwExceptions("evictLease", ExceptionThrowingLeaseRefresherMethods.EVICTLEASE); - - return leaseRefresher.evictLease(lease); - } - - @Override - public void deleteLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - throwExceptions("deleteLease", ExceptionThrowingLeaseRefresherMethods.DELETELEASE); - - leaseRefresher.deleteLease(lease); - } - - @Override - public boolean updateLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - throwExceptions("updateLease", ExceptionThrowingLeaseRefresherMethods.UPDATELEASE); - - return leaseRefresher.updateLease(lease); - } - - @Override - public Lease getLease(String shardId) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - throwExceptions("getLease", ExceptionThrowingLeaseRefresherMethods.GETLEASE); - - return leaseRefresher.getLease(shardId); - } - - @Override - public void deleteAll() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - throwExceptions("deleteAll", ExceptionThrowingLeaseRefresherMethods.DELETEALL); - - 
leaseRefresher.deleteAll(); - } - - @Override - public boolean isLeaseTableEmpty() throws DependencyException, - InvalidStateException, ProvisionedThroughputException { - return false; - } - - @Override - public ExtendedSequenceNumber getCheckpoint(final String shardId) - throws ProvisionedThroughputException, InvalidStateException, DependencyException { - return null; - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java deleted file mode 100644 index 78d68b79..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java +++ /dev/null @@ -1,1707 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.leases; - -// -// TODO: Fix the lack of DynamoDB Loca -// - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import org.apache.commons.lang3.StringUtils; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.awssdk.services.kinesis.model.HashKeyRange; -import software.amazon.awssdk.services.kinesis.model.SequenceNumberRange; -import software.amazon.awssdk.services.kinesis.model.Shard; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.exceptions.internal.KinesisClientLibIOException; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.metrics.MetricsScope; -import software.amazon.kinesis.metrics.NullMetricsScope; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -@RunWith(MockitoJUnitRunner.class) -// CHECKSTYLE:IGNORE JavaNCSS FOR NEXT 800 LINES -public class HierarchicalShardSyncerTest { - private static final 
InitialPositionInStreamExtended INITIAL_POSITION_LATEST = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.LATEST); - private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP = InitialPositionInStreamExtended - .newInitialPositionAtTimestamp(new Date(1000L)); - private static final int EXPONENT = 128; - private static final String LEASE_OWNER = "TestOwnere"; - private static final MetricsScope SCOPE = new NullMetricsScope(); - - private final boolean cleanupLeasesOfCompletedShards = true; - private final boolean ignoreUnexpectedChildShards = false; - - private HierarchicalShardSyncer hierarchicalShardSyncer; - - /** - * Old/Obsolete max value of a sequence number (2^128 -1). - */ - public static final BigInteger MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE); - - @Mock - private ShardDetector shardDetector; - @Mock - private DynamoDBLeaseRefresher dynamoDBLeaseRefresher; - - @Before - public void setup() { - hierarchicalShardSyncer = new HierarchicalShardSyncer(); - } - - /** - * Test determineNewLeasesToCreate() where there are no shards - */ - @Test - public void testDetermineNewLeasesToCreateNoShards() { - final List shards = Collections.emptyList(); - final List leases = Collections.emptyList(); - - assertThat(HierarchicalShardSyncer.determineNewLeasesToCreate(shards, leases, INITIAL_POSITION_LATEST).isEmpty(), - equalTo(true)); - } - - /** - * Test determineNewLeasesToCreate() where there are no leases and no resharding operations have been performed - */ - @Test - public void testDetermineNewLeasesToCreate0Leases0Reshards() { - final String shardId0 = "shardId-0"; - final String shardId1 = "shardId-1"; - final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", 
null); - - final List shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange), - ShardObjectHelper.newShard(shardId1, null, null, sequenceRange)); - final List currentLeases = Collections.emptyList(); - - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - INITIAL_POSITION_LATEST); - final Set newLeaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final Set expectedLeaseShardIds = new HashSet<>(Arrays.asList(shardId0, shardId1)); - - assertThat(newLeases.size(), equalTo(expectedLeaseShardIds.size())); - assertThat(newLeaseKeys, equalTo(expectedLeaseShardIds)); - } - - /** - * Test determineNewLeasesToCreate() where there are no leases and no resharding operations have been performed, but - * one of the shards was marked as inconsistent. - */ - @Test - public void testDetermineNewLeasesToCreate0Leases0Reshards1Inconsistent() { - final String shardId0 = "shardId-0"; - final String shardId1 = "shardId-1"; - final String shardId2 = "shardId-2"; - final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null); - - final List shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange), - ShardObjectHelper.newShard(shardId1, null, null, sequenceRange), - ShardObjectHelper.newShard(shardId2, shardId1, null, sequenceRange)); - final List currentLeases = Collections.emptyList(); - - final Set inconsistentShardIds = new HashSet<>(Collections.singletonList(shardId2)); - - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - INITIAL_POSITION_LATEST, inconsistentShardIds); - final Set newLeaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final Set expectedLeaseShardIds = new HashSet<>(Arrays.asList(shardId0, shardId1)); - assertThat(newLeases.size(), equalTo(expectedLeaseShardIds.size())); - assertThat(newLeaseKeys, 
equalTo(expectedLeaseShardIds)); - } - - /** - * Test bootstrapShardLeases() starting at TRIM_HORIZON ("beginning" of stream) - */ - @Test - public void testBootstrapShardLeasesAtTrimHorizon() throws Exception { - testCheckAndCreateLeasesForShardsIfMissing(INITIAL_POSITION_TRIM_HORIZON); - } - - /** - * Test bootstrapShardLeases() starting at LATEST (tip of stream) - */ - @Test - public void testBootstrapShardLeasesAtLatest() throws Exception { - testCheckAndCreateLeasesForShardsIfMissing(INITIAL_POSITION_LATEST); - } - - @Test - public void testCheckAndCreateLeasesForShardsIfMissingAtLatest() throws Exception { - final List shards = constructShardListForGraphA(); - - final ArgumentCaptor leaseCaptor = ArgumentCaptor.forClass(Lease.class); - - when(shardDetector.listShards()).thenReturn(shards); - when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList()); - when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true); - - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST, - cleanupLeasesOfCompletedShards, false, SCOPE); - - final Set expectedShardIds = new HashSet<>( - Arrays.asList("shardId-4", "shardId-8", "shardId-9", "shardId-10")); - - final List requestLeases = leaseCaptor.getAllValues(); - final Set requestLeaseKeys = requestLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final Set extendedSequenceNumbers = requestLeases.stream().map(Lease::checkpoint) - .collect(Collectors.toSet()); - - assertThat(requestLeases.size(), equalTo(expectedShardIds.size())); - assertThat(requestLeaseKeys, equalTo(expectedShardIds)); - assertThat(extendedSequenceNumbers.size(), equalTo(1)); - - extendedSequenceNumbers.forEach(seq -> assertThat(seq, equalTo(ExtendedSequenceNumber.LATEST))); - - verify(shardDetector).listShards(); - verify(dynamoDBLeaseRefresher, times(expectedShardIds.size())).createLeaseIfNotExists(any(Lease.class)); - 
verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class)); - - } - - @Test - public void testCheckAndCreateLeasesForNewShardsAtTrimHorizon() throws Exception { - testCheckAndCreateLeaseForShardsIfMissing(constructShardListForGraphA(), INITIAL_POSITION_TRIM_HORIZON); - } - - @Test - public void testCheckAndCreateLeasesForNewShardsAtTimestamp() throws Exception { - testCheckAndCreateLeaseForShardsIfMissing(constructShardListForGraphA(), INITIAL_POSITION_AT_TIMESTAMP); - } - - @Test(expected = KinesisClientLibIOException.class) - public void testCheckAndCreateLeasesForNewShardsWhenParentIsOpen() throws Exception { - final List shards = new ArrayList<>(constructShardListForGraphA()); - final SequenceNumberRange range = shards.get(0).sequenceNumberRange().toBuilder().endingSequenceNumber(null) - .build(); - final Shard shard = shards.get(3).toBuilder().sequenceNumberRange(range).build(); - shards.remove(3); - shards.add(3, shard); - - when(shardDetector.listShards()).thenReturn(shards); - - try { - hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, - INITIAL_POSITION_TRIM_HORIZON, cleanupLeasesOfCompletedShards, false, SCOPE); - } finally { - verify(shardDetector).listShards(); - verify(dynamoDBLeaseRefresher, never()).listLeases(); - } - } - - /** - * Test checkAndCreateLeasesForNewShards() when a parent is open and children of open parents are being ignored. - */ - @Test - public void testCheckAndCreateLeasesForNewShardsWhenParentIsOpenAndIgnoringInconsistentChildren() throws Exception { - final List shards = new ArrayList<>(constructShardListForGraphA()); - final Shard shard = shards.get(5); - assertThat(shard.shardId(), equalTo("shardId-5")); - - shards.remove(5); - - // shardId-5 in graph A has two children (shardId-9 and shardId-10). 
if shardId-5 - // is not closed, those children should be ignored when syncing shards, no leases - // should be obtained for them, and we should obtain a lease on the still-open - // parent. - shards.add(5, - shard.toBuilder() - .sequenceNumberRange(shard.sequenceNumberRange().toBuilder().endingSequenceNumber(null).build()) - .build()); - - final ArgumentCaptor leaseCaptor = ArgumentCaptor.forClass(Lease.class); - - when(shardDetector.listShards()).thenReturn(shards); - when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList()); - when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true); - - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST, - cleanupLeasesOfCompletedShards, true, SCOPE); - - final List leases = leaseCaptor.getAllValues(); - final Set leaseKeys = leases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final Set leaseSequenceNumbers = leases.stream().map(Lease::checkpoint) - .collect(Collectors.toSet()); - - final Set expectedShardIds = new HashSet<>(Arrays.asList("shardId-4", "shardId-5", "shardId-8")); - - assertThat(leaseKeys.size(), equalTo(expectedShardIds.size())); - assertThat(leaseKeys, equalTo(expectedShardIds)); - assertThat(leaseSequenceNumbers.size(), equalTo(1)); - - leaseSequenceNumbers.forEach(seq -> assertThat(seq, equalTo(ExtendedSequenceNumber.LATEST))); - - verify(shardDetector).listShards(); - verify(dynamoDBLeaseRefresher, times(expectedShardIds.size())).createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class)); - } - - @Test - public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShard() throws Exception { - testCheckAndCreateLeasesForNewShardsAndClosedShard(ExtendedSequenceNumber.TRIM_HORIZON, - INITIAL_POSITION_TRIM_HORIZON); - } - - @Test - public void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShard() 
throws Exception { - testCheckAndCreateLeasesForNewShardsAndClosedShard(ExtendedSequenceNumber.AT_TIMESTAMP, - INITIAL_POSITION_AT_TIMESTAMP); - } - - private void testCheckAndCreateLeasesForNewShardsAndClosedShard(final ExtendedSequenceNumber sequenceNumber, - final InitialPositionInStreamExtended position) throws Exception { - final String shardIdPrefix = "shardId-%d"; - final List shards = constructShardListForGraphA(); - final List leases = createLeasesFromShards(shards, sequenceNumber, LEASE_OWNER); - - // Marking shardId-0 as ShardEnd. - leases.stream().filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey())).findFirst() - .ifPresent(lease -> lease.checkpoint(ExtendedSequenceNumber.SHARD_END)); - - // Marking child of shardId-0 to be processed and not at TRIM_HORIZON. - leases.stream().filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey())).findFirst() - .ifPresent(lease -> lease.checkpoint(new ExtendedSequenceNumber("1"))); - - final ArgumentCaptor leaseCreateCaptor = ArgumentCaptor.forClass(Lease.class); - final ArgumentCaptor leaseDeleteCaptor = ArgumentCaptor.forClass(Lease.class); - - when(shardDetector.listShards()).thenReturn(shards); - when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList()).thenReturn(leases); - when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture())).thenReturn(true); - doNothing().when(dynamoDBLeaseRefresher).deleteLease(leaseDeleteCaptor.capture()); - - // Initial call: No leases present, create leases. 
- hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - - final Set createLeases = new HashSet<>(leaseCreateCaptor.getAllValues()); - - final Set expectedCreateLeases = new HashSet<>(createLeasesFromShards(shards, sequenceNumber, null)); - - assertThat(createLeases, equalTo(expectedCreateLeases)); - - verify(shardDetector, times(1)).listShards(); - verify(dynamoDBLeaseRefresher, times(1)).listLeases(); - verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class)); - - // Second call: Leases present, with shardId-0 being at ShardEnd causing cleanup. - hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - final List deleteLeases = leaseDeleteCaptor.getAllValues(); - final Set shardIds = deleteLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final Set sequenceNumbers = deleteLeases.stream().map(Lease::checkpoint) - .collect(Collectors.toSet()); - - final Set expectedShardIds = new HashSet<>(Collections.singletonList(String.format(shardIdPrefix, 0))); - final Set expectedSequenceNumbers = new HashSet<>( - Collections.singletonList(ExtendedSequenceNumber.SHARD_END)); - - assertThat(deleteLeases.size(), equalTo(1)); - assertThat(shardIds, equalTo(expectedShardIds)); - assertThat(sequenceNumbers, equalTo(expectedSequenceNumbers)); - - verify(shardDetector, times(2)).listShards(); - verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, times(2)).listLeases(); - verify(dynamoDBLeaseRefresher, times(1)).deleteLease(any(Lease.class)); - } - - @Test(expected = DependencyException.class) - public void 
testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithDeleteLeaseExceptions() - throws Exception { - testCheckAndCreateLeasesForNewShardsAndClosedShardWithDeleteLeaseExceptions(ExtendedSequenceNumber.TRIM_HORIZON, - INITIAL_POSITION_TRIM_HORIZON); - } - - @Test(expected = DependencyException.class) - public void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShardWithDeleteLeaseExceptions() - throws Exception { - testCheckAndCreateLeasesForNewShardsAndClosedShardWithDeleteLeaseExceptions(ExtendedSequenceNumber.AT_TIMESTAMP, - INITIAL_POSITION_AT_TIMESTAMP); - } - - private void testCheckAndCreateLeasesForNewShardsAndClosedShardWithDeleteLeaseExceptions( - final ExtendedSequenceNumber sequenceNumber, final InitialPositionInStreamExtended position) - throws Exception { - final String shardIdPrefix = "shardId-%d"; - final List shards = constructShardListForGraphA(); - final List leases = createLeasesFromShards(shards, sequenceNumber, LEASE_OWNER); - - // Marking shardId-0 as ShardEnd. - leases.stream().filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey())).findFirst() - .ifPresent(lease -> lease.checkpoint(ExtendedSequenceNumber.SHARD_END)); - - // Marking child of shardId-0 to be processed and not at TRIM_HORIZON. 
- leases.stream().filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey())).findFirst() - .ifPresent(lease -> lease.checkpoint(new ExtendedSequenceNumber("1"))); - - final ArgumentCaptor leaseCreateCaptor = ArgumentCaptor.forClass(Lease.class); - final ArgumentCaptor leaseDeleteCaptor = ArgumentCaptor.forClass(Lease.class); - - when(shardDetector.listShards()).thenReturn(shards); - when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList()).thenReturn(leases); - when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture())).thenReturn(true); - doThrow(new DependencyException(new Throwable("Throw for DeleteLease"))).doNothing() - .when(dynamoDBLeaseRefresher).deleteLease(leaseDeleteCaptor.capture()); - - // Initial call: Call to create leases. - hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - - final Set createLeases = new HashSet<>(leaseCreateCaptor.getAllValues()); - - final Set expectedCreateLeases = new HashSet<>(createLeasesFromShards(shards, sequenceNumber, null)); - - assertThat(createLeases, equalTo(expectedCreateLeases)); - - verify(shardDetector, times(1)).listShards(); - verify(dynamoDBLeaseRefresher, times(1)).listLeases(); - verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class)); - - try { - // Second call: Leases already present. ShardId-0 is at ShardEnd and needs to be cleaned up. Delete fails. 
- hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - } finally { - List deleteLeases = leaseDeleteCaptor.getAllValues(); - Set shardIds = deleteLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - Set sequenceNumbers = deleteLeases.stream().map(Lease::checkpoint) - .collect(Collectors.toSet()); - - final Set expectedShardIds = new HashSet<>( - Collections.singletonList(String.format(shardIdPrefix, 0))); - final Set expectedSequenceNumbers = new HashSet<>( - Collections.singletonList(ExtendedSequenceNumber.SHARD_END)); - - assertThat(deleteLeases.size(), equalTo(1)); - assertThat(shardIds, equalTo(expectedShardIds)); - assertThat(sequenceNumbers, equalTo(expectedSequenceNumbers)); - - verify(shardDetector, times(2)).listShards(); - verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, times(2)).listLeases(); - verify(dynamoDBLeaseRefresher, times(1)).deleteLease(any(Lease.class)); - - // Final call: Leases already present. ShardId-0 is at ShardEnd and needs to be cleaned up. Delete passes. 
- hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - - deleteLeases = leaseDeleteCaptor.getAllValues(); - - shardIds = deleteLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - sequenceNumbers = deleteLeases.stream().map(Lease::checkpoint).collect(Collectors.toSet()); - - assertThat(deleteLeases.size(), equalTo(2)); - assertThat(shardIds, equalTo(expectedShardIds)); - assertThat(sequenceNumbers, equalTo(expectedSequenceNumbers)); - - verify(shardDetector, times(3)).listShards(); - verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, times(3)).listLeases(); - verify(dynamoDBLeaseRefresher, times(2)).deleteLease(any(Lease.class)); - } - } - - @Test(expected = DependencyException.class) - public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithListLeasesExceptions() - throws Exception { - testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(ExtendedSequenceNumber.TRIM_HORIZON, - INITIAL_POSITION_TRIM_HORIZON); - } - - @Test(expected = DependencyException.class) - public void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShardWithListLeasesExceptions() - throws Exception { - testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(ExtendedSequenceNumber.AT_TIMESTAMP, - INITIAL_POSITION_AT_TIMESTAMP); - } - - private void testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions( - final ExtendedSequenceNumber sequenceNumber, final InitialPositionInStreamExtended position) - throws Exception { - final String shardIdPrefix = "shardId-%d"; - final List shards = constructShardListForGraphA(); - final List leases = createLeasesFromShards(shards, sequenceNumber, LEASE_OWNER); - - // Marking shardId-0 as ShardEnd. 
- leases.stream().filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey())).findFirst() - .ifPresent(lease -> lease.checkpoint(ExtendedSequenceNumber.SHARD_END)); - - // Marking child of shardId-0 to be processed and not at TRIM_HORIZON. - leases.stream().filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey())).findFirst() - .ifPresent(lease -> lease.checkpoint(new ExtendedSequenceNumber("1"))); - - final ArgumentCaptor leaseCreateCaptor = ArgumentCaptor.forClass(Lease.class); - final ArgumentCaptor leaseDeleteCaptor = ArgumentCaptor.forClass(Lease.class); - - when(shardDetector.listShards()).thenReturn(shards); - when(dynamoDBLeaseRefresher.listLeases()) - .thenThrow(new DependencyException(new Throwable("Throw for ListLeases"))) - .thenReturn(Collections.emptyList()).thenReturn(leases); - when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture())).thenReturn(true); - doNothing().when(dynamoDBLeaseRefresher).deleteLease(leaseDeleteCaptor.capture()); - - try { - // Initial call: Call to create leases. Fails on ListLeases - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - } finally { - verify(shardDetector, times(1)).listShards(); - verify(dynamoDBLeaseRefresher, times(1)).listLeases(); - verify(dynamoDBLeaseRefresher, never()).createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class)); - - // Second call: Leases not present, leases will be created. 
- hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - - final Set createLeases = new HashSet<>(leaseCreateCaptor.getAllValues()); - final Set expectedCreateLeases = new HashSet<>(createLeasesFromShards(shards, sequenceNumber, null)); - - assertThat(createLeases, equalTo(expectedCreateLeases)); - - verify(shardDetector, times(2)).listShards(); - verify(dynamoDBLeaseRefresher, times(2)).listLeases(); - verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class)); - - // Final call: Leases present, belongs to TestOwner, shardId-0 is at ShardEnd should be cleaned up. - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - - final List deleteLeases = leaseDeleteCaptor.getAllValues(); - final Set shardIds = deleteLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final Set sequenceNumbers = deleteLeases.stream().map(Lease::checkpoint) - .collect(Collectors.toSet()); - - final Set expectedShardIds = new HashSet<>( - Collections.singletonList(String.format(shardIdPrefix, 0))); - final Set expectedSequenceNumbers = new HashSet<>( - Collections.singletonList(ExtendedSequenceNumber.SHARD_END)); - - assertThat(deleteLeases.size(), equalTo(1)); - assertThat(shardIds, equalTo(expectedShardIds)); - assertThat(sequenceNumbers, equalTo(expectedSequenceNumbers)); - - verify(shardDetector, times(3)).listShards(); - verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, times(3)).listLeases(); - verify(dynamoDBLeaseRefresher, times(1)).deleteLease(any(Lease.class)); - } - } - - @Test(expected = 
DependencyException.class) - public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithCreateLeaseExceptions() - throws Exception { - testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(ExtendedSequenceNumber.TRIM_HORIZON, - INITIAL_POSITION_TRIM_HORIZON); - } - - @Test(expected = DependencyException.class) - public void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShardWithCreateLeaseExceptions() - throws Exception { - testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(ExtendedSequenceNumber.AT_TIMESTAMP, - INITIAL_POSITION_AT_TIMESTAMP); - } - - private void testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions( - final ExtendedSequenceNumber sequenceNumber, final InitialPositionInStreamExtended position) - throws Exception { - final String shardIdPrefix = "shardId-%d"; - final List shards = constructShardListForGraphA(); - final List leases = createLeasesFromShards(shards, sequenceNumber, LEASE_OWNER); - - // Marking shardId-0 as ShardEnd. - leases.stream().filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey())).findFirst() - .ifPresent(lease -> lease.checkpoint(ExtendedSequenceNumber.SHARD_END)); - - // Marking child of shardId-0 to be processed and not at TRIM_HORIZON. 
- leases.stream().filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey())).findFirst() - .ifPresent(lease -> lease.checkpoint(new ExtendedSequenceNumber("1"))); - - final ArgumentCaptor leaseCreateCaptor = ArgumentCaptor.forClass(Lease.class); - final ArgumentCaptor leaseDeleteCaptor = ArgumentCaptor.forClass(Lease.class); - - when(shardDetector.listShards()).thenReturn(shards); - when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList()) - .thenReturn(Collections.emptyList()).thenReturn(leases); - when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture())) - .thenThrow(new DependencyException(new Throwable("Throw for CreateLease"))).thenReturn(true); - doNothing().when(dynamoDBLeaseRefresher).deleteLease(leaseDeleteCaptor.capture()); - - try { - // Initial call: No leases present, create leases. Create lease Fails - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - } finally { - verify(shardDetector, times(1)).listShards(); - verify(dynamoDBLeaseRefresher, times(1)).listLeases(); - verify(dynamoDBLeaseRefresher, times(1)).createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class)); - - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - - final Set createLeases = new HashSet<>(leaseCreateCaptor.getAllValues()); - final Set expectedCreateLeases = new HashSet<>(createLeasesFromShards(shards, sequenceNumber, null)); - - assertThat(createLeases, equalTo(expectedCreateLeases)); - verify(shardDetector, times(2)).listShards(); - verify(dynamoDBLeaseRefresher, times(2)).listLeases(); - verify(dynamoDBLeaseRefresher, times(1 + expectedCreateLeases.size())) - .createLeaseIfNotExists(any(Lease.class)); - 
verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class)); - - // Final call: Leases are present, shardId-0 is at ShardEnd needs to be cleaned up. - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - - final List deleteLeases = leaseDeleteCaptor.getAllValues(); - final Set shardIds = deleteLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final Set sequenceNumbers = deleteLeases.stream().map(Lease::checkpoint) - .collect(Collectors.toSet()); - - final Set expectedShardIds = new HashSet<>( - Collections.singletonList(String.format(shardIdPrefix, 0))); - final Set expectedSequenceNumbers = new HashSet<>( - Collections.singletonList(ExtendedSequenceNumber.SHARD_END)); - - assertThat(deleteLeases.size(), equalTo(1)); - assertThat(shardIds, equalTo(expectedShardIds)); - assertThat(sequenceNumbers, equalTo(expectedSequenceNumbers)); - - verify(shardDetector, times(3)).listShards(); - verify(dynamoDBLeaseRefresher, times(1 + expectedCreateLeases.size())) - .createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, times(3)).listLeases(); - verify(dynamoDBLeaseRefresher, times(1)).deleteLease(any(Lease.class)); - } - } - - private Lease createLeaseFromShard(final Shard shard, final ExtendedSequenceNumber checkpoint, - final String leaseOwner) { - return createLeasesFromShards(Collections.singletonList(shard), checkpoint, leaseOwner).get(0); - } - - private List createLeasesFromShards(final List shards, final ExtendedSequenceNumber checkpoint, - final String leaseOwner) { - return shards.stream().map(shard -> { - final Set parentShardIds = new HashSet<>(); - if (StringUtils.isNotEmpty(shard.parentShardId())) { - parentShardIds.add(shard.parentShardId()); - } - if (StringUtils.isNotEmpty(shard.adjacentParentShardId())) { - parentShardIds.add(shard.adjacentParentShardId()); - } - return new 
Lease(shard.shardId(), leaseOwner, 0L, UUID.randomUUID(), 0L, checkpoint, null, 0L, - parentShardIds); - }).collect(Collectors.toList()); - } - - @Test - public void testCleanUpGarbageLeaseForNonExistentShard() throws Exception { - final List shards = constructShardListForGraphA(); - final String garbageShardId = "shardId-garbage-001"; - final Shard garbageShard = ShardObjectHelper.newShard(garbageShardId, null, null, - ShardObjectHelper.newSequenceNumberRange("101", null)); - final Lease garbageLease = createLeaseFromShard(garbageShard, new ExtendedSequenceNumber("99"), LEASE_OWNER); - final List leases = new ArrayList<>( - createLeasesFromShards(shards, ExtendedSequenceNumber.TRIM_HORIZON, LEASE_OWNER)); - leases.add(garbageLease); - - final ArgumentCaptor leaseCaptor = ArgumentCaptor.forClass(Lease.class); - - when(shardDetector.listShards()).thenReturn(shards); - when(dynamoDBLeaseRefresher.listLeases()).thenReturn(leases); - doNothing().when(dynamoDBLeaseRefresher).deleteLease(leaseCaptor.capture()); - - hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, - INITIAL_POSITION_TRIM_HORIZON, cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, SCOPE); - - assertThat(leaseCaptor.getAllValues().size(), equalTo(1)); - assertThat(leaseCaptor.getValue(), equalTo(garbageLease)); - - verify(shardDetector, times(2)).listShards(); - verify(dynamoDBLeaseRefresher).listLeases(); - verify(dynamoDBLeaseRefresher).deleteLease(any(Lease.class)); - verify(dynamoDBLeaseRefresher, never()).createLeaseIfNotExists(any(Lease.class)); - } - - private void testCheckAndCreateLeasesForShardsIfMissing(InitialPositionInStreamExtended initialPosition) - throws Exception { - final String shardId0 = "shardId-0"; - final String shardId1 = "shardId-1"; - final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null); - final List shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, 
sequenceRange), - ShardObjectHelper.newShard(shardId1, null, null, sequenceRange)); - - testCheckAndCreateLeaseForShardsIfMissing(shards, initialPosition); - } - - private void testCheckAndCreateLeaseForShardsIfMissing(final List shards, - final InitialPositionInStreamExtended initialPosition) throws Exception { - final ArgumentCaptor leaseCaptor = ArgumentCaptor.forClass(Lease.class); - - when(shardDetector.listShards()).thenReturn(shards); - when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList()); - when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true); - - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, initialPosition, - cleanupLeasesOfCompletedShards, false, SCOPE); - - final List leases = leaseCaptor.getAllValues(); - final Set leaseKeys = leases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final Set leaseSequenceNumbers = leases.stream().map(Lease::checkpoint) - .collect(Collectors.toSet()); - final Set expectedLeaseKeys = shards.stream().map(Shard::shardId).collect(Collectors.toSet()); - final Set expectedSequenceNumbers = new HashSet<>(Collections - .singletonList(new ExtendedSequenceNumber(initialPosition.getInitialPositionInStream().name()))); - - assertThat(leases.size(), equalTo(shards.size())); - assertThat(leaseKeys, equalTo(expectedLeaseKeys)); - assertThat(leaseSequenceNumbers, equalTo(expectedSequenceNumbers)); - - verify(shardDetector).listShards(); - verify(dynamoDBLeaseRefresher, times(shards.size())).createLeaseIfNotExists(any(Lease.class)); - verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class)); - } - - @Test - public void testDetermineNewLeasesToCreateStartingPosition() { - final String shardId0 = "shardId-0"; - final String shardId1 = "shardId-1"; - final List currentLeases = new ArrayList<>(); - final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null); - - 
final List shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange), - ShardObjectHelper.newShard(shardId1, null, null, sequenceRange)); - - final Set initialPositions = new HashSet<>( - Arrays.asList(INITIAL_POSITION_LATEST, INITIAL_POSITION_TRIM_HORIZON)); - - final Set expectedLeaseShardIds = new HashSet<>(Arrays.asList(shardId0, shardId1)); - - for (InitialPositionInStreamExtended initialPosition : initialPositions) { - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - initialPosition); - assertThat(newLeases.size(), equalTo(2)); - - for (Lease lease : newLeases) { - assertThat(expectedLeaseShardIds.contains(lease.leaseKey()), equalTo(true)); - assertThat(lease.checkpoint(), - equalTo(new ExtendedSequenceNumber(initialPosition.getInitialPositionInStream().toString()))); - } - } - } - - @Test - public void testDetermineNewLeasesToCreateIgnoreClosedShard() { - final String lastShardId = "shardId-1"; - final List currentLeases = new ArrayList<>(); - - final List shards = Arrays.asList( - ShardObjectHelper.newShard("shardId-0", null, null, - ShardObjectHelper.newSequenceNumberRange("303", "404")), - ShardObjectHelper.newShard(lastShardId, null, null, - ShardObjectHelper.newSequenceNumberRange("405", null))); - - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - INITIAL_POSITION_LATEST); - - assertThat(newLeases.size(), equalTo(1)); - assertThat(newLeases.get(0).leaseKey(), equalTo(lastShardId)); - } - -// /** -// * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position Latest) -// * Shard structure (each level depicts a stream segment): -// * 0 1 2 3 4 5- shards till epoch 102 -// * \ / \ / | | -// * 6 7 4 5- shards from epoch 103 - 205 -// * \ / | /\ -// * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) -// * Current leases: (3, 4, 5) -// */ - @Test - public void 
testDetermineNewLeasesToCreateSplitMergeLatest1() { - final List shards = constructShardListForGraphA(); - final List currentLeases = Arrays.asList(newLease("shardId-3"), newLease("shardId-4"), - newLease("shardId-5")); - - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - INITIAL_POSITION_LATEST); - - final Map expectedShardIdCheckpointMap = new HashMap<>(); - expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.TRIM_HORIZON); - expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.TRIM_HORIZON); - expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.TRIM_HORIZON); - expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST); - expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.LATEST); - expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.TRIM_HORIZON); - - assertThat(newLeases.size(), equalTo(expectedShardIdCheckpointMap.size())); - for (Lease lease : newLeases) { - assertThat("Unexpected lease: " + lease, expectedShardIdCheckpointMap.containsKey(lease.leaseKey()), - equalTo(true)); - assertThat(lease.checkpoint(), equalTo(expectedShardIdCheckpointMap.get(lease.leaseKey()))); - } - } - -// /** -// * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position Latest) -// * Shard structure (each level depicts a stream segment): -// * 0 1 2 3 4 5- shards till epoch 102 -// * \ / \ / | | -// * 6 7 4 5- shards from epoch 103 - 205 -// * \ / | /\ -// * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) -// * Current leases: (4, 5, 7) -// */ - @Test - public void testDetermineNewLeasesToCreateSplitMergeLatest2() { - final List shards = constructShardListForGraphA(); - final List currentLeases = Arrays.asList(newLease("shardId-4"), newLease("shardId-5"), - newLease("shardId-7")); - - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - 
INITIAL_POSITION_LATEST); - - final Map expectedShardIdCheckpointMap = new HashMap<>(); - expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.TRIM_HORIZON); - expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.TRIM_HORIZON); - expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.TRIM_HORIZON); - expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST); - - assertThat(newLeases.size(), equalTo(expectedShardIdCheckpointMap.size())); - for (Lease lease : newLeases) { - assertThat("Unexpected lease: " + lease, expectedShardIdCheckpointMap.containsKey(lease.leaseKey()), - equalTo(true)); - assertThat(lease.checkpoint(), equalTo(expectedShardIdCheckpointMap.get(lease.leaseKey()))); - } - } - -// /** -// * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position TrimHorizon) -// * Shard structure (each level depicts a stream segment): -// * 0 1 2 3 4 5- shards till epoch 102 -// * \ / \ / | | -// * 6 7 4 5- shards from epoch 103 - 205 -// * \ / | /\ -// * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) -// * Current leases: (3, 4, 5) -// */ - @Test - public void testDetermineNewLeasesToCreateSplitMergeHorizon1() { - final List shards = constructShardListForGraphA(); - final List currentLeases = Arrays.asList(newLease("shardId-3"), newLease("shardId-4"), - newLease("shardId-5")); - - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - INITIAL_POSITION_TRIM_HORIZON); - - final Set leaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final List checkpoints = newLeases.stream().map(Lease::checkpoint) - .collect(Collectors.toList()); - final Set checkpoint = new HashSet<>(checkpoints); - - final Set expectedLeaseKeys = new HashSet<>(Arrays.asList("shardId-0", "shardId-1", "shardId-2", - "shardId-6", "shardId-7", "shardId-8", "shardId-9", "shardId-10")); - final Set expectedCheckpoint = new HashSet<>( 
- Collections.singletonList(ExtendedSequenceNumber.TRIM_HORIZON)); - - assertThat(newLeases.size(), equalTo(expectedLeaseKeys.size())); - assertThat(checkpoints.size(), equalTo(expectedLeaseKeys.size())); - assertThat(leaseKeys, equalTo(expectedLeaseKeys)); - assertThat(checkpoint, equalTo(expectedCheckpoint)); - } - -// /** -// * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position TrimHorizon) -// * Shard structure (each level depicts a stream segment): -// * 0 1 2 3 4 5- shards till epoch 102 -// * \ / \ / | | -// * 6 7 4 5- shards from epoch 103 - 205 -// * \ / | /\ -// * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) -// * Current leases: (4, 5, 7) -// */ - @Test - public void testDetermineNewLeasesToCreateSplitMergeHorizon2() { - final List shards = constructShardListForGraphA(); - final List currentLeases = Arrays.asList(newLease("shardId-4"), newLease("shardId-5"), - newLease("shardId-7")); - - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - INITIAL_POSITION_TRIM_HORIZON); - - final Set leaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final List checkpoints = newLeases.stream().map(Lease::checkpoint) - .collect(Collectors.toList()); - final Set checkpoint = new HashSet<>(checkpoints); - - final Set expectedLeaseKeys = new HashSet<>( - Arrays.asList("shardId-8", "shardId-9", "shardId-10", "shardId-6", "shardId-0", "shardId-1")); - final Set expectedCheckpoint = new HashSet<>( - Collections.singletonList(ExtendedSequenceNumber.TRIM_HORIZON)); - - assertThat(newLeases.size(), equalTo(expectedLeaseKeys.size())); - assertThat(checkpoints.size(), equalTo(expectedLeaseKeys.size())); - assertThat(leaseKeys, equalTo(expectedLeaseKeys)); - assertThat(checkpoint, equalTo(expectedCheckpoint)); - } - -// /** -// * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position TrimHorizon) -// * For shard graph B (see the construct method doc for 
structure). -// * -// * Current leases: empty set -// */ - @Test - public void testDetermineNewLeasesToCreateGraphBNoInitialLeasesTrim() { - final List shards = constructShardListForGraphB(); - final List currentLeases = new ArrayList<>(); - - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - INITIAL_POSITION_TRIM_HORIZON); - - final Set leaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final List checkpoints = newLeases.stream().map(Lease::checkpoint) - .collect(Collectors.toList()); - final Set checkpoint = new HashSet<>(checkpoints); - - final Set expectedCheckpoint = new HashSet<>( - Collections.singletonList(ExtendedSequenceNumber.TRIM_HORIZON)); - final Set expectedLeaseKeys = IntStream.range(0, 11).mapToObj(id -> String.format("shardId-%d", id)) - .collect(Collectors.toSet()); - - assertThat(newLeases.size(), equalTo(expectedLeaseKeys.size())); - assertThat(checkpoints.size(), equalTo(expectedLeaseKeys.size())); - assertThat(leaseKeys, equalTo(expectedLeaseKeys)); - assertThat(checkpoint, equalTo(expectedCheckpoint)); - } - -// /** -// * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position AT_TIMESTAMP) -// * Shard structure (each level depicts a stream segment): -// * 0 1 2 3 4 5- shards till epoch 102 -// * \ / \ / | | -// * 6 7 4 5- shards from epoch 103 - 205 -// * \ / | /\ -// * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) -// * Current leases: (3, 4, 5) -// */ - @Test - public void testDetermineNewLeasesToCreateSplitMergeAtTimestamp1() { - final List shards = constructShardListForGraphA(); - final List currentLeases = Arrays.asList(newLease("shardId-3"), newLease("shardId-4"), - newLease("shardId-5")); - - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - INITIAL_POSITION_AT_TIMESTAMP); - final Set leaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final 
List checkpoints = newLeases.stream().map(Lease::checkpoint) - .collect(Collectors.toList()); - final Set checkpoint = new HashSet<>(checkpoints); - - final Set expectedLeaseKeys = new HashSet<>(Arrays.asList("shardId-0", "shardId-1", "shardId-2", - "shardId-6", "shardId-7", "shardId-8", "shardId-9", "shardId-10")); - final Set expectedCheckpoint = new HashSet<>( - Collections.singletonList(ExtendedSequenceNumber.AT_TIMESTAMP)); - - assertThat(newLeases.size(), equalTo(expectedLeaseKeys.size())); - assertThat(checkpoints.size(), equalTo(expectedLeaseKeys.size())); - assertThat(leaseKeys, equalTo(expectedLeaseKeys)); - assertThat(checkpoint, equalTo(expectedCheckpoint)); - } - -// /** -// * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position AT_TIMESTAMP) -// * Shard structure (each level depicts a stream segment): -// * 0 1 2 3 4 5- shards till epoch 102 -// * \ / \ / | | -// * 6 7 4 5- shards from epoch 103 - 205 -// * \ / | /\ -// * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) -// * Current leases: (4, 5, 7) -// */ - @Test - public void testDetermineNewLeasesToCreateSplitMergeAtTimestamp2() { - final List shards = constructShardListForGraphA(); - final List currentLeases = Arrays.asList(newLease("shardId-4"), newLease("shardId-5"), - newLease("shardId-7")); - - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - INITIAL_POSITION_AT_TIMESTAMP); - final Set leaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final List checkpoints = newLeases.stream().map(Lease::checkpoint) - .collect(Collectors.toList()); - final Set checkpoint = new HashSet<>(checkpoints); - - final Set expectedLeaseKeys = new HashSet<>( - Arrays.asList("shardId-0", "shardId-1", "shardId-6", "shardId-8", "shardId-9", "shardId-10")); - final Set expectedCheckpoint = new HashSet<>( - Collections.singletonList(ExtendedSequenceNumber.AT_TIMESTAMP)); - - assertThat(newLeases.size(), 
equalTo(expectedLeaseKeys.size())); - assertThat(checkpoints.size(), equalTo(expectedLeaseKeys.size())); - assertThat(leaseKeys, equalTo(expectedLeaseKeys)); - assertThat(checkpoint, equalTo(expectedCheckpoint)); - } - - /** - * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position AT_TIMESTAMP) For shard graph B (see the - * construct method doc for structure). Current leases: empty set - */ - @Test - public void testDetermineNewLeasesToCreateGraphBNoInitialLeasesAtTimestamp() { - final List shards = constructShardListForGraphB(); - final List currentLeases = new ArrayList<>(); - - final List newLeases = HierarchicalShardSyncer.determineNewLeasesToCreate(shards, currentLeases, - INITIAL_POSITION_AT_TIMESTAMP); - final Set leaseKeys = newLeases.stream().map(Lease::leaseKey).collect(Collectors.toSet()); - final List checkpoints = newLeases.stream().map(Lease::checkpoint) - .collect(Collectors.toList()); - final Set checkpoint = new HashSet<>(checkpoints); - - final Set expectedLeaseKeys = IntStream.range(0, shards.size()) - .mapToObj(id -> String.format("shardId-%d", id)).collect(Collectors.toSet()); - final Set expectedCheckpoint = new HashSet<>( - Collections.singletonList(ExtendedSequenceNumber.AT_TIMESTAMP)); - - assertThat(newLeases.size(), equalTo(expectedLeaseKeys.size())); - assertThat(checkpoints.size(), equalTo(expectedLeaseKeys.size())); - assertThat(leaseKeys, equalTo(expectedLeaseKeys)); - assertThat(checkpoint, equalTo(expectedCheckpoint)); - } - - /* - * Helper method to construct a shard list for graph A. Graph A is defined below. 
Shard structure (y-axis is - * epochs): 0 1 2 3 4 5- shards till epoch 102 \ / \ / | | 6 7 4 5- shards from epoch 103 - 205 \ / | /\ 8 4 9 10 - - * shards from epoch 206 (open - no ending sequenceNumber) - */ - private List constructShardListForGraphA() { - final SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("11", "102"); - final SequenceNumberRange range1 = ShardObjectHelper.newSequenceNumberRange("11", null); - final SequenceNumberRange range2 = ShardObjectHelper.newSequenceNumberRange("11", "205"); - final SequenceNumberRange range3 = ShardObjectHelper.newSequenceNumberRange("103", "205"); - final SequenceNumberRange range4 = ShardObjectHelper.newSequenceNumberRange("206", null); - - return Arrays.asList( - ShardObjectHelper.newShard("shardId-0", null, null, range0, - ShardObjectHelper.newHashKeyRange("0", "99")), - ShardObjectHelper.newShard("shardId-1", null, null, range0, - ShardObjectHelper.newHashKeyRange("100", "199")), - ShardObjectHelper.newShard("shardId-2", null, null, range0, - ShardObjectHelper.newHashKeyRange("200", "299")), - ShardObjectHelper.newShard("shardId-3", null, null, range0, - ShardObjectHelper.newHashKeyRange("300", "399")), - ShardObjectHelper.newShard("shardId-4", null, null, range1, - ShardObjectHelper.newHashKeyRange("400", "499")), - ShardObjectHelper.newShard("shardId-5", null, null, range2, - ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY)), - ShardObjectHelper.newShard("shardId-6", "shardId-0", "shardId-1", range3, - ShardObjectHelper.newHashKeyRange("0", "199")), - ShardObjectHelper.newShard("shardId-7", "shardId-2", "shardId-3", range3, - ShardObjectHelper.newHashKeyRange("200", "399")), - ShardObjectHelper.newShard("shardId-8", "shardId-6", "shardId-7", range4, - ShardObjectHelper.newHashKeyRange("0", "399")), - ShardObjectHelper.newShard("shardId-9", "shardId-5", null, range4, - ShardObjectHelper.newHashKeyRange("500", "799")), - ShardObjectHelper.newShard("shardId-10", null, 
"shardId-5", range4, - ShardObjectHelper.newHashKeyRange("800", ShardObjectHelper.MAX_HASH_KEY))); - } - -// /* -// * Helper method to construct a shard list for graph B. Graph B is defined below. -// * Shard structure (x-axis is epochs): -// * 0 3 6 9 -// * \ / \ / \ / -// * 2 5 8 -// * / \ / \ / \ -// * 1 4 7 10 -// */ - private List constructShardListForGraphB() { - final SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("1000", "1049"); - final SequenceNumberRange range1 = ShardObjectHelper.newSequenceNumberRange("1050", "1099"); - final SequenceNumberRange range2 = ShardObjectHelper.newSequenceNumberRange("1100", "1149"); - final SequenceNumberRange range3 = ShardObjectHelper.newSequenceNumberRange("1150", "1199"); - final SequenceNumberRange range4 = ShardObjectHelper.newSequenceNumberRange("1200", "1249"); - final SequenceNumberRange range5 = ShardObjectHelper.newSequenceNumberRange("1250", "1299"); - final SequenceNumberRange range6 = ShardObjectHelper.newSequenceNumberRange("1300", null); - - final HashKeyRange hashRange0 = ShardObjectHelper.newHashKeyRange("0", "499"); - final HashKeyRange hashRange1 = ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY); - final HashKeyRange hashRange2 = ShardObjectHelper.newHashKeyRange("0", ShardObjectHelper.MAX_HASH_KEY); - - return Arrays.asList(ShardObjectHelper.newShard("shardId-0", null, null, range0, hashRange0), - ShardObjectHelper.newShard("shardId-1", null, null, range0, hashRange1), - ShardObjectHelper.newShard("shardId-2", "shardId-0", "shardId-1", range1, hashRange2), - ShardObjectHelper.newShard("shardId-3", "shardId-2", null, range2, hashRange0), - ShardObjectHelper.newShard("shardId-4", "shardId-2", null, range2, hashRange1), - ShardObjectHelper.newShard("shardId-5", "shardId-3", "shardId-4", range3, hashRange2), - ShardObjectHelper.newShard("shardId-6", "shardId-5", null, range4, hashRange0), - ShardObjectHelper.newShard("shardId-7", "shardId-5", null, range4, 
hashRange1), - ShardObjectHelper.newShard("shardId-8", "shardId-6", "shardId-7", range5, hashRange2), - ShardObjectHelper.newShard("shardId-9", "shardId-8", null, range6, hashRange0), - ShardObjectHelper.newShard("shardId-10", null, "shardId-8", range6, hashRange1)); - } - - /** - * Test CheckIfDescendantAndAddNewLeasesForAncestors when shardId is null - */ - @Test - public void testCheckIfDescendantAndAddNewLeasesForAncestorsNullShardId() { - final Map memoizationContext = new HashMap<>(); - - assertThat(HierarchicalShardSyncer - .checkIfDescendantAndAddNewLeasesForAncestors(null, INITIAL_POSITION_LATEST, null, null, - null, memoizationContext), equalTo(false)); - } - - /** - * Test CheckIfDescendantAndAddNewLeasesForAncestors when shard has been trimmed - */ - @Test - public void testCheckIfDescendantAndAddNewLeasesForAncestorsTrimmedShard() { - final String shardId = "shardId-trimmed"; - final Map memoizationContext = new HashMap<>(); - - assertThat(HierarchicalShardSyncer - .checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, null, - new HashMap<>(), null, memoizationContext), equalTo(false)); - } - - /** - * Test CheckIfDescendantAndAddNewLeasesForAncestors when there is a current lease for the shard - */ - @Test - public void testCheckIfDescendantAndAddNewLeasesForAncestorsForShardWithCurrentLease() { - final String shardId = "shardId-current"; - final Set shardIdsOfCurrentLeases = new HashSet<>(Collections.singletonList(shardId)); - final Map newLeaseMap = Collections.emptyMap(); - final Map memoizationContext = new HashMap<>(); - final Map kinesisShards = new HashMap<>(); - kinesisShards.put(shardId, ShardObjectHelper.newShard(shardId, null, null, null)); - - assertThat( - HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, - shardIdsOfCurrentLeases, kinesisShards, newLeaseMap, memoizationContext), equalTo(true)); - assertThat(newLeaseMap.isEmpty(), equalTo(true)); - } - - /** - * 
Test CheckIfDescendantAndAddNewLeasesForAncestors - two parents, two ancestors, not descendant - */ - @Test - public void testCheckIfDescendantAndAddNewLeasesForAncestors2P2ANotDescendant() { - final String parentShardId = "shardId-parent"; - final String adjacentParentShardId = "shardId-adjacentParent"; - final String shardId = "shardId-9-1"; - final Set shardIdsOfCurrentLeases = Collections.emptySet(); - final Map newLeaseMap = Collections.emptyMap(); - final Map memoizationContext = new HashMap<>(); - final Map kinesisShards = new HashMap<>(); - - kinesisShards.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); - kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null)); - kinesisShards.put(shardId, ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null)); - - assertThat( - HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, - shardIdsOfCurrentLeases, kinesisShards, newLeaseMap, memoizationContext), equalTo(false)); - assertThat(newLeaseMap.isEmpty(), equalTo(true)); - } - -// /** -// * Test CheckIfDescendantAndAddNewLeasesForAncestors - two parents, there is a lease for one parent. 
-// */ -// @Test - // public void testCheckIfDescendantAndAddNewLeasesForAncestors2P2A1PDescendant() { -// Set shardIdsOfCurrentLeases = new HashSet(); -// Map newLeaseMap = new HashMap(); -// Map kinesisShards = new HashMap(); -// -// String parentShardId = "shardId-parent"; -// kinesisShards.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); -// shardIdsOfCurrentLeases.add(parentShardId); -// -// String adjacentParentShardId = "shardId-adjacentParent"; -// kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null)); -// -// String shardId = "shardId-9-1"; -// Shard shard = ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null); -// kinesisShards.put(shardId, shard); -// -// Map memoizationContext = new HashMap<>(); -// assertTrue(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, -// shardIdsOfCurrentLeases, -// kinesisShards, -// newLeaseMap, -// memoizationContext)); -// assertEquals(1, newLeaseMap.size()); -// assertTrue(newLeaseMap.containsKey(adjacentParentShardId)); -// Lease adjacentParentLease = newLeaseMap.get(adjacentParentShardId); -// assertEquals(ExtendedSequenceNumber.LATEST, adjacentParentLease.checkpoint()); -// } -// -// /** -// * Test parentShardIds() when the shard has no parents. -// */ -// @Test - // public void testGetParentShardIdsNoParents() { -// Shard shard = new Shard(); -// assertTrue(ShardSyncer.getParentShardIds(shard, null).isEmpty()); -// } -// -// /** -// * Test parentShardIds() when the shard has no parents. -// */ -// @Test - // public void testGetParentShardIdsTrimmedParents() { -// Map shardMap = new HashMap(); -// Shard shard = ShardObjectHelper.newShard("shardId-test", "foo", "bar", null); -// assertTrue(ShardSyncer.getParentShardIds(shard, shardMap).isEmpty()); -// } -// -// /** -// * Test parentShardIds() when the shard has a single parent. 
-// */ -// @Test - // public void testGetParentShardIdsSingleParent() { -// Map shardMap = new HashMap(); -// -// String parentShardId = "shardId-parent"; -// shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); -// -// Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, null, null); -// Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertEquals(1, parentShardIds.size()); -// assertTrue(parentShardIds.contains(parentShardId)); -// -// shard.setParentShardId(null); -// parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertTrue(parentShardIds.isEmpty()); -// -// shard.setAdjacentParentShardId(parentShardId); -// parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertEquals(1, parentShardIds.size()); -// assertTrue(parentShardIds.contains(parentShardId)); -// } -// -// /** -// * Test parentShardIds() when the shard has two parents, one is trimmed. -// */ -// @Test - // public void testGetParentShardIdsOneTrimmedParent() { -// Map shardMap = new HashMap(); -// -// String parentShardId = "shardId-parent"; -// Shard parent = ShardObjectHelper.newShard(parentShardId, null, null, null); -// -// String adjacentParentShardId = "shardId-adjacentParent"; -// Shard adjacentParent = ShardObjectHelper.newShard(adjacentParentShardId, null, null, null); -// -// Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null); -// -// shardMap.put(parentShardId, parent); -// Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertEquals(1, parentShardIds.size()); -// assertTrue(parentShardIds.contains(parentShardId)); -// -// shardMap.remove(parentShardId); -// parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertTrue(parentShardIds.isEmpty()); -// -// shardMap.put(adjacentParentShardId, adjacentParent); -// parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// 
assertEquals(1, parentShardIds.size()); -// assertTrue(parentShardIds.contains(adjacentParentShardId)); -// } -// -// /** -// * Test parentShardIds() when the shard has two parents. -// */ -// @Test - // public void testGetParentShardIdsTwoParents() { -// Map shardMap = new HashMap(); -// -// String parentShardId = "shardId-parent"; -// shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); -// -// String adjacentParentShardId = "shardId-adjacentParent"; -// shardMap.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null)); -// -// Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null); -// -// Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertEquals(2, parentShardIds.size()); -// assertTrue(parentShardIds.contains(parentShardId)); -// assertTrue(parentShardIds.contains(adjacentParentShardId)); -// } -// -// /** -// */ -// @Test - // public void testNewLease() { -// Shard shard = new Shard(); -// String shardId = "shardId-95"; -// shard.setShardId(shardId); -// String parentShardId = "shardId-parent"; -// String adjacentParentShardId = "shardId-adjacentParent"; -// shard.setParentShardId(parentShardId); -// shard.setAdjacentParentShardId(adjacentParentShardId); -// -// Lease lease = ShardSyncer.newKCLLease(shard); -// assertEquals(shardId, lease.leaseKey()); -// assertNull(lease.checkpoint()); -// Set parentIds = lease.parentShardIds(); -// assertEquals(2, parentIds.size()); -// assertTrue(parentIds.contains(parentShardId)); -// assertTrue(parentIds.contains(adjacentParentShardId)); -// } -// -// /** -// * Test method for constructShardIdToShardMap. -// * -// * . 
-// */ -// @Test - // public void testConstructShardIdToShardMap() { -// List shards = new ArrayList(2); -// shards.add(ShardObjectHelper.newShard("shardId-0", null, null, null)); -// shards.add(ShardObjectHelper.newShard("shardId-1", null, null, null)); -// -// Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); -// assertEquals(shards.size(), shardIdToShardMap.size()); -// for (Shard shard : shards) { -// assertSame(shard, shardIdToShardMap.get(shard.getShardId())); -// } -// } -// -// /** -// * Test getOpenShards() - no shards are open. -// */ -// @Test - // public void testGetOpenShardsNoneOpen() { -// List shards = new ArrayList(); -// shards.add(ShardObjectHelper.newShard("shardId-9384", -// null, -// null, -// ShardObjectHelper.newSequenceNumberRange("123", "345"))); -// assertTrue(ShardSyncer.getOpenShards(shards).isEmpty()); -// } -// -// /** -// * Test getOpenShards() - test null and max end sequence number. -// */ -// @Test - // public void testGetOpenShardsNullAndMaxEndSeqNum() { -// List shards = new ArrayList(); -// String shardId = "shardId-2738"; -// SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("123", null); -// shards.add(ShardObjectHelper.newShard(shardId, null, null, sequenceNumberRange)); -// -// // Verify shard is considered open when it has a null end sequence number -// List openShards = ShardSyncer.getOpenShards(shards); -// assertEquals(1, openShards.size()); -// assertEquals(shardId, openShards.get(0).getShardId()); -// -// // Close shard before testing for max sequence number -// sequenceNumberRange.setEndingSequenceNumber("1000"); -// openShards = ShardSyncer.getOpenShards(shards); -// assertTrue(openShards.isEmpty()); -// -// // Verify shard is considered closed when the end sequence number is set to max allowed sequence number -// sequenceNumberRange.setEndingSequenceNumber(MAX_SEQUENCE_NUMBER.toString()); -// openShards = ShardSyncer.getOpenShards(shards); -// assertEquals(0, 
openShards.size()); -// } -// -// /** -// * Test isCandidateForCleanup -// * -// * @throws KinesisClientLibIOException -// */ -// @Test - // public void testIsCandidateForCleanup() throws KinesisClientLibIOException { -// String parentShardId = "shardId-0000"; -// String adjacentParentShardId = "shardId-0001"; -// String shardId = "shardId-0002"; -// Lease lease = newLease(shardId); -// List parentShardIds = new ArrayList<>(); -// parentShardIds.add(parentShardId); -// parentShardIds.add(adjacentParentShardId); -// lease.parentShardIds(parentShardIds); -// Set currentKinesisShardIds = new HashSet<>(); -// -// currentKinesisShardIds.add(shardId); -// assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// -// currentKinesisShardIds.clear(); -// assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// -// currentKinesisShardIds.add(parentShardId); -// // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// -// currentKinesisShardIds.clear(); -// assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// -// currentKinesisShardIds.add(adjacentParentShardId); -// // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// currentKinesisShardIds.add(parentShardId); -// // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// currentKinesisShardIds.add(shardId); -// assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// } -// -// /** -// * Test isCandidateForCleanup -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = KinesisClientLibIOException.class) - // public void testIsCandidateForCleanupParentExists() throws KinesisClientLibIOException { -// String parentShardId = "shardId-0000"; -// String adjacentParentShardId = "shardId-0001"; -// String shardId = "shardId-0002"; -// Lease lease = newLease(shardId); -// List parentShardIds = new ArrayList<>(); -// 
parentShardIds.add(parentShardId); -// parentShardIds.add(adjacentParentShardId); -// lease.parentShardIds(parentShardIds); -// Set currentKinesisShardIds = new HashSet<>(); -// -// currentKinesisShardIds.add(parentShardId); -// assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// } -// -// /** -// * Test isCandidateForCleanup -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = KinesisClientLibIOException.class) - // public void testIsCandidateForCleanupAdjacentParentExists() throws KinesisClientLibIOException { -// String parentShardId = "shardId-0000"; -// String adjacentParentShardId = "shardId-0001"; -// String shardId = "shardId-0002"; -// Lease lease = newLease(shardId); -// List parentShardIds = new ArrayList<>(); -// parentShardIds.add(parentShardId); -// parentShardIds.add(adjacentParentShardId); -// lease.parentShardIds(parentShardIds); -// Set currentKinesisShardIds = new HashSet<>(); -// -// currentKinesisShardIds.add(adjacentParentShardId); -// assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// } -// -// /** -// * Test cleanup of lease for a shard that has been fully processed (and processing of child shards has begun). 
-// * -// * @throws DependencyException -// * @throws InvalidStateException -// * @throws ProvisionedThroughputException -// */ -// @Test - // public void testCleanupLeaseForClosedShard() -// throws DependencyException, InvalidStateException, ProvisionedThroughputException { -// String closedShardId = "shardId-2"; -// Lease leaseForClosedShard = newLease(closedShardId); -// leaseForClosedShard.checkpoint(new ExtendedSequenceNumber("1234")); -// dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseForClosedShard); -// -// Set childShardIds = new HashSet<>(); -// List trackedLeases = new ArrayList<>(); -// Set parentShardIds = new HashSet<>(); -// parentShardIds.add(closedShardId); -// String childShardId1 = "shardId-5"; -// Lease childLease1 = newLease(childShardId1); -// childLease1.parentShardIds(parentShardIds); -// childLease1.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); -// String childShardId2 = "shardId-7"; -// Lease childLease2 = newLease(childShardId2); -// childLease2.parentShardIds(parentShardIds); -// childLease2.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); -// Map trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); -// -// // empty list of leases -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // closed shard has not been fully processed yet (checkpoint != SHARD_END) -// trackedLeases.add(leaseForClosedShard); -// trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // closed shard has been fully processed yet (checkpoint == SHARD_END) -// leaseForClosedShard.checkpoint(ExtendedSequenceNumber.SHARD_END); -// dynamoDBLeaseRefresher.updateLease(leaseForClosedShard); -// 
ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // lease for only one child exists -// childShardIds.add(childShardId1); -// childShardIds.add(childShardId2); -// dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseForClosedShard); -// dynamoDBLeaseRefresher.createLeaseIfNotExists(childLease1); -// trackedLeases.add(childLease1); -// trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // leases for both children exists, but they are both at TRIM_HORIZON -// dynamoDBLeaseRefresher.createLeaseIfNotExists(childLease2); -// trackedLeases.add(childLease2); -// trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // leases for both children exists, one is at TRIM_HORIZON -// childLease1.checkpoint(new ExtendedSequenceNumber("34890")); -// dynamoDBLeaseRefresher.updateLease(childLease1); -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // leases for both children exists, NONE of them are at TRIM_HORIZON -// childLease2.checkpoint(new ExtendedSequenceNumber("43789")); -// dynamoDBLeaseRefresher.updateLease(childLease2); -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// } -// -// /** -// * Test we can handle trimmed Kinesis shards (absent from the shard 
list), and valid closed shards. -// * -// * @throws KinesisClientLibIOException -// */ -// @Test - // public void testAssertShardCoveredOrAbsentTestAbsentAndValid() throws KinesisClientLibIOException { -// List shards = new ArrayList<>(); -// String expectedClosedShardId = "shardId-34098"; -// SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); -// HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); -// Shard closedShard = -// ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); -// SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300"); -// Shard child1 = -// ShardObjectHelper.newShard("shardId-54879", expectedClosedShardId, null, childSequenceNumberRange); -// Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); -// Map> shardIdToChildShardIdsMap = -// ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// Set closedShardIds = new HashSet<>(); -// closedShardIds.add(expectedClosedShardId); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// -// // test for case where shard has been trimmed (absent from list) -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// -// // Populate shards. 
-// shards.add(closedShard); -// shards.add(child1); -// shardIdToShardMap.put(expectedClosedShardId, closedShard); -// shardIdToShardMap.put(child1.getShardId(), child1); -// shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// -// // test degenerate split/merge -// child1.setHashKeyRange(hashKeyRange); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// -// // test merge -// child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("10", "2985")); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("3", "25")); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// -// // test split -// HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15"); -// HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25"); -// child1.setHashKeyRange(childHashKeyRange1); -// Shard child2 = ShardObjectHelper.newShard("shardId-43789", -// null, -// expectedClosedShardId, -// childSequenceNumberRange, -// childHashKeyRange2); -// shards.add(child2); -// shardIdToShardMap.put(child2.getShardId(), child2); -// shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// } -// -// /** -// * Test we throw an exception if the shard is open -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = KinesisClientLibIOException.class) - // public void testAssertShardCoveredOrAbsentTestOpen() throws KinesisClientLibIOException { -// List shards = new ArrayList<>(); -// String expectedClosedShardId = "shardId-34098"; -// SequenceNumberRange sequenceNumberRange = 
ShardObjectHelper.newSequenceNumberRange("103", null); -// HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); -// Shard openShard = -// ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); -// shards.add(openShard); -// Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); -// Map> shardIdToChildShardIdsMap = -// ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// Set closedShardIds = new HashSet<>(); -// closedShardIds.add(expectedClosedShardId); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// } -// -// /** -// * Test we throw an exception if there are no children -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = KinesisClientLibIOException.class) - // public void testAssertShardCoveredOrAbsentTestNoChildren() throws KinesisClientLibIOException { -// List shards = new ArrayList<>(); -// String expectedClosedShardId = "shardId-34098"; -// SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); -// HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); -// Shard closedShard = -// ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); -// shards.add(closedShard); -// Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); -// Map> shardIdToChildShardIdsMap = -// ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// Set closedShardIds = new HashSet<>(); -// closedShardIds.add(expectedClosedShardId); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// } -// -// /** -// * Test we throw an exception if children don't cover hash key range (min of children > min of parent) -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = 
KinesisClientLibIOException.class) - // public void testAssertShardCoveredOrAbsentTestIncompleteSplitMin() throws KinesisClientLibIOException { -// HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); -// HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("12", "15"); -// HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25"); -// testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2); -// } -// -// /** -// * Test we throw an exception if children don't cover hash key range (max of children < max of parent) -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = KinesisClientLibIOException.class) - // public void testAssertShardCoveredOrAbsentTestIncompleteSplitMax() throws KinesisClientLibIOException { -// HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); -// HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15"); -// HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "23"); -// testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2); -// } -// -// private void testAssertShardCoveredOrAbsentTestIncompleteSplit(HashKeyRange parentHashKeyRange, -// HashKeyRange child1HashKeyRange, -// HashKeyRange child2HashKeyRange) -// throws KinesisClientLibIOException { -// List shards = new ArrayList<>(); -// String expectedClosedShardId = "shardId-34098"; -// SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); -// Shard closedShard = -// ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, parentHashKeyRange); -// shards.add(closedShard); -// -// SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300"); -// Shard child1 = ShardObjectHelper.newShard("shardId-43789", -// null, -// expectedClosedShardId, -// 
childSequenceNumberRange, -// child1HashKeyRange); -// shards.add(child1); -// Shard child2 = ShardObjectHelper.newShard("shardId-43789", -// null, -// expectedClosedShardId, -// childSequenceNumberRange, -// child2HashKeyRange); -// shards.add(child2); -// -// Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); -// Map> shardIdToChildShardIdsMap = -// ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// Set closedShardIds = new HashSet<>(); -// closedShardIds.add(expectedClosedShardId); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// } -// - /** - * Helper method. - * - * @param shardId - * @return - */ - private static Lease newLease(final String shardId) { - final Lease lease = new Lease(); - lease.leaseKey(shardId); - - return lease; - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/KinesisShardDetectorTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/KinesisShardDetectorTest.java deleted file mode 100644 index be25a360..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/KinesisShardDetectorTest.java +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Amazon Software License - * (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at - * http://aws.amazon.com/asl/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.leases; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.LimitExceededException; -import software.amazon.awssdk.services.kinesis.model.ListShardsRequest; -import software.amazon.awssdk.services.kinesis.model.ListShardsResponse; -import software.amazon.awssdk.services.kinesis.model.ResourceInUseException; -import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; -import software.amazon.awssdk.services.kinesis.model.Shard; - -/** - * - */ -@RunWith(MockitoJUnitRunner.class) -public class KinesisShardDetectorTest { - - private static final String STREAM_NAME = "TestStream"; - private static final long LIST_SHARDS_BACKOFF_TIME_IN_MILLIS = 50L; - private static final int MAX_LIST_SHARDS_RETRY_ATTEMPTS = 5; - private static final long LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS = 10; - private static final int MAX_CACHE_MISSES_BEFORE_RELOAD = 10; - private static final int CACHE_MISS_WARNING_MODULUS = 2; - private static final String SHARD_ID = "shardId-%012d"; - - private KinesisShardDetector shardDetector; - - @Mock - private KinesisAsyncClient client; - - @Before - public void setup() { - 
shardDetector = new KinesisShardDetector(client, STREAM_NAME, LIST_SHARDS_BACKOFF_TIME_IN_MILLIS, - MAX_LIST_SHARDS_RETRY_ATTEMPTS, LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS, - MAX_CACHE_MISSES_BEFORE_RELOAD, CACHE_MISS_WARNING_MODULUS); - } - - @Test - public void testListShardsSingleResponse() { - final List expectedShards = new ArrayList<>(); - final ListShardsResponse listShardsResponse = ListShardsResponse.builder().nextToken(null) - .shards(expectedShards).build(); - final CompletableFuture future = CompletableFuture.completedFuture(listShardsResponse); - - when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); - - final List shards = shardDetector.listShards(); - - assertThat(shards, equalTo(expectedShards)); - verify(client).listShards(eq(ListShardsRequest.builder().streamName(STREAM_NAME).build())); - } - - @Test(expected = IllegalStateException.class) - public void testListShardsNullResponse() { - final CompletableFuture future = CompletableFuture.completedFuture(null); - - when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); - - try { - shardDetector.listShards(); - } finally { - verify(client, times(MAX_LIST_SHARDS_RETRY_ATTEMPTS)) - .listShards(eq(ListShardsRequest.builder().streamName(STREAM_NAME).build())); - } - } - - @Test - public void testListShardsResouceInUse() { - final CompletableFuture future = CompletableFuture.supplyAsync(() -> { - throw ResourceInUseException.builder().build(); - }); - - when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); - - final List shards = shardDetector.listShards(); - - assertThat(shards, nullValue()); - verify(client).listShards(eq(ListShardsRequest.builder().streamName(STREAM_NAME).build())); - - } - - @Test(expected = LimitExceededException.class) - public void testListShardsThrottled() { - final CompletableFuture future = CompletableFuture.supplyAsync(() -> { - throw LimitExceededException.builder().build(); - }); - - 
when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); - - try { - shardDetector.listShards(); - } finally { - verify(client, times(MAX_LIST_SHARDS_RETRY_ATTEMPTS)) - .listShards(eq(ListShardsRequest.builder().streamName(STREAM_NAME).build())); - } - } - - @Test(expected = ResourceNotFoundException.class) - public void testListShardsResourceNotFound() { - final CompletableFuture future = CompletableFuture.supplyAsync(() -> { - throw ResourceNotFoundException.builder().build(); - }); - - when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); - - try { - shardDetector.listShards(); - } finally { - verify(client).listShards(eq(ListShardsRequest.builder().streamName(STREAM_NAME).build())); - } - } - - @Test - public void testGetShard() { - final String shardId = String.format(SHARD_ID, 1); - - shardDetector.cachedShardMap(createShardList()); - - final Shard shard = shardDetector.shard(shardId); - - assertThat(shard, equalTo(Shard.builder().shardId(shardId).build())); - verify(client, never()).listShards(any(ListShardsRequest.class)); - } - - @Test - public void testGetShardEmptyCache() { - final String shardId = String.format(SHARD_ID, 1); - final CompletableFuture future = CompletableFuture - .completedFuture(ListShardsResponse.builder().shards(createShardList()).build()); - - when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); - - final Shard shard = shardDetector.shard(shardId); - - assertThat(shard, equalTo(Shard.builder().shardId(shardId).build())); - verify(client).listShards(eq(ListShardsRequest.builder().streamName(STREAM_NAME).build())); - } - - @Test - public void testGetShardNonExistentShard() { - final String shardId = String.format(SHARD_ID, 5); - - shardDetector.cachedShardMap(createShardList()); - - final Shard shard = shardDetector.shard(shardId); - - assertThat(shard, nullValue()); - assertThat(shardDetector.cacheMisses().get(), equalTo(1)); - verify(client, 
never()).listShards(any(ListShardsRequest.class)); - } - - @Test - public void testGetShardNewShardForceRefresh() { - final String shardId = String.format(SHARD_ID, 5); - final List shards = new ArrayList<>(createShardList()); - shards.add(Shard.builder().shardId(shardId).build()); - - final CompletableFuture future = CompletableFuture - .completedFuture(ListShardsResponse.builder().shards(shards).build()); - - shardDetector.cachedShardMap(createShardList()); - - when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); - - final List responses = IntStream.range(0, MAX_CACHE_MISSES_BEFORE_RELOAD + 1) - .mapToObj(x -> shardDetector.shard(shardId)).collect(Collectors.toList()); - - IntStream.range(0, MAX_CACHE_MISSES_BEFORE_RELOAD).forEach(x -> { - assertThat(responses.get(x), nullValue()); - }); - - assertThat(responses.get(MAX_CACHE_MISSES_BEFORE_RELOAD), equalTo(Shard.builder().shardId(shardId).build())); - verify(client).listShards(eq(ListShardsRequest.builder().streamName(STREAM_NAME).build())); - } - - @Test - public void testGetShardNonExistentShardForceRefresh() { - final String shardId = String.format(SHARD_ID, 5); - final CompletableFuture future = CompletableFuture - .completedFuture(ListShardsResponse.builder().shards(createShardList()).build()); - - shardDetector.cachedShardMap(createShardList()); - - when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); - - final List responses = IntStream.range(0, MAX_CACHE_MISSES_BEFORE_RELOAD + 1) - .mapToObj(x -> shardDetector.shard(shardId)).collect(Collectors.toList()); - - responses.forEach(response -> assertThat(response, nullValue())); - assertThat(shardDetector.cacheMisses().get(), equalTo(0)); - verify(client).listShards(eq(ListShardsRequest.builder().streamName(STREAM_NAME).build())); - } - - private List createShardList() { - return Arrays.asList(Shard.builder().shardId(String.format(SHARD_ID, 0)).build(), - Shard.builder().shardId(String.format(SHARD_ID, 1)).build(), - 
Shard.builder().shardId(String.format(SHARD_ID, 2)).build(), - Shard.builder().shardId(String.format(SHARD_ID, 3)).build(), - Shard.builder().shardId(String.format(SHARD_ID, 4)).build()); - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseBuilder.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseBuilder.java deleted file mode 100644 index 3265a1ad..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseBuilder.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.leases; - -import java.util.HashSet; -import java.util.Set; -import java.util.UUID; - -import lombok.Setter; -import lombok.experimental.Accessors; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -@Setter -@Accessors(fluent = true) -public class LeaseBuilder { - private String leaseKey; - private String leaseOwner; - private Long leaseCounter = 0L; - private UUID concurrencyToken; - private Long lastCounterIncrementNanos; - private ExtendedSequenceNumber checkpoint; - private ExtendedSequenceNumber pendingCheckpoint; - private Long ownerSwitchesSinceCheckpoint = 0L; - private Set parentShardIds = new HashSet<>(); - - public Lease build() { - return new Lease(leaseKey, leaseOwner, leaseCounter, concurrencyToken, lastCounterIncrementNanos, - checkpoint, pendingCheckpoint, ownerSwitchesSinceCheckpoint, parentShardIds); - } -} \ No newline at end of file diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationTest.java deleted file mode 100644 index 9cc770db..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationTest.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.leases; - -import org.junit.Rule; -import org.junit.rules.TestWatcher; -import org.junit.runner.Description; -import org.mockito.Mock; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; -import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseSerializer; -import software.amazon.kinesis.leases.dynamodb.TableCreatorCallback; - -@Slf4j -public class LeaseIntegrationTest { - private LeaseSerializer leaseSerializer = new DynamoDBLeaseSerializer(); - - protected static DynamoDBLeaseRefresher leaseRefresher; - protected static DynamoDbAsyncClient ddbClient = DynamoDbAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); - - protected String tableName = "nagl_ShardProgress"; - - @Mock - protected TableCreatorCallback tableCreatorCallback; - - @Rule - public TestWatcher watcher = new TestWatcher() { - - @Override - protected void starting(Description description) { - if (leaseRefresher == null) { - // Do some static setup once per class. 
- - leaseRefresher = new DynamoDBLeaseRefresher(tableName, ddbClient, leaseSerializer, true, - tableCreatorCallback); - } - - try { - if (!leaseRefresher.leaseTableExists()) { - log.info("Creating lease table"); - leaseRefresher.createLeaseTableIfNotExists(10L, 10L); - - leaseRefresher.waitUntilLeaseTableExists(10, 500); - } - - log.info("Beginning test case {}", description.getMethodName()); - for (Lease lease : leaseRefresher.listLeases()) { - leaseRefresher.deleteLease(lease); - } - } catch (Exception e) { - String message = - "Test case " + description.getMethodName() + " fails because of exception during init: " + e; - log.error(message); - throw new RuntimeException(message, e); - } - } - }; - -} - diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardObjectHelper.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardObjectHelper.java deleted file mode 100644 index 9421a4c9..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardObjectHelper.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.leases; - -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.List; - - -import software.amazon.awssdk.services.kinesis.model.HashKeyRange; -import software.amazon.awssdk.services.kinesis.model.SequenceNumberRange; -import software.amazon.awssdk.services.kinesis.model.Shard; - -/** - * Helper class to create Shard, SequenceRange and related objects. - */ -public class ShardObjectHelper { - - private static final int EXPONENT = 128; - - /** - * Max value of a sequence number (2^128 -1). Useful for defining sequence number range for a shard. - */ - static final String MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); - - /** - * Min value of a sequence number (0). Useful for defining sequence number range for a shard. - */ - static final String MIN_SEQUENCE_NUMBER = BigInteger.ZERO.toString(); - - /** - * Max value of a hash key (2^128 -1). Useful for defining hash key range for a shard. - */ - public static final String MAX_HASH_KEY = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); - - /** - * Min value of a hash key (0). Useful for defining sequence number range for a shard. - */ - public static final String MIN_HASH_KEY = BigInteger.ZERO.toString(); - - /** - * - */ - private ShardObjectHelper() { - } - - - /** Helper method to create a new shard object. - * @param shardId - * @param parentShardId - * @param adjacentParentShardId - * @param sequenceNumberRange - * @return - */ - static Shard newShard(String shardId, - String parentShardId, - String adjacentParentShardId, - SequenceNumberRange sequenceNumberRange) { - return newShard(shardId, parentShardId, adjacentParentShardId, sequenceNumberRange, null); - } - - /** Helper method to create a new shard object. 
- * @param shardId - * @param parentShardId - * @param adjacentParentShardId - * @param sequenceNumberRange - * @param hashKeyRange - * @return - */ - public static Shard newShard(String shardId, - String parentShardId, - String adjacentParentShardId, - SequenceNumberRange sequenceNumberRange, - HashKeyRange hashKeyRange) { - return Shard.builder().shardId(shardId).parentShardId(parentShardId).adjacentParentShardId(adjacentParentShardId).sequenceNumberRange(sequenceNumberRange).hashKeyRange(hashKeyRange).build(); - } - - /** Helper method. - * @param startingSequenceNumber - * @param endingSequenceNumber - * @return - */ - public static SequenceNumberRange newSequenceNumberRange(String startingSequenceNumber, String endingSequenceNumber) { - return SequenceNumberRange.builder().startingSequenceNumber(startingSequenceNumber).endingSequenceNumber(endingSequenceNumber).build(); - } - - /** Helper method. - * @param startingHashKey - * @param endingHashKey - * @return - */ - public static HashKeyRange newHashKeyRange(String startingHashKey, String endingHashKey) { - return HashKeyRange.builder().startingHashKey(startingHashKey).endingHashKey(endingHashKey).build(); - } - - static List getParentShardIds(Shard shard) { - List parentShardIds = new ArrayList<>(2); - if (shard.adjacentParentShardId() != null) { - parentShardIds.add(shard.adjacentParentShardId()); - } - if (shard.parentShardId() != null) { - parentShardIds.add(shard.parentShardId()); - } - return parentShardIds; - } - - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSyncTaskIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSyncTaskIntegrationTest.java deleted file mode 100644 index a89e8e56..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSyncTaskIntegrationTest.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
- * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases; - -//import java.net.URI; - -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Ignore; -import org.junit.Test; - -//import software.amazon.awssdk.core.client.builder.ClientAsyncHttpConfiguration; -//import software.amazon.awssdk.http.nio.netty.NettySdkHttpClientFactory; -import software.amazon.awssdk.regions.Region; -import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest; -import software.amazon.awssdk.services.kinesis.model.KinesisException; -import software.amazon.awssdk.services.kinesis.model.Shard; -import software.amazon.awssdk.services.kinesis.model.StreamStatus; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseSerializer; -import software.amazon.kinesis.leases.dynamodb.TableCreatorCallback; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import 
software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.NullMetricsFactory; - -/** - * WARN: to run this integration test you'll have to provide a AwsCredentials.properties file on the classpath. - */ -// TODO: fix tests -@Ignore -public class ShardSyncTaskIntegrationTest { - private static final String STREAM_NAME = "IntegrationTestStream02"; - private static final boolean USE_CONSISTENT_READS = true; - private static final int MAX_CACHE_MISSES_BEFORE_RELOAD = 1000; - private static final long LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS = 30; - private static final int CACHE_MISS_WARNING_MODULUS = 250; - private static final MetricsFactory NULL_METRICS_FACTORY = new NullMetricsFactory(); - private static KinesisAsyncClient kinesisClient; - - private LeaseRefresher leaseRefresher; - private ShardDetector shardDetector; - private HierarchicalShardSyncer hierarchicalShardSyncer; - - @BeforeClass - public static void setUpBeforeClass() throws Exception { -// ClientAsyncHttpConfiguration configuration = ClientAsyncHttpConfiguration.builder().httpClientFactory( -// NettySdkHttpClientFactory.builder().trustAllCertificates(true).maxConnectionsPerEndpoint(10).build()) -// .build(); -// kinesisClient = KinesisAsyncClient.builder().asyncHttpConfiguration(configuration) -// .endpointOverride(new URI("https://aws-kinesis-alpha.corp.amazon.com")).region(Region.US_EAST_1) -// .build(); -// - try { - CreateStreamRequest req = CreateStreamRequest.builder().streamName(STREAM_NAME).shardCount(1).build(); - kinesisClient.createStream(req); - } catch (KinesisException ase) { - ase.printStackTrace(); - } - StreamStatus status; -// do { -// status = StreamStatus.fromValue(kinesisClient.describeStreamSummary( -// DescribeStreamSummaryRequest.builder().streamName(STREAM_NAME).build()).get() -// .streamDescriptionSummary().streamStatusString()); -// } while (status != 
StreamStatus.ACTIVE); -// - } - - @Before - public void setup() { - DynamoDbAsyncClient client = DynamoDbAsyncClient.builder().region(Region.US_EAST_1).build(); - leaseRefresher = - new DynamoDBLeaseRefresher("ShardSyncTaskIntegrationTest", client, new DynamoDBLeaseSerializer(), - USE_CONSISTENT_READS, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); - - shardDetector = new KinesisShardDetector(kinesisClient, STREAM_NAME, 500L, 50, - LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS, MAX_CACHE_MISSES_BEFORE_RELOAD, CACHE_MISS_WARNING_MODULUS); - hierarchicalShardSyncer = new HierarchicalShardSyncer(); - } - - /** - * Test method for call(). - * - * @throws DependencyException - * @throws InvalidStateException - * @throws ProvisionedThroughputException - */ - @Test - public final void testCall() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - if (!leaseRefresher.leaseTableExists()) { - final Long readCapacity = 10L; - final Long writeCapacity = 10L; - leaseRefresher.createLeaseTableIfNotExists(readCapacity, writeCapacity); - } - leaseRefresher.deleteAll(); - Set shardIds = shardDetector.listShards().stream().map(Shard::shardId).collect(Collectors.toSet()); - ShardSyncTask syncTask = new ShardSyncTask(shardDetector, leaseRefresher, - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST), false, false, 0L, - hierarchicalShardSyncer, NULL_METRICS_FACTORY); - syncTask.call(); - List leases = leaseRefresher.listLeases(); - Set leaseKeys = new HashSet<>(); - for (Lease lease : leases) { - leaseKeys.add(lease.leaseKey()); - } - - // Verify that all shardIds had leases for them - Assert.assertEquals(shardIds.size(), leases.size()); - shardIds.removeAll(leaseKeys); - Assert.assertTrue(shardIds.isEmpty()); - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBCheckpointerTest.java 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBCheckpointerTest.java deleted file mode 100644 index ff09f207..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBCheckpointerTest.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases.dynamodb; - -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.UUID; - -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.kinesis.checkpoint.dynamodb.DynamoDBCheckpointer; -import software.amazon.kinesis.exceptions.KinesisClientLibException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseCoordinator; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -@RunWith(MockitoJUnitRunner.class) -public class DynamoDBCheckpointerTest { - private 
static final String SHARD_ID = "shardId-test"; - private static final ExtendedSequenceNumber TEST_CHKPT = new ExtendedSequenceNumber("string-test"); - private static final UUID TEST_UUID = UUID.randomUUID(); - private static final String OPERATION = "TestOperation"; - - @Mock - private LeaseRefresher leaseRefresher; - @Mock - private LeaseCoordinator leaseCoordinator; - - private DynamoDBCheckpointer dynamoDBCheckpointer; - - @Before - public void setup() { - dynamoDBCheckpointer = new DynamoDBCheckpointer(leaseCoordinator, leaseRefresher); - dynamoDBCheckpointer.operation(OPERATION); - } - - @Test(expected = ShutdownException.class) - public void testSetCheckpointWithUnownedShardId() throws KinesisClientLibException, DependencyException, - InvalidStateException, ProvisionedThroughputException { - final Lease lease = new Lease(); - when(leaseCoordinator.getCurrentlyHeldLease(eq(SHARD_ID))).thenReturn(lease); - when(leaseCoordinator.updateLease(eq(lease), eq(TEST_UUID), eq(OPERATION), eq(SHARD_ID))).thenReturn(false); - try { - dynamoDBCheckpointer.setCheckpoint(SHARD_ID, TEST_CHKPT, TEST_UUID.toString()); - } finally { - verify(leaseCoordinator).getCurrentlyHeldLease(eq(SHARD_ID)); - verify(leaseCoordinator).updateLease(eq(lease), eq(TEST_UUID), eq(OPERATION), eq(SHARD_ID)); - } - } - -// @Test(expected = DependencyException.class) -// public void testWaitLeaseTableTimeout() -// throws DependencyException, ProvisionedThroughputException, IllegalStateException { -// Set mock lease manager to return false in waiting -// doReturn(false).when(leaseRefresher).waitUntilLeaseTableExists(anyLong(), anyLong()); -// leaseCoordinator.initialize(); -// } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorIntegrationTest.java deleted file mode 100644 index abadd4a8..00000000 --- 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorIntegrationTest.java +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases.dynamodb; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; -import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; -import software.amazon.kinesis.checkpoint.dynamodb.DynamoDBCheckpointer; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseCoordinator; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.LeasingException; -import 
software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.NullMetricsFactory; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -@RunWith(MockitoJUnitRunner.class) -public class DynamoDBLeaseCoordinatorIntegrationTest { - private static final int ATTEMPTS = 20; - private static final String OPERATION = "TestOperation"; - - private static final String TABLE_NAME = DynamoDBLeaseCoordinatorIntegrationTest.class.getSimpleName(); - private static final String WORKER_ID = UUID.randomUUID().toString(); - private static final long LEASE_DURATION_MILLIS = 5000L; - private static final long EPSILON_MILLIS = 25L; - private static final int MAX_LEASES_FOR_WORKER = Integer.MAX_VALUE; - private static final int MAX_LEASES_TO_STEAL_AT_ONE_TIME = 1; - private static final int MAX_LEASE_RENEWER_THREAD_COUNT = 20; - private static final long INITIAL_LEASE_TABLE_READ_CAPACITY = 10L; - private static final long INITIAL_LEASE_TABLE_WRITE_CAPACITY = 10L; - - private static DynamoDBLeaseRefresher leaseRefresher; - private static DynamoDBCheckpointer dynamoDBCheckpointer; - - private LeaseCoordinator coordinator; - private final String leaseKey = "shd-1"; - private final MetricsFactory metricsFactory = new NullMetricsFactory(); - - @Before - public void setup() throws ProvisionedThroughputException, DependencyException, InvalidStateException { - final boolean useConsistentReads = true; - if (leaseRefresher == null) { - DynamoDbAsyncClient dynamoDBClient = DynamoDbAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); - leaseRefresher = new DynamoDBLeaseRefresher(TABLE_NAME, dynamoDBClient, new DynamoDBLeaseSerializer(), - useConsistentReads, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); - } - leaseRefresher.createLeaseTableIfNotExists(10L, 10L); - - int retryLeft = ATTEMPTS; - - while 
(!leaseRefresher.leaseTableExists()) { - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - // Sleep called. - } - retryLeft--; - if (retryLeft == 0) { - if (!leaseRefresher.leaseTableExists()) { - fail("Failed to create table"); - } - } - } - - leaseRefresher.deleteAll(); - coordinator = new DynamoDBLeaseCoordinator(leaseRefresher, WORKER_ID, LEASE_DURATION_MILLIS, - EPSILON_MILLIS, MAX_LEASES_FOR_WORKER, MAX_LEASES_TO_STEAL_AT_ONE_TIME, MAX_LEASE_RENEWER_THREAD_COUNT, - INITIAL_LEASE_TABLE_READ_CAPACITY, INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory); - dynamoDBCheckpointer = new DynamoDBCheckpointer(coordinator, leaseRefresher); - dynamoDBCheckpointer.operation(OPERATION); - - coordinator.start(); - } - - /** - * Tests update checkpoint success. - */ - @Test - public void testUpdateCheckpoint() throws Exception { - TestHarnessBuilder builder = new TestHarnessBuilder(); - builder.withLease(leaseKey, null).build(); - - // Run the taker and renewer in-between getting the Lease object and calling checkpoint - coordinator.runLeaseTaker(); - coordinator.runLeaseRenewer(); - - Lease lease = coordinator.getCurrentlyHeldLease(leaseKey); - if (lease == null) { - List leases = leaseRefresher.listLeases(); - for (Lease kinesisClientLease : leases) { - System.out.println(kinesisClientLease); - } - } - - assertNotNull(lease); - ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint"); - // lease's leaseCounter is wrong at this point, but it shouldn't matter. 
- assertTrue(dynamoDBCheckpointer.setCheckpoint(lease.leaseKey(), newCheckpoint, lease.concurrencyToken())); - - Lease fromDynamo = leaseRefresher.getLease(lease.leaseKey()); - - lease.leaseCounter(lease.leaseCounter() + 1); - lease.checkpoint(newCheckpoint); - lease.leaseOwner(coordinator.workerIdentifier()); - assertEquals(lease, fromDynamo); - } - - /** - * Tests if getAllAssignments() returns all leases - */ - @Test - public void testGetAllAssignments() throws Exception { - TestHarnessBuilder builder = new TestHarnessBuilder(); - - Map addedLeases = builder.withLease("1", WORKER_ID) - .withLease("2", WORKER_ID) - .withLease("3", WORKER_ID) - .withLease("4", WORKER_ID) - .withLease("5", WORKER_ID) - .build(); - - // Run the taker - coordinator.runLeaseTaker(); - - List allLeases = coordinator.allLeases(); - assertThat(allLeases.size(), equalTo(addedLeases.size())); - assertThat(allLeases.containsAll(addedLeases.values()), equalTo(true)); - } - - /** - * Tests updateCheckpoint when the lease has changed out from under us. - */ - @Test - public void testUpdateCheckpointLeaseUpdated() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(); - builder.withLease(leaseKey, null).build(); - - coordinator.runLeaseTaker(); - coordinator.runLeaseRenewer(); - Lease lease = coordinator.getCurrentlyHeldLease(leaseKey); - - assertNotNull(lease); - leaseRefresher.renewLease(coordinator.getCurrentlyHeldLease(leaseKey)); - - ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint"); - assertFalse(dynamoDBCheckpointer.setCheckpoint(lease.leaseKey(), newCheckpoint, lease.concurrencyToken())); - - Lease fromDynamo = leaseRefresher.getLease(lease.leaseKey()); - - lease.leaseCounter(lease.leaseCounter() + 1); - // Counter and owner changed, but checkpoint did not. - lease.leaseOwner(coordinator.workerIdentifier()); - assertEquals(lease, fromDynamo); - } - - /** - * Tests updateCheckpoint with a bad concurrency token. 
- */ - @Test - public void testUpdateCheckpointBadConcurrencyToken() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(); - builder.withLease(leaseKey, null).build(); - - coordinator.runLeaseTaker(); - coordinator.runLeaseRenewer(); - Lease lease = coordinator.getCurrentlyHeldLease(leaseKey); - - assertNotNull(lease); - - ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint"); - assertFalse(dynamoDBCheckpointer.setCheckpoint(lease.leaseKey(), newCheckpoint, UUID.randomUUID())); - - Lease fromDynamo = leaseRefresher.getLease(lease.leaseKey()); - - // Owner should be the only thing that changed. - lease.leaseOwner(coordinator.workerIdentifier()); - assertEquals(lease, fromDynamo); - } - - public static class TestHarnessBuilder { - - private Map leases = new HashMap<>(); - - public TestHarnessBuilder withLease(String shardId, String owner) { - Lease lease = new Lease(); - lease.checkpoint(new ExtendedSequenceNumber("checkpoint")); - lease.ownerSwitchesSinceCheckpoint(0L); - lease.leaseCounter(0L); - lease.leaseOwner(owner); - lease.parentShardIds(Collections.singleton("parentShardId")); - lease.leaseKey(shardId); - leases.put(shardId, lease); - return this; - } - - public Map build() throws LeasingException { - for (Lease lease : leases.values()) { - leaseRefresher.createLeaseIfNotExists(lease); - if (lease.leaseOwner() != null) { - lease.lastCounterIncrementNanos(System.nanoTime()); - } - } - return leases; - } - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherIntegrationTest.java deleted file mode 100644 index b94c8305..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherIntegrationTest.java +++ /dev/null @@ -1,300 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases.dynamodb; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.verify; - -import java.util.Collection; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; - -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseIntegrationTest; -import software.amazon.kinesis.leases.exceptions.LeasingException; - -@RunWith(MockitoJUnitRunner.class) -public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest { - - @Before - public void setup() { - doNothing().when(tableCreatorCallback).performAction( - eq(TableCreatorCallbackInput.builder().dynamoDbClient(ddbClient).tableName(tableName).build())); - } - - /** - * Test listLeases when no records are present. - */ - @Test - public void testListNoRecords() throws LeasingException { - List leases = leaseRefresher.listLeases(); - assertTrue(leases.isEmpty()); - } - - /** - * Tests listLeases when records are present. 
Exercise dynamo's paging functionality. - */ - @Test - public void testListWithRecords() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - - int numRecordsToPut = 10; - - for (int i = 0; i < numRecordsToPut; i++) { - builder.withLease(Integer.toString(i)); - } - - Collection expected = builder.build().values(); - - // The / 3 here ensures that we will test Dynamo's paging mechanics. - List actual = leaseRefresher.list(numRecordsToPut / 3); - - for (Lease lease : actual) { - assertNotNull(expected.remove(lease)); - } - - assertTrue(expected.isEmpty()); - } - - /** - * Tests getLease when a record is present. - */ - @Test - public void testGetLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - Lease expected = builder.withLease("1").build().get("1"); - - Lease actual = leaseRefresher.getLease(expected.leaseKey()); - assertEquals(expected, actual); - } - - /** - * Tests leaseRefresher.get() when the looked-for record is absent. - */ - @Test - public void testGetNull() throws LeasingException { - Lease actual = leaseRefresher.getLease("bogusShardId"); - assertNull(actual); - } - - /** - * Tests leaseRefresher.holdLease's success scenario. - */ - @Test - public void testRenewLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - Lease lease = builder.withLease("1").build().get("1"); - Long originalLeaseCounter = lease.leaseCounter(); - - leaseRefresher.renewLease(lease); - assertTrue(originalLeaseCounter + 1 == lease.leaseCounter()); - - Lease fromDynamo = leaseRefresher.getLease(lease.leaseKey()); - - assertEquals(lease, fromDynamo); - } - - /** - * Tests leaseRefresher.holdLease when the lease has changed out from under us. 
- */ - @Test - public void testHoldUpdatedLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - Lease lease = builder.withLease("1").build().get("1"); - - Lease leaseCopy = leaseRefresher.getLease(lease.leaseKey()); - - // lose lease - leaseRefresher.takeLease(lease, "bar"); - - assertFalse(leaseRefresher.renewLease(leaseCopy)); - } - - /** - * Tests takeLease when the lease is not already owned. - */ - @Test - public void testTakeUnownedLease() throws LeasingException { - testTakeLease(false); - } - - /** - * Tests takeLease when the lease is already owned. - */ - @Test - public void testTakeOwnedLease() throws LeasingException { - testTakeLease(true); - } - - private void testTakeLease(boolean owned) throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - Lease lease = builder.withLease("1", owned ? "originalOwner" : null).build().get("1"); - Long originalLeaseCounter = lease.leaseCounter(); - - String newOwner = "newOwner"; - leaseRefresher.takeLease(lease, newOwner); - assertTrue(originalLeaseCounter + 1 == lease.leaseCounter()); - assertTrue((owned ? 1 : 0) == lease.ownerSwitchesSinceCheckpoint()); - assertEquals(newOwner, lease.leaseOwner()); - - Lease fromDynamo = leaseRefresher.getLease(lease.leaseKey()); - - assertEquals(lease, fromDynamo); - } - - /** - * Tests takeLease when the lease has changed out from under us. - */ - @Test - public void testTakeUpdatedLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - Lease lease = builder.withLease("1").build().get("1"); - - Lease leaseCopy = leaseRefresher.getLease(lease.leaseKey()); - - String newOwner = "newOwner"; - leaseRefresher.takeLease(lease, newOwner); - - assertFalse(leaseRefresher.takeLease(leaseCopy, newOwner)); - } - - /** - * Tests evictLease when the lease is currently unowned. 
- */ - public void testEvictUnownedLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - Lease lease = builder.withLease("1", null).build().get("1"); - - assertFalse(leaseRefresher.evictLease(lease)); - } - - /** - * Tests evictLease when the lease is currently owned. - */ - @Test - public void testEvictOwnedLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - Lease lease = builder.withLease("1").build().get("1"); - Long originalLeaseCounter = lease.leaseCounter(); - - leaseRefresher.evictLease(lease); - assertNull(lease.leaseOwner()); - assertTrue(originalLeaseCounter + 1 == lease.leaseCounter()); - - Lease fromDynamo = leaseRefresher.getLease(lease.leaseKey()); - - assertEquals(lease, fromDynamo); - } - - /** - * Tests evictLease when the lease has changed out from under us. Note that evicting leases - * is conditional on the lease owner, unlike everything else which is conditional on the - * lease counter. - */ - @Test - public void testEvictChangedLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - Lease lease = builder.withLease("1").build().get("1"); - - // Change the owner only - this should cause our optimistic lock to fail. - lease.leaseOwner("otherOwner"); - assertFalse(leaseRefresher.evictLease(lease)); - } - - /** - * Tests deleteLease when a lease exists. - */ - @Test - public void testDeleteLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - Lease lease = builder.withLease("1").build().get("1"); - - leaseRefresher.deleteLease(lease); - - Lease newLease = leaseRefresher.getLease(lease.leaseKey()); - assertNull(newLease); - } - - /** - * Tests deleteLease when a lease does not exist. 
- */ - @Test - public void testDeleteNonexistentLease() throws LeasingException { - Lease lease = new Lease(); - lease.leaseKey("1"); - // The lease has not been written to DDB - try to delete it and expect success. - - leaseRefresher.deleteLease(lease); - } - - @Test - public void testWaitUntilLeaseTableExists() throws LeasingException { - DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher("nagl_ShardProgress", ddbClient, - new DynamoDBLeaseSerializer(), true, tableCreatorCallback) { - @Override - long sleep(long timeToSleepMillis) { - fail("Should not sleep"); - return 0L; - } - - }; - - assertTrue(refresher.waitUntilLeaseTableExists(1, 1)); - } - - @Test - public void testWaitUntilLeaseTableExistsTimeout() throws LeasingException { - /* - * Just using AtomicInteger for the indirection it provides. - */ - final AtomicInteger sleepCounter = new AtomicInteger(0); - DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher("nonexistentTable", ddbClient, - new DynamoDBLeaseSerializer(), true, tableCreatorCallback) { - @Override - long sleep(long timeToSleepMillis) { - assertEquals(1000L, timeToSleepMillis); - sleepCounter.incrementAndGet(); - return 1000L; - } - - }; - - assertFalse(refresher.waitUntilLeaseTableExists(2, 1)); - assertEquals(1, sleepCounter.get()); - } - - @Test - public void testTableCreatorCallback() throws Exception { - DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher(tableName, ddbClient, - new DynamoDBLeaseSerializer(), true, tableCreatorCallback); - - refresher.performPostTableCreationAction(); - - verify(tableCreatorCallback).performAction( - eq(TableCreatorCallbackInput.builder().dynamoDbClient(ddbClient).tableName(tableName).build())); - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerTest.java deleted file mode 100644 index c2aae598..00000000 
--- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerTest.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases.dynamodb; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.Executors; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.metrics.NullMetricsFactory; - -@RunWith(MockitoJUnitRunner.class) -public class DynamoDBLeaseRenewerTest { - private final String workerIdentifier = 
"WorkerId"; - private final long leaseDurationMillis = 10000; - private DynamoDBLeaseRenewer renewer; - private List leasesToRenew; - - @Mock - private LeaseRefresher leaseRefresher; - - private static Lease newLease(String leaseKey) { - return new Lease(leaseKey, "LeaseOwner", 0L, UUID.randomUUID(), System.nanoTime(), null, null, null, new HashSet<>()); - } - - @Before - public void before() { - leasesToRenew = null; - renewer = new DynamoDBLeaseRenewer(leaseRefresher, workerIdentifier, leaseDurationMillis, - Executors.newCachedThreadPool(), new NullMetricsFactory()); - } - - @After - public void after() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - if (leasesToRenew == null) { - return; - } - for (Lease lease : leasesToRenew) { - verify(leaseRefresher, times(1)).renewLease(eq(lease)); - } - } - - @Test - public void testLeaseRenewerHoldsGoodLeases() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - /* - * Prepare leases to be renewed - * 2 Good - */ - Lease lease1 = newLease("1"); - Lease lease2 = newLease("2"); - leasesToRenew = Arrays.asList(lease1,lease2); - renewer.addLeasesToRenew(leasesToRenew); - - doReturn(true).when(leaseRefresher).renewLease(lease1); - doReturn(true).when(leaseRefresher).renewLease(lease2); - - renewer.renewLeases(); - - assertEquals(2, renewer.getCurrentlyHeldLeases().size()); - } - - @Test - public void testLeaseRenewerDoesNotRenewExpiredLease() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - String leaseKey = "expiredLease"; - long initialCounterIncrementNanos = 5L; // "expired" time. 
- Lease lease1 = newLease(leaseKey); - lease1.lastCounterIncrementNanos(initialCounterIncrementNanos); - - leasesToRenew = new ArrayList<>(); - leasesToRenew.add(lease1); - doReturn(true).when(leaseRefresher).renewLease(lease1); - renewer.addLeasesToRenew(leasesToRenew); - - assertTrue(lease1.isExpired(1, System.nanoTime())); - assertNull(renewer.getCurrentlyHeldLease(leaseKey)); - renewer.renewLeases(); - // Don't renew lease(s) with same key if getCurrentlyHeldLease returned null previously - assertNull(renewer.getCurrentlyHeldLease(leaseKey)); - assertFalse(renewer.getCurrentlyHeldLeases().containsKey(leaseKey)); - - // Clear the list to avoid triggering expectation mismatch in after(). - leasesToRenew.clear(); - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerTest.java deleted file mode 100644 index 458d9cdf..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerTest.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.leases.dynamodb; - -import java.util.ArrayList; -import java.util.List; - -import junit.framework.Assert; - -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseTaker; - -/** - * - */ -public class DynamoDBLeaseTakerTest { - - /** - * @throws java.lang.Exception - */ - @BeforeClass - public static void setUpBeforeClass() throws Exception { - } - - /** - * @throws java.lang.Exception - */ - @AfterClass - public static void tearDownAfterClass() throws Exception { - } - - /** - * @throws java.lang.Exception - */ - @Before - public void setUp() throws Exception { - } - - /** - * @throws java.lang.Exception - */ - @After - public void tearDown() throws Exception { - } - - /** - * Test method for {@link DynamoDBLeaseTaker#stringJoin(java.util.Collection, java.lang.String)}. - */ - @Test - public final void testStringJoin() { - List strings = new ArrayList<>(); - - strings.add("foo"); - Assert.assertEquals("foo", DynamoDBLeaseTaker.stringJoin(strings, ", ")); - - strings.add("bar"); - Assert.assertEquals("foo, bar", DynamoDBLeaseTaker.stringJoin(strings, ", ")); - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/TestHarnessBuilder.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/TestHarnessBuilder.java deleted file mode 100644 index 1ea73a3e..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/TestHarnessBuilder.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases.dynamodb; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Callable; - -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseRenewer; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.LeasingException; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -public class TestHarnessBuilder { - - private long currentTimeNanos; - - private Map leases = new HashMap<>(); - private DynamoDBLeaseRefresher leaseRefresher; - private Map originalLeases = new HashMap<>(); - - private Callable timeProvider = new Callable() { - - @Override - public Long call() throws Exception { - return currentTimeNanos; - } - - }; - - public TestHarnessBuilder(final DynamoDBLeaseRefresher leaseRefresher) { - this.leaseRefresher = leaseRefresher; - } - - public TestHarnessBuilder withLease(String shardId) { - return withLease(shardId, "leaseOwner"); - } - - public TestHarnessBuilder withLease(String shardId, String owner) { - Lease lease = createLease(shardId, owner); - Lease originalLease = createLease(shardId, owner); - - leases.put(shardId, lease); - originalLeases.put(shardId, originalLease); - return this; - } - - private Lease createLease(String shardId, String owner) { - Lease lease = 
new Lease(); - lease.checkpoint(new ExtendedSequenceNumber("checkpoint")); - lease.ownerSwitchesSinceCheckpoint(0L); - lease.leaseCounter(0L); - lease.leaseOwner(owner); - lease.parentShardIds(Collections.singleton("parentShardId")); - lease.leaseKey(shardId); - - return lease; - } - - public Map build() throws LeasingException { - for (Lease lease : leases.values()) { - leaseRefresher.createLeaseIfNotExists(lease); - if (lease.leaseOwner() != null) { - lease.lastCounterIncrementNanos(System.nanoTime()); - } - } - - currentTimeNanos = System.nanoTime(); - - return leases; - } - - public void passTime(long millis) { - currentTimeNanos += millis * 1000000; - } - - public Map takeMutateAssert(DynamoDBLeaseTaker taker, int numToTake) - throws LeasingException { - Map result = taker.takeLeases(timeProvider); - assertEquals(numToTake, result.size()); - - for (Lease actual : result.values()) { - Lease original = leases.get(actual.leaseKey()); - assertNotNull(original); - - mutateAssert(taker.getWorkerIdentifier(), original, actual); - } - - return result; - } - - public Map takeMutateAssert(DynamoDBLeaseTaker taker, String... 
takenShardIds) - throws LeasingException { - Map result = taker.takeLeases(timeProvider); - assertEquals(takenShardIds.length, result.size()); - - for (String shardId : takenShardIds) { - Lease original = leases.get(shardId); - assertNotNull(original); - - Lease actual = result.get(shardId); - assertNotNull(actual); - - mutateAssert(taker.getWorkerIdentifier(), original, actual); - } - - return result; - } - - private void mutateAssert(String newWorkerIdentifier, Lease original, Lease actual) { - original.leaseCounter(original.leaseCounter() + 1); - if (original.leaseOwner() != null && !newWorkerIdentifier.equals(original.leaseOwner())) { - original.ownerSwitchesSinceCheckpoint(original.ownerSwitchesSinceCheckpoint() + 1); - } - original.leaseOwner(newWorkerIdentifier); - - assertEquals(original, actual); // Assert the contents of the lease - } - - public void addLeasesToRenew(LeaseRenewer renewer, String... shardIds) - throws DependencyException, InvalidStateException { - List leasesToRenew = new ArrayList(); - - for (String shardId : shardIds) { - Lease lease = leases.get(shardId); - assertNotNull(lease); - leasesToRenew.add(lease); - } - - renewer.addLeasesToRenew(leasesToRenew); - } - - public Map renewMutateAssert(LeaseRenewer renewer, String... 
renewedShardIds) - throws DependencyException, InvalidStateException { - renewer.renewLeases(); - - Map heldLeases = renewer.getCurrentlyHeldLeases(); - assertEquals(renewedShardIds.length, heldLeases.size()); - - for (String shardId : renewedShardIds) { - Lease original = originalLeases.get(shardId); - assertNotNull(original); - - Lease actual = heldLeases.get(shardId); - assertNotNull(actual); - - original.leaseCounter(original.leaseCounter() + 1); - assertEquals(original, actual); - } - - return heldLeases; - } - - public void renewAllLeases() throws LeasingException { - for (Lease lease : leases.values()) { - leaseRefresher.renewLease(lease); - } - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTaskTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTaskTest.java deleted file mode 100644 index dcf94ff3..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTaskTest.java +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.lifecycle; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import java.util.ArrayList; -import java.util.List; - -import org.junit.Before; -import org.junit.Test; - -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * - */ -public class BlockOnParentShardTaskTest { - private final long backoffTimeInMillis = 50L; - private final String shardId = "shardId-97"; - private final String concurrencyToken = "testToken"; - private final List emptyParentShardIds = new ArrayList(); - private ShardInfo shardInfo; - - @Before - public void setup() { - shardInfo = new ShardInfo(shardId, concurrencyToken, emptyParentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); - } - - /** - * Test call() when there are no parent shards. - * @throws ProvisionedThroughputException - * @throws InvalidStateException - * @throws DependencyException - */ - @Test - public final void testCallNoParents() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - LeaseRefresher leaseRefresher = mock(LeaseRefresher.class); - when(leaseRefresher.getLease(shardId)).thenReturn(null); - - BlockOnParentShardTask task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); - TaskResult result = task.call(); - assertNull(result.getException()); - } - - /** - * Test call() when there are 1-2 parent shards that have been fully processed. 
- * @throws ProvisionedThroughputException - * @throws InvalidStateException - * @throws DependencyException - */ - @Test - public final void testCallWhenParentsHaveFinished() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - - ShardInfo shardInfo = null; - BlockOnParentShardTask task = null; - String parent1ShardId = "shardId-1"; - String parent2ShardId = "shardId-2"; - List parentShardIds = new ArrayList<>(); - TaskResult result = null; - - Lease parent1Lease = new Lease(); - parent1Lease.checkpoint(ExtendedSequenceNumber.SHARD_END); - Lease parent2Lease = new Lease(); - parent2Lease.checkpoint(ExtendedSequenceNumber.SHARD_END); - - LeaseRefresher leaseRefresher = mock(LeaseRefresher.class); - when(leaseRefresher.getLease(parent1ShardId)).thenReturn(parent1Lease); - when(leaseRefresher.getLease(parent2ShardId)).thenReturn(parent2Lease); - - // test single parent - parentShardIds.add(parent1ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); - task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); - result = task.call(); - assertNull(result.getException()); - - // test two parents - parentShardIds.add(parent2ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); - task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); - result = task.call(); - assertNull(result.getException()); - } - - /** - * Test call() when there are 1-2 parent shards that have NOT been fully processed. 
- * @throws ProvisionedThroughputException - * @throws InvalidStateException - * @throws DependencyException - */ - @Test - public final void testCallWhenParentsHaveNotFinished() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - - ShardInfo shardInfo = null; - BlockOnParentShardTask task = null; - String parent1ShardId = "shardId-1"; - String parent2ShardId = "shardId-2"; - List parentShardIds = new ArrayList<>(); - TaskResult result = null; - - Lease parent1Lease = new Lease(); - parent1Lease.checkpoint(ExtendedSequenceNumber.LATEST); - Lease parent2Lease = new Lease(); - // mock a sequence number checkpoint - parent2Lease.checkpoint(new ExtendedSequenceNumber("98182584034")); - - LeaseRefresher leaseRefresher = mock(LeaseRefresher.class); - when(leaseRefresher.getLease(parent1ShardId)).thenReturn(parent1Lease); - when(leaseRefresher.getLease(parent2ShardId)).thenReturn(parent2Lease); - - // test single parent - parentShardIds.add(parent1ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); - task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); - result = task.call(); - assertNotNull(result.getException()); - - // test two parents - parentShardIds.add(parent2ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); - task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); - result = task.call(); - assertNotNull(result.getException()); - } - - /** - * Test call() with 1 parent shard before and after it is completely processed. 
- * @throws ProvisionedThroughputException - * @throws InvalidStateException - * @throws DependencyException - */ - @Test - public final void testCallBeforeAndAfterAParentFinishes() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { - - BlockOnParentShardTask task = null; - String parentShardId = "shardId-1"; - List parentShardIds = new ArrayList<>(); - parentShardIds.add(parentShardId); - ShardInfo shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); - TaskResult result = null; - Lease parentLease = new Lease(); - LeaseRefresher leaseRefresher = mock(LeaseRefresher.class); - when(leaseRefresher.getLease(parentShardId)).thenReturn(parentLease); - - // test when parent shard has not yet been fully processed - parentLease.checkpoint(new ExtendedSequenceNumber("98182584034")); - task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); - result = task.call(); - assertNotNull(result.getException()); - - // test when parent has been fully processed - parentLease.checkpoint(ExtendedSequenceNumber.SHARD_END); - task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); - result = task.call(); - assertNull(result.getException()); - } - - /** - * Test to verify we return the right task type. - */ - @Test - public final void testGetTaskType() { - BlockOnParentShardTask task = new BlockOnParentShardTask(shardInfo, null, backoffTimeInMillis); - assertEquals(TaskType.BLOCK_ON_PARENT_SHARDS, task.taskType()); - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ConsumerStatesTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ConsumerStatesTest.java deleted file mode 100644 index 9382b491..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ConsumerStatesTest.java +++ /dev/null @@ -1,474 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState; - -import java.lang.reflect.Field; -import java.util.Optional; -import java.util.concurrent.ExecutorService; - -import org.hamcrest.Condition; -import org.hamcrest.Description; -import org.hamcrest.Matcher; -import org.hamcrest.TypeSafeDiagnosingMatcher; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.ShardDetector; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.leases.HierarchicalShardSyncer; -import 
software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.retrieval.AggregatorUtil; -import software.amazon.kinesis.retrieval.RecordsPublisher; - -@RunWith(MockitoJUnitRunner.class) -public class ConsumerStatesTest { - private static final String STREAM_NAME = "TestStream"; - private static final InitialPositionInStreamExtended INITIAL_POSITION_IN_STREAM = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - - private ShardConsumer consumer; - private ShardConsumerArgument argument; - - @Mock - private ShardRecordProcessor shardRecordProcessor; - @Mock - private ShardRecordProcessorCheckpointer recordProcessorCheckpointer; - @Mock - private ExecutorService executorService; - @Mock - private ShardInfo shardInfo; - @Mock - private LeaseRefresher leaseRefresher; - @Mock - private Checkpointer checkpointer; - @Mock - private ShutdownNotification shutdownNotification; - @Mock - private InitialPositionInStreamExtended initialPositionInStream; - @Mock - private RecordsPublisher recordsPublisher; - @Mock - private KinesisAsyncClient kinesisClient; - @Mock - private ShardDetector shardDetector; - @Mock - private HierarchicalShardSyncer hierarchicalShardSyncer; - @Mock - private MetricsFactory metricsFactory; - @Mock - private ProcessRecordsInput processRecordsInput; - @Mock - private TaskExecutionListener taskExecutionListener; - - private long parentShardPollIntervalMillis = 0xCAFE; - private boolean cleanupLeasesOfCompletedShards = true; - private long taskBackoffTimeMillis = 0xBEEF; - private ShutdownReason reason = ShutdownReason.SHARD_END; - private boolean skipShardSyncAtWorkerInitializationIfLeasesExist = true; - private long 
listShardsBackoffTimeInMillis = 50L; - private int maxListShardsRetryAttempts = 10; - private boolean shouldCallProcessRecordsEvenForEmptyRecordList = true; - private boolean ignoreUnexpectedChildShards = false; - private long idleTimeInMillis = 1000L; - private Optional logWarningForTaskAfterMillis = Optional.empty(); - - @Before - public void setup() { - argument = new ShardConsumerArgument(shardInfo, STREAM_NAME, leaseRefresher, executorService, recordsPublisher, - shardRecordProcessor, checkpointer, recordProcessorCheckpointer, parentShardPollIntervalMillis, - taskBackoffTimeMillis, skipShardSyncAtWorkerInitializationIfLeasesExist, - listShardsBackoffTimeInMillis, maxListShardsRetryAttempts, - shouldCallProcessRecordsEvenForEmptyRecordList, idleTimeInMillis, INITIAL_POSITION_IN_STREAM, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, shardDetector, new AggregatorUtil(), - hierarchicalShardSyncer, metricsFactory); - consumer = spy( - new ShardConsumer(recordsPublisher, executorService, shardInfo, logWarningForTaskAfterMillis, argument, taskExecutionListener)); - - when(shardInfo.shardId()).thenReturn("shardId-000000000000"); - when(recordProcessorCheckpointer.checkpointer()).thenReturn(checkpointer); - } - - private static final Class LEASE_REFRESHER_CLASS = (Class) (Class) LeaseRefresher.class; - - @Test - public void blockOnParentStateTest() { - ConsumerState state = ShardConsumerState.WAITING_ON_PARENT_SHARDS.consumerState(); - - ConsumerTask task = state.createTask(argument, consumer, null); - - assertThat(task, taskWith(BlockOnParentShardTask.class, ShardInfo.class, "shardInfo", equalTo(shardInfo))); - assertThat(task, taskWith(BlockOnParentShardTask.class, LEASE_REFRESHER_CLASS, "leaseRefresher", - equalTo(leaseRefresher))); - assertThat(task, taskWith(BlockOnParentShardTask.class, Long.class, "parentShardPollIntervalMillis", - equalTo(parentShardPollIntervalMillis))); - - assertThat(state.successTransition(), 
equalTo(ShardConsumerState.INITIALIZING.consumerState())); - for (ShutdownReason shutdownReason : ShutdownReason.values()) { - assertThat(state.shutdownTransition(shutdownReason), - equalTo(ShardConsumerState.SHUTDOWN_COMPLETE.consumerState())); - } - - assertThat(state.state(), equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)); - assertThat(state.taskType(), equalTo(TaskType.BLOCK_ON_PARENT_SHARDS)); - - } - - @Test - public void initializingStateTest() { - ConsumerState state = ShardConsumerState.INITIALIZING.consumerState(); - ConsumerTask task = state.createTask(argument, consumer, null); - - assertThat(task, initTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); - assertThat(task, initTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, initTask(Checkpointer.class, "checkpoint", equalTo(checkpointer))); - assertThat(task, initTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); - assertThat(task, initTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); - - assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), - equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); - - assertThat(state.state(), equalTo(ShardConsumerState.INITIALIZING)); - assertThat(state.taskType(), equalTo(TaskType.INITIALIZE)); - } - - @Test - public void processingStateTestSynchronous() { - - ConsumerState state = ShardConsumerState.PROCESSING.consumerState(); - ConsumerTask task = state.createTask(argument, consumer, null); - - assertThat(task, procTask(ShardInfo.class, 
"shardInfo", equalTo(shardInfo))); - assertThat(task, procTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, procTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); - assertThat(task, procTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); - - assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), - equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); - - assertThat(state.state(), equalTo(ShardConsumerState.PROCESSING)); - assertThat(state.taskType(), equalTo(TaskType.PROCESS)); - - } - - @Test - public void processingStateTestAsynchronous() { - - ConsumerState state = ShardConsumerState.PROCESSING.consumerState(); - ConsumerTask task = state.createTask(argument, consumer, null); - - assertThat(task, procTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); - assertThat(task, procTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, procTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); - assertThat(task, procTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); - - assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - 
assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), - equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); - - assertThat(state.state(), equalTo(ShardConsumerState.PROCESSING)); - assertThat(state.taskType(), equalTo(TaskType.PROCESS)); - - } - - @Test - public void processingStateRecordsFetcher() { - - ConsumerState state = ShardConsumerState.PROCESSING.consumerState(); - ConsumerTask task = state.createTask(argument, consumer, null); - - assertThat(task, procTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); - assertThat(task, procTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, procTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); - assertThat(task, procTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); - - assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), - equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); - - assertThat(state.state(), equalTo(ShardConsumerState.PROCESSING)); - assertThat(state.taskType(), equalTo(TaskType.PROCESS)); - } - - @Test - public void shutdownRequestState() { - ConsumerState state = ShardConsumerState.SHUTDOWN_REQUESTED.consumerState(); - - consumer.gracefulShutdown(shutdownNotification); - ConsumerTask task = state.createTask(argument, consumer, null); - - assertThat(task, shutdownReqTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, shutdownReqTask(RecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - 
equalTo(recordProcessorCheckpointer))); - assertThat(task, - shutdownReqTask(ShutdownNotification.class, "shutdownNotification", equalTo(shutdownNotification))); - - assertThat(state.successTransition(), equalTo(ConsumerStates.SHUTDOWN_REQUEST_COMPLETION_STATE)); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), - equalTo(ConsumerStates.SHUTDOWN_REQUEST_COMPLETION_STATE)); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - - assertThat(state.state(), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED)); - assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN_NOTIFICATION)); - - } - - @Test - public void shutdownRequestCompleteStateTest() { - ConsumerState state = ConsumerStates.SHUTDOWN_REQUEST_COMPLETION_STATE; - - assertThat(state.createTask(argument, consumer, null), nullValue()); - - assertThat(state.successTransition(), equalTo(state)); - - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(state)); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), - equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - - assertThat(state.state(), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED)); - assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN_NOTIFICATION)); - - } - - // TODO: Fix this test - @Ignore - @Test - public void shuttingDownStateTest() { - consumer.markForShutdown(ShutdownReason.SHARD_END); - ConsumerState state = ShardConsumerState.SHUTTING_DOWN.consumerState(); - ConsumerTask task = state.createTask(argument, consumer, null); - - assertThat(task, shutdownTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); - assertThat(task, shutdownTask(ShardRecordProcessor.class, 
"shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, shutdownTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); - assertThat(task, shutdownTask(ShutdownReason.class, "reason", equalTo(reason))); - assertThat(task, shutdownTask(LEASE_REFRESHER_CLASS, "leaseRefresher", equalTo(leaseRefresher))); - assertThat(task, shutdownTask(InitialPositionInStreamExtended.class, "initialPositionInStream", - equalTo(initialPositionInStream))); - assertThat(task, - shutdownTask(Boolean.class, "cleanupLeasesOfCompletedShards", equalTo(cleanupLeasesOfCompletedShards))); - assertThat(task, shutdownTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); - - assertThat(state.successTransition(), equalTo(ShardConsumerState.SHUTDOWN_COMPLETE.consumerState())); - - for (ShutdownReason reason : ShutdownReason.values()) { - assertThat(state.shutdownTransition(reason), - equalTo(ShardConsumerState.SHUTDOWN_COMPLETE.consumerState())); - } - - assertThat(state.state(), equalTo(ShardConsumerState.SHUTTING_DOWN)); - assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN)); - - } - - @Test - public void shutdownCompleteStateTest() { - consumer.gracefulShutdown(shutdownNotification); - - ConsumerState state = ShardConsumerState.SHUTDOWN_COMPLETE.consumerState(); - - assertThat(state.createTask(argument, consumer, null), nullValue()); - verify(consumer, times(2)).shutdownNotification(); - verify(shutdownNotification).shutdownComplete(); - - assertThat(state.successTransition(), equalTo(state)); - for (ShutdownReason reason : ShutdownReason.values()) { - assertThat(state.shutdownTransition(reason), equalTo(state)); - } - - assertThat(state.state(), equalTo(ShardConsumerState.SHUTDOWN_COMPLETE)); - assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN_COMPLETE)); - } - - @Test - public void shutdownCompleteStateNullNotificationTest() { - ConsumerState state = 
ShardConsumerState.SHUTDOWN_COMPLETE.consumerState(); - - when(consumer.shutdownNotification()).thenReturn(null); - assertThat(state.createTask(argument, consumer, null), nullValue()); - - verify(consumer).shutdownNotification(); - verify(shutdownNotification, never()).shutdownComplete(); - } - - static ReflectionPropertyMatcher shutdownTask(Class valueTypeClass, - String propertyName, Matcher matcher) { - return taskWith(ShutdownTask.class, valueTypeClass, propertyName, matcher); - } - - static ReflectionPropertyMatcher shutdownReqTask( - Class valueTypeClass, String propertyName, Matcher matcher) { - return taskWith(ShutdownNotificationTask.class, valueTypeClass, propertyName, matcher); - } - - static ReflectionPropertyMatcher procTask(Class valueTypeClass, - String propertyName, Matcher matcher) { - return taskWith(ProcessTask.class, valueTypeClass, propertyName, matcher); - } - - static ReflectionPropertyMatcher initTask(Class valueTypeClass, - String propertyName, Matcher matcher) { - return taskWith(InitializeTask.class, valueTypeClass, propertyName, matcher); - } - - static ReflectionPropertyMatcher taskWith(Class taskTypeClass, - Class valueTypeClass, String propertyName, Matcher matcher) { - return new ReflectionPropertyMatcher<>(taskTypeClass, valueTypeClass, matcher, propertyName); - } - - private static class ReflectionPropertyMatcher - extends TypeSafeDiagnosingMatcher { - - private final Class taskTypeClass; - private final Class valueTypeClazz; - private final Matcher matcher; - private final String propertyName; - private final Field matchingField; - - private ReflectionPropertyMatcher(Class taskTypeClass, Class valueTypeClass, - Matcher matcher, String propertyName) { - this.taskTypeClass = taskTypeClass; - this.valueTypeClazz = valueTypeClass; - this.matcher = matcher; - this.propertyName = propertyName; - - Field[] fields = taskTypeClass.getDeclaredFields(); - Field matching = null; - for (Field field : fields) { - if 
(propertyName.equals(field.getName())) { - matching = field; - } - } - this.matchingField = matching; - - } - - @Override - protected boolean matchesSafely(ConsumerTask item, Description mismatchDescription) { - - return Condition.matched(item, mismatchDescription).and(new Condition.Step() { - @Override - public Condition apply(ConsumerTask value, Description mismatch) { - if (taskTypeClass.equals(value.getClass())) { - return Condition.matched(taskTypeClass.cast(value), mismatch); - } - mismatch.appendText("Expected task type of ").appendText(taskTypeClass.getName()) - .appendText(" but was ").appendText(value.getClass().getName()); - return Condition.notMatched(); - } - }).and(new Condition.Step() { - @Override - public Condition apply(TaskType value, Description mismatch) { - if (matchingField == null) { - mismatch.appendText("Field ").appendText(propertyName).appendText(" not present in ") - .appendText(taskTypeClass.getName()); - return Condition.notMatched(); - } - - try { - return Condition.matched(getValue(value), mismatch); - } catch (RuntimeException re) { - mismatch.appendText("Failure while retrieving value for ").appendText(propertyName); - return Condition.notMatched(); - } - - } - }).and(new Condition.Step() { - @Override - public Condition apply(Object value, Description mismatch) { - if (value != null && !valueTypeClazz.isAssignableFrom(value.getClass())) { - mismatch.appendText("Expected a value of type ").appendText(valueTypeClazz.getName()) - .appendText(" but was ").appendText(value.getClass().getName()); - return Condition.notMatched(); - } - return Condition.matched(valueTypeClazz.cast(value), mismatch); - } - }).matching(matcher); - } - - @Override - public void describeTo(Description description) { - description - .appendText( - "A " + taskTypeClass.getName() + " task with the property " + propertyName + " matching ") - .appendDescriptionOf(matcher); - } - - private Object getValue(TaskType task) { - - matchingField.setAccessible(true); - 
try { - return matchingField.get(task); - } catch (IllegalAccessException e) { - throw new RuntimeException("Failed to retrieve the value for " + matchingField.getName()); - } - } - } - -} \ No newline at end of file diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ProcessTaskTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ProcessTaskTest.java deleted file mode 100644 index 0260fab1..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ProcessTaskTest.java +++ /dev/null @@ -1,595 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Amazon Software License - * (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at - * http://aws.amazon.com/asl/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ -package software.amazon.kinesis.lifecycle; - -import static org.hamcrest.CoreMatchers.allOf; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.beans.HasPropertyWithValue.hasProperty; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.io.ByteArrayOutputStream; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.security.MessageDigest; -import java.time.Instant; -import java.time.temporal.ChronoUnit; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.TimeUnit; - -import org.hamcrest.Description; -import org.hamcrest.Matcher; -import org.hamcrest.TypeSafeDiagnosingMatcher; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import com.google.protobuf.ByteString; - -import lombok.Data; -import lombok.Getter; -import software.amazon.awssdk.services.kinesis.model.HashKeyRange; -import software.amazon.awssdk.services.kinesis.model.Shard; -import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; -import software.amazon.kinesis.leases.ShardDetector; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; 
-import software.amazon.kinesis.metrics.NullMetricsFactory; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.retrieval.AggregatorUtil; -import software.amazon.kinesis.retrieval.KinesisClientRecord; -import software.amazon.kinesis.retrieval.ThrottlingReporter; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import software.amazon.kinesis.retrieval.kpl.Messages; -import software.amazon.kinesis.retrieval.kpl.Messages.AggregatedRecord; - -@RunWith(MockitoJUnitRunner.class) -public class ProcessTaskTest { - private static final long IDLE_TIME_IN_MILLISECONDS = 100L; - - private boolean shouldCallProcessRecordsEvenForEmptyRecordList = true; - private boolean skipShardSyncAtWorkerInitializationIfLeasesExist = true; - private ShardInfo shardInfo; - - @Mock - private ProcessRecordsInput processRecordsInput; - @Mock - private ShardDetector shardDetector; - - - private static final byte[] TEST_DATA = new byte[] { 1, 2, 3, 4 }; - - private final String shardId = "shard-test"; - private final long taskBackoffTimeMillis = 1L; - - @Mock - private ShardRecordProcessor shardRecordProcessor; - @Mock - private ShardRecordProcessorCheckpointer checkpointer; - @Mock - private ThrottlingReporter throttlingReporter; - - private ProcessTask processTask; - - - @Before - public void setUpProcessTask() { - when(checkpointer.checkpointer()).thenReturn(mock(Checkpointer.class)); - - shardInfo = new ShardInfo(shardId, null, null, null); - } - - private ProcessTask makeProcessTask(ProcessRecordsInput processRecordsInput) { - return makeProcessTask(processRecordsInput, new AggregatorUtil(), - skipShardSyncAtWorkerInitializationIfLeasesExist); - } - - private ProcessTask makeProcessTask(ProcessRecordsInput processRecordsInput, AggregatorUtil aggregatorUtil, - boolean skipShardSync) { - return new ProcessTask(shardInfo, shardRecordProcessor, checkpointer, 
taskBackoffTimeMillis, - skipShardSync, shardDetector, throttlingReporter, - processRecordsInput, shouldCallProcessRecordsEvenForEmptyRecordList, IDLE_TIME_IN_MILLISECONDS, - aggregatorUtil, new NullMetricsFactory()); - } - - @Test - public void testProcessTaskWithShardEndReached() { - - processTask = makeProcessTask(processRecordsInput); - when(processRecordsInput.isAtShardEnd()).thenReturn(true); - - TaskResult result = processTask.call(); - assertThat(result, shardEndTaskResult(true)); - } - - private KinesisClientRecord makeKinesisClientRecord(String partitionKey, String sequenceNumber, Instant arrival) { - return KinesisClientRecord.builder().partitionKey(partitionKey).sequenceNumber(sequenceNumber) - .approximateArrivalTimestamp(arrival).data(ByteBuffer.wrap(TEST_DATA)).build(); - } - - @Test - public void testNonAggregatedKinesisRecord() { - final String sqn = new BigInteger(128, new Random()).toString(); - final String pk = UUID.randomUUID().toString(); - final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS)); - final KinesisClientRecord r = makeKinesisClientRecord(pk, sqn, ts.toInstant()); - - ShardRecordProcessorOutcome outcome = testWithRecord(r); - - assertEquals(1, outcome.getProcessRecordsCall().records().size()); - - KinesisClientRecord pr = outcome.getProcessRecordsCall().records().get(0); - assertEquals(pk, pr.partitionKey()); - assertEquals(ts.toInstant(), pr.approximateArrivalTimestamp()); - byte[] b = pr.data().array(); - assertThat(b, equalTo(TEST_DATA)); - - assertEquals(sqn, outcome.getCheckpointCall().sequenceNumber()); - assertEquals(0, outcome.getCheckpointCall().subSequenceNumber()); - } - - @Data - static class ShardRecordProcessorOutcome { - final ProcessRecordsInput processRecordsCall; - final ExtendedSequenceNumber checkpointCall; - } - - @Test - public void testDeaggregatesRecord() { - final String sqn = new BigInteger(128, new Random()).toString(); - final String pk = 
UUID.randomUUID().toString(); - final Instant ts = Instant.now().minus(4, ChronoUnit.HOURS); - KinesisClientRecord record = KinesisClientRecord.builder().partitionKey("-").data(generateAggregatedRecord(pk)) - .sequenceNumber(sqn).approximateArrivalTimestamp(ts).build(); - - processTask = makeProcessTask(processRecordsInput); - ShardRecordProcessorOutcome outcome = testWithRecord(record); - - List actualRecords = outcome.getProcessRecordsCall().records(); - - assertEquals(3, actualRecords.size()); - for (KinesisClientRecord pr : actualRecords) { - assertThat(pr, instanceOf(KinesisClientRecord.class)); - assertEquals(pk, pr.partitionKey()); - assertEquals(ts, pr.approximateArrivalTimestamp()); - - byte[] actualData = new byte[pr.data().limit()]; - pr.data().get(actualData); - assertThat(actualData, equalTo(TEST_DATA)); - } - - assertEquals(sqn, outcome.getCheckpointCall().sequenceNumber()); - assertEquals(actualRecords.size() - 1, outcome.getCheckpointCall().subSequenceNumber()); - } - - @Test - public void testDeaggregatesRecordWithNoArrivalTimestamp() { - final String sqn = new BigInteger(128, new Random()).toString(); - final String pk = UUID.randomUUID().toString(); - - KinesisClientRecord record = KinesisClientRecord.builder().partitionKey("-").data(generateAggregatedRecord(pk)) - .sequenceNumber(sqn).build(); - - processTask = makeProcessTask(processRecordsInput); - ShardRecordProcessorOutcome outcome = testWithRecord(record); - - List actualRecords = outcome.getProcessRecordsCall().records(); - - assertEquals(3, actualRecords.size()); - for (KinesisClientRecord actualRecord : actualRecords) { - assertThat(actualRecord.partitionKey(), equalTo(pk)); - assertThat(actualRecord.approximateArrivalTimestamp(), nullValue()); - } - } - - @Test - public void testLargestPermittedCheckpointValue() { - // Some sequence number value from previous processRecords call to mock. 
- final BigInteger previousCheckpointSqn = new BigInteger(128, new Random()); - - // Values for this processRecords call. - final int numberOfRecords = 104; - // Start these batch of records's sequence number that is greater than previous checkpoint value. - final BigInteger startingSqn = previousCheckpointSqn.add(BigInteger.valueOf(10)); - final List records = generateConsecutiveRecords(numberOfRecords, "-", ByteBuffer.wrap(TEST_DATA), - new Date(), startingSqn); - - processTask = makeProcessTask(processRecordsInput); - ShardRecordProcessorOutcome outcome = testWithRecords(records, - new ExtendedSequenceNumber(previousCheckpointSqn.toString()), - new ExtendedSequenceNumber(previousCheckpointSqn.toString())); - - final ExtendedSequenceNumber expectedLargestPermittedEsqn = new ExtendedSequenceNumber( - startingSqn.add(BigInteger.valueOf(numberOfRecords - 1)).toString()); - assertEquals(expectedLargestPermittedEsqn, outcome.getCheckpointCall()); - } - - @Test - public void testLargestPermittedCheckpointValueWithEmptyRecords() { - // Some sequence number value from previous processRecords call. - final BigInteger baseSqn = new BigInteger(128, new Random()); - final ExtendedSequenceNumber lastCheckpointEspn = new ExtendedSequenceNumber(baseSqn.toString()); - final ExtendedSequenceNumber largestPermittedEsqn = new ExtendedSequenceNumber( - baseSqn.add(BigInteger.valueOf(100)).toString()); - - processTask = makeProcessTask(processRecordsInput); - ShardRecordProcessorOutcome outcome = testWithRecords(Collections.emptyList(), lastCheckpointEspn, - largestPermittedEsqn); - - // Make sure that even with empty records, largest permitted sequence number does not change. - assertEquals(largestPermittedEsqn, outcome.getCheckpointCall()); - } - - @Test - public void testFilterBasedOnLastCheckpointValue() { - // Explanation of setup: - // * Assume in previous processRecord call, user got 3 sub-records that all belonged to one - // Kinesis record. 
So sequence number was X, and sub-sequence numbers were 0, 1, 2. - // * 2nd sub-record was checkpointed (extended sequnce number X.1). - // * Worker crashed and restarted. So now DDB has checkpoint value of X.1. - // Test: - // * Now in the subsequent processRecords call, KCL should filter out X.0 and X.1. - BigInteger previousCheckpointSqn = new BigInteger(128, new Random()); - long previousCheckpointSsqn = 1; - - // Values for this processRecords call. - String startingSqn = previousCheckpointSqn.toString(); - String pk = UUID.randomUUID().toString(); - KinesisClientRecord record = KinesisClientRecord.builder().partitionKey("-").data(generateAggregatedRecord(pk)) - .sequenceNumber(startingSqn).build(); - - processTask = makeProcessTask(processRecordsInput); - ShardRecordProcessorOutcome outcome = testWithRecords(Collections.singletonList(record), - new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn), - new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn)); - - List actualRecords = outcome.getProcessRecordsCall().records(); - - // First two records should be dropped - and only 1 remaining records should be there. - assertThat(actualRecords.size(), equalTo(1)); - - // Verify user record's extended sequence number and other fields. - KinesisClientRecord actualRecord = actualRecords.get(0); - assertThat(actualRecord.partitionKey(), equalTo(pk)); - assertThat(actualRecord.sequenceNumber(), equalTo(startingSqn)); - assertThat(actualRecord.subSequenceNumber(), equalTo(previousCheckpointSsqn + 1)); - assertThat(actualRecord.approximateArrivalTimestamp(), nullValue()); - - // Expected largest permitted sequence number will be last sub-record sequence number. 
- final ExtendedSequenceNumber expectedLargestPermittedEsqn = new ExtendedSequenceNumber( - previousCheckpointSqn.toString(), 2L); - assertEquals(expectedLargestPermittedEsqn, outcome.getCheckpointCall()); - } - - @Test - public void testDiscardReshardedKplData() throws Exception { - BigInteger sequenceNumber = new BigInteger(120, ThreadLocalRandom.current()); - - String lowHashKey = BigInteger.ONE.shiftLeft(60).toString(); - String highHashKey = BigInteger.ONE.shiftLeft(68).toString(); - - ControlledHashAggregatorUtil aggregatorUtil = new ControlledHashAggregatorUtil(lowHashKey, highHashKey); - AggregatedRecord.Builder aggregatedRecord = AggregatedRecord.newBuilder(); - Instant approximateArrivalTime = Instant.now(); - int recordIndex = 0; - sequenceNumber = sequenceNumber.add(BigInteger.ONE); - for (int i = 0; i < 5; ++i) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - recordIndex, approximateArrivalTime); - aggregatorUtil.addInRange(expectedRecord); - recordIndex++; - } - - sequenceNumber = sequenceNumber.add(BigInteger.ONE); - for (int i = 0; i < 5; ++i) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - recordIndex, approximateArrivalTime); - aggregatorUtil.addBelowRange(expectedRecord); - recordIndex++; - } - - sequenceNumber = sequenceNumber.add(BigInteger.ONE); - for (int i = 0; i < 5; ++i) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - recordIndex, approximateArrivalTime); - aggregatorUtil.addAboveRange(expectedRecord); - recordIndex++; - } - - byte[] payload = aggregatedRecord.build().toByteArray(); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - bos.write(new byte[] { -13, -119, -102, -62 }); - bos.write(payload); - bos.write(md5(payload)); - - ByteBuffer rawRecordData = ByteBuffer.wrap(bos.toByteArray()); - - KinesisClientRecord rawRecord = 
KinesisClientRecord.builder().data(rawRecordData) - .approximateArrivalTimestamp(approximateArrivalTime).partitionKey("p-01") - .sequenceNumber(sequenceNumber.toString()).build(); - - when(shardDetector.shard(any())).thenReturn(Shard.builder().shardId("Shard-01") - .hashKeyRange(HashKeyRange.builder().startingHashKey(lowHashKey).endingHashKey(highHashKey).build()) - .build()); - - when(processRecordsInput.records()).thenReturn(Collections.singletonList(rawRecord)); - ProcessTask processTask = makeProcessTask(processRecordsInput, aggregatorUtil, false); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, - new ExtendedSequenceNumber(sequenceNumber.subtract(BigInteger.valueOf(100)).toString(), 0L), - new ExtendedSequenceNumber(sequenceNumber.toString(), recordIndex + 1L)); - - assertThat(outcome.processRecordsCall.records().size(), equalTo(0)); - } - - @Test - public void testAllInShardKplData() throws Exception { - BigInteger sequenceNumber = new BigInteger(120, ThreadLocalRandom.current()); - - String lowHashKey = BigInteger.ONE.shiftLeft(60).toString(); - String highHashKey = BigInteger.ONE.shiftLeft(68).toString(); - - ControlledHashAggregatorUtil aggregatorUtil = new ControlledHashAggregatorUtil(lowHashKey, highHashKey); - - List expectedRecords = new ArrayList<>(); - List rawRecords = new ArrayList<>(); - - for (int i = 0; i < 3; ++i) { - AggregatedRecord.Builder aggregatedRecord = AggregatedRecord.newBuilder(); - Instant approximateArrivalTime = Instant.now().minus(i + 4, ChronoUnit.SECONDS); - sequenceNumber = sequenceNumber.add(BigInteger.ONE); - for (int j = 0; j < 2; ++j) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - j, approximateArrivalTime); - aggregatorUtil.addInRange(expectedRecord); - expectedRecords.add(expectedRecord); - } - - byte[] payload = aggregatedRecord.build().toByteArray(); - ByteArrayOutputStream bos = new ByteArrayOutputStream(); - 
bos.write(AggregatorUtil.AGGREGATED_RECORD_MAGIC); - bos.write(payload); - bos.write(md5(payload)); - - ByteBuffer rawRecordData = ByteBuffer.wrap(bos.toByteArray()); - - KinesisClientRecord rawRecord = KinesisClientRecord.builder().data(rawRecordData) - .approximateArrivalTimestamp(approximateArrivalTime).partitionKey("pa-" + i) - .sequenceNumber(sequenceNumber.toString()).build(); - - rawRecords.add(rawRecord); - } - - when(shardDetector.shard(any())).thenReturn(Shard.builder().shardId("Shard-01") - .hashKeyRange(HashKeyRange.builder().startingHashKey(lowHashKey).endingHashKey(highHashKey).build()) - .build()); - - when(processRecordsInput.records()).thenReturn(rawRecords); - ProcessTask processTask = makeProcessTask(processRecordsInput, aggregatorUtil, false); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, new ExtendedSequenceNumber(sequenceNumber.subtract(BigInteger.valueOf(100)).toString(), 0L), - new ExtendedSequenceNumber(sequenceNumber.toString(), 0L)); - - assertThat(outcome.processRecordsCall.records(), equalTo(expectedRecords)); - } - - private KinesisClientRecord createAndRegisterAggregatedRecord(BigInteger sequenceNumber, - AggregatedRecord.Builder aggregatedRecord, int i, Instant approximateArrivalTime) { - byte[] dataArray = new byte[1024]; - ThreadLocalRandom.current().nextBytes(dataArray); - ByteBuffer data = ByteBuffer.wrap(dataArray); - - KinesisClientRecord expectedRecord = KinesisClientRecord.builder().partitionKey("p-" + i) - .sequenceNumber(sequenceNumber.toString()).approximateArrivalTimestamp(approximateArrivalTime) - .data(data).subSequenceNumber(i).aggregated(true).build(); - - Messages.Record kplRecord = Messages.Record.newBuilder().setData(ByteString.copyFrom(dataArray)) - .setPartitionKeyIndex(i).build(); - aggregatedRecord.addPartitionKeyTable(expectedRecord.partitionKey()).addRecords(kplRecord); - - return expectedRecord; - } - - private enum RecordRangeState { - BELOW_RANGE, IN_RANGE, ABOVE_RANGE - } - - 
@Getter - private static class ControlledHashAggregatorUtil extends AggregatorUtil { - - private final BigInteger lowHashKey; - private final BigInteger highHashKey; - private final long width; - private final Map recordRanges = new HashMap<>(); - - ControlledHashAggregatorUtil(String lowHashKey, String highHashKey) { - this.lowHashKey = new BigInteger(lowHashKey); - this.highHashKey = new BigInteger(highHashKey); - this.width = this.highHashKey.subtract(this.lowHashKey).mod(BigInteger.valueOf(Long.MAX_VALUE)).longValue() - - 1; - } - - void add(KinesisClientRecord record, RecordRangeState recordRangeState) { - recordRanges.put(record.partitionKey(), recordRangeState); - } - - void addInRange(KinesisClientRecord record) { - add(record, RecordRangeState.IN_RANGE); - } - - void addBelowRange(KinesisClientRecord record) { - add(record, RecordRangeState.BELOW_RANGE); - } - - void addAboveRange(KinesisClientRecord record) { - add(record, RecordRangeState.ABOVE_RANGE); - } - - @Override - protected BigInteger effectiveHashKey(String partitionKey, String explicitHashKey) { - RecordRangeState rangeState = recordRanges.get(partitionKey); - assertThat(rangeState, not(nullValue())); - - switch (rangeState) { - case BELOW_RANGE: - return lowHashKey.subtract(BigInteger.valueOf(ThreadLocalRandom.current().nextInt()).abs()); - case IN_RANGE: - return lowHashKey.add(BigInteger.valueOf(ThreadLocalRandom.current().nextLong(width))); - case ABOVE_RANGE: - return highHashKey.add(BigInteger.ONE) - .add(BigInteger.valueOf(ThreadLocalRandom.current().nextInt()).abs()); - default: - throw new IllegalStateException("Unknown range state: " + rangeState); - } - } - } - - private ShardRecordProcessorOutcome testWithRecord(KinesisClientRecord record) { - return testWithRecords(Collections.singletonList(record), ExtendedSequenceNumber.TRIM_HORIZON, - ExtendedSequenceNumber.TRIM_HORIZON); - } - - private ShardRecordProcessorOutcome testWithRecords(List records, - ExtendedSequenceNumber 
lastCheckpointValue, ExtendedSequenceNumber largestPermittedCheckpointValue) { - return testWithRecords(records, lastCheckpointValue, largestPermittedCheckpointValue, new AggregatorUtil()); - } - - private ShardRecordProcessorOutcome testWithRecords(List records, ExtendedSequenceNumber lastCheckpointValue, - ExtendedSequenceNumber largestPermittedCheckpointValue, AggregatorUtil aggregatorUtil) { - when(processRecordsInput.records()).thenReturn(records); - return testWithRecords( - makeProcessTask(processRecordsInput, aggregatorUtil, skipShardSyncAtWorkerInitializationIfLeasesExist), - lastCheckpointValue, largestPermittedCheckpointValue); - } - - private ShardRecordProcessorOutcome testWithRecords(ProcessTask processTask, ExtendedSequenceNumber lastCheckpointValue, - ExtendedSequenceNumber largestPermittedCheckpointValue) { - when(checkpointer.lastCheckpointValue()).thenReturn(lastCheckpointValue); - when(checkpointer.largestPermittedCheckpointValue()).thenReturn(largestPermittedCheckpointValue); - processTask.call(); - verify(throttlingReporter).success(); - verify(throttlingReporter, never()).throttled(); - ArgumentCaptor recordsCaptor = ArgumentCaptor.forClass(ProcessRecordsInput.class); - verify(shardRecordProcessor).processRecords(recordsCaptor.capture()); - - ArgumentCaptor esnCaptor = ArgumentCaptor.forClass(ExtendedSequenceNumber.class); - verify(checkpointer).largestPermittedCheckpointValue(esnCaptor.capture()); - - return new ShardRecordProcessorOutcome(recordsCaptor.getValue(), esnCaptor.getValue()); - - } - - /** - * See the KPL documentation on GitHub for more details about the binary format. - * - * @param pk - * Partition key to use. All the records will have the same partition key. - * @return ByteBuffer containing the serialized form of the aggregated record, along with the necessary header and - * footer. 
- */ - private static ByteBuffer generateAggregatedRecord(String pk) { - ByteBuffer bb = ByteBuffer.allocate(1024); - bb.put(new byte[] { -13, -119, -102, -62 }); - - Messages.Record r = Messages.Record.newBuilder().setData(ByteString.copyFrom(TEST_DATA)).setPartitionKeyIndex(0) - .build(); - - byte[] payload = AggregatedRecord.newBuilder().addPartitionKeyTable(pk).addRecords(r).addRecords(r) - .addRecords(r).build().toByteArray(); - - bb.put(payload); - bb.put(md5(payload)); - bb.limit(bb.position()); - bb.rewind(); - return bb; - } - - private static List generateConsecutiveRecords(int numberOfRecords, String partitionKey, ByteBuffer data, - Date arrivalTimestamp, BigInteger startSequenceNumber) { - List records = new ArrayList<>(); - for (int i = 0; i < numberOfRecords; ++i) { - String seqNum = startSequenceNumber.add(BigInteger.valueOf(i)).toString(); - KinesisClientRecord record = KinesisClientRecord.builder().partitionKey(partitionKey).data(data) - .sequenceNumber(seqNum).approximateArrivalTimestamp(arrivalTimestamp.toInstant()).build(); - records.add(record); - } - return records; - } - - private static byte[] md5(byte[] b) { - try { - MessageDigest md = MessageDigest.getInstance("MD5"); - return md.digest(b); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - private static TaskResultMatcher shardEndTaskResult(boolean isAtShardEnd) { - TaskResult expected = new TaskResult(null, isAtShardEnd); - return taskResult(expected); - } - - private static TaskResultMatcher exceptionTaskResult(Exception ex) { - TaskResult expected = new TaskResult(ex, false); - return taskResult(expected); - } - - private static TaskResultMatcher taskResult(TaskResult expected) { - return new TaskResultMatcher(expected); - } - - private static class TaskResultMatcher extends TypeSafeDiagnosingMatcher { - - Matcher matchers; - - TaskResultMatcher(TaskResult expected) { - if (expected == null) { - matchers = nullValue(TaskResult.class); - } else { - matchers = 
allOf(notNullValue(TaskResult.class), - hasProperty("shardEndReached", equalTo(expected.isShardEndReached())), - hasProperty("exception", equalTo(expected.getException()))); - } - - } - - @Override - protected boolean matchesSafely(TaskResult item, Description mismatchDescription) { - if (!matchers.matches(item)) { - matchers.describeMismatch(item, mismatchDescription); - return false; - } - return true; - } - - @Override - public void describeTo(Description description) { - description.appendDescriptionOf(matchers); - } - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerTest.java deleted file mode 100644 index 114d4d47..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerTest.java +++ /dev/null @@ -1,713 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.lifecycle; - -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoMoreInteractions; -import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; - -import java.time.Instant; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; - -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import lombok.extern.slf4j.Slf4j; -import 
software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.events.TaskExecutionListenerInput; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Unit tests of {@link ShardConsumer}. - */ -@RunWith(MockitoJUnitRunner.class) -@Slf4j -public class ShardConsumerTest { - - private final String shardId = "shardId-0-0"; - private final String concurrencyToken = "TestToken"; - private ShardInfo shardInfo; - private TaskExecutionListenerInput initialTaskInput; - private TaskExecutionListenerInput processTaskInput; - private TaskExecutionListenerInput shutdownTaskInput; - private TaskExecutionListenerInput shutdownRequestedTaskInput; - private TaskExecutionListenerInput shutdownRequestedAwaitTaskInput; - - private ExecutorService executorService; - @Mock - private RecordsPublisher recordsPublisher; - @Mock - private ShutdownNotification shutdownNotification; - @Mock - private ConsumerState initialState; - @Mock - private ConsumerTask initializeTask; - @Mock - private ConsumerState processingState; - @Mock - private ConsumerTask processingTask; - @Mock - private ConsumerState shutdownState; - @Mock - private ConsumerTask shutdownTask; - @Mock - private TaskResult initializeTaskResult; - @Mock - private TaskResult processingTaskResult; - @Mock - private ConsumerState shutdownCompleteState; - @Mock - private ShardConsumerArgument shardConsumerArgument; - @Mock - private ConsumerState shutdownRequestedState; - @Mock - private ConsumerTask shutdownRequestedTask; - @Mock - private ConsumerState shutdownRequestedAwaitState; - @Mock - private TaskExecutionListener taskExecutionListener; - - private ProcessRecordsInput processRecordsInput; - - private Optional logWarningForTaskAfterMillis = Optional.empty(); - - @Rule - public 
TestName testName = new TestName(); - - @Before - public void before() { - shardInfo = new ShardInfo(shardId, concurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - ThreadFactory factory = new ThreadFactoryBuilder().setNameFormat("test-" + testName.getMethodName() + "-%04d") - .setDaemon(true).build(); - executorService = new ThreadPoolExecutor(4, 4, 1, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), factory); - - processRecordsInput = ProcessRecordsInput.builder().isAtShardEnd(false).cacheEntryTime(Instant.now()) - .millisBehindLatest(1000L).records(Collections.emptyList()).build(); - initialTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo) - .taskType(TaskType.INITIALIZE).build(); - processTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo) - .taskType(TaskType.PROCESS).build(); - shutdownRequestedTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo) - .taskType(TaskType.SHUTDOWN_NOTIFICATION).build(); - shutdownRequestedAwaitTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo) - .taskType(TaskType.SHUTDOWN_COMPLETE).build(); - shutdownTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo) - .taskType(TaskType.SHUTDOWN).build(); - } - - @After - public void after() { - List remainder = executorService.shutdownNow(); - assertThat(remainder.isEmpty(), equalTo(true)); - } - - private class TestPublisher implements RecordsPublisher { - - final CyclicBarrier barrier = new CyclicBarrier(2); - final CyclicBarrier requestBarrier = new CyclicBarrier(2); - - Subscriber subscriber; - final Subscription subscription = mock(Subscription.class); - - TestPublisher() { - this(false); - } - - TestPublisher(boolean enableCancelAwait) { - doAnswer(a -> { - requestBarrier.await(); - return null; - }).when(subscription).request(anyLong()); - doAnswer(a -> { - if (enableCancelAwait) { - requestBarrier.await(); - } - return null; - }).when(subscription).cancel(); - } - - @Override - public 
void start(ExtendedSequenceNumber extendedSequenceNumber, - InitialPositionInStreamExtended initialPositionInStreamExtended) { - - } - - @Override - public void shutdown() { - - } - - @Override - public void subscribe(Subscriber s) { - subscriber = s; - subscriber.onSubscribe(subscription); - try { - barrier.await(); - } catch (Exception ex) { - throw new RuntimeException(ex); - } - } - - public void awaitSubscription() throws InterruptedException, BrokenBarrierException { - barrier.await(); - barrier.reset(); - } - - public void awaitRequest() throws InterruptedException, BrokenBarrierException { - requestBarrier.await(); - requestBarrier.reset(); - } - - public void awaitInitialSetup() throws InterruptedException, BrokenBarrierException { - awaitRequest(); - awaitSubscription(); - } - - public void publish() { - publish(processRecordsInput); - } - - public void publish(ProcessRecordsInput input) { - subscriber.onNext(input); - } - } - - @Test - public void simpleTest() throws Exception { - CyclicBarrier taskCallBarrier = new CyclicBarrier(2); - - mockSuccessfulInitialize(null); - - mockSuccessfulProcessing(taskCallBarrier); - - mockSuccessfulShutdown(null); - - TestPublisher cache = new TestPublisher(); - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, logWarningForTaskAfterMillis, - shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener); - - boolean initComplete = false; - while (!initComplete) { - initComplete = consumer.initializeComplete().get(); - } - - consumer.subscribe(); - cache.awaitInitialSetup(); - - cache.publish(); - awaitAndResetBarrier(taskCallBarrier); - cache.awaitRequest(); - - cache.publish(); - awaitAndResetBarrier(taskCallBarrier); - cache.awaitRequest(); - - consumer.leaseLost(); - boolean shutdownComplete = consumer.shutdownComplete().get(); - while (!shutdownComplete) { - shutdownComplete = consumer.shutdownComplete().get(); - } - - verify(cache.subscription, 
times(3)).request(anyLong()); - verify(cache.subscription).cancel(); - verify(processingState, times(2)).createTask(eq(shardConsumerArgument), eq(consumer), any()); - verify(taskExecutionListener, times(1)).beforeTaskExecution(initialTaskInput); - verify(taskExecutionListener, times(2)).beforeTaskExecution(processTaskInput); - verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - - verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); - verify(taskExecutionListener, times(2)).afterTaskExecution(processTaskInput); - verify(taskExecutionListener, times(1)).afterTaskExecution(shutdownTaskInput); - verifyNoMoreInteractions(taskExecutionListener); - } - - - - @Test - public void testDataArrivesAfterProcessing2() throws Exception { - - CyclicBarrier taskCallBarrier = new CyclicBarrier(2); - - mockSuccessfulInitialize(null); - - mockSuccessfulProcessing(taskCallBarrier); - - mockSuccessfulShutdown(null); - - TestPublisher cache = new TestPublisher(); - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, logWarningForTaskAfterMillis, - shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener); - - boolean initComplete = false; - while (!initComplete) { - initComplete = consumer.initializeComplete().get(); - } - - consumer.subscribe(); - cache.awaitInitialSetup(); - - cache.publish(); - awaitAndResetBarrier(taskCallBarrier); - - verify(processingState).createTask(any(), any(), any()); - verify(processingTask).call(); - - cache.awaitRequest(); - - cache.publish(); - awaitAndResetBarrier(taskCallBarrier); - verify(processingState, times(2)).createTask(any(), any(), any()); - 
verify(processingTask, times(2)).call(); - - cache.awaitRequest(); - - cache.publish(); - awaitAndResetBarrier(taskCallBarrier); - cache.awaitRequest(); - - log.info("Starting shutdown"); - consumer.leaseLost(); - boolean shutdownComplete; - do { - shutdownComplete = consumer.shutdownComplete().get(); - } while (!shutdownComplete); - - verify(processingState, times(3)).createTask(any(), any(), any()); - verify(processingTask, times(3)).call(); - verify(processingState).shutdownTransition(eq(ShutdownReason.LEASE_LOST)); - verify(shutdownState).shutdownTransition(eq(ShutdownReason.LEASE_LOST)); - verify(taskExecutionListener, times(1)).beforeTaskExecution(initialTaskInput); - verify(taskExecutionListener, times(3)).beforeTaskExecution(processTaskInput); - verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - - verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); - verify(taskExecutionListener, times(3)).afterTaskExecution(processTaskInput); - verify(taskExecutionListener, times(1)).afterTaskExecution(shutdownTaskInput); - verifyNoMoreInteractions(taskExecutionListener); - } - - @SuppressWarnings("unchecked") - @Test - @Ignore - public final void testInitializationStateUponFailure() throws Exception { - ShardConsumer consumer = new ShardConsumer(recordsPublisher, executorService, shardInfo, - logWarningForTaskAfterMillis, shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener); - - when(initialState.createTask(eq(shardConsumerArgument), eq(consumer), any())).thenReturn(initializeTask); - when(initializeTask.call()).thenReturn(new TaskResult(new Exception("Bad"))); - 
when(initializeTask.taskType()).thenReturn(TaskType.INITIALIZE); - when(initialState.failureTransition()).thenReturn(initialState); - - CyclicBarrier taskBarrier = new CyclicBarrier(2); - - when(initialState.requiresDataAvailability()).thenAnswer(i -> { - taskBarrier.await(); - return false; - }); - - consumer.executeLifecycle(); - for (int i = 0; i < 4; ++i) { - awaitAndResetBarrier(taskBarrier); - } - - verify(initialState, times(5)).createTask(eq(shardConsumerArgument), eq(consumer), any()); - verify(initialState, never()).successTransition(); - verify(initialState, never()).shutdownTransition(any()); - } - - /** - * Test method to verify consumer stays in INITIALIZING state when InitializationTask fails. - */ - @SuppressWarnings("unchecked") - @Test(expected = RejectedExecutionException.class) - public final void testInitializationStateUponSubmissionFailure() throws Exception { - - ExecutorService failingService = mock(ExecutorService.class); - ShardConsumer consumer = new ShardConsumer(recordsPublisher, failingService, shardInfo, - logWarningForTaskAfterMillis, shardConsumerArgument, initialState, t -> t, 1, taskExecutionListener); - - doThrow(new RejectedExecutionException()).when(failingService).execute(any()); - - boolean initComplete; - do { - initComplete = consumer.initializeComplete().get(); - } while (!initComplete); - verifyZeroInteractions(taskExecutionListener); - } - - @Test - public void testErrorThrowableInInitialization() throws Exception { - ShardConsumer consumer = new ShardConsumer(recordsPublisher, executorService, shardInfo, - logWarningForTaskAfterMillis, shardConsumerArgument, initialState, t -> t, 1, taskExecutionListener); - - when(initialState.createTask(any(), any(), any())).thenReturn(initializeTask); - when(initialState.taskType()).thenReturn(TaskType.INITIALIZE); - when(initializeTask.call()).thenAnswer(i -> { - throw new Error("Error"); - }); - - try { - consumer.initializeComplete().get(); - } catch (ExecutionException ee) { - 
assertThat(ee.getCause(), instanceOf(Error.class)); - } - verify(taskExecutionListener, times(1)).beforeTaskExecution(initialTaskInput); - verifyNoMoreInteractions(taskExecutionListener); - } - - @Test - public void testRequestedShutdownWhileQuiet() throws Exception { - - CyclicBarrier taskBarrier = new CyclicBarrier(2); - - TestPublisher cache = new TestPublisher(); - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, logWarningForTaskAfterMillis, - shardConsumerArgument, initialState, t -> t, 1, taskExecutionListener); - - mockSuccessfulInitialize(null); - - mockSuccessfulProcessing(taskBarrier); - - when(processingState.shutdownTransition(eq(ShutdownReason.REQUESTED))).thenReturn(shutdownRequestedState); - when(shutdownRequestedState.requiresDataAvailability()).thenReturn(false); - when(shutdownRequestedState.createTask(any(), any(), any())).thenReturn(shutdownRequestedTask); - when(shutdownRequestedState.taskType()).thenReturn(TaskType.SHUTDOWN_NOTIFICATION); - when(shutdownRequestedTask.call()).thenReturn(new TaskResult(null)); - - when(shutdownRequestedState.shutdownTransition(eq(ShutdownReason.REQUESTED))) - .thenReturn(shutdownRequestedAwaitState); - when(shutdownRequestedState.shutdownTransition(eq(ShutdownReason.LEASE_LOST))).thenReturn(shutdownState); - when(shutdownRequestedAwaitState.requiresDataAvailability()).thenReturn(false); - when(shutdownRequestedAwaitState.createTask(any(), any(), any())).thenReturn(null); - when(shutdownRequestedAwaitState.shutdownTransition(eq(ShutdownReason.REQUESTED))) - .thenReturn(shutdownRequestedState); - when(shutdownRequestedAwaitState.shutdownTransition(eq(ShutdownReason.LEASE_LOST))).thenReturn(shutdownState); - when(shutdownRequestedAwaitState.taskType()).thenReturn(TaskType.SHUTDOWN_COMPLETE); - - mockSuccessfulShutdown(null); - - boolean init = consumer.initializeComplete().get(); - while (!init) { - init = consumer.initializeComplete().get(); - } - - consumer.subscribe(); - 
cache.awaitInitialSetup(); - - cache.publish(); - awaitAndResetBarrier(taskBarrier); - cache.awaitRequest(); - - cache.publish(); - awaitAndResetBarrier(taskBarrier); - cache.awaitRequest(); - - consumer.gracefulShutdown(shutdownNotification); - boolean shutdownComplete = consumer.shutdownComplete().get(); - assertThat(shutdownComplete, equalTo(false)); - shutdownComplete = consumer.shutdownComplete().get(); - assertThat(shutdownComplete, equalTo(false)); - - consumer.leaseLost(); - shutdownComplete = consumer.shutdownComplete().get(); - assertThat(shutdownComplete, equalTo(false)); - shutdownComplete = consumer.shutdownComplete().get(); - assertThat(shutdownComplete, equalTo(true)); - - verify(processingState, times(2)).createTask(any(), any(), any()); - verify(shutdownRequestedState, never()).shutdownTransition(eq(ShutdownReason.LEASE_LOST)); - verify(shutdownRequestedState).createTask(any(), any(), any()); - verify(shutdownRequestedState).shutdownTransition(eq(ShutdownReason.REQUESTED)); - verify(shutdownRequestedAwaitState).createTask(any(), any(), any()); - verify(shutdownRequestedAwaitState).shutdownTransition(eq(ShutdownReason.LEASE_LOST)); - verify(taskExecutionListener, times(1)).beforeTaskExecution(initialTaskInput); - verify(taskExecutionListener, times(2)).beforeTaskExecution(processTaskInput); - verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownRequestedTaskInput); - verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownRequestedAwaitTaskInput); - verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownRequestedTaskInput = shutdownRequestedTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = 
shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - // No task is created/run for this shutdownRequestedAwaitState, so there's no task outcome. - - verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); - verify(taskExecutionListener, times(2)).afterTaskExecution(processTaskInput); - verify(taskExecutionListener, times(1)).afterTaskExecution(shutdownRequestedTaskInput); - verify(taskExecutionListener, times(1)).afterTaskExecution(shutdownRequestedAwaitTaskInput); - verify(taskExecutionListener, times(1)).afterTaskExecution(shutdownTaskInput); - verifyNoMoreInteractions(taskExecutionListener); - } - - @Test - public void testExceptionInProcessingStopsRequests() throws Exception { - TestPublisher cache = new TestPublisher(); - - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, Optional.of(1L), - shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener); - - mockSuccessfulInitialize(null); - mockSuccessfulProcessing(null); - - CyclicBarrier taskCallBarrier = new CyclicBarrier(2); - final RuntimeException expectedException = new RuntimeException("Whee"); - when(processingTask.call()).thenAnswer(a -> { - try { - throw expectedException; - } finally { - taskCallBarrier.await(); - } - }); - - boolean initComplete; - do { - initComplete = consumer.initializeComplete().get(); - } while (!initComplete); - - consumer.subscribe(); - cache.awaitInitialSetup(); - - cache.publish(); - awaitAndResetBarrier(taskCallBarrier); - cache.awaitRequest(); - - Throwable healthCheckOutcome = consumer.healthCheck(); - - assertThat(healthCheckOutcome, equalTo(expectedException)); - - verify(cache.subscription, times(2)).request(anyLong()); - verify(taskExecutionListener, times(1)).beforeTaskExecution(initialTaskInput); - verify(taskExecutionListener, times(1)).beforeTaskExecution(processTaskInput); - - initialTaskInput = 
initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - - verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); - verifyNoMoreInteractions(taskExecutionListener); - } - - @Test - public void testLongRunningTasks() throws Exception { - - TestPublisher cache = new TestPublisher(); - - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, Optional.of(1L), - shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener); - - CyclicBarrier taskArriveBarrier = new CyclicBarrier(2); - CyclicBarrier taskDepartBarrier = new CyclicBarrier(2); - - mockSuccessfulInitialize(taskArriveBarrier, taskDepartBarrier); - mockSuccessfulProcessing(taskArriveBarrier, taskDepartBarrier); - mockSuccessfulShutdown(taskArriveBarrier, taskDepartBarrier); - - CompletableFuture initSuccess = consumer.initializeComplete(); - - awaitAndResetBarrier(taskArriveBarrier); - assertThat(consumer.taskRunningTime(), notNullValue()); - consumer.healthCheck(); - awaitAndResetBarrier(taskDepartBarrier); - - assertThat(initSuccess.get(), equalTo(false)); - verify(initializeTask).call(); - - initSuccess = consumer.initializeComplete(); - verify(initializeTask).call(); - assertThat(initSuccess.get(), equalTo(true)); - consumer.healthCheck(); - - assertThat(consumer.taskRunningTime(), nullValue()); - - consumer.subscribe(); - cache.awaitInitialSetup(); - - cache.publish(); - - awaitAndResetBarrier(taskArriveBarrier); - Instant previousTaskStartTime = consumer.taskDispatchedAt(); - assertThat(consumer.taskRunningTime(), notNullValue()); - consumer.healthCheck(); - awaitAndResetBarrier(taskDepartBarrier); - - consumer.healthCheck(); - - cache.requestBarrier.await(); - assertThat(consumer.taskRunningTime(), nullValue()); - cache.requestBarrier.reset(); - - cache.publish(); - - awaitAndResetBarrier(taskArriveBarrier); - Instant currentTaskStartTime = consumer.taskDispatchedAt(); - assertThat(currentTaskStartTime, 
not(equalTo(previousTaskStartTime))); - awaitAndResetBarrier(taskDepartBarrier); - - cache.requestBarrier.await(); - assertThat(consumer.taskRunningTime(), nullValue()); - cache.requestBarrier.reset(); - - consumer.leaseLost(); - - assertThat(consumer.isShutdownRequested(), equalTo(true)); - CompletableFuture shutdownComplete = consumer.shutdownComplete(); - - awaitAndResetBarrier(taskArriveBarrier); - assertThat(consumer.taskRunningTime(), notNullValue()); - awaitAndResetBarrier(taskDepartBarrier); - - assertThat(shutdownComplete.get(), equalTo(false)); - - shutdownComplete = consumer.shutdownComplete(); - assertThat(shutdownComplete.get(), equalTo(true)); - - assertThat(consumer.taskRunningTime(), nullValue()); - consumer.healthCheck(); - - verify(taskExecutionListener, times(1)).beforeTaskExecution(initialTaskInput); - verify(taskExecutionListener, times(2)).beforeTaskExecution(processTaskInput); - verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - - verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); - verify(taskExecutionListener, times(2)).afterTaskExecution(processTaskInput); - verify(taskExecutionListener, times(1)).afterTaskExecution(shutdownTaskInput); - verifyNoMoreInteractions(taskExecutionListener); - } - - private void mockSuccessfulShutdown(CyclicBarrier taskCallBarrier) { - mockSuccessfulShutdown(taskCallBarrier, null); - } - - private void mockSuccessfulShutdown(CyclicBarrier taskArriveBarrier, CyclicBarrier taskDepartBarrier) { - when(shutdownState.createTask(eq(shardConsumerArgument), any(), any())).thenReturn(shutdownTask); - when(shutdownState.taskType()).thenReturn(TaskType.SHUTDOWN); - 
when(shutdownTask.taskType()).thenReturn(TaskType.SHUTDOWN); - when(shutdownTask.call()).thenAnswer(i -> { - awaitBarrier(taskArriveBarrier); - awaitBarrier(taskDepartBarrier); - return new TaskResult(null); - }); - when(shutdownState.shutdownTransition(any())).thenReturn(shutdownCompleteState); - when(shutdownState.state()).thenReturn(ConsumerStates.ShardConsumerState.SHUTTING_DOWN); - - when(shutdownCompleteState.isTerminal()).thenReturn(true); - } - - private void mockSuccessfulProcessing(CyclicBarrier taskCallBarrier) { - mockSuccessfulProcessing(taskCallBarrier, null); - } - - private void mockSuccessfulProcessing(CyclicBarrier taskCallBarrier, CyclicBarrier taskInterlockBarrier) { - when(processingState.createTask(eq(shardConsumerArgument), any(), any())).thenReturn(processingTask); - when(processingState.requiresDataAvailability()).thenReturn(true); - when(processingState.taskType()).thenReturn(TaskType.PROCESS); - when(processingTask.taskType()).thenReturn(TaskType.PROCESS); - when(processingTask.call()).thenAnswer(i -> { - awaitBarrier(taskCallBarrier); - awaitBarrier(taskInterlockBarrier); - return processingTaskResult; - }); - when(processingTaskResult.getException()).thenReturn(null); - when(processingState.successTransition()).thenReturn(processingState); - when(processingState.shutdownTransition(any())).thenReturn(shutdownState); - when(processingState.state()).thenReturn(ConsumerStates.ShardConsumerState.PROCESSING); - } - - private void mockSuccessfulInitialize(CyclicBarrier taskCallBarrier) { - mockSuccessfulInitialize(taskCallBarrier, null); - } - - private void mockSuccessfulInitialize(CyclicBarrier taskCallBarrier, CyclicBarrier taskInterlockBarrier) { - - when(initialState.createTask(eq(shardConsumerArgument), any(), any())).thenReturn(initializeTask); - when(initialState.taskType()).thenReturn(TaskType.INITIALIZE); - when(initializeTask.taskType()).thenReturn(TaskType.INITIALIZE); - when(initializeTask.call()).thenAnswer(i -> { - 
awaitBarrier(taskCallBarrier); - awaitBarrier(taskInterlockBarrier); - return initializeTaskResult; - }); - when(initializeTaskResult.getException()).thenReturn(null); - when(initialState.requiresDataAvailability()).thenReturn(false); - when(initialState.successTransition()).thenReturn(processingState); - when(initialState.state()).thenReturn(ConsumerStates.ShardConsumerState.INITIALIZING); - - } - - private void awaitBarrier(CyclicBarrier barrier) throws Exception { - if (barrier != null) { - barrier.await(); - } - } - - private void awaitAndResetBarrier(CyclicBarrier barrier) throws Exception { - barrier.await(); - barrier.reset(); - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownReasonTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownReasonTest.java deleted file mode 100644 index 6ee54b3c..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownReasonTest.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.lifecycle; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -import org.junit.Assert; -import org.junit.Test; - -/** - * Unit tests of ShutdownReason enum class. 
- */ -public class ShutdownReasonTest { - - @Test - public void testTransitionZombie() { - Assert.assertFalse(ShutdownReason.LEASE_LOST.canTransitionTo(ShutdownReason.SHARD_END)); - assertFalse(ShutdownReason.LEASE_LOST.canTransitionTo(ShutdownReason.REQUESTED)); - } - - @Test - public void testTransitionTerminate() { - assertTrue(ShutdownReason.SHARD_END.canTransitionTo(ShutdownReason.LEASE_LOST)); - assertFalse(ShutdownReason.SHARD_END.canTransitionTo(ShutdownReason.REQUESTED)); - } - - @Test - public void testTransitionRequested() { - assertTrue(ShutdownReason.REQUESTED.canTransitionTo(ShutdownReason.LEASE_LOST)); - assertTrue(ShutdownReason.REQUESTED.canTransitionTo(ShutdownReason.SHARD_END)); - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownTaskTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownTaskTest.java deleted file mode 100644 index 07fb92ae..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownTaskTest.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.lifecycle; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.Collections; - -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.exceptions.internal.KinesisClientLibIOException; -import software.amazon.kinesis.leases.HierarchicalShardSyncer; -import software.amazon.kinesis.leases.LeaseRefresher; -import software.amazon.kinesis.leases.ShardDetector; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.NullMetricsFactory; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import software.amazon.kinesis.utils.TestStreamlet; - -/** - * - */ -@RunWith(MockitoJUnitRunner.class) -public class ShutdownTaskTest { - private static final long TASK_BACKOFF_TIME_MILLIS = 1L; - private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - private static final ShutdownReason TERMINATE_SHUTDOWN_REASON = ShutdownReason.SHARD_END; - private static final MetricsFactory NULL_METRICS_FACTORY = new NullMetricsFactory(); - - 
private final String concurrencyToken = "testToken4398"; - private final String shardId = "shardId-0000397840"; - private boolean cleanupLeasesOfCompletedShards = false; - private boolean ignoreUnexpectedChildShards = false; - private ShardRecordProcessor shardRecordProcessor; - private ShardInfo shardInfo; - private ShutdownTask task; - - @Mock - private RecordsPublisher recordsPublisher; - @Mock - private ShardRecordProcessorCheckpointer recordProcessorCheckpointer; - @Mock - private Checkpointer checkpointer; - @Mock - private LeaseRefresher leaseRefresher; - @Mock - private ShardDetector shardDetector; - @Mock - private HierarchicalShardSyncer hierarchicalShardSyncer; - - @Before - public void setUp() throws Exception { - doNothing().when(recordsPublisher).shutdown(); - when(recordProcessorCheckpointer.checkpointer()).thenReturn(checkpointer); - - shardInfo = new ShardInfo(shardId, concurrencyToken, Collections.emptySet(), - ExtendedSequenceNumber.LATEST); - shardRecordProcessor = new TestStreamlet(); - - task = new ShutdownTask(shardInfo, shardDetector, shardRecordProcessor, recordProcessorCheckpointer, - TERMINATE_SHUTDOWN_REASON, INITIAL_POSITION_TRIM_HORIZON, cleanupLeasesOfCompletedShards, - ignoreUnexpectedChildShards, leaseRefresher, TASK_BACKOFF_TIME_MILLIS, recordsPublisher, - hierarchicalShardSyncer, NULL_METRICS_FACTORY); - } - - /** - * Test method for {@link ShutdownTask#call()}. - */ - @Test - public final void testCallWhenApplicationDoesNotCheckpoint() { - when(recordProcessorCheckpointer.lastCheckpointValue()).thenReturn(new ExtendedSequenceNumber("3298")); - final TaskResult result = task.call(); - assertNotNull(result.getException()); - assertTrue(result.getException() instanceof IllegalArgumentException); - } - - /** - * Test method for {@link ShutdownTask#call()}. 
- */ - @Test - public final void testCallWhenSyncingShardsThrows() throws Exception { - when(recordProcessorCheckpointer.lastCheckpointValue()).thenReturn(ExtendedSequenceNumber.SHARD_END); - when(shardDetector.listShards()).thenReturn(null); - doAnswer((invocation) -> { - throw new KinesisClientLibIOException("KinesisClientLibIOException"); - }).when(hierarchicalShardSyncer) - .checkAndCreateLeaseForNewShards(shardDetector, leaseRefresher, INITIAL_POSITION_TRIM_HORIZON, - cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, - NULL_METRICS_FACTORY.createMetrics()); - - TaskResult result = task.call(); - assertNotNull(result.getException()); - assertTrue(result.getException() instanceof KinesisClientLibIOException); - verify(recordsPublisher).shutdown(); - } - - /** - * Test method for {@link ShutdownTask#taskType()}. - */ - @Test - public final void testGetTaskType() { - assertEquals(TaskType.SHUTDOWN, task.taskType()); - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/AccumulatingMetricsScopeTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/AccumulatingMetricsScopeTest.java deleted file mode 100644 index e7188073..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/AccumulatingMetricsScopeTest.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.metrics; - -import org.junit.Assert; -import org.junit.Test; - -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - - -public class AccumulatingMetricsScopeTest { - - private static class TestScope extends AccumulateByNameMetricsScope { - public void assertMetrics(MetricDatum... expectedData) { - for (MetricDatum expected : expectedData) { - MetricDatum actual = data.remove(expected.metricName()); - Assert.assertEquals(expected, actual); - } - - Assert.assertEquals("Data should be empty at the end of assertMetrics", 0, data.size()); - } - } - - @Test - public void testSingleAdd() { - TestScope scope = new TestScope(); - - scope.addData("name", 2.0, StandardUnit.COUNT); - scope.assertMetrics(TestHelper.constructDatum("name", StandardUnit.COUNT, 2.0, 2.0, 2.0, 1)); - } - - @Test - public void testAccumulate() { - TestScope scope = new TestScope(); - - scope.addData("name", 2.0, StandardUnit.COUNT); - scope.addData("name", 3.0, StandardUnit.COUNT); - scope.assertMetrics(TestHelper.constructDatum("name", StandardUnit.COUNT, 3.0, 2.0, 5.0, 2)); - } - - @Test(expected = IllegalArgumentException.class) - public void testAccumulateWrongUnit() { - TestScope scope = new TestScope(); - - scope.addData("name", 2.0, StandardUnit.COUNT); - scope.addData("name", 3.0, StandardUnit.MEGABITS); - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/MetricAccumulatingQueueTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/MetricAccumulatingQueueTest.java deleted file mode 100644 index 24ecc611..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/MetricAccumulatingQueueTest.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.metrics; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; - -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import software.amazon.awssdk.services.cloudwatch.model.Dimension; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - - -public class MetricAccumulatingQueueTest { - - private static final int MAX_QUEUE_SIZE = 5; - private MetricAccumulatingQueue queue; - - @Before - public void setup() { - this.queue = new MetricAccumulatingQueue<>(MAX_QUEUE_SIZE); - } - - private Dimension dim(String name, String value) { - return Dimension.builder().name(name).value(value).build(); - } - - /* - * Test whether the MetricDatums offered into the queue will accumulate data based on the same metricName and - * output those datums with the correctly accumulated output. 
- */ - @Test - public void testAccumulation() { - Collection dimensionsA = Collections.singleton(dim("name","a")); - Collection dimensionsB = Collections.singleton(dim("name","b")); - String keyA = "a"; - String keyB = "b"; - - MetricDatum datum1 = - TestHelper.constructDatum(keyA, StandardUnit.COUNT, 10, 5, 15, 2).toBuilder().dimensions(dimensionsA).build(); - queue.offer(new CloudWatchMetricKey(datum1), datum1); - MetricDatum datum2 = - TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2).toBuilder().dimensions(dimensionsA).build(); - queue.offer(new CloudWatchMetricKey(datum2), datum2); - - MetricDatum datum3 = - TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2).toBuilder().dimensions(dimensionsB).build(); - queue.offer(new CloudWatchMetricKey(datum3), datum3); - - MetricDatum datum4 = TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2); - queue.offer(new CloudWatchMetricKey(datum4), datum4); - queue.offer(new CloudWatchMetricKey(datum4), datum4); - - MetricDatum datum5 = - TestHelper.constructDatum(keyB, StandardUnit.COUNT, 100, 10, 110, 2).toBuilder().dimensions(dimensionsA).build(); - queue.offer(new CloudWatchMetricKey(datum5), datum5); - - Assert.assertEquals(4, queue.size()); - List> items = queue.drain(4); - - Assert.assertEquals(items.get(0).datum, TestHelper.constructDatum(keyA, StandardUnit.COUNT, 10, 1, 17, 4) - .toBuilder().dimensions(dimensionsA).build()); - Assert.assertEquals(items.get(1).datum, datum3); - Assert.assertEquals(items.get(2).datum, TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 4, 4)); - Assert.assertEquals(items.get(3).datum, TestHelper.constructDatum(keyB, StandardUnit.COUNT, 100, 10, 110, 2) - .toBuilder().dimensions(dimensionsA).build()); - } - - /* - * Test that the number of MetricDatum that can be added to our queue is capped at the MAX_QUEUE_SIZE. - * Therefore, any datums added to the queue that is greater than the capacity of our queue will be dropped. 
- */ - @Test - public void testDrop() { - for (int i = 0; i < MAX_QUEUE_SIZE; i++) { - MetricDatum datum = TestHelper.constructDatum(Integer.toString(i), StandardUnit.COUNT, 1, 1, 2, 2); - CloudWatchMetricKey key = new CloudWatchMetricKey(datum); - Assert.assertTrue(queue.offer(key, datum)); - } - - MetricDatum datum = TestHelper.constructDatum("foo", StandardUnit.COUNT, 1, 1, 2, 2); - Assert.assertFalse(queue.offer(new CloudWatchMetricKey(datum), datum)); - Assert.assertEquals(MAX_QUEUE_SIZE, queue.size()); - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/TestHelper.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/TestHelper.java deleted file mode 100644 index 6b4bce51..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/TestHelper.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.metrics; - - -import software.amazon.awssdk.services.cloudwatch.model.Dimension; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; -import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; - -public class TestHelper { - public static MetricDatum constructDatum(String name, - StandardUnit unit, - double maximum, - double minimum, - double sum, - double count) { - return MetricDatum.builder().metricName(name) - .unit(unit) - .statisticValues(StatisticSet.builder().maximum(maximum) - .minimum(minimum) - .sum(sum) - .sampleCount(count).build()).build(); - } - - public static Dimension constructDimension(String name, String value) { - return Dimension.builder().name(name).value(value).build(); - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/AWSExceptionManagerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/AWSExceptionManagerTest.java deleted file mode 100644 index 4ffaab02..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/AWSExceptionManagerTest.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.retrieval; - -import org.junit.Test; - -import lombok.Getter; -import lombok.extern.slf4j.Slf4j; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.isA; -import static org.junit.Assert.assertThat; - -@Slf4j -public class AWSExceptionManagerTest { - - @Test - public void testSpecificException() { - AWSExceptionManager manager = new AWSExceptionManager(); - final String EXPECTED_HANDLING_MARKER = "Handled-TestException"; - - manager.add(TestException.class, t -> { - log.info("Handling test exception: {} -> {}", t.getMessage(), t.getAdditionalMessage()); - return new RuntimeException(EXPECTED_HANDLING_MARKER, t); - }); - - TestException te = new TestException("Main Mesage", "Sub Message"); - - - RuntimeException converted = manager.apply(te); - - assertThat(converted, isA(RuntimeException.class)); - assertThat(converted.getMessage(), equalTo(EXPECTED_HANDLING_MARKER)); - assertThat(converted.getCause(), equalTo(te)); - - } - - @Test - public void testParentException() { - AWSExceptionManager manager = new AWSExceptionManager(); - final String EXPECTED_HANDLING_MARKER = "Handled-IllegalStateException"; - manager.add(IllegalArgumentException.class, i -> new RuntimeException("IllegalArgument", i)); - manager.add(Exception.class, i -> new RuntimeException("RawException", i)); - manager.add(IllegalStateException.class, i -> new RuntimeException(EXPECTED_HANDLING_MARKER, i)); - - TestException testException = new TestException("IllegalStateTest", "Stuff"); - - RuntimeException converted = manager.apply(testException); - - assertThat(converted.getMessage(), equalTo(EXPECTED_HANDLING_MARKER)); - assertThat(converted.getCause(), equalTo(testException)); - } - - @Test - public void testDefaultHandler() { - final String EXPECTED_HANDLING_MARKER = "Handled-Default"; - AWSExceptionManager manager = new AWSExceptionManager().defaultFunction(i -> new RuntimeException(EXPECTED_HANDLING_MARKER, i)); - - 
manager.add(IllegalArgumentException.class, i -> new RuntimeException("IllegalArgument", i)); - manager.add(Exception.class, i -> new RuntimeException("RawException", i)); - manager.add(IllegalStateException.class, i -> new RuntimeException("IllegalState", i)); - - Throwable t = new StackOverflowError("Whoops"); - - RuntimeException converted = manager.apply(t); - - assertThat(converted.getMessage(), equalTo(EXPECTED_HANDLING_MARKER)); - assertThat(converted.getCause(), equalTo(t)); - } - - @Test - public void testIdHandler() { - AWSExceptionManager manager = new AWSExceptionManager(); - - manager.add(IllegalArgumentException.class, i -> new RuntimeException("IllegalArgument", i)); - manager.add(Exception.class, i -> new RuntimeException("RawException", i)); - manager.add(IllegalStateException.class, i -> i); - - TestException te = new TestException("Main Message", "Sub Message"); - RuntimeException converted = manager.apply(te); - - assertThat(converted.getClass(), equalTo(TestException.class)); - assertThat(converted, equalTo(te)); - } - - @Getter - private static class TestException extends IllegalStateException { - - private final String additionalMessage; - - public TestException(String message, String additionalMessage) { - super(message); - this.additionalMessage = additionalMessage; - } - } - -} \ No newline at end of file diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/IteratorBuilderTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/IteratorBuilderTest.java deleted file mode 100644 index 5b04bf8d..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/IteratorBuilderTest.java +++ /dev/null @@ -1,212 +0,0 @@ -package software.amazon.kinesis.retrieval; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.MatcherAssert.assertThat; - -import java.time.Instant; -import java.util.Date; -import java.util.function.Consumer; -import 
java.util.function.Function; -import java.util.function.Supplier; - -import org.junit.Test; - -import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; -import software.amazon.awssdk.services.kinesis.model.ShardIteratorType; -import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest; -import software.amazon.kinesis.checkpoint.SentinelCheckpoint; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; - -public class IteratorBuilderTest { - - private static final String SHARD_ID = "Shard-001"; - private static final String STREAM_NAME = "Stream"; - private static final String CONSUMER_ARN = "arn:stream"; - private static final Instant TIMESTAMP = Instant.parse("2018-04-26T13:03:00Z"); - private static final String SEQUENCE_NUMBER = "1234"; - - @Test - public void subscribeLatestTest() { - latestTest(this::stsBase, this::verifyStsBase, IteratorBuilder::request, WrappedRequest::wrapped); - } - - @Test - public void getShardLatestTest() { - latestTest(this::gsiBase, this::verifyGsiBase, IteratorBuilder::request, WrappedRequest::wrapped); - } - - @Test - public void subscribeTrimTest() { - trimHorizonTest(this::stsBase, this::verifyStsBase, IteratorBuilder::request, WrappedRequest::wrapped); - } - - @Test - public void getShardTrimTest() { - trimHorizonTest(this::gsiBase, this::verifyGsiBase, IteratorBuilder::request, WrappedRequest::wrapped); - } - - @Test - public void subscribeSequenceNumberTest() { - sequenceNumber(this::stsBase, this::verifyStsBase, IteratorBuilder::request, WrappedRequest::wrapped); - } - - @Test - public void subscribeReconnectTest() { - sequenceNumber(this::stsBase, this::verifyStsBase, IteratorBuilder::reconnectRequest, WrappedRequest::wrapped, - ShardIteratorType.AFTER_SEQUENCE_NUMBER); - } - - @Test - public void getShardSequenceNumberTest() { - sequenceNumber(this::gsiBase, this::verifyGsiBase, IteratorBuilder::request, 
WrappedRequest::wrapped); - } - - @Test - public void subscribeTimestampTest() { - timeStampTest(this::stsBase, this::verifyStsBase, IteratorBuilder::request, WrappedRequest::wrapped); - } - - @Test - public void getShardTimestampTest() { - timeStampTest(this::gsiBase, this::verifyGsiBase, IteratorBuilder::request, WrappedRequest::wrapped); - } - - - private interface IteratorApply { - T apply(T base, String sequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended); - } - - private void latestTest(Supplier supplier, Consumer baseVerifier, IteratorApply iteratorRequest, - Function> toRequest) { - String sequenceNumber = SentinelCheckpoint.LATEST.name(); - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.LATEST); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, sequenceNumber, initialPosition, - ShardIteratorType.LATEST, null, null); - } - - private void trimHorizonTest(Supplier supplier, Consumer baseVerifier, - IteratorApply iteratorRequest, Function> toRequest) { - String sequenceNumber = SentinelCheckpoint.TRIM_HORIZON.name(); - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, sequenceNumber, initialPosition, - ShardIteratorType.TRIM_HORIZON, null, null); - } - - private void sequenceNumber(Supplier supplier, Consumer baseVerifier, IteratorApply iteratorRequest, - Function> toRequest) { - sequenceNumber(supplier, baseVerifier, iteratorRequest, toRequest, ShardIteratorType.AT_SEQUENCE_NUMBER); - } - - private void sequenceNumber(Supplier supplier, Consumer baseVerifier, IteratorApply iteratorRequest, - Function> toRequest, ShardIteratorType shardIteratorType) { - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - 
.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, SEQUENCE_NUMBER, initialPosition, - shardIteratorType, "1234", null); - } - - private void timeStampTest(Supplier supplier, Consumer baseVerifier, IteratorApply iteratorRequest, - Function> toRequest) { - String sequenceNumber = SentinelCheckpoint.AT_TIMESTAMP.name(); - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - .newInitialPositionAtTimestamp(new Date(TIMESTAMP.toEpochMilli())); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, sequenceNumber, initialPosition, - ShardIteratorType.AT_TIMESTAMP, null, TIMESTAMP); - } - - private void updateTest(Supplier supplier, Consumer baseVerifier, IteratorApply iteratorRequest, - Function> toRequest, String sequenceNumber, - InitialPositionInStreamExtended initialPositionInStream, ShardIteratorType expectedShardIteratorType, - String expectedSequenceNumber, Instant expectedTimestamp) { - T base = supplier.get(); - T updated = iteratorRequest.apply(base, sequenceNumber, initialPositionInStream); - WrappedRequest request = toRequest.apply(updated); - baseVerifier.accept(request.request()); - assertThat(request.shardIteratorType(), equalTo(expectedShardIteratorType)); - assertThat(request.sequenceNumber(), equalTo(expectedSequenceNumber)); - assertThat(request.timestamp(), equalTo(expectedTimestamp)); - - } - - private interface WrappedRequest { - ShardIteratorType shardIteratorType(); - - String sequenceNumber(); - - Instant timestamp(); - - R request(); - - static WrappedRequest wrapped(SubscribeToShardRequest.Builder builder) { - SubscribeToShardRequest req = builder.build(); - return new WrappedRequest() { - @Override - public ShardIteratorType shardIteratorType() { - return req.startingPosition().type(); - } - - @Override - public String sequenceNumber() { - return req.startingPosition().sequenceNumber(); - } - - @Override - public Instant 
timestamp() { - return req.startingPosition().timestamp(); - } - - @Override - public SubscribeToShardRequest request() { - return req; - } - }; - } - - static WrappedRequest wrapped(GetShardIteratorRequest.Builder builder) { - GetShardIteratorRequest req = builder.build(); - return new WrappedRequest() { - @Override - public ShardIteratorType shardIteratorType() { - return req.shardIteratorType(); - } - - @Override - public String sequenceNumber() { - return req.startingSequenceNumber(); - } - - @Override - public Instant timestamp() { - return req.timestamp(); - } - - @Override - public GetShardIteratorRequest request() { - return req; - } - }; - } - } - - private void verifyStsBase(SubscribeToShardRequest req) { - assertThat(req.shardId(), equalTo(SHARD_ID)); - assertThat(req.consumerARN(), equalTo(CONSUMER_ARN)); - } - - private void verifyGsiBase(GetShardIteratorRequest req) { - assertThat(req.streamName(), equalTo(STREAM_NAME)); - assertThat(req.shardId(), equalTo(SHARD_ID)); - } - - private SubscribeToShardRequest.Builder stsBase() { - return SubscribeToShardRequest.builder().shardId(SHARD_ID).consumerARN(CONSUMER_ARN); - } - - private GetShardIteratorRequest.Builder gsiBase() { - return GetShardIteratorRequest.builder().shardId(SHARD_ID).streamName(STREAM_NAME); - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConfigTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConfigTest.java deleted file mode 100644 index 465db328..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConfigTest.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.retrieval.fanout; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.retrieval.RetrievalFactory; - -@RunWith(MockitoJUnitRunner.class) -public class FanOutConfigTest { - - private static final String TEST_CONSUMER_ARN = "TestConsumerArn"; - private static final String TEST_APPLICATION_NAME = "TestApplication"; - private static final String TEST_STREAM_NAME = "TestStream"; - private static final String TEST_CONSUMER_NAME = "TestConsumerName"; - - @Mock - private FanOutConsumerRegistration consumerRegistration; - @Mock - private KinesisAsyncClient kinesisClient; - - @Test - public void testNoRegisterIfConsumerArnSet() throws Exception { - FanOutConfig config = new TestingConfig(kinesisClient).consumerArn(TEST_CONSUMER_ARN); - RetrievalFactory retrievalFactory = config.retrievalFactory(); - - assertThat(retrievalFactory, not(nullValue())); - verify(consumerRegistration, never()).getOrCreateStreamConsumerArn(); - } - - @Test - public void testRegisterCalledWhenConsumerArnUnset() throws Exception { - 
FanOutConfig config = new TestingConfig(kinesisClient).applicationName(TEST_APPLICATION_NAME) - .streamName(TEST_STREAM_NAME); - RetrievalFactory retrievalFactory = config.retrievalFactory(); - - assertThat(retrievalFactory, not(nullValue())); - verify(consumerRegistration).getOrCreateStreamConsumerArn(); - } - - @Test - public void testDependencyExceptionInConsumerCreation() throws Exception { - FanOutConfig config = new TestingConfig(kinesisClient).applicationName(TEST_APPLICATION_NAME) - .streamName(TEST_STREAM_NAME); - DependencyException de = new DependencyException("Bad", null); - when(consumerRegistration.getOrCreateStreamConsumerArn()).thenThrow(de); - try { - config.retrievalFactory(); - } catch (RuntimeException e) { - verify(consumerRegistration).getOrCreateStreamConsumerArn(); - assertThat(e.getCause(), equalTo(de)); - } - } - - @Test - public void testCreationWithApplicationName() throws Exception { - FanOutConfig config = new TestingConfig(kinesisClient).applicationName(TEST_APPLICATION_NAME) - .streamName(TEST_STREAM_NAME); - RetrievalFactory factory = config.retrievalFactory(); - - assertThat(factory, not(nullValue())); - - TestingConfig testingConfig = (TestingConfig) config; - assertThat(testingConfig.stream, equalTo(TEST_STREAM_NAME)); - assertThat(testingConfig.consumerToCreate, equalTo(TEST_APPLICATION_NAME)); - } - - @Test - public void testCreationWithConsumerName() throws Exception { - FanOutConfig config = new TestingConfig(kinesisClient).consumerName(TEST_CONSUMER_NAME) - .streamName(TEST_STREAM_NAME); - RetrievalFactory factory = config.retrievalFactory(); - - assertThat(factory, not(nullValue())); - - TestingConfig testingConfig = (TestingConfig) config; - assertThat(testingConfig.stream, equalTo(TEST_STREAM_NAME)); - assertThat(testingConfig.consumerToCreate, equalTo(TEST_CONSUMER_NAME)); - } - - @Test - public void testCreationWithBothConsumerApplication() throws Exception { - FanOutConfig config = new 
TestingConfig(kinesisClient).applicationName(TEST_APPLICATION_NAME) - .consumerName(TEST_CONSUMER_NAME).streamName(TEST_STREAM_NAME); - RetrievalFactory factory = config.retrievalFactory(); - - assertThat(factory, not(nullValue())); - - TestingConfig testingConfig = (TestingConfig) config; - assertThat(testingConfig.stream, equalTo(TEST_STREAM_NAME)); - assertThat(testingConfig.consumerToCreate, equalTo(TEST_CONSUMER_NAME)); - } - - private class TestingConfig extends FanOutConfig { - - String stream; - String consumerToCreate; - - public TestingConfig(KinesisAsyncClient kinesisClient) { - super(kinesisClient); - } - - @Override - protected FanOutConsumerRegistration createConsumerRegistration(KinesisAsyncClient client, String stream, - String consumerToCreate) { - this.stream = stream; - this.consumerToCreate = consumerToCreate; - return consumerRegistration; - } - } - -} \ No newline at end of file diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistrationTest.java deleted file mode 100644 index f6f23714..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistrationTest.java +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Amazon Software License - * (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at - * http://aws.amazon.com/asl/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.retrieval.fanout; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.concurrent.CompletableFuture; - -import org.apache.commons.lang3.StringUtils; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.Consumer; -import software.amazon.awssdk.services.kinesis.model.ConsumerDescription; -import software.amazon.awssdk.services.kinesis.model.ConsumerStatus; -import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest; -import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerResponse; -import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryRequest; -import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryResponse; -import software.amazon.awssdk.services.kinesis.model.LimitExceededException; -import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerRequest; -import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse; -import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; -import software.amazon.awssdk.services.kinesis.model.StreamDescriptionSummary; -import software.amazon.awssdk.services.kinesis.model.StreamStatus; -import software.amazon.kinesis.leases.exceptions.DependencyException; - -/** - * - */ -@RunWith(MockitoJUnitRunner.class) -public class FanOutConsumerRegistrationTest { - private static final String STREAM_NAME = "TestStream"; - private 
static final String CONSUMER_NAME = "TestConsumer"; - private static final String STREAM_ARN = "TestStreamArn"; - private static final String CONSUMER_ARN = "TestConsumerArn"; - private static final int MAX_DSS_RETRIES = 5; - private static final int MAX_DSC_RETRIES = 5; - private static final int RSC_RETRIES = 5; - private static final long BACKOFF_MILLIS = 50L; - - @Mock - private KinesisAsyncClient client; - - private FanOutConsumerRegistration consumerRegistration; - - @Before - public void setup() { - consumerRegistration = new FanOutConsumerRegistration(client, STREAM_NAME, CONSUMER_NAME, MAX_DSS_RETRIES, - MAX_DSC_RETRIES, RSC_RETRIES, BACKOFF_MILLIS); - } - - @Test - public void testConsumerAlreadyExists() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); - final CompletableFuture dscFuture = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); - - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); - - final String consumerArn = consumerRegistration.getOrCreateStreamConsumerArn(); - - assertThat(consumerArn, equalTo(CONSUMER_ARN)); - - verify(client).describeStreamConsumer(eq(createDescribeStreamConsumerRequest(null))); - verify(client).describeStreamSummary(eq(createDescribeStreamSummaryRequest())); - verify(client, never()).registerStreamConsumer(any(RegisterStreamConsumerRequest.class)); - } - - @Test - public void testConsumerAlreadyExistsMultipleCalls() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); - final CompletableFuture dscFuture = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); - - 
when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); - - final String firstCall = consumerRegistration.getOrCreateStreamConsumerArn(); - - final String secondCall = consumerRegistration.getOrCreateStreamConsumerArn(); - - assertThat(firstCall, equalTo(CONSUMER_ARN)); - assertThat(secondCall, equalTo(CONSUMER_ARN)); - - verify(client).describeStreamConsumer(eq(createDescribeStreamConsumerRequest(null))); - verify(client).describeStreamSummary(eq(createDescribeStreamSummaryRequest())); - verify(client, never()).registerStreamConsumer(any(RegisterStreamConsumerRequest.class)); - } - - @Test(expected = LimitExceededException.class) - public void testDescribeStreamConsumerThrottled() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); - final CompletableFuture dscFuture = CompletableFuture.supplyAsync(() -> { - throw LimitExceededException.builder().build(); - }); - - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); - - try { - consumerRegistration.getOrCreateStreamConsumerArn(); - } finally { - verify(client).describeStreamSummary(eq(createDescribeStreamSummaryRequest())); - verify(client, times(MAX_DSC_RETRIES)) - .describeStreamConsumer(eq(createDescribeStreamConsumerRequest(null))); - } - } - - @Test(expected = DependencyException.class) - public void testRegisterStreamConsumerThrottled() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); - final CompletableFuture dscFuture = CompletableFuture.supplyAsync(() -> { - throw ResourceNotFoundException.builder().build(); - }); - final CompletableFuture rscFuture = 
CompletableFuture.supplyAsync(() -> { - throw LimitExceededException.builder().build(); - }); - - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); - when(client.registerStreamConsumer(any(RegisterStreamConsumerRequest.class))).thenReturn(rscFuture); - - try { - consumerRegistration.getOrCreateStreamConsumerArn(); - } finally { - verify(client, times(RSC_RETRIES)) - .registerStreamConsumer(eq(createRegisterStreamConsumerRequest())); - // Verify that DescribeStreamConsumer was called for at least RegisterStreamConsumer retries + 1 at start. - verify(client).describeStreamConsumer(eq(createDescribeStreamConsumerRequest(null))); - } - } - - @Test - public void testNewRegisterStreamConsumer() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); - final CompletableFuture failureResponse = CompletableFuture.supplyAsync(() -> { - throw ResourceNotFoundException.builder().build(); - }); - final CompletableFuture intermidateResponse = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.CREATING)); - final CompletableFuture successResponse = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); - final CompletableFuture rscFuture = CompletableFuture - .completedFuture(createRegisterStreamConsumerResponse()); - - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(failureResponse) - .thenReturn(intermidateResponse).thenReturn(successResponse); - when(client.registerStreamConsumer(any(RegisterStreamConsumerRequest.class))).thenReturn(rscFuture); - - final String consumerArn = consumerRegistration.getOrCreateStreamConsumerArn(); - - 
assertThat(consumerArn, equalTo(CONSUMER_ARN)); - - verify(client).registerStreamConsumer(eq(createRegisterStreamConsumerRequest())); - verify(client).describeStreamSummary(eq(createDescribeStreamSummaryRequest())); - verify(client).describeStreamConsumer(eq(createDescribeStreamConsumerRequest(null))); - verify(client, times(2)) - .describeStreamConsumer(eq(createDescribeStreamConsumerRequest(CONSUMER_ARN))); - } - - @Test(expected = IllegalStateException.class) - public void testStreamConsumerStuckInCreating() throws Exception { - final CompletableFuture dssFuture = CompletableFuture.completedFuture( - createDescribeStreamSummaryResponse()); - final CompletableFuture dscFuture = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.CREATING)); - - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); - - try { - consumerRegistration.getOrCreateStreamConsumerArn(); - } finally { - verify(client).describeStreamSummary(eq(createDescribeStreamSummaryRequest())); - // Verify that the call to DSC was made for the max retry attempts and one for the initial response object. 
- verify(client).describeStreamConsumer(eq(createDescribeStreamConsumerRequest(null))); - verify(client, times(MAX_DSC_RETRIES)) - .describeStreamConsumer(eq(createDescribeStreamConsumerRequest(CONSUMER_ARN))); - verify(client, never()).registerStreamConsumer(any(RegisterStreamConsumerRequest.class)); - } - - } - - private DescribeStreamSummaryRequest createDescribeStreamSummaryRequest() { - return DescribeStreamSummaryRequest.builder().streamName(STREAM_NAME).build(); - } - - private DescribeStreamSummaryResponse createDescribeStreamSummaryResponse() { - return DescribeStreamSummaryResponse.builder().streamDescriptionSummary(StreamDescriptionSummary.builder() - .streamName(STREAM_NAME).streamARN(STREAM_ARN).streamStatus(StreamStatus.ACTIVE).build()).build(); - } - - private DescribeStreamConsumerRequest createDescribeStreamConsumerRequest(final String consumerArn) { - if (StringUtils.isEmpty(consumerArn)) { - return DescribeStreamConsumerRequest.builder().streamARN(STREAM_ARN).consumerName(CONSUMER_NAME).build(); - } - return DescribeStreamConsumerRequest.builder().consumerARN(consumerArn).build(); - } - - private DescribeStreamConsumerResponse createDescribeStreamConsumerResponse(final ConsumerStatus status) { - return DescribeStreamConsumerResponse.builder().consumerDescription(ConsumerDescription.builder() - .consumerStatus(status).consumerARN(CONSUMER_ARN).consumerName(CONSUMER_NAME).build()).build(); - } - - private RegisterStreamConsumerRequest createRegisterStreamConsumerRequest() { - return RegisterStreamConsumerRequest.builder().streamARN(STREAM_ARN).consumerName(CONSUMER_NAME).build(); - } - - private RegisterStreamConsumerResponse createRegisterStreamConsumerResponse() { - return RegisterStreamConsumerResponse.builder().consumer(Consumer.builder().consumerName(CONSUMER_NAME) - .consumerARN(CONSUMER_ARN).consumerStatus(ConsumerStatus.CREATING).build()).build(); - } - -} diff --git 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisherTest.java deleted file mode 100644 index 50896f99..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisherTest.java +++ /dev/null @@ -1,412 +0,0 @@ -package software.amazon.kinesis.retrieval.fanout; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -import java.nio.ByteBuffer; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import io.netty.handler.timeout.ReadTimeoutException; -import org.hamcrest.Description; -import org.hamcrest.Matcher; -import org.hamcrest.TypeSafeDiagnosingMatcher; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.core.async.SdkPublisher; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.Record; -import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; -import software.amazon.awssdk.services.kinesis.model.ShardIteratorType; -import software.amazon.awssdk.services.kinesis.model.StartingPosition; 
-import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEvent; -import software.amazon.awssdk.services.kinesis.model.SubscribeToShardEventStream; -import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.retrieval.KinesisClientRecord; -import software.amazon.kinesis.retrieval.RetryableRetrievalException; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -@RunWith(MockitoJUnitRunner.class) -@Slf4j -public class FanOutRecordsPublisherTest { - - private static final String SHARD_ID = "Shard-001"; - private static final String CONSUMER_ARN = "arn:consumer"; - - @Mock - private KinesisAsyncClient kinesisClient; - @Mock - private SdkPublisher publisher; - @Mock - private Subscription subscription; - @Mock - private Subscriber subscriber; - - private SubscribeToShardEvent batchEvent; - - @Test - public void simpleTest() throws Exception { - FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - - ArgumentCaptor captor = ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); - - doNothing().when(publisher).subscribe(captor.capture()); - - source.start(ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); - - List receivedInput = new ArrayList<>(); - - source.subscribe(new Subscriber() { - Subscription subscription; - - @Override - public void onSubscribe(Subscription s) { - subscription = s; - subscription.request(1); - } - - @Override - public void onNext(ProcessRecordsInput input) { - receivedInput.add(input); - subscription.request(1); - } - - @Override - 
public void onError(Throwable t) { - log.error("Caught throwable in subscriber", t); - fail("Caught throwable in subscriber"); - } - - @Override - public void onComplete() { - fail("OnComplete called when not expected"); - } - }); - - verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); - flowCaptor.getValue().onEventStream(publisher); - captor.getValue().onSubscribe(subscription); - - List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); - - batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records).build(); - - captor.getValue().onNext(batchEvent); - captor.getValue().onNext(batchEvent); - captor.getValue().onNext(batchEvent); - - verify(subscription, times(4)).request(1); - assertThat(receivedInput.size(), equalTo(3)); - - receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> { - assertThat(clientRecordsList.size(), equalTo(matchers.size())); - for (int i = 0; i < clientRecordsList.size(); ++i) { - assertThat(clientRecordsList.get(i), matchers.get(i)); - } - }); - - } - - @Test - public void largeRequestTest() throws Exception { - FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - - ArgumentCaptor captor = ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); - - doNothing().when(publisher).subscribe(captor.capture()); - - source.start(ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); - - List receivedInput = new ArrayList<>(); - - source.subscribe(new Subscriber() { - Subscription subscription; - - @Override - public void onSubscribe(Subscription s) { - subscription = s; - 
subscription.request(3); - } - - @Override - public void onNext(ProcessRecordsInput input) { - receivedInput.add(input); - subscription.request(1); - } - - @Override - public void onError(Throwable t) { - log.error("Caught throwable in subscriber", t); - fail("Caught throwable in subscriber"); - } - - @Override - public void onComplete() { - fail("OnComplete called when not expected"); - } - }); - - verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); - flowCaptor.getValue().onEventStream(publisher); - captor.getValue().onSubscribe(subscription); - - List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); - - batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records).build(); - - captor.getValue().onNext(batchEvent); - captor.getValue().onNext(batchEvent); - captor.getValue().onNext(batchEvent); - - verify(subscription, times(4)).request(1); - assertThat(receivedInput.size(), equalTo(3)); - - receivedInput.stream().map(ProcessRecordsInput::records).forEach(clientRecordsList -> { - assertThat(clientRecordsList.size(), equalTo(matchers.size())); - for (int i = 0; i < clientRecordsList.size(); ++i) { - assertThat(clientRecordsList.get(i), matchers.get(i)); - } - }); - - } - - @Test - public void testResourceNotFoundForShard() { - FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); - ArgumentCaptor inputCaptor = ArgumentCaptor.forClass(ProcessRecordsInput.class); - - source.subscribe(subscriber); - - verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); - FanOutRecordsPublisher.RecordFlow recordFlow = flowCaptor.getValue(); - recordFlow.exceptionOccurred(new 
RuntimeException(ResourceNotFoundException.builder().build())); - - verify(subscriber).onSubscribe(any()); - verify(subscriber, never()).onError(any()); - verify(subscriber).onNext(inputCaptor.capture()); - verify(subscriber).onComplete(); - - ProcessRecordsInput input = inputCaptor.getValue(); - assertThat(input.isAtShardEnd(), equalTo(true)); - assertThat(input.records().isEmpty(), equalTo(true)); - } - - @Test - public void testReadTimeoutExceptionForShard() { - FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); - - source.subscribe(subscriber); - - verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); - FanOutRecordsPublisher.RecordFlow recordFlow = flowCaptor.getValue(); - recordFlow.exceptionOccurred(new RuntimeException(ReadTimeoutException.INSTANCE)); - - verify(subscriber).onSubscribe(any()); - verify(subscriber).onError(any(RetryableRetrievalException.class)); - verify(subscriber, never()).onNext(any()); - verify(subscriber, never()).onComplete(); - } - - @Test - public void testContinuesAfterSequence() { - FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); - - doNothing().when(publisher).subscribe(captor.capture()); - - source.start(new ExtendedSequenceNumber("0"), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); - - NonFailingSubscriber nonFailingSubscriber = new NonFailingSubscriber(); - - source.subscribe(nonFailingSubscriber); - - SubscribeToShardRequest expected = SubscribeToShardRequest.builder().consumerARN(CONSUMER_ARN).shardId(SHARD_ID) - 
.startingPosition(StartingPosition.builder().sequenceNumber("0") - .type(ShardIteratorType.AT_SEQUENCE_NUMBER).build()) - .build(); - - verify(kinesisClient).subscribeToShard(eq(expected), flowCaptor.capture()); - - flowCaptor.getValue().onEventStream(publisher); - captor.getValue().onSubscribe(subscription); - - List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); - - batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records) - .continuationSequenceNumber("3").build(); - - captor.getValue().onNext(batchEvent); - captor.getValue().onComplete(); - flowCaptor.getValue().complete(); - - ArgumentCaptor nextSubscribeCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor nextFlowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); - - - SubscribeToShardRequest nextExpected = SubscribeToShardRequest.builder().consumerARN(CONSUMER_ARN).shardId(SHARD_ID) - .startingPosition(StartingPosition.builder().sequenceNumber("3") - .type(ShardIteratorType.AFTER_SEQUENCE_NUMBER).build()) - .build(); - - verify(kinesisClient).subscribeToShard(eq(nextExpected), nextFlowCaptor.capture()); - reset(publisher); - doNothing().when(publisher).subscribe(nextSubscribeCaptor.capture()); - - nextFlowCaptor.getValue().onEventStream(publisher); - nextSubscribeCaptor.getValue().onSubscribe(subscription); - - - List nextRecords = Stream.of(4, 5, 6).map(this::makeRecord).collect(Collectors.toList()); - List nextMatchers = nextRecords.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); - - batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(nextRecords) - .continuationSequenceNumber("6").build(); - nextSubscribeCaptor.getValue().onNext(batchEvent); - - verify(subscription, times(4)).request(1); - - 
assertThat(nonFailingSubscriber.received.size(), equalTo(2)); - - verifyRecords(nonFailingSubscriber.received.get(0).records(), matchers); - verifyRecords(nonFailingSubscriber.received.get(1).records(), nextMatchers); - - } - - private void verifyRecords(List clientRecordsList, List matchers) { - assertThat(clientRecordsList.size(), equalTo(matchers.size())); - for (int i = 0; i < clientRecordsList.size(); ++i) { - assertThat(clientRecordsList.get(i), matchers.get(i)); - } - } - - private static class NonFailingSubscriber implements Subscriber { - final List received = new ArrayList<>(); - Subscription subscription; - - @Override - public void onSubscribe(Subscription s) { - subscription = s; - subscription.request(1); - } - - @Override - public void onNext(ProcessRecordsInput input) { - received.add(input); - subscription.request(1); - } - - @Override - public void onError(Throwable t) { - log.error("Caught throwable in subscriber", t); - fail("Caught throwable in subscriber"); - } - - @Override - public void onComplete() { - fail("OnComplete called when not expected"); - } - } - - private Record makeRecord(int sequenceNumber) { - SdkBytes buffer = SdkBytes.fromByteArray(new byte[] { 1, 2, 3 }); - return Record.builder().data(buffer).approximateArrivalTimestamp(Instant.now()) - .sequenceNumber(Integer.toString(sequenceNumber)).partitionKey("A").build(); - } - - private static class KinesisClientRecordMatcher extends TypeSafeDiagnosingMatcher { - - private final KinesisClientRecord expected; - private final Matcher partitionKeyMatcher; - private final Matcher sequenceNumberMatcher; - private final Matcher approximateArrivalMatcher; - private final Matcher dataMatcher; - - public KinesisClientRecordMatcher(Record record) { - expected = KinesisClientRecord.fromRecord(record); - partitionKeyMatcher = equalTo(expected.partitionKey()); - sequenceNumberMatcher = equalTo(expected.sequenceNumber()); - approximateArrivalMatcher = 
equalTo(expected.approximateArrivalTimestamp()); - dataMatcher = equalTo(expected.data()); - - } - - @Override - protected boolean matchesSafely(KinesisClientRecord item, Description mismatchDescription) { - boolean matches = matchAndDescribe(partitionKeyMatcher, item.partitionKey(), "partitionKey", - mismatchDescription); - matches &= matchAndDescribe(sequenceNumberMatcher, item.sequenceNumber(), "sequenceNumber", - mismatchDescription); - matches &= matchAndDescribe(approximateArrivalMatcher, item.approximateArrivalTimestamp(), - "approximateArrivalTimestamp", mismatchDescription); - matches &= matchAndDescribe(dataMatcher, item.data(), "data", mismatchDescription); - return matches; - } - - private boolean matchAndDescribe(Matcher matcher, T value, String field, - Description mismatchDescription) { - if (!matcher.matches(value)) { - mismatchDescription.appendText(field).appendText(": "); - matcher.describeMismatch(value, mismatchDescription); - return false; - } - return true; - } - - @Override - public void describeTo(Description description) { - description.appendText("A kinesis client record with: ").appendText("PartitionKey: ") - .appendDescriptionOf(partitionKeyMatcher).appendText(" SequenceNumber: ") - .appendDescriptionOf(sequenceNumberMatcher).appendText(" Approximate Arrival Time: ") - .appendDescriptionOf(approximateArrivalMatcher).appendText(" Data: ") - .appendDescriptionOf(dataMatcher); - } - - } - -} \ No newline at end of file diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyIntegrationTest.java deleted file mode 100644 index a37cf7a1..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyIntegrationTest.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * 
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.retrieval.polling; - -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.core.IsEqual.equalTo; -import static org.junit.Assert.assertNull; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.concurrent.CompletionService; -import java.util.concurrent.ExecutorCompletionService; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.RejectedExecutionHandler; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.runners.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import 
software.amazon.kinesis.retrieval.DataFetcherResult; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.NullMetricsFactory; - -@RunWith(MockitoJUnitRunner.class) -public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { - private static final int CORE_POOL_SIZE = 1; - private static final int MAX_POOL_SIZE = 2; - private static final int TIME_TO_LIVE = 5; - private static final int RETRY_GET_RECORDS_IN_SECONDS = 2; - private static final int SLEEP_GET_RECORDS_IN_SECONDS = 10; - private static final MetricsFactory NULL_METRICS_FACTORY = new NullMetricsFactory(); - - private final String streamName = "testStream"; - private final String shardId = "shardId-000000000000"; - - @Mock - private Supplier> completionServiceSupplier; - @Mock - private DataFetcherResult result; - @Mock - private KinesisAsyncClient kinesisClient; - - private CompletionService completionService; - private GetRecordsResponse getRecordsResponse; - - private AsynchronousGetRecordsRetrievalStrategy getRecordsRetrivalStrategy; - private KinesisDataFetcher dataFetcher; - private ExecutorService executorService; - private RejectedExecutionHandler rejectedExecutionHandler; - private int numberOfRecords = 10; - - - @Before - public void setup() { - dataFetcher = spy(new KinesisDataFetcherForTests(kinesisClient, streamName, shardId, numberOfRecords)); - rejectedExecutionHandler = spy(new ThreadPoolExecutor.AbortPolicy()); - executorService = spy(new ThreadPoolExecutor( - CORE_POOL_SIZE, - MAX_POOL_SIZE, - TIME_TO_LIVE, - TimeUnit.SECONDS, - new LinkedBlockingQueue<>(1), - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("getrecords-worker-%d").build(), - rejectedExecutionHandler)); - completionService = spy(new ExecutorCompletionService(executorService)); - getRecordsRetrivalStrategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, executorService, - RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, "shardId-0001"); - 
getRecordsResponse = GetRecordsResponse.builder().build(); - - when(completionServiceSupplier.get()).thenReturn(completionService); - when(result.accept()).thenReturn(getRecordsResponse); - } - - @Test - public void oneRequestMultithreadTest() { - when(result.accept()).thenReturn(null); - GetRecordsResponse getRecordsResult = getRecordsRetrivalStrategy.getRecords(numberOfRecords); - verify(dataFetcher, atLeast(getLeastNumberOfCalls())).getRecords(); - verify(executorService, atLeast(getLeastNumberOfCalls())).execute(any()); - assertNull(getRecordsResult); - } - - @Test - public void multiRequestTest() { - ExecutorCompletionService completionService1 = spy(new ExecutorCompletionService(executorService)); - when(completionServiceSupplier.get()).thenReturn(completionService1); - GetRecordsResponse getRecordsResult = getRecordsRetrivalStrategy.getRecords(numberOfRecords); - verify(dataFetcher, atLeast(getLeastNumberOfCalls())).getRecords(); - verify(executorService, atLeast(getLeastNumberOfCalls())).execute(any()); - assertThat(getRecordsResult, equalTo(getRecordsResponse)); - - when(result.accept()).thenReturn(null); - ExecutorCompletionService completionService2 = spy(new ExecutorCompletionService(executorService)); - when(completionServiceSupplier.get()).thenReturn(completionService2); - getRecordsResult = getRecordsRetrivalStrategy.getRecords(numberOfRecords); - assertThat(getRecordsResult, nullValue(GetRecordsResponse.class)); - } - - @Test(expected = ExpiredIteratorException.class) - public void testExpiredIteratorExcpetion() throws InterruptedException { - when(dataFetcher.getRecords()).thenAnswer(new Answer() { - @Override - public DataFetcherResult answer(final InvocationOnMock invocationOnMock) throws Throwable { - Thread.sleep(SLEEP_GET_RECORDS_IN_SECONDS * 1000); - throw ExpiredIteratorException.builder().message("ExpiredIterator").build(); - } - }); - - try { - getRecordsRetrivalStrategy.getRecords(numberOfRecords); - } finally { - verify(dataFetcher, 
atLeast(getLeastNumberOfCalls())).getRecords(); - verify(executorService, atLeast(getLeastNumberOfCalls())).execute(any()); - } - } - - private int getLeastNumberOfCalls() { - int leastNumberOfCalls = 0; - for (int i = MAX_POOL_SIZE; i > 0; i--) { - if (i * RETRY_GET_RECORDS_IN_SECONDS <= SLEEP_GET_RECORDS_IN_SECONDS) { - leastNumberOfCalls = i; - break; - } - } - return leastNumberOfCalls; - } - - @After - public void shutdown() { - getRecordsRetrivalStrategy.shutdown(); - verify(executorService).shutdownNow(); - } - - private class KinesisDataFetcherForTests extends KinesisDataFetcher { - public KinesisDataFetcherForTests(final KinesisAsyncClient kinesisClient, final String streamName, - final String shardId, final int maxRecords) { - super(kinesisClient, streamName, shardId, maxRecords, NULL_METRICS_FACTORY); - } - - @Override - public DataFetcherResult getRecords() { - try { - Thread.sleep(SLEEP_GET_RECORDS_IN_SECONDS * 1000); - } catch (InterruptedException e) { - // Do nothing - } - - return result; - } - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyTest.java deleted file mode 100644 index 55fee449..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyTest.java +++ /dev/null @@ -1,185 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.retrieval.polling; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.concurrent.CompletionService; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; - -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.kinesis.retrieval.DataFetcherResult; - -/** - * - */ -@RunWith(MockitoJUnitRunner.class) -public class AsynchronousGetRecordsRetrievalStrategyTest { - - private static final long RETRY_GET_RECORDS_IN_SECONDS = 5; - private static final String SHARD_ID = "ShardId-0001"; - @Mock - private KinesisDataFetcher dataFetcher; - @Mock - private ExecutorService executorService; - @Mock - private Supplier> completionServiceSupplier; - @Mock - private CompletionService completionService; - @Mock - private Future successfulFuture; - @Mock - private Future blockedFuture; - @Mock - private DataFetcherResult dataFetcherResult; - - private GetRecordsResponse expectedResponses; - - @Before - public void 
before() { - expectedResponses = GetRecordsResponse.builder().build(); - - when(completionServiceSupplier.get()).thenReturn(completionService); - when(dataFetcherResult.getResult()).thenReturn(expectedResponses); - when(dataFetcherResult.accept()).thenReturn(expectedResponses); - } - - @Test - public void testSingleSuccessfulRequestFuture() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); - - when(executorService.isShutdown()).thenReturn(false); - when(completionService.submit(any())).thenReturn(successfulFuture); - when(completionService.poll(anyLong(), any())).thenReturn(successfulFuture); - when(successfulFuture.get()).thenReturn(dataFetcherResult); - - GetRecordsResponse result = strategy.getRecords(10); - - verify(executorService).isShutdown(); - verify(completionService).submit(any()); - verify(completionService).poll(eq(RETRY_GET_RECORDS_IN_SECONDS), eq(TimeUnit.SECONDS)); - verify(successfulFuture).get(); - verify(successfulFuture).cancel(eq(true)); - - assertThat(result, equalTo(expectedResponses)); - } - - @Test - public void testBlockedAndSuccessfulFuture() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); - - when(executorService.isShutdown()).thenReturn(false); - when(completionService.submit(any())).thenReturn(blockedFuture).thenReturn(successfulFuture); - when(completionService.poll(anyLong(), any())).thenReturn(null).thenReturn(successfulFuture); - when(successfulFuture.get()).thenReturn(dataFetcherResult); - when(successfulFuture.cancel(anyBoolean())).thenReturn(false); - when(blockedFuture.cancel(anyBoolean())).thenReturn(true); - when(successfulFuture.isCancelled()).thenReturn(false); - 
when(blockedFuture.isCancelled()).thenReturn(true); - - GetRecordsResponse actualResults = strategy.getRecords(10); - - verify(completionService, times(2)).submit(any()); - verify(completionService, times(2)).poll(eq(RETRY_GET_RECORDS_IN_SECONDS), eq(TimeUnit.SECONDS)); - verify(successfulFuture).get(); - verify(blockedFuture, never()).get(); - verify(successfulFuture).cancel(eq(true)); - verify(blockedFuture).cancel(eq(true)); - - assertThat(actualResults, equalTo(expectedResponses)); - } - - @Test(expected = IllegalStateException.class) - public void testStrategyIsShutdown() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); - - when(executorService.isShutdown()).thenReturn(true); - - strategy.getRecords(10); - } - - @Test - public void testPoolOutOfResources() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); - - when(executorService.isShutdown()).thenReturn(false); - when(completionService.submit(any())).thenReturn(blockedFuture).thenThrow(new RejectedExecutionException("Rejected!")).thenReturn(successfulFuture); - when(completionService.poll(anyLong(), any())).thenReturn(null).thenReturn(null).thenReturn(successfulFuture); - when(successfulFuture.get()).thenReturn(dataFetcherResult); - when(successfulFuture.cancel(anyBoolean())).thenReturn(false); - when(blockedFuture.cancel(anyBoolean())).thenReturn(true); - when(successfulFuture.isCancelled()).thenReturn(false); - when(blockedFuture.isCancelled()).thenReturn(true); - - GetRecordsResponse actualResult = strategy.getRecords(10); - - verify(completionService, times(3)).submit(any()); - verify(completionService, times(3)).poll(eq(RETRY_GET_RECORDS_IN_SECONDS), 
eq(TimeUnit.SECONDS)); - verify(successfulFuture).cancel(eq(true)); - verify(blockedFuture).cancel(eq(true)); - - - assertThat(actualResult, equalTo(expectedResponses)); - } - - @Test (expected = ExpiredIteratorException.class) - public void testExpiredIteratorExceptionCase() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); - Future successfulFuture2 = mock(Future.class); - - when(executorService.isShutdown()).thenReturn(false); - when(completionService.submit(any())).thenReturn(successfulFuture, successfulFuture2); - when(completionService.poll(anyLong(), any())).thenReturn(null).thenReturn(successfulFuture); - when(successfulFuture.get()).thenThrow(new ExecutionException(ExpiredIteratorException.builder().message("ExpiredException").build())); - - try { - strategy.getRecords(10); - } finally { - verify(executorService).isShutdown(); - verify(completionService, times(2)).submit(any()); - verify(completionService, times(2)).poll(eq(RETRY_GET_RECORDS_IN_SECONDS), eq(TimeUnit.SECONDS)); - verify(successfulFuture).cancel(eq(true)); - verify(successfulFuture2).cancel(eq(true)); - } - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcherTest.java deleted file mode 100644 index a0fa7063..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcherTest.java +++ /dev/null @@ -1,443 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.retrieval.polling; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.util.Arrays; -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; - -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.kinesis.exceptions.KinesisClientLibException; -import software.amazon.kinesis.common.InitialPositionInStream; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; - -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; -import 
software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse; -import software.amazon.awssdk.services.kinesis.model.Record; -import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; -import software.amazon.awssdk.services.kinesis.model.ShardIteratorType; -import software.amazon.awssdk.utils.CollectionUtils; -import software.amazon.kinesis.checkpoint.SentinelCheckpoint; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.NullMetricsFactory; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.retrieval.DataFetcherResult; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Unit tests for KinesisDataFetcher. - */ -@RunWith(MockitoJUnitRunner.class) -public class KinesisDataFetcherTest { - private static final int MAX_RECORDS = 1; - private static final String STREAM_NAME = "streamName"; - private static final String SHARD_ID = "shardId-1"; - private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); - private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP = - InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(1000)); - private static final MetricsFactory NULL_METRICS_FACTORY = new NullMetricsFactory(); - - private KinesisDataFetcher kinesisDataFetcher; - - @Mock - private KinesisAsyncClient kinesisClient; - - @Before - public void setup() { - kinesisDataFetcher = new KinesisDataFetcher(kinesisClient, STREAM_NAME, SHARD_ID, MAX_RECORDS, NULL_METRICS_FACTORY); - } - - /** - * Test initialize() with the LATEST iterator instruction - */ 
- @Test - public final void testInitializeLatest() throws Exception { - testInitializeAndFetch(ShardIteratorType.LATEST.toString(), - ShardIteratorType.LATEST.toString(), - INITIAL_POSITION_LATEST); - } - - /** - * Test initialize() with the TIME_ZERO iterator instruction - */ - @Test - public final void testInitializeTimeZero() throws Exception { - testInitializeAndFetch(ShardIteratorType.TRIM_HORIZON.toString(), - ShardIteratorType.TRIM_HORIZON.toString(), - INITIAL_POSITION_TRIM_HORIZON); - } - - /** - * Test initialize() with the AT_TIMESTAMP iterator instruction - */ - @Test - public final void testInitializeAtTimestamp() throws Exception { - testInitializeAndFetch(ShardIteratorType.AT_TIMESTAMP.toString(), - ShardIteratorType.AT_TIMESTAMP.toString(), - INITIAL_POSITION_AT_TIMESTAMP); - } - - - /** - * Test initialize() when a flushpoint exists. - */ - @Ignore - @Test - public final void testInitializeFlushpoint() throws Exception { - testInitializeAndFetch("foo", "123", INITIAL_POSITION_LATEST); - } - - /** - * Test initialize() with an invalid iterator instruction - */ - @Test(expected = IllegalArgumentException.class) - public final void testInitializeInvalid() throws Exception { - testInitializeAndFetch("foo", null, INITIAL_POSITION_LATEST); - } - - private CompletableFuture makeGetShardIteratorResonse(String shardIterator) - throws InterruptedException, ExecutionException { - return CompletableFuture.completedFuture( - GetShardIteratorResponse.builder().shardIterator(shardIterator).build()); - } - - @Test - public void testadvanceIteratorTo() throws KinesisClientLibException, InterruptedException, ExecutionException { - final Checkpointer checkpoint = mock(Checkpointer.class); - final String iteratorA = "foo"; - final String iteratorB = "bar"; - final String seqA = "123"; - final String seqB = "456"; - - ArgumentCaptor shardIteratorRequestCaptor = - ArgumentCaptor.forClass(GetShardIteratorRequest.class); - - 
when(kinesisClient.getShardIterator(shardIteratorRequestCaptor.capture())) - .thenReturn(makeGetShardIteratorResonse(iteratorA)) - .thenReturn(makeGetShardIteratorResonse(iteratorA)) - .thenReturn(makeGetShardIteratorResonse(iteratorB)); - when(checkpoint.getCheckpoint(SHARD_ID)).thenReturn(new ExtendedSequenceNumber(seqA)); - - kinesisDataFetcher.initialize(seqA, null); - kinesisDataFetcher.advanceIteratorTo(seqA, null); - kinesisDataFetcher.advanceIteratorTo(seqB, null); - - final List shardIteratorRequests = shardIteratorRequestCaptor.getAllValues(); - assertEquals(3, shardIteratorRequests.size()); - - int count = 0; - for (GetShardIteratorRequest request : shardIteratorRequests) { - assertEquals(STREAM_NAME, request.streamName()); - assertEquals(SHARD_ID, request.shardId()); - assertEquals(ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), request.shardIteratorTypeAsString()); - if (count == 2) { - assertEquals(seqB, request.startingSequenceNumber()); - } else { - assertEquals(seqA, request.startingSequenceNumber()); - } - count++; - } - } - - private GetShardIteratorRequest makeGetShardIteratorRequest(String shardIteratorType) { - return GetShardIteratorRequest.builder().shardIteratorType(shardIteratorType).streamName(STREAM_NAME) - .shardId(SHARD_ID).build(); - } - - @Test - public void testadvanceIteratorToTrimHorizonLatestAndAtTimestamp() throws InterruptedException, ExecutionException { - final ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(GetShardIteratorRequest.class); - final String iteratorHorizon = "TRIM_HORIZON"; - final String iteratorLatest = "LATEST"; - final String iteratorAtTimestamp = "AT_TIMESTAMP"; - final Map requestsMap = Arrays.stream( - new String[] {iteratorHorizon, iteratorLatest, iteratorAtTimestamp}) - .map(this::makeGetShardIteratorRequest) - .collect(Collectors.toMap(r -> ShardIteratorType.valueOf(r.shardIteratorTypeAsString()), r -> r)); - GetShardIteratorRequest tsReq = requestsMap.get(ShardIteratorType.AT_TIMESTAMP); - 
requestsMap.put(ShardIteratorType.AT_TIMESTAMP, tsReq.toBuilder().timestamp(INITIAL_POSITION_AT_TIMESTAMP.getTimestamp().toInstant()).build()); - - when(kinesisClient.getShardIterator(requestCaptor.capture())) - .thenReturn(makeGetShardIteratorResonse(iteratorHorizon)) - .thenReturn(makeGetShardIteratorResonse(iteratorLatest)) - .thenReturn(makeGetShardIteratorResonse(iteratorAtTimestamp)); - - kinesisDataFetcher.advanceIteratorTo(ShardIteratorType.TRIM_HORIZON.toString(), INITIAL_POSITION_TRIM_HORIZON); - assertEquals(iteratorHorizon, kinesisDataFetcher.getNextIterator()); - - kinesisDataFetcher.advanceIteratorTo(ShardIteratorType.LATEST.toString(), INITIAL_POSITION_LATEST); - assertEquals(iteratorLatest, kinesisDataFetcher.getNextIterator()); - - kinesisDataFetcher.advanceIteratorTo(ShardIteratorType.AT_TIMESTAMP.toString(), INITIAL_POSITION_AT_TIMESTAMP); - assertEquals(iteratorAtTimestamp, kinesisDataFetcher.getNextIterator()); - - final List requests = requestCaptor.getAllValues(); - assertEquals(3, requests.size()); - requests.forEach(request -> { - final ShardIteratorType type = ShardIteratorType.fromValue(request.shardIteratorTypeAsString()); - assertEquals(requestsMap.get(type), request); - requestsMap.remove(type); - }); - assertEquals(0, requestsMap.size()); - } - - private GetRecordsRequest makeGetRecordsRequest(String shardIterator) { - return GetRecordsRequest.builder().shardIterator(shardIterator).limit(MAX_RECORDS).build(); - } - - @Test - public void testGetRecordsWithResourceNotFoundException() throws InterruptedException, ExecutionException { - final ArgumentCaptor iteratorCaptor = - ArgumentCaptor.forClass(GetShardIteratorRequest.class); - final ArgumentCaptor recordsCaptor = ArgumentCaptor.forClass(GetRecordsRequest.class); - // Set up arguments used by proxy - final String nextIterator = "TestShardIterator"; - - final GetShardIteratorRequest expectedIteratorRequest = makeGetShardIteratorRequest(ShardIteratorType.LATEST.name()); - final 
GetRecordsRequest expectedRecordsRequest = makeGetRecordsRequest(nextIterator); - - final CompletableFuture future = mock(CompletableFuture.class); - - // Set up proxy mock methods - when(kinesisClient.getShardIterator(iteratorCaptor.capture())) - .thenReturn(makeGetShardIteratorResonse(nextIterator)); - when(kinesisClient.getRecords(recordsCaptor.capture())).thenReturn(future); - when(future.get()).thenThrow( - new ExecutionException(ResourceNotFoundException.builder().message("Test Exception").build())); - - // Create data fectcher and initialize it with latest type checkpoint - kinesisDataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST); - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = - new SynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher); - try { - // Call records of dataFetcher which will throw an exception - getRecordsRetrievalStrategy.getRecords(MAX_RECORDS); - } finally { - // Test shard has reached the end - assertTrue("Shard should reach the end", kinesisDataFetcher.isShardEndReached()); - assertEquals(expectedIteratorRequest, iteratorCaptor.getValue()); - assertEquals(expectedRecordsRequest, recordsCaptor.getValue()); - } - } - - @Test - public void testNonNullGetRecords() throws InterruptedException, ExecutionException { - final String nextIterator = "TestIterator"; - final ArgumentCaptor iteratorCaptor = - ArgumentCaptor.forClass(GetShardIteratorRequest.class); - final ArgumentCaptor recordsCaptor = ArgumentCaptor.forClass(GetRecordsRequest.class); - final GetShardIteratorRequest expectedIteratorRequest = makeGetShardIteratorRequest(ShardIteratorType.LATEST.name()); - final GetRecordsRequest expectedRecordsRequest = makeGetRecordsRequest(nextIterator); - - final CompletableFuture future = mock(CompletableFuture.class); - - when(kinesisClient.getShardIterator(iteratorCaptor.capture())) - .thenReturn(makeGetShardIteratorResonse(nextIterator)); - 
when(kinesisClient.getRecords(recordsCaptor.capture())).thenReturn(future); - when(future.get()).thenThrow( - new ExecutionException(ResourceNotFoundException.builder().message("Test Exception").build())); - - kinesisDataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST); - DataFetcherResult dataFetcherResult = kinesisDataFetcher.getRecords(); - - assertNotNull(dataFetcherResult); - assertEquals(expectedIteratorRequest, iteratorCaptor.getValue()); - assertEquals(expectedRecordsRequest, recordsCaptor.getValue()); - } - - private CompletableFuture makeGetRecordsResponse(String nextIterator, List records) - throws InterruptedException, ExecutionException{ - return CompletableFuture.completedFuture(GetRecordsResponse.builder().nextShardIterator(nextIterator) - .records(CollectionUtils.isNullOrEmpty(records) ? Collections.emptyList() : records) - .build()); - } - - @Test - public void testFetcherDoesNotAdvanceWithoutAccept() throws InterruptedException, ExecutionException { - final ArgumentCaptor iteratorCaptor = - ArgumentCaptor.forClass(GetShardIteratorRequest.class); - final ArgumentCaptor recordsCaptor = ArgumentCaptor.forClass(GetRecordsRequest.class); - final String initialIterator = "InitialIterator"; - final String nextIterator1 = "NextIteratorOne"; - final String nextIterator2 = "NextIteratorTwo"; - final CompletableFuture nonAdvancingResult1 = makeGetRecordsResponse(initialIterator, null); - final CompletableFuture nonAdvancingResult2 = makeGetRecordsResponse(nextIterator1, null); - final CompletableFuture finalNonAdvancingResult = makeGetRecordsResponse(nextIterator2, null); - final CompletableFuture advancingResult1 = makeGetRecordsResponse(nextIterator1, null); - final CompletableFuture advancingResult2 = makeGetRecordsResponse(nextIterator2, null); - final CompletableFuture finalAdvancingResult = makeGetRecordsResponse(null, null); - - when(kinesisClient.getShardIterator(iteratorCaptor.capture())) - 
.thenReturn(makeGetShardIteratorResonse(initialIterator)); - when(kinesisClient.getRecords(recordsCaptor.capture())).thenReturn(nonAdvancingResult1, advancingResult1, - nonAdvancingResult2, advancingResult2, finalNonAdvancingResult, finalAdvancingResult); - - kinesisDataFetcher.initialize("TRIM_HORIZON", - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON)); - - assertNoAdvance(nonAdvancingResult1.get(), initialIterator); - assertAdvanced(advancingResult1.get(), initialIterator, nextIterator1); - - assertNoAdvance(nonAdvancingResult2.get(), nextIterator1); - assertAdvanced(advancingResult2.get(), nextIterator1, nextIterator2); - - assertNoAdvance(finalNonAdvancingResult.get(), nextIterator2); - assertAdvanced(finalAdvancingResult.get(), nextIterator2, null); - - - - verify(kinesisClient, times(2)).getRecords(eq(makeGetRecordsRequest(initialIterator))); - verify(kinesisClient, times(2)).getRecords(eq(makeGetRecordsRequest(nextIterator1))); - verify(kinesisClient, times(2)).getRecords(eq(makeGetRecordsRequest(nextIterator2))); - - reset(kinesisClient); - - DataFetcherResult terminal = kinesisDataFetcher.getRecords(); - assertTrue(terminal.isShardEnd()); - assertNotNull(terminal.getResult()); - - final GetRecordsResponse terminalResult = terminal.getResult(); - assertNotNull(terminalResult.records()); - assertEquals(0, terminalResult.records().size()); - assertNull(terminalResult.nextShardIterator()); - assertEquals(kinesisDataFetcher.TERMINAL_RESULT, terminal); - - verify(kinesisClient, never()).getRecords(any(GetRecordsRequest.class)); - } - - @Test - @Ignore - public void testRestartIterator() throws InterruptedException, ExecutionException { - GetRecordsResponse getRecordsResult = mock(GetRecordsResponse.class); - GetRecordsResponse restartGetRecordsResponse = makeGetRecordsResponse(null, null).get(); - Record record = mock(Record.class); - final String nextShardIterator = "NextShardIterator"; - final String sequenceNumber = 
"SequenceNumber"; - - when(getRecordsResult.records()).thenReturn(Collections.singletonList(record)); - when(getRecordsResult.nextShardIterator()).thenReturn(nextShardIterator); - when(record.sequenceNumber()).thenReturn(sequenceNumber); - - kinesisDataFetcher.initialize(InitialPositionInStream.LATEST.toString(), INITIAL_POSITION_LATEST); - assertEquals(getRecordsResult, kinesisDataFetcher.getRecords().accept()); - - kinesisDataFetcher.restartIterator(); - assertEquals(restartGetRecordsResponse, kinesisDataFetcher.getRecords().accept()); - } - - @Test (expected = IllegalStateException.class) - public void testRestartIteratorNotInitialized() { - kinesisDataFetcher.restartIterator(); - } - - private DataFetcherResult assertAdvanced(GetRecordsResponse expectedResult, String previousValue, String nextValue) { - DataFetcherResult acceptResult = kinesisDataFetcher.getRecords(); - assertEquals(expectedResult, acceptResult.getResult()); - - assertEquals(previousValue, kinesisDataFetcher.getNextIterator()); - assertFalse(kinesisDataFetcher.isShardEndReached()); - - assertEquals(expectedResult, acceptResult.accept()); - assertEquals(nextValue, kinesisDataFetcher.getNextIterator()); - if (nextValue == null) { - assertTrue(kinesisDataFetcher.isShardEndReached()); - } - - verify(kinesisClient, times(2)).getRecords(eq(makeGetRecordsRequest(previousValue))); - - return acceptResult; - } - - private DataFetcherResult assertNoAdvance(final GetRecordsResponse expectedResult, final String previousValue) { - assertEquals(previousValue, kinesisDataFetcher.getNextIterator()); - DataFetcherResult noAcceptResult = kinesisDataFetcher.getRecords(); - assertEquals(expectedResult, noAcceptResult.getResult()); - - assertEquals(previousValue, kinesisDataFetcher.getNextIterator()); - - verify(kinesisClient).getRecords(eq(makeGetRecordsRequest(previousValue))); - - return noAcceptResult; - } - - private void testInitializeAndFetch(final String iteratorType, - final String seqNo, - final 
InitialPositionInStreamExtended initialPositionInStream) throws Exception { - final ArgumentCaptor iteratorCaptor = - ArgumentCaptor.forClass(GetShardIteratorRequest.class); - final ArgumentCaptor recordsCaptor = ArgumentCaptor.forClass(GetRecordsRequest.class); - final String iterator = "foo"; - final List expectedRecords = Collections.emptyList(); - GetShardIteratorRequest expectedIteratorRequest = - makeGetShardIteratorRequest(iteratorType); - if (iteratorType.equals(ShardIteratorType.AT_TIMESTAMP.toString())) { - expectedIteratorRequest = expectedIteratorRequest.toBuilder().timestamp(initialPositionInStream.getTimestamp().toInstant()).build(); - } else if (iteratorType.equals(ShardIteratorType.AT_SEQUENCE_NUMBER.toString())) { - expectedIteratorRequest = expectedIteratorRequest.toBuilder().startingSequenceNumber(seqNo).build(); - } - final GetRecordsRequest expectedRecordsRequest = makeGetRecordsRequest(iterator); - - when(kinesisClient.getShardIterator(iteratorCaptor.capture())) - .thenReturn(makeGetShardIteratorResonse(iterator)); - - when(kinesisClient.getRecords(recordsCaptor.capture())) - .thenReturn(makeGetRecordsResponse(null, expectedRecords)); - - Checkpointer checkpoint = mock(Checkpointer.class); - when(checkpoint.getCheckpoint(SHARD_ID)).thenReturn(new ExtendedSequenceNumber(seqNo)); - - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = - new SynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher); - kinesisDataFetcher.initialize(seqNo, initialPositionInStream); - - assertEquals(expectedRecords, getRecordsRetrievalStrategy.getRecords(MAX_RECORDS).records()); - verify(kinesisClient, times(1)).getShardIterator(eq(expectedIteratorRequest)); - verify(kinesisClient, times(1)).getRecords(eq(expectedRecordsRequest)); - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherIntegrationTest.java 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherIntegrationTest.java deleted file mode 100644 index 96943e24..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherIntegrationTest.java +++ /dev/null @@ -1,246 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - -package software.amazon.kinesis.retrieval.polling; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; - -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import 
org.mockito.runners.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; - -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; -import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse; -import software.amazon.awssdk.services.kinesis.model.Record; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.NullMetricsFactory; -import software.amazon.kinesis.retrieval.DataFetcherResult; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * These are the integration tests for the PrefetchRecordsPublisher class. 
- */ -@RunWith(MockitoJUnitRunner.class) -@Slf4j -public class PrefetchRecordsPublisherIntegrationTest { - private static final int MAX_SIZE = 3; - private static final int MAX_BYTE_SIZE = 5 * 1024 * 1024; - private static final int MAX_RECORDS_COUNT = 30_000; - private static final int MAX_RECORDS_PER_CALL = 10_000; - private static final long IDLE_MILLIS_BETWEEN_CALLS = 500L; - private static final MetricsFactory NULL_METRICS_FACTORY = new NullMetricsFactory(); - - private PrefetchRecordsPublisher getRecordsCache; - private GetRecordsRetrievalStrategy getRecordsRetrievalStrategy; - private KinesisDataFetcher dataFetcher; - private ExecutorService executorService; - private List records; - private String operation = "ProcessTask"; - private String streamName = "streamName"; - private String shardId = "shardId-000000000000"; - - @Mock - private KinesisAsyncClient kinesisClient; - @Mock - private ExtendedSequenceNumber extendedSequenceNumber; - @Mock - private InitialPositionInStreamExtended initialPosition; - - @Before - public void setup() throws InterruptedException, ExecutionException { - records = new ArrayList<>(); - dataFetcher = spy(new KinesisDataFetcherForTest(kinesisClient, streamName, shardId, MAX_RECORDS_PER_CALL)); - getRecordsRetrievalStrategy = Mockito.spy(new SynchronousGetRecordsRetrievalStrategy(dataFetcher)); - executorService = spy(Executors.newFixedThreadPool(1)); - CompletableFuture future = mock(CompletableFuture.class); - - when(extendedSequenceNumber.sequenceNumber()).thenReturn("LATEST"); - when(future.get()).thenReturn(GetShardIteratorResponse.builder().shardIterator("TestIterator").build()); - when(kinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(future); - - getRecordsCache = new PrefetchRecordsPublisher(MAX_SIZE, - MAX_BYTE_SIZE, - MAX_RECORDS_COUNT, - MAX_RECORDS_PER_CALL, - getRecordsRetrievalStrategy, - executorService, - IDLE_MILLIS_BETWEEN_CALLS, - new NullMetricsFactory(), - operation, - 
"test-shard"); - } - - @Test - public void testRollingCache() { - getRecordsCache.start(extendedSequenceNumber, initialPosition); - sleep(IDLE_MILLIS_BETWEEN_CALLS); - - ProcessRecordsInput processRecordsInput1 = getRecordsCache.getNextResult(); - - assertTrue(processRecordsInput1.records().isEmpty()); - assertEquals(processRecordsInput1.millisBehindLatest(), new Long(1000)); - assertNotNull(processRecordsInput1.cacheEntryTime()); - - ProcessRecordsInput processRecordsInput2 = getRecordsCache.getNextResult(); - - assertNotEquals(processRecordsInput1, processRecordsInput2); - } - - @Test - public void testFullCache() { - getRecordsCache.start(extendedSequenceNumber, initialPosition); - sleep(MAX_SIZE * IDLE_MILLIS_BETWEEN_CALLS); - - assertEquals(getRecordsCache.getRecordsResultQueue.size(), MAX_SIZE); - - ProcessRecordsInput processRecordsInput1 = getRecordsCache.getNextResult(); - ProcessRecordsInput processRecordsInput2 = getRecordsCache.getNextResult(); - - assertNotEquals(processRecordsInput1, processRecordsInput2); - } - - @Ignore - @Test - public void testDifferentShardCaches() { - final ExecutorService executorService2 = spy(Executors.newFixedThreadPool(1)); - final KinesisDataFetcher kinesisDataFetcher = spy(new KinesisDataFetcher(kinesisClient, streamName, shardId, MAX_RECORDS_PER_CALL, NULL_METRICS_FACTORY)); - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy2 = - spy(new AsynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher, 5 , 5, shardId)); - final PrefetchRecordsPublisher recordsPublisher2 = new PrefetchRecordsPublisher( - MAX_SIZE, - MAX_BYTE_SIZE, - MAX_RECORDS_COUNT, - MAX_RECORDS_PER_CALL, - getRecordsRetrievalStrategy2, - executorService2, - IDLE_MILLIS_BETWEEN_CALLS, - new NullMetricsFactory(), - operation, - "test-shard-2"); - - getRecordsCache.start(extendedSequenceNumber, initialPosition); - sleep(IDLE_MILLIS_BETWEEN_CALLS); - - final Record record = mock(Record.class); - final SdkBytes byteBuffer = 
SdkBytes.fromByteArray(new byte[512 * 1024]); - when(record.data()).thenReturn(byteBuffer); - - records.add(record); - records.add(record); - records.add(record); - records.add(record); - recordsPublisher2.start(extendedSequenceNumber, initialPosition); - - sleep(IDLE_MILLIS_BETWEEN_CALLS); - - ProcessRecordsInput p1 = getRecordsCache.getNextResult(); - - ProcessRecordsInput p2 = recordsPublisher2.getNextResult(); - - assertNotEquals(p1, p2); - assertTrue(p1.records().isEmpty()); - assertFalse(p2.records().isEmpty()); - assertEquals(p2.records().size(), records.size()); - - recordsPublisher2.shutdown(); - sleep(100L); - verify(executorService2).shutdownNow(); -// verify(getRecordsRetrievalStrategy2).shutdown(); - } - - @Test - public void testExpiredIteratorException() { - when(dataFetcher.getRecords()).thenAnswer(new Answer() { - @Override - public DataFetcherResult answer(final InvocationOnMock invocationOnMock) throws Throwable { - throw ExpiredIteratorException.builder().message("ExpiredIterator").build(); - } - }).thenCallRealMethod(); - doNothing().when(dataFetcher).restartIterator(); - - getRecordsCache.start(extendedSequenceNumber, initialPosition); - sleep(IDLE_MILLIS_BETWEEN_CALLS); - - ProcessRecordsInput processRecordsInput = getRecordsCache.getNextResult(); - - assertNotNull(processRecordsInput); - assertTrue(processRecordsInput.records().isEmpty()); - verify(dataFetcher).restartIterator(); - } - - @After - public void shutdown() { - getRecordsCache.shutdown(); - sleep(100L); - verify(executorService).shutdownNow(); -// verify(getRecordsRetrievalStrategy).shutdown(); - } - - private void sleep(long millis) { - try { - Thread.sleep(millis); - } catch (InterruptedException e) {} - } - - private class KinesisDataFetcherForTest extends KinesisDataFetcher { - public KinesisDataFetcherForTest(final KinesisAsyncClient kinesisClient, - final String streamName, - final String shardId, - final int maxRecords) { - super(kinesisClient, streamName, shardId, 
maxRecords, NULL_METRICS_FACTORY); - } - - @Override - public DataFetcherResult getRecords() { - GetRecordsResponse getRecordsResult = GetRecordsResponse.builder().records(new ArrayList<>(records)).millisBehindLatest(1000L).build(); - - return new AdvancingResult(getRecordsResult); - } - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherTest.java deleted file mode 100644 index 7fb82ea6..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherTest.java +++ /dev/null @@ -1,343 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ - -package software.amazon.kinesis.retrieval.polling; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyInt; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.atMost; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -import java.time.Duration; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -import io.reactivex.Flowable; -import io.reactivex.schedulers.Schedulers; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.awssdk.services.kinesis.model.Record; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; 
-import software.amazon.kinesis.metrics.NullMetricsFactory; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.KinesisClientRecord; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - -/** - * Test class for the PrefetchRecordsPublisher class. - */ -@RunWith(MockitoJUnitRunner.class) -@Slf4j -public class PrefetchRecordsPublisherTest { - private static final int SIZE_512_KB = 512 * 1024; - private static final int SIZE_1_MB = 2 * SIZE_512_KB; - private static final int MAX_RECORDS_PER_CALL = 10000; - private static final int MAX_SIZE = 5; - private static final int MAX_RECORDS_COUNT = 15000; - private static final long IDLE_MILLIS_BETWEEN_CALLS = 0L; - - @Mock - private GetRecordsRetrievalStrategy getRecordsRetrievalStrategy; - @Mock - private KinesisDataFetcher dataFetcher; - @Mock - private InitialPositionInStreamExtended initialPosition; - @Mock - private ExtendedSequenceNumber sequenceNumber; - - private List records; - private ExecutorService executorService; - private LinkedBlockingQueue spyQueue; - private PrefetchRecordsPublisher getRecordsCache; - private String operation = "ProcessTask"; - private GetRecordsResponse getRecordsResponse; - private Record record; - - @Before - public void setup() { - when(getRecordsRetrievalStrategy.getDataFetcher()).thenReturn(dataFetcher); - - executorService = spy(Executors.newFixedThreadPool(1)); - getRecordsCache = new PrefetchRecordsPublisher( - MAX_SIZE, - 3 * SIZE_1_MB, - MAX_RECORDS_COUNT, - MAX_RECORDS_PER_CALL, - getRecordsRetrievalStrategy, - executorService, - IDLE_MILLIS_BETWEEN_CALLS, - new NullMetricsFactory(), - operation, - "shardId"); - spyQueue = spy(getRecordsCache.getRecordsResultQueue); - records = spy(new ArrayList<>()); - getRecordsResponse = GetRecordsResponse.builder().records(records).build(); - - when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))).thenReturn(getRecordsResponse); - } - - @Test - 
public void testGetRecords() { - record = Record.builder().data(createByteBufferWithSize(SIZE_512_KB)).build(); - - when(records.size()).thenReturn(1000); - - final List expectedRecords = records.stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); - - getRecordsCache.start(sequenceNumber, initialPosition); - ProcessRecordsInput result = getRecordsCache.getNextResult(); - - assertEquals(expectedRecords, result.records()); - - verify(executorService).execute(any()); - verify(getRecordsRetrievalStrategy, atLeast(1)).getRecords(eq(MAX_RECORDS_PER_CALL)); - } - - // TODO: Broken test - @Test - @Ignore - public void testFullCacheByteSize() { - record = Record.builder().data(createByteBufferWithSize(SIZE_1_MB)).build(); - - when(records.size()).thenReturn(500); - - records.add(record); - - getRecordsCache.start(sequenceNumber, initialPosition); - - // Sleep for a few seconds for the cache to fill up. - sleep(2000); - - verify(getRecordsRetrievalStrategy, times(3)).getRecords(eq(MAX_RECORDS_PER_CALL)); - assertEquals(spyQueue.size(), 3); - } - - @Test - public void testFullCacheRecordsCount() { - int recordsSize = 4500; - when(records.size()).thenReturn(recordsSize); - - getRecordsCache.start(sequenceNumber, initialPosition); - - sleep(2000); - - int callRate = (int) Math.ceil((double) MAX_RECORDS_COUNT/recordsSize); -// TODO: fix this verification -// verify(getRecordsRetrievalStrategy, times(callRate)).getRecords(MAX_RECORDS_PER_CALL); -// assertEquals(spyQueue.size(), callRate); - assertTrue(callRate < MAX_SIZE); - } - - @Test - public void testFullCacheSize() { - int recordsSize = 200; - when(records.size()).thenReturn(recordsSize); - - getRecordsCache.start(sequenceNumber, initialPosition); - - // Sleep for a few seconds for the cache to fill up. 
- sleep(2000); - - verify(getRecordsRetrievalStrategy, times(MAX_SIZE + 1)).getRecords(eq(MAX_RECORDS_PER_CALL)); - assertEquals(spyQueue.size(), MAX_SIZE); - } - - // TODO: Broken tests - @Test - @Ignore - public void testMultipleCacheCalls() { - int recordsSize = 20; - record = Record.builder().data(createByteBufferWithSize(1024)).build(); - - IntStream.range(0, recordsSize).forEach(i -> records.add(record)); - final List expectedRecords = records.stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); - - getRecordsCache.start(sequenceNumber, initialPosition); - ProcessRecordsInput processRecordsInput = getRecordsCache.getNextResult(); - - verify(executorService).execute(any()); - assertEquals(expectedRecords, processRecordsInput.records()); - assertNotNull(processRecordsInput.cacheEntryTime()); - assertNotNull(processRecordsInput.cacheExitTime()); - - sleep(2000); - - ProcessRecordsInput processRecordsInput2 = getRecordsCache.getNextResult(); - assertNotEquals(processRecordsInput, processRecordsInput2); - assertEquals(expectedRecords, processRecordsInput2.records()); - assertNotEquals(processRecordsInput2.timeSpentInCache(), Duration.ZERO); - - assertTrue(spyQueue.size() <= MAX_SIZE); - } - - @Test(expected = IllegalStateException.class) - public void testGetNextRecordsWithoutStarting() { - verify(executorService, times(0)).execute(any()); - getRecordsCache.getNextResult(); - } - - @Test(expected = IllegalStateException.class) - public void testCallAfterShutdown() { - when(executorService.isShutdown()).thenReturn(true); - getRecordsCache.getNextResult(); - } - - @Test - public void testExpiredIteratorException() { - log.info("Starting tests"); - when(getRecordsRetrievalStrategy.getRecords(MAX_RECORDS_PER_CALL)).thenThrow(ExpiredIteratorException.class) - .thenReturn(getRecordsResponse); - - getRecordsCache.start(sequenceNumber, initialPosition); - - doNothing().when(dataFetcher).restartIterator(); - - getRecordsCache.getNextResult(); - - 
sleep(1000); - - verify(dataFetcher).restartIterator(); - } - - @Test(timeout = 1000L) - public void testNoDeadlockOnFullQueue() { - // - // Fixes https://github.com/awslabs/amazon-kinesis-client/issues/448 - // - // This test is to verify that the drain of a blocked queue no longer deadlocks. - // If the test times out before starting the subscriber it means something went wrong while filling the queue. - // After the subscriber is started one of the things that can trigger a timeout is a deadlock. - // - GetRecordsResponse response = GetRecordsResponse.builder().records( - Record.builder().data(SdkBytes.fromByteArray(new byte[] { 1, 2, 3 })).sequenceNumber("123").build()) - .build(); - when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenReturn(response); - - getRecordsCache.start(sequenceNumber, initialPosition); - - // - // Wait for the queue to fill up, and the publisher to block on adding items to the queue. - // - log.info("Waiting for queue to fill up"); - while (getRecordsCache.getRecordsResultQueue.size() < MAX_SIZE) { - Thread.yield(); - } - - log.info("Queue is currently at {} starting subscriber", getRecordsCache.getRecordsResultQueue.size()); - AtomicInteger receivedItems = new AtomicInteger(0); - final int expectedItems = MAX_SIZE * 3; - - Object lock = new Object(); - - Subscriber subscriber = new Subscriber() { - Subscription sub; - - @Override - public void onSubscribe(Subscription s) { - sub = s; - s.request(1); - } - - @Override - public void onNext(ProcessRecordsInput processRecordsInput) { - receivedItems.incrementAndGet(); - if (receivedItems.get() >= expectedItems) { - synchronized (lock) { - log.info("Notifying waiters"); - lock.notifyAll(); - } - sub.cancel(); - } else { - sub.request(1); - } - } - - @Override - public void onError(Throwable t) { - log.error("Caught error", t); - throw new RuntimeException(t); - } - - @Override - public void onComplete() { - fail("onComplete not expected in this test"); - } - }; - - synchronized 
(lock) { - log.info("Awaiting notification"); - Flowable.fromPublisher(getRecordsCache).subscribeOn(Schedulers.computation()) - .observeOn(Schedulers.computation(), true, 8).subscribe(subscriber); - try { - lock.wait(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - } - verify(getRecordsRetrievalStrategy, atLeast(expectedItems)).getRecords(anyInt()); - assertThat(receivedItems.get(), equalTo(expectedItems)); - } - - @After - public void shutdown() { - getRecordsCache.shutdown(); - verify(executorService).shutdownNow(); - } - - private void sleep(long millis) { - try { - Thread.sleep(millis); - } catch (InterruptedException e) {} - } - - private SdkBytes createByteBufferWithSize(int size) { - return SdkBytes.fromByteArray(new byte[size]); - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/RecordsFetcherFactoryTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/RecordsFetcherFactoryTest.java deleted file mode 100644 index 2b38f042..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/RecordsFetcherFactoryTest.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.retrieval.polling; - -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; - -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.retrieval.DataFetchingStrategy; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.RecordsFetcherFactory; -import software.amazon.kinesis.retrieval.RecordsPublisher; - -public class RecordsFetcherFactoryTest { - private String shardId = "TestShard"; - private RecordsFetcherFactory recordsFetcherFactory; - - @Mock - private GetRecordsRetrievalStrategy getRecordsRetrievalStrategy; - @Mock - private MetricsFactory metricsFactory; - - @Before - public void setUp() { - MockitoAnnotations.initMocks(this); - recordsFetcherFactory = new SimpleRecordsFetcherFactory(); - } - - @Test - @Ignore -// TODO: remove test no longer holds true - public void createDefaultRecordsFetcherTest() { - RecordsPublisher recordsCache = recordsFetcherFactory.createRecordsFetcher(getRecordsRetrievalStrategy, shardId, - metricsFactory, 1); - assertThat(recordsCache, instanceOf(BlockingRecordsPublisher.class)); - } - - @Test - public void createPrefetchRecordsFetcherTest() { - recordsFetcherFactory.dataFetchingStrategy(DataFetchingStrategy.PREFETCH_CACHED); - RecordsPublisher recordsCache = recordsFetcherFactory.createRecordsFetcher(getRecordsRetrievalStrategy, shardId, - metricsFactory, 1); - assertThat(recordsCache, instanceOf(PrefetchRecordsPublisher.class)); - } - -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamlet.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamlet.java deleted file mode 100644 index 7703ca6a..00000000 --- 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamlet.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.utils; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Semaphore; - -import software.amazon.kinesis.exceptions.InvalidStateException; -import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.KinesisClientLibNonRetryableException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.exceptions.ThrottlingException; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.leases.ShardSequenceVerifier; -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.LeaseLostInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownReason; -import software.amazon.kinesis.lifecycle.events.ShardEndedInput; -import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.processor.ShutdownNotificationAware; 
-import software.amazon.kinesis.retrieval.KinesisClientRecord; - -/** - * Streamlet that tracks records it's seen - useful for testing. - */ -@Slf4j -public class TestStreamlet implements ShardRecordProcessor, ShutdownNotificationAware { - private List records = new ArrayList<>(); - - private Set processedSeqNums = new HashSet(); // used for deduping - - private Semaphore sem; // used to allow test cases to wait for all records to be processed - - private String shardId; - - // record the last shutdown reason we were called with. - private ShutdownReason shutdownReason; - private ShardSequenceVerifier shardSequenceVerifier; - private long numProcessRecordsCallsWithEmptyRecordList; - private boolean shutdownNotificationCalled; - - private final CountDownLatch initializeLatch = new CountDownLatch(1); - private final CountDownLatch notifyShutdownLatch = new CountDownLatch(1); - private final CountDownLatch shutdownLatch = new CountDownLatch(1); - - public TestStreamlet() { - - } - - public TestStreamlet(Semaphore sem, ShardSequenceVerifier shardSequenceVerifier) { - this(); - this.sem = sem; - this.shardSequenceVerifier = shardSequenceVerifier; - } - - public List getProcessedRecords() { - return records; - } - - @Override - public void initialize(InitializationInput input) { - shardId = input.shardId(); - if (shardSequenceVerifier != null) { - shardSequenceVerifier.registerInitialization(shardId); - } - initializeLatch.countDown(); - } - - @Override - public void processRecords(ProcessRecordsInput input) { - List dataRecords = input.records(); - RecordProcessorCheckpointer checkpointer = input.checkpointer(); - if ((dataRecords != null) && (!dataRecords.isEmpty())) { - for (KinesisClientRecord record : dataRecords) { - log.debug("Processing record: {}", record); - String seqNum = record.sequenceNumber(); - if (!processedSeqNums.contains(seqNum)) { - records.add(record); - processedSeqNums.add(seqNum); - } - } - } - if (dataRecords.isEmpty()) { - 
numProcessRecordsCallsWithEmptyRecordList++; - } - try { - checkpointer.checkpoint(); - } catch (ThrottlingException | ShutdownException - | KinesisClientLibDependencyException | InvalidStateException e) { - // Continue processing records and checkpoint next time if we get a transient error. - // Don't checkpoint if the processor has been shutdown. - log.debug("Caught exception while checkpointing: ", e); - } - - if (sem != null) { - sem.release(dataRecords.size()); - } - } - - @Override - public void leaseLost(LeaseLostInput leaseLostInput) { - if (shardSequenceVerifier != null) { - shardSequenceVerifier.registerShutdown(shardId, ShutdownReason.LEASE_LOST); - } - shutdownLatch.countDown(); - } - - @Override - public void shardEnded(ShardEndedInput shardEndedInput) { - if (shardSequenceVerifier != null) { - shardSequenceVerifier.registerShutdown(shardId, ShutdownReason.SHARD_END); - } - try { - shardEndedInput.checkpointer().checkpoint(); - } catch (KinesisClientLibNonRetryableException e) { - log.error("Caught exception when checkpointing while shutdown.", e); - throw new RuntimeException(e); - } - shutdownLatch.countDown(); - } - - @Override - public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) { - - } - - /** - * @return the shardId - */ - public String getShardId() { - return shardId; - } - - /** - * @return the shutdownReason - */ - public ShutdownReason getShutdownReason() { - return shutdownReason; - } - - /** - * @return the numProcessRecordsCallsWithEmptyRecordList - */ - public long getNumProcessRecordsCallsWithEmptyRecordList() { - return numProcessRecordsCallsWithEmptyRecordList; - } - - public boolean isShutdownNotificationCalled() { - return shutdownNotificationCalled; - } - - @Override - public void shutdownRequested(RecordProcessorCheckpointer checkpointer) { - shutdownNotificationCalled = true; - notifyShutdownLatch.countDown(); - } - - public CountDownLatch getInitializeLatch() { - return initializeLatch; - } - - public 
CountDownLatch getNotifyShutdownLatch() { - return notifyShutdownLatch; - } - - public CountDownLatch getShutdownLatch() { - return shutdownLatch; - } -} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamletFactory.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamletFactory.java deleted file mode 100644 index 93c0803b..00000000 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/TestStreamletFactory.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.utils; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.Semaphore; - -import software.amazon.kinesis.leases.ShardSequenceVerifier; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; -import software.amazon.kinesis.processor.ShardRecordProcessor; - -/** - * Factory for TestStreamlet record processors. - */ -public class TestStreamletFactory implements ShardRecordProcessorFactory { - - // Will be passed to the TestStreamlet. Can be used to check if all records have been processed. - private Semaphore semaphore; - private ShardSequenceVerifier shardSequenceVerifier; - List testStreamlets = new ArrayList<>(); - - /** - * Constructor. 
- */ - public TestStreamletFactory(Semaphore semaphore, ShardSequenceVerifier shardSequenceVerifier) { - this.semaphore = semaphore; - this.shardSequenceVerifier = shardSequenceVerifier; - } - - @Override - public synchronized ShardRecordProcessor shardRecordProcessor() { - TestStreamlet processor = new TestStreamlet(semaphore, shardSequenceVerifier); - testStreamlets.add(processor); - return processor; - } - - Semaphore getSemaphore() { - return semaphore; - } - - public ShardSequenceVerifier getShardSequenceVerifier() { - return shardSequenceVerifier; - } - - /** - * @return the testStreamlets - */ - public List getTestStreamlets() { - return testStreamlets; - } - -} diff --git a/amazon-kinesis-client/src/test/resources/logback.xml b/amazon-kinesis-client/src/test/resources/logback.xml deleted file mode 100644 index ddbc6fc9..00000000 --- a/amazon-kinesis-client/src/test/resources/logback.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - %d [%thread] %-5level %logger{36} [%mdc{ShardId:-NONE}] - %msg %n - - - - - - - - - - - - \ No newline at end of file diff --git a/build.properties b/build.properties new file mode 100644 index 00000000..9a6b868a --- /dev/null +++ b/build.properties @@ -0,0 +1,10 @@ +source.. = src/main/java,\ + src/main/resources +output.. = bin/ + +bin.includes = LICENSE.txt,\ + NOTICE.txt,\ + META-INF/,\ + . 
+ +jre.compilation.profile = JavaSE-1.7 diff --git a/formatter/formatter.xml b/formatter/formatter.xml deleted file mode 100644 index b3d12a5c..00000000 --- a/formatter/formatter.xml +++ /dev/null @@ -1,291 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/pom.xml b/pom.xml index 7c5cdc7b..9503b971 100644 --- a/pom.xml +++ b/pom.xml @@ -1,26 +1,12 @@ - 4.0.0 - software.amazon.kinesis - amazon-kinesis-client-pom - pom - Amazon Kinesis Client Library - 2.0.5 + com.amazonaws + amazon-kinesis-client + jar + Amazon Kinesis Client Library for Java + 1.7.4 The Amazon Kinesis Client Library for Java enables Java developers to easily consume and process data from Amazon Kinesis. 
@@ -30,10 +16,6 @@ https://github.com/awslabs/amazon-kinesis-client.git - - 1.11.272 - - Amazon Software License @@ -42,10 +24,93 @@ - - amazon-kinesis-client - amazon-kinesis-client-multilang - + + 1.11.91 + 1.0.392 + libsqlite4java + ${project.build.directory}/test-lib + + + + + com.amazonaws + aws-java-sdk-dynamodb + ${aws-java-sdk.version} + + + com.amazonaws + aws-java-sdk-kinesis + ${aws-java-sdk.version} + + + com.amazonaws + aws-java-sdk-cloudwatch + ${aws-java-sdk.version} + + + com.google.guava + guava + 18.0 + + + com.google.protobuf + protobuf-java + 2.6.1 + + + commons-lang + commons-lang + 2.6 + + + commons-logging + commons-logging + 1.1.3 + + + org.projectlombok + lombok + 1.16.10 + provided + + + + + junit + junit + 4.11 + test + + + + org.mockito + mockito-all + 1.10.19 + test + + + + org.hamcrest + hamcrest-all + 1.3 + test + + + + com.amazonaws + DynamoDBLocal + 1.11.0.1 + test + + + + + + dynamodb-local + DynamoDB Local Release Repository + http://dynamodb-local.s3-website-us-west-2.amazonaws.com/release + + @@ -58,17 +123,6 @@ - - - ossrh - https://oss.sonatype.org/content/repositories/snapshots - - - ossrh - https://oss.sonatype.org/service/local/staging/deploy/maven2/ - - - @@ -77,16 +131,130 @@ maven-compiler-plugin 3.2 - 1.8 - 1.8 + 1.7 + 1.7 UTF-8 - + + + org.apache.maven.plugins + maven-surefire-plugin + 2.19.1 + + + **/*IntegrationTest.java + + + + sqlite4java.library.path + ${sqlite4java.libpath} + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + 2.19.1 + + + **/*IntegrationTest.java + + + + + + integration-test + verify + + + + + + org.apache.maven.plugins + maven-dependency-plugin + + + copy + test-compile + + copy + + + + + + com.almworks.sqlite4java + ${sqlite4java.native}-osx + ${sqlite4java.version} + dylib + true + ${sqlite4java.libpath} + + + + + + com.almworks.sqlite4java + ${sqlite4java.native}-linux-i386 + ${sqlite4java.version} + so + true + ${sqlite4java.libpath} + + + + + com.almworks.sqlite4java + 
${sqlite4java.native}-linux-amd64 + ${sqlite4java.version} + so + true + ${sqlite4java.libpath} + + + + + + com.almworks.sqlite4java + sqlite4java-win32-x86 + ${sqlite4java.version} + dll + true + ${sqlite4java.libpath} + + + + + com.almworks.sqlite4java + sqlite4java-win32-x64 + ${sqlite4java.version} + dll + true + ${sqlite4java.libpath} + + + + + + + + + + + disable-java8-doclint + + [1.8,) + + + -Xdoclint:none + + publishing @@ -94,7 +262,7 @@ org.apache.maven.plugins maven-gpg-plugin - 1.6 + 1.5 sign-artifacts @@ -105,17 +273,6 @@ - - org.sonatype.plugins - nexus-staging-maven-plugin - 1.6.8 - true - - sonatype-nexus-staging - https://oss.sonatype.org - false - - diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoder.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/AWSCredentialsProviderPropertyValueDecoder.java similarity index 60% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoder.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/AWSCredentialsProviderPropertyValueDecoder.java index f6e1883c..9976b071 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoder.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/AWSCredentialsProviderPropertyValueDecoder.java @@ -1,5 +1,5 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Amazon Software License (the "License"). * You may not use this file except in compliance with the License. @@ -12,22 +12,23 @@ * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ -package com.amazonaws.services.kinesis.multilang.config; +package com.amazonaws.services.kinesis.clientlibrary.config; -import java.lang.reflect.Constructor; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.lang.reflect.Constructor; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProviderChain; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSCredentialsProviderChain; /** * Get AWSCredentialsProvider property. */ -@Slf4j -class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecoder { +class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecoder { + private static final Log LOG = LogFactory.getLog(AWSCredentialsProviderPropertyValueDecoder.class); private static final String AUTH_PREFIX = "com.amazonaws.auth."; private static final String LIST_DELIMITER = ","; private static final String ARG_DELIMITER = "|"; @@ -41,18 +42,17 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecode /** * Get AWSCredentialsProvider property. 
* - * @param value - * property value as String + * @param value property value as String * @return corresponding variable in correct type */ @Override - public AwsCredentialsProvider decodeValue(String value) { + public AWSCredentialsProvider decodeValue(String value) { if (value != null) { List providerNames = getProviderNames(value); - List providers = getValidCredentialsProviders(providerNames); - AwsCredentialsProvider[] ps = new AwsCredentialsProvider[providers.size()]; + List providers = getValidCredentialsProviders(providerNames); + AWSCredentialsProvider[] ps = new AWSCredentialsProvider[providers.size()]; providers.toArray(ps); - return AwsCredentialsProviderChain.builder().credentialsProviders(ps).build(); + return new AWSCredentialsProviderChain(ps); } else { throw new IllegalArgumentException("Property AWSCredentialsProvider is missing."); } @@ -62,35 +62,35 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecode * @return list of supported types */ @Override - public List> getSupportedTypes() { - return Arrays.asList(AwsCredentialsProvider.class); + public List> getSupportedTypes() { + return Arrays.asList(AWSCredentialsProvider.class); } /* * Convert string list to a list of valid credentials providers. 
*/ - private static List getValidCredentialsProviders(List providerNames) { - List credentialsProviders = new ArrayList(); + private static List getValidCredentialsProviders(List providerNames) { + List credentialsProviders = new ArrayList(); for (String providerName : providerNames) { if (providerName.contains(ARG_DELIMITER)) { - String[] nameAndArgs = providerName.split("\\" + ARG_DELIMITER); - Class[] argTypes = new Class[nameAndArgs.length - 1]; - Arrays.fill(argTypes, String.class); - try { - Class className = Class.forName(nameAndArgs[0]); - Constructor c = className.getConstructor(argTypes); - credentialsProviders.add((AwsCredentialsProvider) c - .newInstance(Arrays.copyOfRange(nameAndArgs, 1, nameAndArgs.length))); - } catch (Exception e) { - log.debug("Can't find any credentials provider matching {}.", providerName); - } + String[] nameAndArgs = providerName.split("\\" + ARG_DELIMITER); + Class[] argTypes = new Class[nameAndArgs.length - 1]; + Arrays.fill(argTypes, String.class); + try { + Class className = Class.forName(nameAndArgs[0]); + Constructor c = className.getConstructor(argTypes); + credentialsProviders.add((AWSCredentialsProvider) c.newInstance( + Arrays.copyOfRange(nameAndArgs, 1, nameAndArgs.length))); + } catch (Exception e) { + LOG.debug("Can't find any credentials provider matching " + providerName + "."); + } } else { - try { - Class className = Class.forName(providerName); - credentialsProviders.add((AwsCredentialsProvider) className.newInstance()); - } catch (Exception e) { - log.debug("Can't find any credentials provider matching {}.", providerName); - } + try { + Class className = Class.forName(providerName); + credentialsProviders.add((AWSCredentialsProvider) className.newInstance()); + } catch (Exception e) { + LOG.debug("Can't find any credentials provider matching " + providerName + "."); + } } } return credentialsProviders; diff --git 
a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/DatePropertyValueDecoder.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/BooleanPropertyValueDecoder.java similarity index 56% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/DatePropertyValueDecoder.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/BooleanPropertyValueDecoder.java index 591c90cc..ede0294d 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/DatePropertyValueDecoder.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/BooleanPropertyValueDecoder.java @@ -1,5 +1,5 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Amazon Software License (the "License"). * You may not use this file except in compliance with the License. @@ -12,21 +12,20 @@ * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ -package com.amazonaws.services.kinesis.multilang.config; +package com.amazonaws.services.kinesis.clientlibrary.config; import java.util.Arrays; -import java.util.Date; import java.util.List; /** - * Provide Date property. + * Provide boolean property. */ -public class DatePropertyValueDecoder implements IPropertyValueDecoder { +class BooleanPropertyValueDecoder implements IPropertyValueDecoder { /** * Constructor. 
*/ - DatePropertyValueDecoder() { + BooleanPropertyValueDecoder() { } /** @@ -34,20 +33,16 @@ public class DatePropertyValueDecoder implements IPropertyValueDecoder { * @return corresponding variable in correct type */ @Override - public Date decodeValue(String value) { - try { - return new Date(Long.parseLong(value) * 1000L); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("Date property value must be numeric."); - } + public Boolean decodeValue(String value) { + return Boolean.parseBoolean(value); } /** * @return list of supported types */ @Override - public List> getSupportedTypes() { - return Arrays.asList(Date.class); + public List> getSupportedTypes() { + return Arrays.asList(boolean.class, Boolean.class); } } diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/ClientConfigurationPropertyValueDecoder.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/ClientConfigurationPropertyValueDecoder.java new file mode 100644 index 00000000..abd3d7f0 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/ClientConfigurationPropertyValueDecoder.java @@ -0,0 +1,50 @@ +/* + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.config; + +import java.util.Arrays; +import java.util.List; + +import com.amazonaws.ClientConfiguration; +/** + * Get ClientConfiguration property. 
+ */ +class ClientConfigurationPropertyValueDecoder implements IPropertyValueDecoder { + + /** + * Constructor. + */ + ClientConfigurationPropertyValueDecoder() { + } + + /** + * @param value property value as String + * @return corresponding variable in correct type + */ + @Override + public ClientConfiguration decodeValue(String value) { + throw new UnsupportedOperationException("ClientConfiguration is currently not supported"); + } + + /** + * Get supported types. + * @return a list of supported class + */ + @Override + public List> getSupportedTypes() { + return Arrays.asList(ClientConfiguration.class); + } + +} diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/IPropertyValueDecoder.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/IPropertyValueDecoder.java similarity index 50% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/IPropertyValueDecoder.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/IPropertyValueDecoder.java index bc23b18b..d6ef5542 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/IPropertyValueDecoder.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/IPropertyValueDecoder.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package com.amazonaws.services.kinesis.multilang.config; +package com.amazonaws.services.kinesis.clientlibrary.config; import java.util.List; diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/InitialPositionInStreamPropertyValueDecoder.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/InitialPositionInStreamPropertyValueDecoder.java similarity index 53% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/InitialPositionInStreamPropertyValueDecoder.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/InitialPositionInStreamPropertyValueDecoder.java index 0b44273a..63b6e306 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/InitialPositionInStreamPropertyValueDecoder.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/InitialPositionInStreamPropertyValueDecoder.java @@ -1,23 +1,23 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package com.amazonaws.services.kinesis.multilang.config; +package com.amazonaws.services.kinesis.clientlibrary.config; import java.util.Arrays; import java.util.List; -import software.amazon.kinesis.common.InitialPositionInStream; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream; /** * Get an InitialiPosition enum property. 
diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/IntegerPropertyValueDecoder.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/IntegerPropertyValueDecoder.java similarity index 51% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/IntegerPropertyValueDecoder.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/IntegerPropertyValueDecoder.java index 012ea2b6..ec1248e7 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/IntegerPropertyValueDecoder.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/IntegerPropertyValueDecoder.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package com.amazonaws.services.kinesis.multilang.config; +package com.amazonaws.services.kinesis.clientlibrary.config; import java.util.Arrays; import java.util.List; diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/KinesisClientLibConfigurator.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/KinesisClientLibConfigurator.java similarity index 76% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/KinesisClientLibConfigurator.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/KinesisClientLibConfigurator.java index 853a7cc9..e239f967 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/KinesisClientLibConfigurator.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/KinesisClientLibConfigurator.java @@ -1,22 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package com.amazonaws.services.kinesis.multilang.config; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; +package com.amazonaws.services.kinesis.clientlibrary.config; import java.io.IOException; import java.io.InputStream; @@ -32,6 +28,12 @@ import java.util.Properties; import java.util.Set; import java.util.UUID; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration; + /** * KinesisClientLibConfigurator constructs a KinesisClientLibConfiguration from java properties file. The following * three properties must be provided. 1) "applicationName" 2) "streamName" 3) "AWSCredentialsProvider" @@ -40,8 +42,9 @@ import java.util.UUID; * KinesisClientLibConfiguration and has a corresponding "with{variableName}" setter method, will be read in, and its * value in properties file will be assigned to corresponding variable in KinesisClientLibConfiguration. 
*/ -@Slf4j public class KinesisClientLibConfigurator { + + private static final Log LOG = LogFactory.getLog(KinesisClientLibConfigurator.class); private static final String PREFIX = "with"; // Required properties @@ -63,13 +66,13 @@ public class KinesisClientLibConfigurator { Arrays.asList(new IntegerPropertyValueDecoder(), new LongPropertyValueDecoder(), new BooleanPropertyValueDecoder(), - new DatePropertyValueDecoder(), new AWSCredentialsProviderPropertyValueDecoder(), new StringPropertyValueDecoder(), new InitialPositionInStreamPropertyValueDecoder(), + new ClientConfigurationPropertyValueDecoder(), new SetPropertyValueDecoder()); - classToDecoder = new Hashtable<>(); + classToDecoder = new Hashtable, IPropertyValueDecoder>(); for (IPropertyValueDecoder getter : getters) { for (Class clazz : getter.getSupportedTypes()) { /* @@ -79,10 +82,10 @@ public class KinesisClientLibConfigurator { classToDecoder.put(clazz, getter); } } - nameToMethods = new Hashtable<>(); + nameToMethods = new Hashtable>(); for (Method method : KinesisClientLibConfiguration.class.getMethods()) { if (!nameToMethods.containsKey(method.getName())) { - nameToMethods.put(method.getName(), new ArrayList<>()); + nameToMethods.put(method.getName(), new ArrayList()); } nameToMethods.get(method.getName()).add(method); } @@ -101,11 +104,11 @@ public class KinesisClientLibConfigurator { // The three minimum required arguments for constructor are obtained first. They are all mandatory, all of them // should be provided. If any of these three failed to be set, program will fail. 
IPropertyValueDecoder stringValueDecoder = new StringPropertyValueDecoder(); - IPropertyValueDecoder awsCPPropGetter = + IPropertyValueDecoder awsCPPropGetter = new AWSCredentialsProviderPropertyValueDecoder(); String applicationName = stringValueDecoder.decodeValue(properties.getProperty(PROP_APP_NAME)); String streamName = stringValueDecoder.decodeValue(properties.getProperty(PROP_STREAM_NAME)); - AwsCredentialsProvider provider = + AWSCredentialsProvider provider = awsCPPropGetter.decodeValue(properties.getProperty(PROP_CREDENTIALS_PROVIDER_KINESIS)); if (applicationName == null || applicationName.isEmpty()) { @@ -116,7 +119,7 @@ public class KinesisClientLibConfigurator { } // Decode the DynamoDB credentials provider if it exists. If not use the Kinesis credentials provider. - AwsCredentialsProvider providerDynamoDB; + AWSCredentialsProvider providerDynamoDB; String propCredentialsProviderDynamoDBValue = properties.getProperty(PROP_CREDENTIALS_PROVIDER_DYNAMODB); if (propCredentialsProviderDynamoDBValue == null) { providerDynamoDB = provider; @@ -125,7 +128,7 @@ public class KinesisClientLibConfigurator { } // Decode the CloudWatch credentials provider if it exists. If not use the Kinesis credentials provider. - AwsCredentialsProvider providerCloudWatch; + AWSCredentialsProvider providerCloudWatch; String propCredentialsProviderCloudWatchValue = properties.getProperty(PROP_CREDENTIALS_PROVIDER_CLOUDWATCH); if (propCredentialsProviderCloudWatchValue == null) { providerCloudWatch = provider; @@ -137,8 +140,8 @@ public class KinesisClientLibConfigurator { String workerId = stringValueDecoder.decodeValue(properties.getProperty(PROP_WORKER_ID)); if (workerId == null || workerId.isEmpty()) { workerId = UUID.randomUUID().toString(); - log.info("Value of workerId is not provided in the properties. WorkerId is automatically assigned as: {}", - workerId); + LOG.info("Value of workerId is not provided in the properties. 
WorkerId is automatically " + + "assigned as: " + workerId); } KinesisClientLibConfiguration config = @@ -199,27 +202,38 @@ public class KinesisClientLibConfigurator { IPropertyValueDecoder decoder = classToDecoder.get(paramType); try { method.invoke(config, decoder.decodeValue(propertyValue)); - log.info("Successfully set property {} with value {}", propertyKey, propertyValue); + LOG.info(String.format("Successfully set property %s with value %s", + propertyKey, + propertyValue)); return; } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { // At this point, we really thought that we could call this method. - log.warn("Encountered an error while invoking method %s with value {}. Exception was {}", - method, propertyValue, e); + LOG.warn(String.format("Encountered an error while invoking method %s with value %s. " + + "Exception was %s", + method, + propertyValue, + e)); } catch (UnsupportedOperationException e) { - log.warn("The property {} is not supported as type {} at this time.", propertyKey, - paramType); + LOG.warn(String.format("The property %s is not supported as type %s at this time.", + propertyKey, + paramType)); } } else { - log.debug("No method for decoding parameters of type {} so method {} could not be invoked.", - paramType, method); + LOG.debug(String.format("No method for decoding parameters of type %s so method %s could not " + + "be invoked.", + paramType, + method)); } } else { - log.debug("Method {} doesn't look like it is appropriate for setting property {}. Looking for" - + " something called {}.", method, propertyKey, targetMethodName); + LOG.debug(String.format("Method %s doesn't look like it is appropriate for setting property %s. 
" + + "Looking for something called %s.", + method, + propertyKey, + targetMethodName)); } } } else { - log.debug(String.format("There was no appropriately named method for setting property %s.", propertyKey)); + LOG.debug(String.format("There was no appropriately named method for setting property %s.", propertyKey)); } } } diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/LongPropertyValueDecoder.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/LongPropertyValueDecoder.java similarity index 51% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/LongPropertyValueDecoder.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/LongPropertyValueDecoder.java index 1382b153..7d63960c 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/LongPropertyValueDecoder.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/LongPropertyValueDecoder.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
+ * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package com.amazonaws.services.kinesis.multilang.config; +package com.amazonaws.services.kinesis.clientlibrary.config; import java.util.Arrays; import java.util.List; diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/SetPropertyValueDecoder.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/SetPropertyValueDecoder.java similarity index 67% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/SetPropertyValueDecoder.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/SetPropertyValueDecoder.java index 6dfe2dbe..c6eea476 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/SetPropertyValueDecoder.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/SetPropertyValueDecoder.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package com.amazonaws.services.kinesis.multilang.config; +package com.amazonaws.services.kinesis.clientlibrary.config; import java.util.Arrays; import java.util.HashSet; diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/StringPropertyValueDecoder.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/StringPropertyValueDecoder.java similarity index 58% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/StringPropertyValueDecoder.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/StringPropertyValueDecoder.java index d5cc0482..d9e4339f 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/config/StringPropertyValueDecoder.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/config/StringPropertyValueDecoder.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package com.amazonaws.services.kinesis.multilang.config; +package com.amazonaws.services.kinesis.clientlibrary.config; import java.util.Arrays; import java.util.List; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/InvalidStateException.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/InvalidStateException.java similarity index 54% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/InvalidStateException.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/InvalidStateException.java index 35bb10bd..a9bb5bdf 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/InvalidStateException.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/InvalidStateException.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.exceptions; +package com.amazonaws.services.kinesis.clientlibrary.exceptions; /** * This is thrown when the Amazon Kinesis Client Library encounters issues with its internal state (e.g. DynamoDB table diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibDependencyException.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibDependencyException.java similarity index 58% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibDependencyException.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibDependencyException.java index ea7ff619..fef3c1b1 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibDependencyException.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibDependencyException.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.exceptions; +package com.amazonaws.services.kinesis.clientlibrary.exceptions; /** * This is thrown when the Amazon Kinesis Client Library encounters issues talking to its dependencies diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibException.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibException.java similarity index 55% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibException.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibException.java index 50e8ee05..5e77649f 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibException.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibException.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.exceptions; +package com.amazonaws.services.kinesis.clientlibrary.exceptions; /** * Abstract class for exceptions of the Amazon Kinesis Client Library. 
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibNonRetryableException.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibNonRetryableException.java similarity index 51% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibNonRetryableException.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibNonRetryableException.java index 81ca389a..c32409d5 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibNonRetryableException.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibNonRetryableException.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.exceptions; +package com.amazonaws.services.kinesis.clientlibrary.exceptions; /** * Non-retryable exceptions. Simply retrying the same request/operation is not expected to succeed. diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibRetryableException.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibRetryableException.java similarity index 53% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibRetryableException.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibRetryableException.java index 3229046a..537278d1 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibRetryableException.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/KinesisClientLibRetryableException.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.exceptions; +package com.amazonaws.services.kinesis.clientlibrary.exceptions; /** * Retryable exceptions (e.g. transient errors). The request/operation is expected to succeed upon (back off and) retry. diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/ShutdownException.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/ShutdownException.java new file mode 100644 index 00000000..1f5b38e1 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/ShutdownException.java @@ -0,0 +1,39 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.exceptions; + +/** + * The RecordProcessor instance has been shutdown (e.g. and attempts a checkpoint). + */ +public class ShutdownException extends KinesisClientLibNonRetryableException { + + private static final long serialVersionUID = 1L; + + /** + * @param message provides more details about the cause and potential ways to debug/address. 
+ */ + public ShutdownException(String message) { + super(message); + } + + /** + * @param message provides more details about the cause and potential ways to debug/address. + * @param e Cause of the exception + */ + public ShutdownException(String message, Exception e) { + super(message, e); + } + +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ThrottlingException.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/ThrottlingException.java similarity index 52% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ThrottlingException.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/ThrottlingException.java index 8349ac34..7e483ba5 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ThrottlingException.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/ThrottlingException.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.exceptions; +package com.amazonaws.services.kinesis.clientlibrary.exceptions; /** * Thrown when requests are throttled by a service (e.g. DynamoDB when storing a checkpoint). diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/BlockedOnParentShardException.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/internal/BlockedOnParentShardException.java similarity index 51% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/BlockedOnParentShardException.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/internal/BlockedOnParentShardException.java index d3a88fab..88cca44e 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/BlockedOnParentShardException.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/internal/BlockedOnParentShardException.java @@ -1,21 +1,21 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.exceptions.internal; +package com.amazonaws.services.kinesis.clientlibrary.exceptions.internal; -import software.amazon.kinesis.exceptions.KinesisClientLibRetryableException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibRetryableException; /** * Used internally in the Amazon Kinesis Client Library. Indicates that we cannot start processing data for a shard diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/internal/KinesisClientLibIOException.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/internal/KinesisClientLibIOException.java new file mode 100644 index 00000000..951aedf9 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/exceptions/internal/KinesisClientLibIOException.java @@ -0,0 +1,44 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.exceptions.internal; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibRetryableException; + +/** + * Thrown when we encounter issues when reading/writing information (e.g. shard information from Kinesis may not be + * current/complete). + */ +public class KinesisClientLibIOException extends KinesisClientLibRetryableException { + private static final long serialVersionUID = 1L; + + /** + * Constructor. + * + * @param message Error message. + */ + public KinesisClientLibIOException(String message) { + super(message); + } + + /** + * Constructor. + * + * @param message Error message. + * @param e Cause. + */ + public KinesisClientLibIOException(String message, Exception e) { + super(message, e); + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/ICheckpoint.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/ICheckpoint.java new file mode 100644 index 00000000..d559bfc0 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/ICheckpoint.java @@ -0,0 +1,49 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.interfaces; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; + +/** + * Interface for checkpoint trackers. + */ +public interface ICheckpoint { + + /** + * Record a checkpoint for a shard (e.g. sequence and subsequence numbers of last record processed + * by application). Upon failover, record processing is resumed from this point. + * + * @param shardId Checkpoint is specified for this shard. + * @param checkpointValue Value of the checkpoint (e.g. Kinesis sequence number and subsequence number) + * @param concurrencyToken Used with conditional writes to prevent stale updates + * (e.g. if there was a fail over to a different record processor, we don't want to + * overwrite it's checkpoint) + * @throws KinesisClientLibException Thrown if we were unable to save the checkpoint + */ + void setCheckpoint(String shardId, ExtendedSequenceNumber checkpointValue, String concurrencyToken) + throws KinesisClientLibException; + + /** + * Get the current checkpoint stored for the specified shard. Useful for checking that the parent shard + * has been completely processed before we start processing the child shard. + * + * @param shardId Current checkpoint for this shard is fetched + * @return Current checkpoint for this shard, null if there is no record for this shard. 
+ * @throws KinesisClientLibException Thrown if we are unable to fetch the checkpoint + */ + ExtendedSequenceNumber getCheckpoint(String shardId) throws KinesisClientLibException; + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/IRecordProcessor.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/IRecordProcessor.java new file mode 100644 index 00000000..89cf092a --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/IRecordProcessor.java @@ -0,0 +1,62 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.interfaces; + +import java.util.List; + +import com.amazonaws.services.kinesis.model.Record; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason; + +/** + * The Amazon Kinesis Client Library will instantiate record processors to process data records fetched from Amazon + * Kinesis. + */ +public interface IRecordProcessor { + + /** + * Invoked by the Amazon Kinesis Client Library before data records are delivered to the RecordProcessor instance + * (via processRecords). + * + * @param shardId The record processor will be responsible for processing records of this shard. + */ + void initialize(String shardId); + + /** + * Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the + * application. 
+ * Upon fail over, the new instance will get records with sequence number > checkpoint position + * for each partition key. + * + * @param records Data records to be processed + * @param checkpointer RecordProcessor should use this instance to checkpoint their progress. + */ + void processRecords(List records, IRecordProcessorCheckpointer checkpointer); + + /** + * Invoked by the Amazon Kinesis Client Library to indicate it will no longer send data records to this + * RecordProcessor instance. The reason parameter indicates: + * a/ ShutdownReason.TERMINATE - The shard has been closed and there will not be any more records to process. The + * record processor should checkpoint (after doing any housekeeping) to acknowledge that it has successfully + * completed processing all records in this shard. + * b/ ShutdownReason.ZOMBIE: A fail over has occurred and a different record processor is (or will be) responsible + * for processing records. + * + * @param checkpointer RecordProcessor should use this instance to checkpoint. + * @param reason Reason for the shutdown (ShutdownReason.TERMINATE indicates the shard is closed and there are no + * more records to process. Shutdown.ZOMBIE indicates a fail over has occurred). + */ + void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason); + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/IRecordProcessorCheckpointer.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/IRecordProcessorCheckpointer.java new file mode 100644 index 00000000..f64d3c43 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/IRecordProcessorCheckpointer.java @@ -0,0 +1,123 @@ +/* + * Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.interfaces; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException; +import com.amazonaws.services.kinesis.model.Record; + +/** + * Used by RecordProcessors when they want to checkpoint their progress. + * The Amazon Kinesis Client Library will pass an object implementing this interface to RecordProcessors, so they can + * checkpoint their progress. + */ +public interface IRecordProcessorCheckpointer { + + /** + * This method will checkpoint the progress at the last data record that was delivered to the record processor. + * Upon fail over (after a successful checkpoint() call), the new/replacement RecordProcessor instance + * will receive data records whose sequenceNumber > checkpoint position (for each partition key). + * In steady state, applications should checkpoint periodically (e.g. once every 5 minutes). + * Calling this API too frequently can slow down the application (because it puts pressure on the underlying + * checkpoint storage layer). + * + * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @throws ShutdownException The record processor instance has been shutdown. 
Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @throws InvalidStateException Can't store checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @throws KinesisClientLibDependencyException Encountered an issue when storing the checkpoint. The application can + * backoff and retry. + */ + void checkpoint() + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; + + /** + * This method will checkpoint the progress at the provided record. This method is analogous to + * {@link #checkpoint()} but provides the ability to specify the record at which to + * checkpoint. + * + * @param record A record at which to checkpoint in this shard. Upon failover, + * the Kinesis Client Library will start fetching records after this record's sequence number. + * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @throws ShutdownException The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @throws InvalidStateException Can't store checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @throws KinesisClientLibDependencyException Encountered an issue when storing the checkpoint. The application can + * backoff and retry. + */ + void checkpoint(Record record) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; + + /** + * This method will checkpoint the progress at the provided sequenceNumber. 
This method is analogous to + * {@link #checkpoint()} but provides the ability to specify the sequence number at which to + * checkpoint. + * + * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover, + * the Kinesis Client Library will start fetching records after this sequence number. + * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @throws ShutdownException The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @throws InvalidStateException Can't store checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @throws KinesisClientLibDependencyException Encountered an issue when storing the checkpoint. The application can + * backoff and retry. + * @throws IllegalArgumentException The sequence number is invalid for one of the following reasons: + * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the + * greatest sequence number seen by the associated record processor. + * 2.) It is not a valid sequence number for a record in this shard. + */ + void checkpoint(String sequenceNumber) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException; + + + /** + * This method will checkpoint the progress at the provided sequenceNumber and subSequenceNumber, the latter for + * aggregated records produced with the Producer Library. This method is analogous to {@link #checkpoint()} + * but provides the ability to specify the sequence and subsequence numbers at which to checkpoint. 
+ * + * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover, the Kinesis + * Client Library will start fetching records after the given sequence and subsequence numbers. + * @param subSequenceNumber A subsequence number at which to checkpoint within this shard. Upon failover, the + * Kinesis Client Library will start fetching records after the given sequence and subsequence numbers. + * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. + * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. + * @throws ShutdownException The record processor instance has been shutdown. Another instance may have + * started processing some of these records already. + * The application should abort processing via this RecordProcessor instance. + * @throws InvalidStateException Can't store checkpoint. + * Unable to store the checkpoint in the DynamoDB table (e.g. table doesn't exist). + * @throws KinesisClientLibDependencyException Encountered an issue when storing the checkpoint. The application can + * backoff and retry. + * @throws IllegalArgumentException The sequence number is invalid for one of the following reasons: + * 1.) It appears to be out of range, i.e. it is smaller than the last check point value, or larger than the + * greatest sequence number seen by the associated record processor. + * 2.) It is not a valid sequence number for a record in this shard. 
+ */ + void checkpoint(String sequenceNumber, long subSequenceNumber) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException; +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/IRecordProcessorFactory.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/IRecordProcessorFactory.java new file mode 100644 index 00000000..b87fd7ec --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/IRecordProcessorFactory.java @@ -0,0 +1,30 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.interfaces; + +/** + * The Amazon Kinesis Client Library will use this to instantiate a record processor per shard. + * Clients may choose to create separate instantiations, or re-use instantiations. + */ +public interface IRecordProcessorFactory { + + /** + * Returns a record processor to be used for processing data records for a (assigned) shard. + * + * @return Returns a processor object. 
+ */ + IRecordProcessor createProcessor(); + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IRecordProcessor.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IRecordProcessor.java new file mode 100644 index 00000000..25087f03 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IRecordProcessor.java @@ -0,0 +1,55 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.interfaces.v2; + +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput; + +/** + * The Amazon Kinesis Client Library will instantiate record processors to process data records fetched from Amazon + * Kinesis. + */ +public interface IRecordProcessor { + + /** + * Invoked by the Amazon Kinesis Client Library before data records are delivered to the RecordProcessor instance + * (via processRecords). + * + * @param initializationInput Provides information related to initialization + */ + void initialize(InitializationInput initializationInput); + + /** + * Process data records. The Amazon Kinesis Client Library will invoke this method to deliver data records to the + * application. 
+ * Upon fail over, the new instance will get records with sequence number > checkpoint position + * for each partition key. + * + * @param processRecordsInput Provides the records to be processed as well as information and capabilities related + * to them (eg checkpointing). + */ + void processRecords(ProcessRecordsInput processRecordsInput); + + /** + * Invoked by the Amazon Kinesis Client Library to indicate it will no longer send data records to this + * RecordProcessor instance. + * + * @param shutdownInput Provides information and capabilities (eg checkpointing) related to shutdown of this record + * processor. + */ + void shutdown(ShutdownInput shutdownInput); + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IRecordProcessorFactory.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IRecordProcessorFactory.java new file mode 100644 index 00000000..08010ee7 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IRecordProcessorFactory.java @@ -0,0 +1,31 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.interfaces.v2; + + +/** + * The Amazon Kinesis Client Library will use this to instantiate a record processor per shard. + * Clients may choose to create separate instantiations, or re-use instantiations. 
+ */ +public interface IRecordProcessorFactory { + + /** + * Returns a record processor to be used for processing data records for a (assigned) shard. + * + * @return Returns a processor object. + */ + IRecordProcessor createProcessor(); + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IShutdownNotificationAware.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IShutdownNotificationAware.java new file mode 100644 index 00000000..82a18a0e --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/interfaces/v2/IShutdownNotificationAware.java @@ -0,0 +1,19 @@ +package com.amazonaws.services.kinesis.clientlibrary.interfaces.v2; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer; + +/** + * Allows a record processor to indicate it's aware of requested shutdowns, and handle the request. + */ +public interface IShutdownNotificationAware { + + /** + * Called when the worker has been requested to shutdown, and gives the record processor a chance to checkpoint. + * + * The record processor will still have shutdown called. + * + * @param checkpointer the checkpointer that can be used to save progress. 
+ */ + void shutdownRequested(IRecordProcessorCheckpointer checkpointer); + +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SentinelCheckpoint.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/SentinelCheckpoint.java similarity index 50% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SentinelCheckpoint.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/SentinelCheckpoint.java index 435f9cc2..d4442b82 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SentinelCheckpoint.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/SentinelCheckpoint.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.checkpoint; +package com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint; /** * Enumeration of the sentinel values of checkpoints. diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/BlockOnParentShardTask.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/BlockOnParentShardTask.java new file mode 100644 index 00000000..465dda46 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/BlockOnParentShardTask.java @@ -0,0 +1,108 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.BlockedOnParentShardException; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; + +/** + * Task to block until processing of all data records in the parent shard(s) is completed. + * We check if we have checkpoint(s) for the parent shard(s). + * If a checkpoint for a parent shard is found, we poll and wait until the checkpoint value is SHARD_END + * (application has checkpointed after processing all records in the shard). 
+ * If we don't find a checkpoint for the parent shard(s), we assume they have been trimmed and directly + * proceed with processing data from the shard. + */ +class BlockOnParentShardTask implements ITask { + + private static final Log LOG = LogFactory.getLog(BlockOnParentShardTask.class); + private final ShardInfo shardInfo; + private final ILeaseManager leaseManager; + + private final TaskType taskType = TaskType.BLOCK_ON_PARENT_SHARDS; + // Sleep for this duration if the parent shards have not completed processing, or we encounter an exception. + private final long parentShardPollIntervalMillis; + + /** + * @param shardInfo Information about the shard we are working on + * @param leaseManager Used to fetch the lease and checkpoint info for parent shards + * @param parentShardPollIntervalMillis Sleep time if the parent shard has not completed processing + */ + BlockOnParentShardTask(ShardInfo shardInfo, + ILeaseManager leaseManager, + long parentShardPollIntervalMillis) { + this.shardInfo = shardInfo; + this.leaseManager = leaseManager; + this.parentShardPollIntervalMillis = parentShardPollIntervalMillis; + } + + /* (non-Javadoc) + * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ITask#call() + */ + @Override + public TaskResult call() { + Exception exception = null; + + try { + boolean blockedOnParentShard = false; + for (String shardId : shardInfo.getParentShardIds()) { + KinesisClientLease lease = leaseManager.getLease(shardId); + if (lease != null) { + ExtendedSequenceNumber checkpoint = lease.getCheckpoint(); + if ((checkpoint == null) || (!checkpoint.equals(ExtendedSequenceNumber.SHARD_END))) { + LOG.debug("Shard " + shardId + " is not yet done. 
Its current checkpoint is " + checkpoint); + blockedOnParentShard = true; + exception = new BlockedOnParentShardException("Parent shard not yet done"); + break; + } else { + LOG.debug("Shard " + shardId + " has been completely processed."); + } + } else { + LOG.info("No lease found for shard " + shardId + ". Not blocking on completion of this shard."); + } + } + + if (!blockedOnParentShard) { + LOG.info("No need to block on parents " + shardInfo.getParentShardIds() + " of shard " + + shardInfo.getShardId()); + return new TaskResult(null); + } + } catch (Exception e) { + LOG.error("Caught exception when checking for parent shard checkpoint", e); + exception = e; + } + try { + Thread.sleep(parentShardPollIntervalMillis); + } catch (InterruptedException e) { + LOG.error("Sleep interrupted when waiting on parent shard(s) of " + shardInfo.getShardId(), e); + } + + return new TaskResult(exception); + } + + /* (non-Javadoc) + * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ITask#getTaskType() + */ + @Override + public TaskType getTaskType() { + return taskType; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/CheckpointValueComparator.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/CheckpointValueComparator.java new file mode 100644 index 00000000..0d3dd001 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/CheckpointValueComparator.java @@ -0,0 +1,126 @@ +/* + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.Comparator; + +import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint; + +/** + * + * Defines an ordering on checkpoint values, taking into account sentinel values: TRIM_HORIZON, LATEST, + * SHARD_END. + * + * SHARD_END -> infinity + * TRIM_HORIZON and LATEST -> less than sequence numbers + * sequence numbers -> BigInteger value of string + * + */ +class CheckpointValueComparator implements Comparator, Serializable { + + private static final long serialVersionUID = 1L; + + // Define TRIM_HORIZON and LATEST to be less than all sequence numbers + private static final BigInteger TRIM_HORIZON_BIG_INTEGER_VALUE = BigInteger.valueOf(-2); + private static final BigInteger LATEST_BIG_INTEGER_VALUE = BigInteger.valueOf(-1); + + /** + * Constructor. + */ + CheckpointValueComparator() { + + } + + /** + * Compares checkpoint values with these rules. + * + * SHARD_END is considered greatest + * TRIM_HORIZON and LATEST are considered less than sequence numbers + * sequence numbers are given their big integer value + * + * @param first The first element to be compared + * @param second The second element to be compared + * @return returns negative/0/positive if first is less than/equal to/greater than second + * @throws IllegalArgumentException If either input is a non-numeric non-sentinel value string. 
+ */ + @Override + public int compare(String first, String second) { + if (!isDigitsOrSentinelValue(first) || !isDigitsOrSentinelValue(second)) { + throw new IllegalArgumentException("Expected a sequence number or a sentinel checkpoint value but " + + "received: first=" + first + " and second=" + second); + } + // SHARD_END is the greatest + if (SentinelCheckpoint.SHARD_END.toString().equals(first) + && SentinelCheckpoint.SHARD_END.toString().equals(second)) { + return 0; + } else if (SentinelCheckpoint.SHARD_END.toString().equals(second)) { + return -1; + } else if (SentinelCheckpoint.SHARD_END.toString().equals(first)) { + return 1; + } + + // Compare other sentinel values and serial numbers after converting them to a big integer value + return bigIntegerValue(first).compareTo(bigIntegerValue(second)); + } + + /** + * Sequence numbers are converted, sentinels are given a value of -1. Note this method is only used after special + * logic associated with SHARD_END and the case of comparing two sentinel values has already passed, so we map + * sentinel values LATEST and TRIM_HORIZON to negative numbers so that they are considered less than sequence + * numbers. + * + * @param checkpointValue string to convert to big integer value + * @return a BigInteger value representation of the checkpointValue + */ + private static BigInteger bigIntegerValue(String checkpointValue) { + if (SequenceNumberValidator.isDigits(checkpointValue)) { + return new BigInteger(checkpointValue); + } else if (SentinelCheckpoint.LATEST.toString().equals(checkpointValue)) { + return LATEST_BIG_INTEGER_VALUE; + } else if (SentinelCheckpoint.TRIM_HORIZON.toString().equals(checkpointValue)) { + return TRIM_HORIZON_BIG_INTEGER_VALUE; + } else { + throw new IllegalArgumentException("Expected a string of digits, TRIM_HORIZON, or LATEST but received " + + checkpointValue); + } + } + + /** + * Checks if the string is all digits or one of the SentinelCheckpoint values. 
+ * + * @param string + * @return true if and only if the string is all digits or one of the SentinelCheckpoint values + */ + private static boolean isDigitsOrSentinelValue(String string) { + return SequenceNumberValidator.isDigits(string) || isSentinelValue(string); + } + + /** + * Checks if the string is a SentinelCheckpoint value. + * + * @param string + * @return true if and only if the string can be converted to a SentinelCheckpoint + */ + private static boolean isSentinelValue(String string) { + try { + SentinelCheckpoint.valueOf(string); + return true; + } catch (Exception e) { + return false; + } + } +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerStates.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ConsumerStates.java similarity index 71% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerStates.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ConsumerStates.java index ef0a8d75..2d92d7d7 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerStates.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ConsumerStates.java @@ -1,23 +1,4 @@ -/* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.lifecycle; - -import lombok.Getter; -import lombok.experimental.Accessors; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.retrieval.ThrottlingReporter; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; /** * Top level container for all the possible states a {@link ShardConsumer} can be in. The logic for creation of tasks, @@ -84,28 +65,87 @@ class ConsumerStates { SHUTDOWN_COMPLETE(new ShutdownCompleteState()); //@formatter:on - @Getter - @Accessors(fluent = true) private final ConsumerState consumerState; ShardConsumerState(ConsumerState consumerState) { this.consumerState = consumerState; } + + public ConsumerState getConsumerState() { + return consumerState; + } } + /** + * Represents a the current state of the consumer. This handles the creation of tasks for the consumer, and what to + * do when a transition occurs. + * + */ + interface ConsumerState { + /** + * Creates a new task for this state using the passed in consumer to build the task. If there is no task + * required for this state it may return a null value. {@link ConsumerState}'s are allowed to modify the + * consumer during the execution of this method. + * + * @param consumer + * the consumer to use build the task, or execute state. + * @return a valid task for this state or null if there is no task required. + */ + ITask createTask(ShardConsumer consumer); + + /** + * Provides the next state of the consumer upon success of the task return by + * {@link ConsumerState#createTask(ShardConsumer)}. + * + * @return the next state that the consumer should transition to, this may be the same object as the current + * state. + */ + ConsumerState successTransition(); + + /** + * Provides the next state of the consumer when a shutdown has been requested. The returned state is dependent + * on the current state, and the shutdown reason. 
+ * + * @param shutdownReason + * the reason that a shutdown was requested + * @return the next state that the consumer should transition to, this may be the same object as the current + * state. + */ + ConsumerState shutdownTransition(ShutdownReason shutdownReason); + + /** + * The type of task that {@link ConsumerState#createTask(ShardConsumer)} would return. This is always a valid state + * even if createTask would return a null value. + * + * @return the type of task that this state represents. + */ + TaskType getTaskType(); + + /** + * An enumeration represent the type of this state. Different consumer states may return the same + * {@link ShardConsumerState}. + * + * @return the type of consumer state this represents. + */ + ShardConsumerState getState(); + + boolean isTerminal(); + + } + /** * The initial state that any {@link ShardConsumer} should start in. */ - static final ConsumerState INITIAL_STATE = ShardConsumerState.WAITING_ON_PARENT_SHARDS.consumerState(); + static final ConsumerState INITIAL_STATE = ShardConsumerState.WAITING_ON_PARENT_SHARDS.getConsumerState(); private static ConsumerState shutdownStateFor(ShutdownReason reason) { switch (reason) { case REQUESTED: - return ShardConsumerState.SHUTDOWN_REQUESTED.consumerState(); - case SHARD_END: - case LEASE_LOST: - return ShardConsumerState.SHUTTING_DOWN.consumerState(); + return ShardConsumerState.SHUTDOWN_REQUESTED.getConsumerState(); + case TERMINATE: + case ZOMBIE: + return ShardConsumerState.SHUTTING_DOWN.getConsumerState(); default: throw new IllegalArgumentException("Unknown reason: " + reason); } @@ -133,29 +173,28 @@ class ConsumerStates { static class BlockedOnParentState implements ConsumerState { @Override - public ConsumerTask createTask(ShardConsumerArgument consumerArgument, ShardConsumer consumer, ProcessRecordsInput input) { - return new BlockOnParentShardTask(consumerArgument.shardInfo(), - consumerArgument.leaseRefresher(), - consumerArgument.parentShardPollIntervalMillis()); + 
public ITask createTask(ShardConsumer consumer) { + return new BlockOnParentShardTask(consumer.getShardInfo(), consumer.getLeaseManager(), + consumer.getParentShardPollIntervalMillis()); } @Override public ConsumerState successTransition() { - return ShardConsumerState.INITIALIZING.consumerState(); + return ShardConsumerState.INITIALIZING.getConsumerState(); } @Override public ConsumerState shutdownTransition(ShutdownReason shutdownReason) { - return ShardConsumerState.SHUTDOWN_COMPLETE.consumerState(); + return ShardConsumerState.SHUTDOWN_COMPLETE.getConsumerState(); } @Override - public TaskType taskType() { + public TaskType getTaskType() { return TaskType.BLOCK_ON_PARENT_SHARDS; } @Override - public ShardConsumerState state() { + public ShardConsumerState getState() { return ShardConsumerState.WAITING_ON_PARENT_SHARDS; } @@ -178,9 +217,9 @@ class ConsumerStates { *
*
{@link ShutdownReason#REQUESTED}
*
Transitions to the {@link ShutdownNotificationState}
- *
{@link ShutdownReason#LEASE_LOST}
+ *
{@link ShutdownReason#ZOMBIE}
*
Transitions to the {@link ShuttingDownState}
- *
{@link ShutdownReason#SHARD_END}
+ *
{@link ShutdownReason#TERMINATE}
*
*

* This reason should not occur, since terminate is triggered after reaching the end of a shard. Initialize never @@ -197,33 +236,29 @@ class ConsumerStates { static class InitializingState implements ConsumerState { @Override - public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) { - return new InitializeTask(argument.shardInfo(), - argument.shardRecordProcessor(), - argument.checkpoint(), - argument.recordProcessorCheckpointer(), argument.initialPositionInStream(), - argument.recordsPublisher(), - argument.taskBackoffTimeMillis(), - argument.metricsFactory()); + public ITask createTask(ShardConsumer consumer) { + return new InitializeTask(consumer.getShardInfo(), consumer.getRecordProcessor(), consumer.getCheckpoint(), + consumer.getRecordProcessorCheckpointer(), consumer.getDataFetcher(), + consumer.getTaskBackoffTimeMillis(), consumer.getStreamConfig()); } @Override public ConsumerState successTransition() { - return ShardConsumerState.PROCESSING.consumerState(); + return ShardConsumerState.PROCESSING.getConsumerState(); } @Override public ConsumerState shutdownTransition(ShutdownReason shutdownReason) { - return shutdownReason.shutdownState(); + return shutdownReason.getShutdownState(); } @Override - public TaskType taskType() { + public TaskType getTaskType() { return TaskType.INITIALIZE; } @Override - public ShardConsumerState state() { + public ShardConsumerState getState() { return ShardConsumerState.INITIALIZING; } @@ -242,13 +277,13 @@ class ConsumerStates { *

Doesn't actually transition, but instead returns the same state
*
Shutdown
*
At this point records are being retrieved, and processed. It's now possible for the consumer to reach the end - * of the shard triggering a {@link ShutdownReason#SHARD_END}. + * of the shard triggering a {@link ShutdownReason#TERMINATE}. *
*
{@link ShutdownReason#REQUESTED}
*
Transitions to the {@link ShutdownNotificationState}
- *
{@link ShutdownReason#LEASE_LOST}
+ *
{@link ShutdownReason#ZOMBIE}
*
Transitions to the {@link ShuttingDownState}
- *
{@link ShutdownReason#SHARD_END}
+ *
{@link ShutdownReason#TERMINATE}
*
Transitions to the {@link ShuttingDownState}
*
*
@@ -257,39 +292,29 @@ class ConsumerStates { static class ProcessingState implements ConsumerState { @Override - public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) { - ThrottlingReporter throttlingReporter = new ThrottlingReporter(5, argument.shardInfo().shardId()); - return new ProcessTask(argument.shardInfo(), - argument.shardRecordProcessor(), - argument.recordProcessorCheckpointer(), - argument.taskBackoffTimeMillis(), - argument.skipShardSyncAtWorkerInitializationIfLeasesExist(), - argument.shardDetector(), - throttlingReporter, - input, - argument.shouldCallProcessRecordsEvenForEmptyRecordList(), - argument.idleTimeInMilliseconds(), - argument.aggregatorUtil(), argument.metricsFactory() - ); + public ITask createTask(ShardConsumer consumer) { + return new ProcessTask(consumer.getShardInfo(), consumer.getStreamConfig(), consumer.getRecordProcessor(), + consumer.getRecordProcessorCheckpointer(), consumer.getDataFetcher(), + consumer.getTaskBackoffTimeMillis(), consumer.isSkipShardSyncAtWorkerInitializationIfLeasesExist()); } @Override public ConsumerState successTransition() { - return ShardConsumerState.PROCESSING.consumerState(); + return ShardConsumerState.PROCESSING.getConsumerState(); } @Override public ConsumerState shutdownTransition(ShutdownReason shutdownReason) { - return shutdownReason.shutdownState(); + return shutdownReason.getShutdownState(); } @Override - public TaskType taskType() { + public TaskType getTaskType() { return TaskType.PROCESS; } @Override - public ShardConsumerState state() { + public ShardConsumerState getState() { return ShardConsumerState.PROCESSING; } @@ -297,11 +322,6 @@ class ConsumerStates { public boolean isTerminal() { return false; } - - @Override - public boolean requiresDataAvailability() { - return true; - } } static final ConsumerState SHUTDOWN_REQUEST_COMPLETION_STATE = new ShutdownNotificationCompletionState(); @@ -322,9 +342,9 @@ class ConsumerStates { *
{@link ShutdownReason#REQUESTED}
*
Remains in the {@link ShardConsumerState#SHUTDOWN_REQUESTED}, but the state implementation changes to * {@link ShutdownNotificationCompletionState}
- *
{@link ShutdownReason#LEASE_LOST}
+ *
{@link ShutdownReason#ZOMBIE}
*
Transitions to the {@link ShuttingDownState}
- *
{@link ShutdownReason#SHARD_END}
+ *
{@link ShutdownReason#TERMINATE}
*
Transitions to the {@link ShuttingDownState}
*
* @@ -333,12 +353,9 @@ class ConsumerStates { static class ShutdownNotificationState implements ConsumerState { @Override - public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) { - // TODO: notify shutdownrequested - return new ShutdownNotificationTask(argument.shardRecordProcessor(), - argument.recordProcessorCheckpointer(), - consumer.shutdownNotification(), - argument.shardInfo()); + public ITask createTask(ShardConsumer consumer) { + return new ShutdownNotificationTask(consumer.getRecordProcessor(), consumer.getRecordProcessorCheckpointer(), + consumer.getShutdownNotification(), consumer.getShardInfo()); } @Override @@ -351,16 +368,16 @@ class ConsumerStates { if (shutdownReason == ShutdownReason.REQUESTED) { return SHUTDOWN_REQUEST_COMPLETION_STATE; } - return shutdownReason.shutdownState(); + return shutdownReason.getShutdownState(); } @Override - public TaskType taskType() { + public TaskType getTaskType() { return TaskType.SHUTDOWN_NOTIFICATION; } @Override - public ShardConsumerState state() { + public ShardConsumerState getState() { return ShardConsumerState.SHUTDOWN_REQUESTED; } @@ -368,7 +385,6 @@ class ConsumerStates { public boolean isTerminal() { return false; } - } /** @@ -394,9 +410,9 @@ class ConsumerStates { *
{@link ShutdownReason#REQUESTED}
*
Remains in the {@link ShardConsumerState#SHUTDOWN_REQUESTED}, and the state implementation remains * {@link ShutdownNotificationCompletionState}
- *
{@link ShutdownReason#LEASE_LOST}
+ *
{@link ShutdownReason#ZOMBIE}
*
Transitions to the {@link ShuttingDownState}
- *
{@link ShutdownReason#SHARD_END}
+ *
{@link ShutdownReason#TERMINATE}
*
Transitions to the {@link ShuttingDownState}
* * @@ -405,7 +421,7 @@ class ConsumerStates { static class ShutdownNotificationCompletionState implements ConsumerState { @Override - public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) { + public ITask createTask(ShardConsumer consumer) { return null; } @@ -417,18 +433,18 @@ class ConsumerStates { @Override public ConsumerState shutdownTransition(ShutdownReason shutdownReason) { if (shutdownReason != ShutdownReason.REQUESTED) { - return shutdownReason.shutdownState(); + return shutdownReason.getShutdownState(); } return this; } @Override - public TaskType taskType() { + public TaskType getTaskType() { return TaskType.SHUTDOWN_NOTIFICATION; } @Override - public ShardConsumerState state() { + public ShardConsumerState getState() { return ShardConsumerState.SHUTDOWN_REQUESTED; } @@ -436,11 +452,6 @@ class ConsumerStates { public boolean isTerminal() { return false; } - - @Override - public boolean requiresAwake() { - return true; - } } /** @@ -471,9 +482,9 @@ class ConsumerStates { * Transitions to {@link ShutdownCompleteState} *

* - *
{@link ShutdownReason#LEASE_LOST}
+ *
{@link ShutdownReason#ZOMBIE}
*
Transitions to the {@link ShutdownCompleteState}
- *
{@link ShutdownReason#SHARD_END}
+ *
{@link ShutdownReason#TERMINATE}
*
Transitions to the {@link ShutdownCompleteState}
* * @@ -482,40 +493,32 @@ class ConsumerStates { static class ShuttingDownState implements ConsumerState { @Override - public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) { - // TODO: set shutdown reason - return new ShutdownTask(argument.shardInfo(), - argument.shardDetector(), - argument.shardRecordProcessor(), - argument.recordProcessorCheckpointer(), - consumer.shutdownReason(), - argument.initialPositionInStream(), - argument.cleanupLeasesOfCompletedShards(), - argument.ignoreUnexpectedChildShards(), - argument.leaseRefresher(), - argument.taskBackoffTimeMillis(), - argument.recordsPublisher(), - argument.hierarchicalShardSyncer(), - argument.metricsFactory()); + public ITask createTask(ShardConsumer consumer) { + return new ShutdownTask(consumer.getShardInfo(), consumer.getRecordProcessor(), + consumer.getRecordProcessorCheckpointer(), consumer.getShutdownReason(), + consumer.getStreamConfig().getStreamProxy(), + consumer.getStreamConfig().getInitialPositionInStream(), + consumer.isCleanupLeasesOfCompletedShards(), consumer.getLeaseManager(), + consumer.getTaskBackoffTimeMillis()); } @Override public ConsumerState successTransition() { - return ShardConsumerState.SHUTDOWN_COMPLETE.consumerState(); + return ShardConsumerState.SHUTDOWN_COMPLETE.getConsumerState(); } @Override public ConsumerState shutdownTransition(ShutdownReason shutdownReason) { - return ShardConsumerState.SHUTDOWN_COMPLETE.consumerState(); + return ShardConsumerState.SHUTDOWN_COMPLETE.getConsumerState(); } @Override - public TaskType taskType() { + public TaskType getTaskType() { return TaskType.SHUTDOWN; } @Override - public ShardConsumerState state() { + public ShardConsumerState getState() { return ShardConsumerState.SHUTTING_DOWN; } @@ -523,7 +526,6 @@ class ConsumerStates { public boolean isTerminal() { return false; } - } /** @@ -553,9 +555,9 @@ class ConsumerStates { * Remains in {@link ShutdownCompleteState} *

* - *
{@link ShutdownReason#LEASE_LOST}
+ *
{@link ShutdownReason#ZOMBIE}
*
Remains in {@link ShutdownCompleteState}
- *
{@link ShutdownReason#SHARD_END}
+ *
{@link ShutdownReason#TERMINATE}
*
Remains in {@link ShutdownCompleteState}
* * @@ -564,9 +566,9 @@ class ConsumerStates { static class ShutdownCompleteState implements ConsumerState { @Override - public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) { - if (consumer.shutdownNotification() != null) { - consumer.shutdownNotification().shutdownComplete(); + public ITask createTask(ShardConsumer consumer) { + if (consumer.getShutdownNotification() != null) { + consumer.getShutdownNotification().shutdownComplete(); } return null; } @@ -582,12 +584,12 @@ class ConsumerStates { } @Override - public TaskType taskType() { + public TaskType getTaskType() { return TaskType.SHUTDOWN_COMPLETE; } @Override - public ShardConsumerState state() { + public ShardConsumerState getState() { return ShardConsumerState.SHUTDOWN_COMPLETE; } @@ -595,7 +597,6 @@ class ConsumerStates { public boolean isTerminal() { return true; } - } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ITask.java similarity index 81% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ITask.java index 0fcf07be..d19166a1 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ITask.java @@ -1,5 +1,5 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Amazon Software License (the "License"). * You may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ * express or implied. See the License for the specific language governing * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.lifecycle; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; import java.util.concurrent.Callable; @@ -20,7 +20,7 @@ import java.util.concurrent.Callable; * Interface for shard processing tasks. * A task may execute an application callback (e.g. initialize, process, shutdown). */ -public interface ConsumerTask extends Callable { +interface ITask extends Callable { /** * Perform task logic. @@ -33,6 +33,6 @@ public interface ConsumerTask extends Callable { /** * @return TaskType */ - TaskType taskType(); + TaskType getTaskType(); } diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/InitialPositionInStream.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/InitialPositionInStream.java new file mode 100644 index 00000000..94f9b455 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/InitialPositionInStream.java @@ -0,0 +1,36 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +/** + * Used to specify the position in the stream where a new application should start from. + * This is used during initial application bootstrap (when a checkpoint doesn't exist for a shard or its parents). + */ +public enum InitialPositionInStream { + /** + * Start after the most recent data record (fetch new data). 
+ */ + LATEST, + + /** + * Start from the oldest available data record. + */ + TRIM_HORIZON, + + /** + * Start from the record at or after the specified server-side timestamp. + */ + AT_TIMESTAMP +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStreamExtended.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/InitialPositionInStreamExtended.java similarity index 69% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStreamExtended.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/InitialPositionInStreamExtended.java index 30c5e935..6a9948c7 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStreamExtended.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/InitialPositionInStreamExtended.java @@ -1,20 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.common; - -import lombok.ToString; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; import java.util.Date; @@ -22,8 +20,7 @@ import java.util.Date; * Class that houses the entities needed to specify the position in the stream from where a new application should * start. */ -@ToString -public class InitialPositionInStreamExtended { +class InitialPositionInStreamExtended { private final InitialPositionInStream position; private final Date timestamp; @@ -47,7 +44,7 @@ public class InitialPositionInStreamExtended { * * @return The initial position in stream. */ - public InitialPositionInStream getInitialPositionInStream() { + protected InitialPositionInStream getInitialPositionInStream() { return this.position; } @@ -57,11 +54,11 @@ public class InitialPositionInStreamExtended { * * @return The timestamp from where we need to start the application. 
*/ - public Date getTimestamp() { + protected Date getTimestamp() { return this.timestamp; } - public static InitialPositionInStreamExtended newInitialPosition(final InitialPositionInStream position) { + protected static InitialPositionInStreamExtended newInitialPosition(final InitialPositionInStream position) { switch (position) { case LATEST: return new InitialPositionInStreamExtended(InitialPositionInStream.LATEST, null); @@ -72,7 +69,7 @@ public class InitialPositionInStreamExtended { } } - public static InitialPositionInStreamExtended newInitialPositionAtTimestamp(final Date timestamp) { + protected static InitialPositionInStreamExtended newInitialPositionAtTimestamp(final Date timestamp) { if (timestamp == null) { throw new IllegalArgumentException("Timestamp must be specified for InitialPosition AT_TIMESTAMP"); } diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/InitializeTask.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/InitializeTask.java new file mode 100644 index 00000000..262b98c7 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/InitializeTask.java @@ -0,0 +1,129 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; + +/** + * Task for initializing shard position and invoking the RecordProcessor initialize() API. + */ +class InitializeTask implements ITask { + + private static final Log LOG = LogFactory.getLog(InitializeTask.class); + + private static final String RECORD_PROCESSOR_INITIALIZE_METRIC = "RecordProcessor.initialize"; + + private final ShardInfo shardInfo; + private final IRecordProcessor recordProcessor; + private final KinesisDataFetcher dataFetcher; + private final TaskType taskType = TaskType.INITIALIZE; + private final ICheckpoint checkpoint; + private final RecordProcessorCheckpointer recordProcessorCheckpointer; + // Back off for this interval if we encounter a problem (exception) + private final long backoffTimeMillis; + private final StreamConfig streamConfig; + + /** + * Constructor. 
+ */ + InitializeTask(ShardInfo shardInfo, + IRecordProcessor recordProcessor, + ICheckpoint checkpoint, + RecordProcessorCheckpointer recordProcessorCheckpointer, + KinesisDataFetcher dataFetcher, + long backoffTimeMillis, + StreamConfig streamConfig) { + this.shardInfo = shardInfo; + this.recordProcessor = recordProcessor; + this.checkpoint = checkpoint; + this.recordProcessorCheckpointer = recordProcessorCheckpointer; + this.dataFetcher = dataFetcher; + this.backoffTimeMillis = backoffTimeMillis; + this.streamConfig = streamConfig; + } + + /* + * Initializes the data fetcher (position in shard) and invokes the RecordProcessor initialize() API. + * (non-Javadoc) + * + * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ITask#call() + */ + @Override + public TaskResult call() { + boolean applicationException = false; + Exception exception = null; + + try { + LOG.debug("Initializing ShardId " + shardInfo.getShardId()); + ExtendedSequenceNumber initialCheckpoint = checkpoint.getCheckpoint(shardInfo.getShardId()); + + dataFetcher.initialize(initialCheckpoint.getSequenceNumber(), streamConfig.getInitialPositionInStream()); + recordProcessorCheckpointer.setLargestPermittedCheckpointValue(initialCheckpoint); + recordProcessorCheckpointer.setInitialCheckpointValue(initialCheckpoint); + + LOG.debug("Calling the record processor initialize()."); + final InitializationInput initializationInput = new InitializationInput() + .withShardId(shardInfo.getShardId()) + .withExtendedSequenceNumber(initialCheckpoint); + final long recordProcessorStartTimeMillis = System.currentTimeMillis(); + try { + recordProcessor.initialize(initializationInput); + LOG.debug("Record processor initialize() completed."); + } catch (Exception e) { + applicationException = true; + throw e; + } finally { + MetricsHelper.addLatency(RECORD_PROCESSOR_INITIALIZE_METRIC, recordProcessorStartTimeMillis, + MetricsLevel.SUMMARY); + } + + return new TaskResult(null); + } catch (Exception e) { + if 
(applicationException) { + LOG.error("Application initialize() threw exception: ", e); + } else { + LOG.error("Caught exception: ", e); + } + exception = e; + // backoff if we encounter an exception. + try { + Thread.sleep(this.backoffTimeMillis); + } catch (InterruptedException ie) { + LOG.debug("Interrupted sleep", ie); + } + } + + return new TaskResult(exception); + } + + /* + * (non-Javadoc) + * + * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ITask#getTaskType() + */ + @Override + public TaskType getTaskType() { + return taskType; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java new file mode 100644 index 00000000..b8218968 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfiguration.java @@ -0,0 +1,1061 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.Date; +import java.util.Set; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.regions.RegionUtils; +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; +import com.google.common.collect.ImmutableSet; + +/** + * Configuration for the Amazon Kinesis Client Library. + */ +public class KinesisClientLibConfiguration { + + private static final long EPSILON_MS = 25; + + /** + * The location in the shard from which the KinesisClientLibrary will start fetching records from + * when the application starts for the first time and there is no checkpoint for the shard. + */ + public static final InitialPositionInStream DEFAULT_INITIAL_POSITION_IN_STREAM = InitialPositionInStream.LATEST; + + /** + * Fail over time in milliseconds. A worker which does not renew it's lease within this time interval + * will be regarded as having problems and it's shards will be assigned to other workers. + * For applications that have a large number of shards, this may be set to a higher number to reduce + * the number of DynamoDB IOPS required for tracking leases. + */ + public static final long DEFAULT_FAILOVER_TIME_MILLIS = 10000L; + + /** + * Max records to fetch from Kinesis in a single GetRecords call. + */ + public static final int DEFAULT_MAX_RECORDS = 10000; + + /** + * Idle time between record reads in milliseconds. + */ + public static final long DEFAULT_IDLETIME_BETWEEN_READS_MILLIS = 1000L; + + /** + * Don't call processRecords() on the record processor for empty record lists. + */ + public static final boolean DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST = false; + + /** + * Interval in milliseconds between polling to check for parent shard completion. 
+ * Polling frequently will take up more DynamoDB IOPS (when there are leases for shards waiting on + * completion of parent shards). + */ + public static final long DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS = 10000L; + + /** + * Shard sync interval in milliseconds - e.g. wait for this long between shard sync tasks. + */ + public static final long DEFAULT_SHARD_SYNC_INTERVAL_MILLIS = 60000L; + + /** + * Cleanup leases upon shards completion (don't wait until they expire in Kinesis). + * Keeping leases takes some tracking/resources (e.g. they need to be renewed, assigned), so by default we try + * to delete the ones we don't need any longer. + */ + public static final boolean DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION = true; + + /** + * Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures). + */ + public static final long DEFAULT_TASK_BACKOFF_TIME_MILLIS = 500L; + + /** + * Buffer metrics for at most this long before publishing to CloudWatch. + */ + public static final long DEFAULT_METRICS_BUFFER_TIME_MILLIS = 10000L; + + /** + * Buffer at most this many metrics before publishing to CloudWatch. + */ + public static final int DEFAULT_METRICS_MAX_QUEUE_SIZE = 10000; + + /** + * Metrics level for which to enable CloudWatch metrics. + */ + public static final MetricsLevel DEFAULT_METRICS_LEVEL = MetricsLevel.DETAILED; + + /** + * Metrics dimensions that always will be enabled regardless of the config provided by user. + */ + public static final Set METRICS_ALWAYS_ENABLED_DIMENSIONS = ImmutableSet.of( + MetricsHelper.OPERATION_DIMENSION_NAME); + + /** + * Allowed dimensions for CloudWatch metrics. By default, worker ID dimension will be disabled. + */ + public static final Set DEFAULT_METRICS_ENABLED_DIMENSIONS = ImmutableSet.builder().addAll( + METRICS_ALWAYS_ENABLED_DIMENSIONS).add(MetricsHelper.SHARD_ID_DIMENSION_NAME).build(); + + /** + * Metrics dimensions that signify all possible dimensions. 
+ */ + public static final Set METRICS_DIMENSIONS_ALL = ImmutableSet.of(IMetricsScope.METRICS_DIMENSIONS_ALL); + + /** + * User agent set when Amazon Kinesis Client Library makes AWS requests. + */ + public static final String KINESIS_CLIENT_LIB_USER_AGENT = "amazon-kinesis-client-library-java-1.7.4"; + + /** + * KCL will validate client provided sequence numbers with a call to Amazon Kinesis before checkpointing for calls + * to {@link RecordProcessorCheckpointer#checkpoint(String)} by default. + */ + public static final boolean DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING = true; + + /** + * The max number of leases (shards) this worker should process. + * This can be useful to avoid overloading (and thrashing) a worker when a host has resource constraints + * or during deployment. + * NOTE: Setting this to a low value can cause data loss if workers are not able to pick up all shards in the + * stream due to the max limit. + */ + public static final int DEFAULT_MAX_LEASES_FOR_WORKER = Integer.MAX_VALUE; + + /** + * Max leases to steal from another worker at one time (for load balancing). + * Setting this to a higher number can allow for faster load convergence (e.g. during deployments, cold starts), + * but can cause higher churn in the system. + */ + public static final int DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME = 1; + + /** + * The Amazon DynamoDB table used for tracking leases will be provisioned with this read capacity. + */ + public static final int DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY = 10; + + /** + * The Amazon DynamoDB table used for tracking leases will be provisioned with this write capacity. + */ + public static final int DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY = 10; + + /* + * The Worker will skip shard sync during initialization if there are one or more leases in the lease table. + * This assumes that the shards and leases are in-sync. + * This enables customers to choose faster startup times (e.g. 
during incremental deployments of an application). + */ + public static final boolean DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST = false; + + /** + * Default Shard prioritization strategy. + */ + public static final ShardPrioritization DEFAULT_SHARD_PRIORITIZATION = new NoOpShardPrioritization(); + + private String applicationName; + private String tableName; + private String streamName; + private String kinesisEndpoint; + private String dynamoDBEndpoint; + private InitialPositionInStream initialPositionInStream; + private AWSCredentialsProvider kinesisCredentialsProvider; + private AWSCredentialsProvider dynamoDBCredentialsProvider; + private AWSCredentialsProvider cloudWatchCredentialsProvider; + private long failoverTimeMillis; + private String workerIdentifier; + private long shardSyncIntervalMillis; + private int maxRecords; + private long idleTimeBetweenReadsInMillis; + // Enables applications flush/checkpoint (if they have some data "in progress", but don't get new data for while) + private boolean callProcessRecordsEvenForEmptyRecordList; + private long parentShardPollIntervalMillis; + private boolean cleanupLeasesUponShardCompletion; + private ClientConfiguration kinesisClientConfig; + private ClientConfiguration dynamoDBClientConfig; + private ClientConfiguration cloudWatchClientConfig; + private long taskBackoffTimeMillis; + private long metricsBufferTimeMillis; + private int metricsMaxQueueSize; + private MetricsLevel metricsLevel; + private Set metricsEnabledDimensions; + private boolean validateSequenceNumberBeforeCheckpointing; + private String regionName; + private int maxLeasesForWorker; + private int maxLeasesToStealAtOneTime; + private int initialLeaseTableReadCapacity; + private int initialLeaseTableWriteCapacity; + private InitialPositionInStreamExtended initialPositionInStreamExtended; + // This is useful for optimizing deployments to large fleets working on a stable stream. 
+ private boolean skipShardSyncAtWorkerInitializationIfLeasesExist; + private ShardPrioritization shardPrioritization; + + /** + * Constructor. + * + * @param applicationName Name of the Amazon Kinesis application. + * By default the application name is included in the user agent string used to make AWS requests. This + * can assist with troubleshooting (e.g. distinguish requests made by separate applications). + * @param streamName Name of the Kinesis stream + * @param credentialsProvider Provides credentials used to sign AWS requests + * @param workerId Used to distinguish different workers/processes of a Kinesis application + */ + public KinesisClientLibConfiguration(String applicationName, + String streamName, + AWSCredentialsProvider credentialsProvider, + String workerId) { + this(applicationName, streamName, credentialsProvider, credentialsProvider, credentialsProvider, workerId); + } + + /** + * Constructor. + * + * @param applicationName Name of the Amazon Kinesis application + * By default the application name is included in the user agent string used to make AWS requests. This + * can assist with troubleshooting (e.g. distinguish requests made by separate applications). 
+ * @param streamName Name of the Kinesis stream + * @param kinesisCredentialsProvider Provides credentials used to access Kinesis + * @param dynamoDBCredentialsProvider Provides credentials used to access DynamoDB + * @param cloudWatchCredentialsProvider Provides credentials used to access CloudWatch + * @param workerId Used to distinguish different workers/processes of a Kinesis application + */ + public KinesisClientLibConfiguration(String applicationName, + String streamName, + AWSCredentialsProvider kinesisCredentialsProvider, + AWSCredentialsProvider dynamoDBCredentialsProvider, + AWSCredentialsProvider cloudWatchCredentialsProvider, + String workerId) { + this(applicationName, streamName, null, null, DEFAULT_INITIAL_POSITION_IN_STREAM, kinesisCredentialsProvider, + dynamoDBCredentialsProvider, cloudWatchCredentialsProvider, DEFAULT_FAILOVER_TIME_MILLIS, workerId, + DEFAULT_MAX_RECORDS, DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, + DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS, + DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, + new ClientConfiguration(), new ClientConfiguration(), new ClientConfiguration(), + DEFAULT_TASK_BACKOFF_TIME_MILLIS, DEFAULT_METRICS_BUFFER_TIME_MILLIS, DEFAULT_METRICS_MAX_QUEUE_SIZE, + DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, null); + } + + /** + * @param applicationName Name of the Kinesis application + * By default the application name is included in the user agent string used to make AWS requests. This + * can assist with troubleshooting (e.g. distinguish requests made by separate applications). + * @param streamName Name of the Kinesis stream + * @param kinesisEndpoint Kinesis endpoint + * @param initialPositionInStream One of LATEST or TRIM_HORIZON. The KinesisClientLibrary will start fetching + * records from that location in the stream when an application starts up for the first time and there + * are no checkpoints. 
If there are checkpoints, then we start from the checkpoint position. + * @param kinesisCredentialsProvider Provides credentials used to access Kinesis + * @param dynamoDBCredentialsProvider Provides credentials used to access DynamoDB + * @param cloudWatchCredentialsProvider Provides credentials used to access CloudWatch + * @param failoverTimeMillis Lease duration (leases not renewed within this period will be claimed by others) + * @param workerId Used to distinguish different workers/processes of a Kinesis application + * @param maxRecords Max records to read per Kinesis getRecords() call + * @param idleTimeBetweenReadsInMillis Idle time between calls to fetch data from Kinesis + * @param callProcessRecordsEvenForEmptyRecordList Call the IRecordProcessor::processRecords() API even if + * GetRecords returned an empty record list. + * @param parentShardPollIntervalMillis Wait for this long between polls to check if parent shards are done + * @param shardSyncIntervalMillis Time between tasks to sync leases and Kinesis shards + * @param cleanupTerminatedShardsBeforeExpiry Clean up shards we've finished processing (don't wait for expiration + * in Kinesis) + * @param kinesisClientConfig Client Configuration used by Kinesis client + * @param dynamoDBClientConfig Client Configuration used by DynamoDB client + * @param cloudWatchClientConfig Client Configuration used by CloudWatch client + * @param taskBackoffTimeMillis Backoff period when tasks encounter an exception + * @param metricsBufferTimeMillis Metrics are buffered for at most this long before publishing to CloudWatch + * @param metricsMaxQueueSize Max number of metrics to buffer before publishing to CloudWatch + * @param validateSequenceNumberBeforeCheckpointing whether KCL should validate client provided sequence numbers + * with a call to Amazon Kinesis before checkpointing for calls to + * {@link RecordProcessorCheckpointer#checkpoint(String)} + * @param regionName The region name for the service + */ + // 
CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 26 LINES
    // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 26 LINES
    public KinesisClientLibConfiguration(String applicationName,
            String streamName,
            String kinesisEndpoint,
            InitialPositionInStream initialPositionInStream,
            AWSCredentialsProvider kinesisCredentialsProvider,
            AWSCredentialsProvider dynamoDBCredentialsProvider,
            AWSCredentialsProvider cloudWatchCredentialsProvider,
            long failoverTimeMillis,
            String workerId,
            int maxRecords,
            long idleTimeBetweenReadsInMillis,
            boolean callProcessRecordsEvenForEmptyRecordList,
            long parentShardPollIntervalMillis,
            long shardSyncIntervalMillis,
            boolean cleanupTerminatedShardsBeforeExpiry,
            ClientConfiguration kinesisClientConfig,
            ClientConfiguration dynamoDBClientConfig,
            ClientConfiguration cloudWatchClientConfig,
            long taskBackoffTimeMillis,
            long metricsBufferTimeMillis,
            int metricsMaxQueueSize,
            boolean validateSequenceNumberBeforeCheckpointing,
            String regionName) {
        // Delegate to the primary constructor with a null DynamoDB endpoint (the SDK default endpoint is used).
        this(applicationName, streamName, kinesisEndpoint, null, initialPositionInStream, kinesisCredentialsProvider,
                dynamoDBCredentialsProvider, cloudWatchCredentialsProvider, failoverTimeMillis, workerId,
                maxRecords, idleTimeBetweenReadsInMillis,
                callProcessRecordsEvenForEmptyRecordList, parentShardPollIntervalMillis,
                shardSyncIntervalMillis, cleanupTerminatedShardsBeforeExpiry,
                kinesisClientConfig, dynamoDBClientConfig, cloudWatchClientConfig,
                taskBackoffTimeMillis, metricsBufferTimeMillis, metricsMaxQueueSize,
                validateSequenceNumberBeforeCheckpointing, regionName);
    }

    /**
     * Primary constructor: validates all numeric settings, populates every field, and applies library
     * defaults for metrics level/dimensions, lease table capacities and shard prioritization.
     *
     * @param applicationName Name of the Kinesis application; also used as the DynamoDB lease table name and,
     *        by default, in the user agent string for AWS requests
     * @param streamName Name of the Kinesis stream
     * @param kinesisEndpoint Kinesis endpoint
     * @param dynamoDBEndpoint DynamoDB endpoint
     * @param initialPositionInStream One of LATEST or TRIM_HORIZON. The KinesisClientLibrary will start fetching
     *        records from that location in the stream when an application starts up for the first time and there
     *        are no checkpoints. If there are checkpoints, then we start from the checkpoint position.
     * @param kinesisCredentialsProvider Provides credentials used to access Kinesis
     * @param dynamoDBCredentialsProvider Provides credentials used to access DynamoDB
     * @param cloudWatchCredentialsProvider Provides credentials used to access CloudWatch
     * @param failoverTimeMillis Lease duration (leases not renewed within this period will be claimed by others)
     * @param workerId Used to distinguish different workers/processes of a Kinesis application
     * @param maxRecords Max records to read per Kinesis getRecords() call
     * @param idleTimeBetweenReadsInMillis Idle time between calls to fetch data from Kinesis
     * @param callProcessRecordsEvenForEmptyRecordList Call the IRecordProcessor::processRecords() API even if
     *        GetRecords returned an empty record list
     * @param parentShardPollIntervalMillis Wait for this long between polls to check if parent shards are done
     * @param shardSyncIntervalMillis Time between tasks to sync leases and Kinesis shards
     * @param cleanupTerminatedShardsBeforeExpiry Clean up shards we've finished processing (don't wait for
     *        expiration in Kinesis)
     * @param kinesisClientConfig Client Configuration used by Kinesis client
     * @param dynamoDBClientConfig Client Configuration used by DynamoDB client
     * @param cloudWatchClientConfig Client Configuration used by CloudWatch client
     * @param taskBackoffTimeMillis Backoff period when tasks encounter an exception
     * @param metricsBufferTimeMillis Metrics are buffered for at most this long before publishing to CloudWatch
     * @param metricsMaxQueueSize Max number of metrics to buffer before publishing to CloudWatch
     * @param validateSequenceNumberBeforeCheckpointing whether KCL should validate client provided sequence numbers
     *        with a call to Amazon Kinesis before checkpointing for calls to
     *        {@link RecordProcessorCheckpointer#checkpoint(String)}
     * @param regionName The region name for the service
     */
    // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 26 LINES
    // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 26 LINES
    public KinesisClientLibConfiguration(String applicationName,
            String streamName,
            String kinesisEndpoint,
            String dynamoDBEndpoint,
            InitialPositionInStream initialPositionInStream,
            AWSCredentialsProvider kinesisCredentialsProvider,
            AWSCredentialsProvider dynamoDBCredentialsProvider,
            AWSCredentialsProvider cloudWatchCredentialsProvider,
            long failoverTimeMillis,
            String workerId,
            int maxRecords,
            long idleTimeBetweenReadsInMillis,
            boolean callProcessRecordsEvenForEmptyRecordList,
            long parentShardPollIntervalMillis,
            long shardSyncIntervalMillis,
            boolean cleanupTerminatedShardsBeforeExpiry,
            ClientConfiguration kinesisClientConfig,
            ClientConfiguration dynamoDBClientConfig,
            ClientConfiguration cloudWatchClientConfig,
            long taskBackoffTimeMillis,
            long metricsBufferTimeMillis,
            int metricsMaxQueueSize,
            boolean validateSequenceNumberBeforeCheckpointing,
            String regionName) {
        // All time/size settings must be strictly positive; fail fast on misconfiguration.
        checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis);
        checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis);
        checkIsValuePositive("ParentShardPollIntervalMillis", parentShardPollIntervalMillis);
        checkIsValuePositive("ShardSyncIntervalMillis", shardSyncIntervalMillis);
        checkIsValuePositive("MaxRecords", (long) maxRecords);
        checkIsValuePositive("TaskBackoffTimeMillis", taskBackoffTimeMillis);
        // Message key fixed from "MetricsBufferTimeMills" for consistency with withMetricsBufferTimeMillis().
        checkIsValuePositive("MetricsBufferTimeMillis", metricsBufferTimeMillis);
        checkIsValuePositive("MetricsMaxQueueSize", (long) metricsMaxQueueSize);
        checkIsRegionNameValid(regionName);
        this.applicationName = applicationName;
        // The lease table name defaults to the application name.
        this.tableName = applicationName;
        this.streamName = streamName;
        this.kinesisEndpoint = kinesisEndpoint;
        this.dynamoDBEndpoint = dynamoDBEndpoint;
        this.initialPositionInStream = initialPositionInStream;
        this.kinesisCredentialsProvider = kinesisCredentialsProvider;
        this.dynamoDBCredentialsProvider = dynamoDBCredentialsProvider;
        this.cloudWatchCredentialsProvider = cloudWatchCredentialsProvider;
        this.failoverTimeMillis = failoverTimeMillis;
        this.maxRecords = maxRecords;
        this.idleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis;
        this.callProcessRecordsEvenForEmptyRecordList = callProcessRecordsEvenForEmptyRecordList;
        this.parentShardPollIntervalMillis = parentShardPollIntervalMillis;
        this.shardSyncIntervalMillis = shardSyncIntervalMillis;
        this.cleanupLeasesUponShardCompletion = cleanupTerminatedShardsBeforeExpiry;
        this.workerIdentifier = workerId;
        // Each client config gets the KCL marker appended to its user agent (mutates the passed-in config).
        this.kinesisClientConfig = checkAndAppendKinesisClientLibUserAgent(kinesisClientConfig);
        this.dynamoDBClientConfig = checkAndAppendKinesisClientLibUserAgent(dynamoDBClientConfig);
        this.cloudWatchClientConfig = checkAndAppendKinesisClientLibUserAgent(cloudWatchClientConfig);
        this.taskBackoffTimeMillis = taskBackoffTimeMillis;
        this.metricsBufferTimeMillis = metricsBufferTimeMillis;
        this.metricsMaxQueueSize = metricsMaxQueueSize;
        this.metricsLevel = DEFAULT_METRICS_LEVEL;
        this.metricsEnabledDimensions = DEFAULT_METRICS_ENABLED_DIMENSIONS;
        this.validateSequenceNumberBeforeCheckpointing = validateSequenceNumberBeforeCheckpointing;
        this.regionName = regionName;
        this.maxLeasesForWorker = DEFAULT_MAX_LEASES_FOR_WORKER;
        this.maxLeasesToStealAtOneTime = DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME;
        this.initialLeaseTableReadCapacity = DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY;
        this.initialLeaseTableWriteCapacity = DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY;
        this.initialPositionInStreamExtended =
                InitialPositionInStreamExtended.newInitialPosition(initialPositionInStream);
        this.skipShardSyncAtWorkerInitializationIfLeasesExist = DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST;
        this.shardPrioritization = DEFAULT_SHARD_PRIORITIZATION;
    }

    // Check if value is positive, otherwise throw an exception
    private void checkIsValuePositive(String key, long value) {
        if (value <= 0) {
            throw new IllegalArgumentException("Value of " + key
                    + " should be positive, but current value is " + value);
        }
    }

    // Check if user agent in configuration is the default agent.
    // If so, replace it with application name plus KINESIS_CLIENT_LIB_USER_AGENT.
    // If not, append KINESIS_CLIENT_LIB_USER_AGENT to the end.
+ private ClientConfiguration checkAndAppendKinesisClientLibUserAgent(ClientConfiguration config) { + String existingUserAgent = config.getUserAgent(); + if (existingUserAgent.equals(ClientConfiguration.DEFAULT_USER_AGENT)) { + existingUserAgent = applicationName; + } + if (!existingUserAgent.contains(KINESIS_CLIENT_LIB_USER_AGENT)) { + existingUserAgent += "," + KINESIS_CLIENT_LIB_USER_AGENT; + } + config.setUserAgent(existingUserAgent); + return config; + } + + private void checkIsRegionNameValid(String regionNameToCheck) { + if (regionNameToCheck != null && RegionUtils.getRegion(regionNameToCheck) == null) { + throw new IllegalArgumentException("The specified region name is not valid"); + } + } + + /** + * @return Name of the application + */ + public String getApplicationName() { + return applicationName; + } + + /** + * @return Name of the table to use in DynamoDB + */ + public String getTableName() { + return tableName; + } + + /** + * @return Time within which a worker should renew a lease (else it is assumed dead) + */ + public long getFailoverTimeMillis() { + return failoverTimeMillis; + } + + /** + * @return Credentials provider used to access Kinesis + */ + public AWSCredentialsProvider getKinesisCredentialsProvider() { + return kinesisCredentialsProvider; + } + + /** + * @return Credentials provider used to access DynamoDB + */ + public AWSCredentialsProvider getDynamoDBCredentialsProvider() { + return dynamoDBCredentialsProvider; + } + + /** + * @return Credentials provider used to access CloudWatch + */ + public AWSCredentialsProvider getCloudWatchCredentialsProvider() { + return cloudWatchCredentialsProvider; + } + + /** + * @return workerIdentifier + */ + public String getWorkerIdentifier() { + return workerIdentifier; + } + + /** + * @return the shardSyncIntervalMillis + */ + public long getShardSyncIntervalMillis() { + return shardSyncIntervalMillis; + } + + /** + * @return Max records to fetch per Kinesis getRecords call + */ + public int 
getMaxRecords() { + return maxRecords; + } + + /** + * @return Idle time between calls to fetch data from Kinesis + */ + public long getIdleTimeBetweenReadsInMillis() { + return idleTimeBetweenReadsInMillis; + } + + /** + * @return true if processRecords() should be called even for empty record lists + */ + public boolean shouldCallProcessRecordsEvenForEmptyRecordList() { + return callProcessRecordsEvenForEmptyRecordList; + } + + /** + * @return Epsilon milliseconds (used for lease timing margins) + */ + public long getEpsilonMillis() { + return EPSILON_MS; + } + + /** + * @return stream name + */ + public String getStreamName() { + return streamName; + } + + /** + * @return Kinesis endpoint + */ + public String getKinesisEndpoint() { + return kinesisEndpoint; + } + + /** + * @return DynamoDB endpoint + */ + public String getDynamoDBEndpoint() { + return dynamoDBEndpoint; + } + + /** + * @return the initialPositionInStream + */ + public InitialPositionInStream getInitialPositionInStream() { + return initialPositionInStream; + } + + /** + * @return interval between polls for parent shard completion + */ + public long getParentShardPollIntervalMillis() { + return parentShardPollIntervalMillis; + } + + /** + * @return Kinesis client configuration + */ + public ClientConfiguration getKinesisClientConfiguration() { + return kinesisClientConfig; + } + + /** + * @return DynamoDB client configuration + */ + public ClientConfiguration getDynamoDBClientConfiguration() { + return dynamoDBClientConfig; + } + + /** + * @return CloudWatch client configuration + */ + public ClientConfiguration getCloudWatchClientConfiguration() { + return cloudWatchClientConfig; + } + + /** + * @return backoff time when tasks encounter exceptions + */ + public long getTaskBackoffTimeMillis() { + return taskBackoffTimeMillis; + } + + /** + * @return Metrics are buffered for at most this long before publishing. 
+ */ + public long getMetricsBufferTimeMillis() { + return metricsBufferTimeMillis; + } + + /** + * @return Max number of metrics to buffer before publishing. + */ + public int getMetricsMaxQueueSize() { + return metricsMaxQueueSize; + } + + /** + * @return Metrics level enabled for metrics. + */ + public MetricsLevel getMetricsLevel() { + return metricsLevel; + } + + /** + * @return Enabled dimensions for metrics. + */ + public Set getMetricsEnabledDimensions() { + // Unmodifiable set. + return metricsEnabledDimensions; + } + + /** + * @return true if we should clean up leases of shards after processing is complete (don't wait for expiration) + */ + public boolean shouldCleanupLeasesUponShardCompletion() { + return cleanupLeasesUponShardCompletion; + } + + /** + * @return true if KCL should validate client provided sequence numbers with a call to Amazon Kinesis before + * checkpointing for calls to {@link RecordProcessorCheckpointer#checkpoint(String)} + */ + public boolean shouldValidateSequenceNumberBeforeCheckpointing() { + return validateSequenceNumberBeforeCheckpointing; + } + + /** + * @return Region for the service + */ + public String getRegionName() { + return regionName; + } + + /** + * @return true if Worker should skip syncing shards and leases at startup if leases are present + */ + public boolean getSkipShardSyncAtWorkerInitializationIfLeasesExist() { + return skipShardSyncAtWorkerInitializationIfLeasesExist; + } + + /** + * @return Max leases this Worker can handle at a time + */ + public int getMaxLeasesForWorker() { + return maxLeasesForWorker; + } + + /** + * @return Max leases to steal at one time (for load balancing) + */ + public int getMaxLeasesToStealAtOneTime() { + return maxLeasesToStealAtOneTime; + } + + /** + * @return Read capacity to provision when creating the lease table. 
+ */ + public int getInitialLeaseTableReadCapacity() { + return initialLeaseTableReadCapacity; + } + + /** + * @return Write capacity to provision when creating the lease table. + */ + public int getInitialLeaseTableWriteCapacity() { + return initialLeaseTableWriteCapacity; + } + + /** + * Keeping it protected to forbid outside callers from depending on this internal object. + * @return The initialPositionInStreamExtended object. + */ + protected InitialPositionInStreamExtended getInitialPositionInStreamExtended() { + return initialPositionInStreamExtended; + } + + /** + * @return The timestamp from where we need to start the application. + * Valid only for initial position of type AT_TIMESTAMP, returns null for other positions. + */ + public Date getTimestampAtInitialPositionInStream() { + return initialPositionInStreamExtended.getTimestamp(); + } + + /** + * @return Shard prioritization strategy. + */ + public ShardPrioritization getShardPrioritizationStrategy() { + return shardPrioritization; + } + + // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 190 LINES + /** + * @param tableName name of the lease table in DynamoDB + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withTableName(String tableName) { + this.tableName = tableName; + return this; + } + + /** + * @param kinesisEndpoint Kinesis endpoint + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withKinesisEndpoint(String kinesisEndpoint) { + this.kinesisEndpoint = kinesisEndpoint; + return this; + } + + /** + * @param dynamoDBEndpoint DynamoDB endpoint + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withDynamoDBEndpoint(String dynamoDBEndpoint) { + this.dynamoDBEndpoint = dynamoDBEndpoint; + return this; + } + + /** + * @param initialPositionInStream One of LATEST or TRIM_HORIZON. 
The Amazon Kinesis Client Library + * will start fetching records from this position when the application starts up if there are no checkpoints. + * If there are checkpoints, we will process records from the checkpoint position. + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withInitialPositionInStream(InitialPositionInStream initialPositionInStream) { + this.initialPositionInStream = initialPositionInStream; + this.initialPositionInStreamExtended = + InitialPositionInStreamExtended.newInitialPosition(initialPositionInStream); + return this; + } + + /** + * @param timestamp The timestamp to use with the AT_TIMESTAMP value for initialPositionInStream. + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withTimestampAtInitialPositionInStream(Date timestamp) { + this.initialPositionInStream = InitialPositionInStream.AT_TIMESTAMP; + this.initialPositionInStreamExtended = InitialPositionInStreamExtended.newInitialPositionAtTimestamp(timestamp); + return this; + } + + /** + * @param failoverTimeMillis Lease duration (leases not renewed within this period will be claimed by others) + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withFailoverTimeMillis(long failoverTimeMillis) { + checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis); + this.failoverTimeMillis = failoverTimeMillis; + return this; + } + + /** + * @param shardSyncIntervalMillis Time between tasks to sync leases and Kinesis shards + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withShardSyncIntervalMillis(long shardSyncIntervalMillis) { + checkIsValuePositive("ShardSyncIntervalMillis", shardSyncIntervalMillis); + this.shardSyncIntervalMillis = shardSyncIntervalMillis; + return this; + } + + /** + * @param maxRecords Max records to fetch in a Kinesis getRecords() call + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration 
withMaxRecords(int maxRecords) { + checkIsValuePositive("MaxRecords", (long) maxRecords); + this.maxRecords = maxRecords; + return this; + } + + /** + * @param idleTimeBetweenReadsInMillis Idle time between calls to fetch data from Kinesis + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withIdleTimeBetweenReadsInMillis(long idleTimeBetweenReadsInMillis) { + checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis); + this.idleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis; + return this; + } + + /** + * @param callProcessRecordsEvenForEmptyRecordList Call the RecordProcessor::processRecords() API even if + * GetRecords returned an empty record list + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withCallProcessRecordsEvenForEmptyRecordList( + boolean callProcessRecordsEvenForEmptyRecordList) { + this.callProcessRecordsEvenForEmptyRecordList = callProcessRecordsEvenForEmptyRecordList; + return this; + } + + /** + * @param parentShardPollIntervalMillis Wait for this long between polls to check if parent shards are done + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withParentShardPollIntervalMillis(long parentShardPollIntervalMillis) { + checkIsValuePositive("ParentShardPollIntervalMillis", parentShardPollIntervalMillis); + this.parentShardPollIntervalMillis = parentShardPollIntervalMillis; + return this; + } + + /** + * @param cleanupLeasesUponShardCompletion Clean up shards we've finished processing (don't wait for expiration + * in Kinesis) + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withCleanupLeasesUponShardCompletion( + boolean cleanupLeasesUponShardCompletion) { + this.cleanupLeasesUponShardCompletion = cleanupLeasesUponShardCompletion; + return this; + } + + /** + * @param clientConfig Common client configuration used by Kinesis/DynamoDB/CloudWatch client + * @return 
KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withCommonClientConfig(ClientConfiguration clientConfig) { + ClientConfiguration tempClientConfig = checkAndAppendKinesisClientLibUserAgent(clientConfig); + this.kinesisClientConfig = tempClientConfig; + this.dynamoDBClientConfig = tempClientConfig; + this.cloudWatchClientConfig = tempClientConfig; + return this; + } + + /** + * @param kinesisClientConfig Client configuration used by Kinesis client + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withKinesisClientConfig(ClientConfiguration kinesisClientConfig) { + this.kinesisClientConfig = checkAndAppendKinesisClientLibUserAgent(kinesisClientConfig); + return this; + } + + /** + * @param dynamoDBClientConfig Client configuration used by DynamoDB client + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withDynamoDBClientConfig(ClientConfiguration dynamoDBClientConfig) { + this.dynamoDBClientConfig = checkAndAppendKinesisClientLibUserAgent(dynamoDBClientConfig); + return this; + } + + /** + * @param cloudWatchClientConfig Client configuration used by CloudWatch client + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withCloudWatchClientConfig(ClientConfiguration cloudWatchClientConfig) { + this.cloudWatchClientConfig = checkAndAppendKinesisClientLibUserAgent(cloudWatchClientConfig); + return this; + } + + /** + * Override the default user agent (application name). 
+ * + * @param userAgent User agent to use in AWS requests + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withUserAgent(String userAgent) { + String customizedUserAgent = userAgent + "," + KINESIS_CLIENT_LIB_USER_AGENT; + this.kinesisClientConfig.setUserAgent(customizedUserAgent); + this.dynamoDBClientConfig.setUserAgent(customizedUserAgent); + this.cloudWatchClientConfig.setUserAgent(customizedUserAgent); + return this; + } + + /** + * @param taskBackoffTimeMillis Backoff period when tasks encounter an exception + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withTaskBackoffTimeMillis(long taskBackoffTimeMillis) { + checkIsValuePositive("TaskBackoffTimeMillis", taskBackoffTimeMillis); + this.taskBackoffTimeMillis = taskBackoffTimeMillis; + return this; + } + + /** + * @param metricsBufferTimeMillis Metrics are buffered for at most this long before publishing to CloudWatch + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withMetricsBufferTimeMillis(long metricsBufferTimeMillis) { + checkIsValuePositive("MetricsBufferTimeMillis", metricsBufferTimeMillis); + this.metricsBufferTimeMillis = metricsBufferTimeMillis; + return this; + } + + /** + * @param metricsMaxQueueSize Max number of metrics to buffer before publishing to CloudWatch + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withMetricsMaxQueueSize(int metricsMaxQueueSize) { + checkIsValuePositive("MetricsMaxQueueSize", (long) metricsMaxQueueSize); + this.metricsMaxQueueSize = metricsMaxQueueSize; + return this; + } + + /** + * @param metricsLevel Metrics level to enable. + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withMetricsLevel(MetricsLevel metricsLevel) { + this.metricsLevel = metricsLevel == null ? DEFAULT_METRICS_LEVEL : metricsLevel; + return this; + } + + /** + * Sets metrics level that should be enabled. 
Possible values are: + * NONE + * SUMMARY + * DETAILED + * + * @param metricsLevel Metrics level to enable. + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withMetricsLevel(String metricsLevel) { + this.metricsLevel = MetricsLevel.fromName(metricsLevel); + return this; + } + + /** + * Sets the dimensions that are allowed to be emitted in metrics. + * @param metricsEnabledDimensions Set of dimensions that are allowed. + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withMetricsEnabledDimensions(Set metricsEnabledDimensions) { + if (metricsEnabledDimensions == null) { + this.metricsEnabledDimensions = METRICS_ALWAYS_ENABLED_DIMENSIONS; + } else if (metricsEnabledDimensions.contains(IMetricsScope.METRICS_DIMENSIONS_ALL)) { + this.metricsEnabledDimensions = METRICS_DIMENSIONS_ALL; + } else { + this.metricsEnabledDimensions = ImmutableSet.builder().addAll( + metricsEnabledDimensions).addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS).build(); + } + return this; + } + + /** + * + * @param validateSequenceNumberBeforeCheckpointing whether KCL should validate client provided sequence numbers + * with a call to Amazon Kinesis before checkpointing for calls to + * {@link RecordProcessorCheckpointer#checkpoint(String)}. + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withValidateSequenceNumberBeforeCheckpointing( + boolean validateSequenceNumberBeforeCheckpointing) { + this.validateSequenceNumberBeforeCheckpointing = validateSequenceNumberBeforeCheckpointing; + return this; + } + + /** + * If set to true, the Worker will not sync shards and leases during initialization if there are one or more leases + * in the lease table. This assumes that the shards and leases are in-sync. + * This enables customers to choose faster startup times (e.g. during incremental deployments of an application). 
+ * + * @param skipShardSyncAtStartupIfLeasesExist Should Worker skip syncing shards and leases at startup (Worker + * initialization). + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withSkipShardSyncAtStartupIfLeasesExist( + boolean skipShardSyncAtStartupIfLeasesExist) { + this.skipShardSyncAtWorkerInitializationIfLeasesExist = skipShardSyncAtStartupIfLeasesExist; + return this; + } + + /** + * + * @param regionName The region name for the service + * @return KinesisClientLibConfiguration + */ + // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 2 LINES + public KinesisClientLibConfiguration withRegionName(String regionName) { + checkIsRegionNameValid(regionName); + this.regionName = regionName; + return this; + } + + /** + * Worker will not acquire more than the specified max number of leases even if there are more + * shards that need to be processed. This can be used in scenarios where a worker is resource constrained or + * to prevent lease thrashing when small number of workers pick up all leases for small amount of time during + * deployment. + * Note that setting a low value may cause data loss (e.g. if there aren't enough Workers to make progress on all + * shards). When setting the value for this property, one must ensure enough workers are present to process + * shards and should consider future resharding, child shards that may be blocked on parent shards, some workers + * becoming unhealthy, etc. + * + * @param maxLeasesForWorker Max leases this Worker can handle at a time + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withMaxLeasesForWorker(int maxLeasesForWorker) { + checkIsValuePositive("maxLeasesForWorker", maxLeasesForWorker); + this.maxLeasesForWorker = maxLeasesForWorker; + return this; + } + + /** + * Max leases to steal from a more loaded Worker at one time (for load balancing). + * Setting this to a higher number can allow for faster load convergence (e.g. 
during deployments, cold starts), + * but can cause higher churn in the system. + * + * @param maxLeasesToStealAtOneTime Steal up to this many leases at one time (for load balancing) + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withMaxLeasesToStealAtOneTime(int maxLeasesToStealAtOneTime) { + checkIsValuePositive("maxLeasesToStealAtOneTime", maxLeasesToStealAtOneTime); + this.maxLeasesToStealAtOneTime = maxLeasesToStealAtOneTime; + return this; + } + + /** + * @param initialLeaseTableReadCapacity Read capacity to provision when creating the lease table. + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withInitialLeaseTableReadCapacity(int initialLeaseTableReadCapacity) { + checkIsValuePositive("initialLeaseTableReadCapacity", initialLeaseTableReadCapacity); + this.initialLeaseTableReadCapacity = initialLeaseTableReadCapacity; + return this; + } + + /** + * @param initialLeaseTableWriteCapacity Write capacity to provision when creating the lease table. + * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withInitialLeaseTableWriteCapacity(int initialLeaseTableWriteCapacity) { + checkIsValuePositive("initialLeaseTableWriteCapacity", initialLeaseTableWriteCapacity); + this.initialLeaseTableWriteCapacity = initialLeaseTableWriteCapacity; + return this; + } + + /** + * @param shardPrioritization Implementation of ShardPrioritization interface that should be used during processing. 
+ * @return KinesisClientLibConfiguration + */ + public KinesisClientLibConfiguration withShardPrioritizationStrategy(ShardPrioritization shardPrioritization) { + if (shardPrioritization == null) { + throw new IllegalArgumentException("shardPrioritization cannot be null"); + } + this.shardPrioritization = shardPrioritization; + return this; + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinator.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinator.java new file mode 100644 index 00000000..59de31be --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinator.java @@ -0,0 +1,279 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.UUID; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.KinesisClientLibIOException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.impl.LeaseCoordinator; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; + +/** + * This class is used to coordinate/manage leases owned by this worker process and to get/set checkpoints. 
+ */ +class KinesisClientLibLeaseCoordinator extends LeaseCoordinator implements ICheckpoint { + + private static final Log LOG = LogFactory.getLog(KinesisClientLibLeaseCoordinator.class); + + private static final long DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY = 10L; + private static final long DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY = 10L; + + private final ILeaseManager leaseManager; + + private long initialLeaseTableReadCapacity = DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY; + private long initialLeaseTableWriteCapacity = DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY; + + /** + * @param leaseManager Lease manager which provides CRUD lease operations. + * @param workerIdentifier Used to identify this worker process + * @param leaseDurationMillis Duration of a lease in milliseconds + * @param epsilonMillis Delta for timing operations (e.g. checking lease expiry) + */ + public KinesisClientLibLeaseCoordinator(ILeaseManager leaseManager, + String workerIdentifier, + long leaseDurationMillis, + long epsilonMillis) { + super(leaseManager, workerIdentifier, leaseDurationMillis, epsilonMillis); + this.leaseManager = leaseManager; + } + + /** + * @param leaseManager Lease manager which provides CRUD lease operations. + * @param workerIdentifier Used to identify this worker process + * @param leaseDurationMillis Duration of a lease in milliseconds + * @param epsilonMillis Delta for timing operations (e.g. checking lease expiry) + * @param metricsFactory Metrics factory used to emit metrics + */ + public KinesisClientLibLeaseCoordinator(ILeaseManager leaseManager, + String workerIdentifier, + long leaseDurationMillis, + long epsilonMillis, + IMetricsFactory metricsFactory) { + super(leaseManager, workerIdentifier, leaseDurationMillis, epsilonMillis, metricsFactory); + this.leaseManager = leaseManager; + } + + /** + * @param leaseManager Lease manager which provides CRUD lease operations. 
+ * @param workerIdentifier Used to identify this worker process + * @param leaseDurationMillis Duration of a lease in milliseconds + * @param epsilonMillis Delta for timing operations (e.g. checking lease expiry) + * @param maxLeasesForWorker Max leases this worker can handle at a time + * @param maxLeasesToStealAtOneTime Steal up to this many leases at a time (for load balancing) + * @param metricsFactory Metrics factory used to emit metrics + */ + public KinesisClientLibLeaseCoordinator(ILeaseManager leaseManager, + String workerIdentifier, + long leaseDurationMillis, + long epsilonMillis, + int maxLeasesForWorker, + int maxLeasesToStealAtOneTime, + IMetricsFactory metricsFactory) { + super(leaseManager, workerIdentifier, leaseDurationMillis, epsilonMillis, maxLeasesForWorker, + maxLeasesToStealAtOneTime, metricsFactory); + this.leaseManager = leaseManager; + } + + /** + * @param readCapacity The DynamoDB table used for tracking leases will be provisioned with the specified initial + * read capacity + * @return KinesisClientLibLeaseCoordinator + */ + public KinesisClientLibLeaseCoordinator withInitialLeaseTableReadCapacity(long readCapacity) { + if (readCapacity <= 0) { + throw new IllegalArgumentException("readCapacity should be >= 1"); + } + this.initialLeaseTableReadCapacity = readCapacity; + return this; + } + + /** + * @param writeCapacity The DynamoDB table used for tracking leases will be provisioned with the specified initial + * write capacity + * @return KinesisClientLibLeaseCoordinator + */ + public KinesisClientLibLeaseCoordinator withInitialLeaseTableWriteCapacity(long writeCapacity) { + if (writeCapacity <= 0) { + throw new IllegalArgumentException("writeCapacity should be >= 1"); + } + this.initialLeaseTableWriteCapacity = writeCapacity; + return this; + } + + /** + * Sets the checkpoint for a shard and updates ownerSwitchesSinceCheckpoint. 
+ * + * @param shardId shardId to update the checkpoint for + * @param checkpoint checkpoint value to set + * @param concurrencyToken obtained by calling Lease.getConcurrencyToken for a currently held lease + * + * @return true if checkpoint update succeeded, false otherwise + * + * @throws InvalidStateException if lease table does not exist + * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity + * @throws DependencyException if DynamoDB update fails in an unexpected way + */ + boolean setCheckpoint(String shardId, ExtendedSequenceNumber checkpoint, UUID concurrencyToken) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + KinesisClientLease lease = getCurrentlyHeldLease(shardId); + if (lease == null) { + LOG.info(String.format( + "Worker %s could not update checkpoint for shard %s because it does not hold the lease", + getWorkerIdentifier(), + shardId)); + return false; + } + + lease.setCheckpoint(checkpoint); + lease.setOwnerSwitchesSinceCheckpoint(0L); + + return updateLease(lease, concurrencyToken); + } + + /** + * {@inheritDoc} + */ + @Override + public void setCheckpoint(String shardId, ExtendedSequenceNumber checkpointValue, String concurrencyToken) + throws KinesisClientLibException { + try { + boolean wasSuccessful = setCheckpoint(shardId, checkpointValue, UUID.fromString(concurrencyToken)); + if (!wasSuccessful) { + throw new ShutdownException("Can't update checkpoint - instance doesn't hold the lease for this shard"); + } + } catch (ProvisionedThroughputException e) { + throw new ThrottlingException("Got throttled while updating checkpoint.", e); + } catch (InvalidStateException e) { + String message = "Unable to save checkpoint for shardId " + shardId; + LOG.error(message, e); + throw new com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException(message, e); + } catch (DependencyException e) { + throw new KinesisClientLibDependencyException("Unable to save 
checkpoint for shardId " + shardId, e); + } + } + + /** + * {@inheritDoc} + */ + @Override + public ExtendedSequenceNumber getCheckpoint(String shardId) throws KinesisClientLibException { + try { + return leaseManager.getLease(shardId).getCheckpoint(); + } catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) { + String message = "Unable to fetch checkpoint for shardId " + shardId; + LOG.error(message, e); + throw new KinesisClientLibIOException(message, e); + } + } + + /** + * @return Current shard/lease assignments + */ + public List getCurrentAssignments() { + Collection leases = getAssignments(); + return convertLeasesToAssignments(leases); + + } + + public static List convertLeasesToAssignments(Collection leases) { + if (leases == null || leases.isEmpty()) { + return Collections.emptyList(); + } + List assignments = new ArrayList<>(leases.size()); + for (KinesisClientLease lease : leases) { + assignments.add(convertLeaseToAssignment(lease)); + } + + return assignments; + } + + public static ShardInfo convertLeaseToAssignment(KinesisClientLease lease) { + Set parentShardIds = lease.getParentShardIds(); + return new ShardInfo(lease.getLeaseKey(), lease.getConcurrencyToken().toString(), parentShardIds, + lease.getCheckpoint()); + } + + /** + * Initialize the lease coordinator (create the lease table if needed). + * @throws DependencyException + * @throws ProvisionedThroughputException + */ + void initialize() throws ProvisionedThroughputException, DependencyException, IllegalStateException { + final boolean newTableCreated = + leaseManager.createLeaseTableIfNotExists(initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity); + if (newTableCreated) { + LOG.info(String.format( + "Created new lease table for coordinator with initial read capacity of %d and write capacity of %d.", + initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity)); + } + // Need to wait for table in active state. 
+ final long secondsBetweenPolls = 10L; + final long timeoutSeconds = 600L; + final boolean isTableActive = leaseManager.waitUntilLeaseTableExists(secondsBetweenPolls, timeoutSeconds); + if (!isTableActive) { + throw new DependencyException(new IllegalStateException("Creating table timeout")); + } + } + + /** + * Package access for testing. + * + * @throws DependencyException + * @throws InvalidStateException + */ + void runLeaseTaker() throws DependencyException, InvalidStateException { + super.runTaker(); + } + + /** + * Package access for testing. + * + * @throws DependencyException + * @throws InvalidStateException + */ + void runLeaseRenewer() throws DependencyException, InvalidStateException { + super.runRenewer(); + } + + /** + * Used to get information about leases for Kinesis shards (e.g. sync shards and leases, check on parent shard + * completion). + * + * @return LeaseManager + */ + ILeaseManager getLeaseManager() { + return leaseManager; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisDataFetcher.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisDataFetcher.java new file mode 100644 index 00000000..2ce3152a --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisDataFetcher.java @@ -0,0 +1,195 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.model.GetRecordsResult; +import com.amazonaws.services.kinesis.model.ResourceNotFoundException; +import com.amazonaws.services.kinesis.model.ShardIteratorType; +import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.proxies.MetricsCollectingKinesisProxyDecorator; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; + +import java.util.Date; + +/** + * Used to get data from Amazon Kinesis. Tracks iterator state internally. + */ +class KinesisDataFetcher { + + private static final Log LOG = LogFactory.getLog(KinesisDataFetcher.class); + + private String nextIterator; + private IKinesisProxy kinesisProxy; + private final String shardId; + private boolean isShardEndReached; + private boolean isInitialized; + + /** + * + * @param kinesisProxy Kinesis proxy + * @param shardInfo The shardInfo object. + */ + public KinesisDataFetcher(IKinesisProxy kinesisProxy, ShardInfo shardInfo) { + this.shardId = shardInfo.getShardId(); + this.kinesisProxy = + new MetricsCollectingKinesisProxyDecorator("KinesisDataFetcher", kinesisProxy, this.shardId); + } + + /** + * Get records from the current position in the stream (up to maxRecords). 
+ * + * @param maxRecords Max records to fetch + * @return list of records of up to maxRecords size + */ + public GetRecordsResult getRecords(int maxRecords) { + if (!isInitialized) { + throw new IllegalArgumentException("KinesisDataFetcher.getRecords called before initialization."); + } + + GetRecordsResult response = null; + if (nextIterator != null) { + try { + response = kinesisProxy.get(nextIterator, maxRecords); + nextIterator = response.getNextShardIterator(); + } catch (ResourceNotFoundException e) { + LOG.info("Caught ResourceNotFoundException when fetching records for shard " + shardId); + nextIterator = null; + } + if (nextIterator == null) { + isShardEndReached = true; + } + } else { + isShardEndReached = true; + } + + return response; + } + + /** + * Initializes this KinesisDataFetcher's iterator based on the checkpointed sequence number. + * @param initialCheckpoint Current checkpoint sequence number for this shard. + * @param initialPositionInStream The initialPositionInStream. + */ + public void initialize(String initialCheckpoint, InitialPositionInStreamExtended initialPositionInStream) { + LOG.info("Initializing shard " + shardId + " with " + initialCheckpoint); + advanceIteratorTo(initialCheckpoint, initialPositionInStream); + isInitialized = true; + } + + public void initialize(ExtendedSequenceNumber initialCheckpoint, + InitialPositionInStreamExtended initialPositionInStream) { + LOG.info("Initializing shard " + shardId + " with " + initialCheckpoint.getSequenceNumber()); + advanceIteratorTo(initialCheckpoint.getSequenceNumber(), initialPositionInStream); + isInitialized = true; + } + + /** + * Advances this KinesisDataFetcher's internal iterator to be at the passed-in sequence number. + * + * @param sequenceNumber advance the iterator to the record at this sequence number. + * @param initialPositionInStream The initialPositionInStream. 
+ */ + void advanceIteratorTo(String sequenceNumber, InitialPositionInStreamExtended initialPositionInStream) { + if (sequenceNumber == null) { + throw new IllegalArgumentException("SequenceNumber should not be null: shardId " + shardId); + } else if (sequenceNumber.equals(SentinelCheckpoint.LATEST.toString())) { + nextIterator = getIterator(ShardIteratorType.LATEST.toString()); + } else if (sequenceNumber.equals(SentinelCheckpoint.TRIM_HORIZON.toString())) { + nextIterator = getIterator(ShardIteratorType.TRIM_HORIZON.toString()); + } else if (sequenceNumber.equals(SentinelCheckpoint.AT_TIMESTAMP.toString())) { + nextIterator = getIterator(initialPositionInStream.getTimestamp()); + } else if (sequenceNumber.equals(SentinelCheckpoint.SHARD_END.toString())) { + nextIterator = null; + } else { + nextIterator = getIterator(ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), sequenceNumber); + } + if (nextIterator == null) { + isShardEndReached = true; + } + } + + /** + * @param iteratorType The iteratorType - either AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER. + * @param sequenceNumber The sequenceNumber. + * + * @return iterator or null if we catch a ResourceNotFound exception + */ + private String getIterator(String iteratorType, String sequenceNumber) { + String iterator = null; + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Calling getIterator for " + shardId + ", iterator type " + iteratorType + + " and sequence number " + sequenceNumber); + } + iterator = kinesisProxy.getIterator(shardId, iteratorType, sequenceNumber); + } catch (ResourceNotFoundException e) { + LOG.info("Caught ResourceNotFoundException when getting an iterator for shard " + shardId, e); + } + return iterator; + } + + /** + * @param iteratorType The iteratorType - either TRIM_HORIZON or LATEST. 
+ * @return iterator or null if we catch a ResourceNotFound exception + */ + private String getIterator(String iteratorType) { + String iterator = null; + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Calling getIterator for " + shardId + " and iterator type " + iteratorType); + } + iterator = kinesisProxy.getIterator(shardId, iteratorType); + } catch (ResourceNotFoundException e) { + LOG.info("Caught ResourceNotFoundException when getting an iterator for shard " + shardId, e); + } + return iterator; + } + + /** + * @param timestamp The timestamp. + * @return iterator or null if we catch a ResourceNotFound exception + */ + private String getIterator(Date timestamp) { + String iterator = null; + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Calling getIterator for " + shardId + " and timestamp " + timestamp); + } + iterator = kinesisProxy.getIterator(shardId, timestamp); + } catch (ResourceNotFoundException e) { + LOG.info("Caught ResourceNotFoundException when getting an iterator for shard " + shardId, e); + } + return iterator; + } + + /** + * @return the shardEndReached + */ + protected boolean isShardEndReached() { + return isShardEndReached; + } + + /** Note: This method has package level access for testing purposes. + * @return nextIterator + */ + String getNextIterator() { + return nextIterator; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/MetricsCollectingTaskDecorator.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/MetricsCollectingTaskDecorator.java new file mode 100644 index 00000000..e61da491 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/MetricsCollectingTaskDecorator.java @@ -0,0 +1,74 @@ +/* + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; + +/** + * Decorates an ITask and reports metrics about its timing and success/failure. + */ +class MetricsCollectingTaskDecorator implements ITask { + + private final ITask other; + private final IMetricsFactory factory; + + /** + * Constructor. + * + * @param other task to report metrics on + * @param factory IMetricsFactory to use + */ + public MetricsCollectingTaskDecorator(ITask other, IMetricsFactory factory) { + this.other = other; + this.factory = factory; + } + + /** + * {@inheritDoc} + */ + @Override + public TaskResult call() { + MetricsHelper.startScope(factory, other.getClass().getSimpleName()); + TaskResult result = null; + final long startTimeMillis = System.currentTimeMillis(); + try { + result = other.call(); + } finally { + MetricsHelper.addSuccessAndLatency(startTimeMillis, result != null && result.getException() == null, + MetricsLevel.SUMMARY); + MetricsHelper.endScope(); + } + return result; + } + + /** + * {@inheritDoc} + */ + @Override + public TaskType getTaskType() { + return other.getTaskType(); + } + + @Override + public String toString() { + return this.getClass().getName() + "<" + other.getTaskType() + ">(" + other + ")"; + } + + ITask getOther() { + return other; + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/NoOpShardPrioritization.java 
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;

import java.util.List;

/**
 * Shard Prioritization that returns the same original list of shards without any modifications.
 */
public class NoOpShardPrioritization implements
        ShardPrioritization {

    /**
     * Empty constructor for NoOp Shard Prioritization.
     */
    public NoOpShardPrioritization() {
    }

    @Override
    public List<ShardInfo> prioritize(List<ShardInfo> original) {
        // Intentionally a no-op: the input list is returned as-is, unchanged.
        return original;
    }
}
See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; import java.util.ArrayList; import java.util.Collections; @@ -51,14 +37,14 @@ public class ParentsFirstShardPrioritization implements public List prioritize(List original) { Map shards = new HashMap<>(); for (ShardInfo shardInfo : original) { - shards.put(shardInfo.shardId(), + shards.put(shardInfo.getShardId(), shardInfo); } Map processedNodes = new HashMap<>(); for (ShardInfo shardInfo : original) { - populateDepth(shardInfo.shardId(), + populateDepth(shardInfo.getShardId(), shards, processedNodes); } @@ -104,7 +90,7 @@ public class ParentsFirstShardPrioritization implements processedNodes.put(shardId, PROCESSING_NODE); int maxParentDepth = 0; - for (String parentId : shardInfo.parentShardIds()) { + for (String parentId : shardInfo.getParentShardIds()) { maxParentDepth = Math.max(maxParentDepth, populateDepth(parentId, shards, diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ProcessTask.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ProcessTask.java new file mode 100644 index 00000000..c419c693 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ProcessTask.java @@ -0,0 +1,388 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.math.BigInteger; +import java.util.Collections; +import java.util.List; +import java.util.ListIterator; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxyExtended; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; +import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord; +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; +import com.amazonaws.services.kinesis.model.ExpiredIteratorException; +import com.amazonaws.services.kinesis.model.GetRecordsResult; +import com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException; +import com.amazonaws.services.kinesis.model.Record; +import com.amazonaws.services.kinesis.model.Shard; + +/** + * Task for fetching data records and invoking processRecords() on the record processor instance. 
+ */ +class ProcessTask implements ITask { + + private static final Log LOG = LogFactory.getLog(ProcessTask.class); + + private static final String EXPIRED_ITERATOR_METRIC = "ExpiredIterator"; + private static final String DATA_BYTES_PROCESSED_METRIC = "DataBytesProcessed"; + private static final String RECORDS_PROCESSED_METRIC = "RecordsProcessed"; + private static final String MILLIS_BEHIND_LATEST_METRIC = "MillisBehindLatest"; + private static final String RECORD_PROCESSOR_PROCESS_RECORDS_METRIC = "RecordProcessor.processRecords"; + private static final int MAX_CONSECUTIVE_THROTTLES = 5; + + private final ShardInfo shardInfo; + private final IRecordProcessor recordProcessor; + private final RecordProcessorCheckpointer recordProcessorCheckpointer; + private final KinesisDataFetcher dataFetcher; + private final TaskType taskType = TaskType.PROCESS; + private final StreamConfig streamConfig; + private final long backoffTimeMillis; + private final Shard shard; + private final ThrottlingReporter throttlingReporter; + + /** + * @param shardInfo + * contains information about the shard + * @param streamConfig + * Stream configuration + * @param recordProcessor + * Record processor used to process the data records for the shard + * @param recordProcessorCheckpointer + * Passed to the RecordProcessor so it can checkpoint progress + * @param dataFetcher + * Kinesis data fetcher (used to fetch records from Kinesis) + * @param backoffTimeMillis + * backoff time when catching exceptions + */ + public ProcessTask(ShardInfo shardInfo, StreamConfig streamConfig, IRecordProcessor recordProcessor, + RecordProcessorCheckpointer recordProcessorCheckpointer, KinesisDataFetcher dataFetcher, + long backoffTimeMillis, boolean skipShardSyncAtWorkerInitializationIfLeasesExist) { + this(shardInfo, streamConfig, recordProcessor, recordProcessorCheckpointer, dataFetcher, backoffTimeMillis, + skipShardSyncAtWorkerInitializationIfLeasesExist, + new 
ThrottlingReporter(MAX_CONSECUTIVE_THROTTLES, shardInfo.getShardId())); + } + + /** + * @param shardInfo + * contains information about the shard + * @param streamConfig + * Stream configuration + * @param recordProcessor + * Record processor used to process the data records for the shard + * @param recordProcessorCheckpointer + * Passed to the RecordProcessor so it can checkpoint progress + * @param dataFetcher + * Kinesis data fetcher (used to fetch records from Kinesis) + * @param backoffTimeMillis + * backoff time when catching exceptions + * @param throttlingReporter + * determines how throttling events should be reported in the log. + */ + public ProcessTask(ShardInfo shardInfo, StreamConfig streamConfig, IRecordProcessor recordProcessor, + RecordProcessorCheckpointer recordProcessorCheckpointer, KinesisDataFetcher dataFetcher, + long backoffTimeMillis, boolean skipShardSyncAtWorkerInitializationIfLeasesExist, + ThrottlingReporter throttlingReporter) { + super(); + this.shardInfo = shardInfo; + this.recordProcessor = recordProcessor; + this.recordProcessorCheckpointer = recordProcessorCheckpointer; + this.dataFetcher = dataFetcher; + this.streamConfig = streamConfig; + this.backoffTimeMillis = backoffTimeMillis; + this.throttlingReporter = throttlingReporter; + IKinesisProxy kinesisProxy = this.streamConfig.getStreamProxy(); + // If skipShardSyncAtWorkerInitializationIfLeasesExist is set, we will not get the shard for + // this ProcessTask. In this case, duplicate KPL user records in the event of resharding will + // not be dropped during deaggregation of Amazon Kinesis records. This is only applicable if + // KPL is used for ingestion and KPL's aggregation feature is used. 
+ if (!skipShardSyncAtWorkerInitializationIfLeasesExist && kinesisProxy instanceof IKinesisProxyExtended) { + this.shard = ((IKinesisProxyExtended) kinesisProxy).getShard(this.shardInfo.getShardId()); + } else { + this.shard = null; + } + if (this.shard == null && !skipShardSyncAtWorkerInitializationIfLeasesExist) { + LOG.warn("Cannot get the shard for this ProcessTask, so duplicate KPL user records " + + "in the event of resharding will not be dropped during deaggregation of Amazon " + + "Kinesis records."); + } + } + + /* + * (non-Javadoc) + * + * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ITask#call() + */ + @Override + public TaskResult call() { + long startTimeMillis = System.currentTimeMillis(); + IMetricsScope scope = MetricsHelper.getMetricsScope(); + scope.addDimension(MetricsHelper.SHARD_ID_DIMENSION_NAME, shardInfo.getShardId()); + scope.addData(RECORDS_PROCESSED_METRIC, 0, StandardUnit.Count, MetricsLevel.SUMMARY); + scope.addData(DATA_BYTES_PROCESSED_METRIC, 0, StandardUnit.Bytes, MetricsLevel.SUMMARY); + + Exception exception = null; + + try { + if (dataFetcher.isShardEndReached()) { + LOG.info("Reached end of shard " + shardInfo.getShardId()); + return new TaskResult(null, true); + } + + final GetRecordsResult getRecordsResult = getRecordsResult(); + throttlingReporter.success(); + List records = getRecordsResult.getRecords(); + + if (!records.isEmpty()) { + scope.addData(RECORDS_PROCESSED_METRIC, records.size(), StandardUnit.Count, MetricsLevel.SUMMARY); + } else { + handleNoRecords(startTimeMillis); + } + records = deaggregateRecords(records); + + recordProcessorCheckpointer.setLargestPermittedCheckpointValue( + filterAndGetMaxExtendedSequenceNumber(scope, records, + recordProcessorCheckpointer.getLastCheckpointValue(), + recordProcessorCheckpointer.getLargestPermittedCheckpointValue())); + + if (shouldCallProcessRecords(records)) { + callProcessRecords(getRecordsResult, records); + } + } catch 
(ProvisionedThroughputExceededException pte) { + throttlingReporter.throttled(); + exception = pte; + backoff(); + + } catch (RuntimeException e) { + LOG.error("ShardId " + shardInfo.getShardId() + ": Caught exception: ", e); + exception = e; + backoff(); + } + + return new TaskResult(exception); + } + + /** + * Sleeps for the configured backoff period. This is usually only called when an exception occurs. + */ + private void backoff() { + // backoff if we encounter an exception. + try { + Thread.sleep(this.backoffTimeMillis); + } catch (InterruptedException ie) { + LOG.debug(shardInfo.getShardId() + ": Sleep was interrupted", ie); + } + } + + /** + * Dispatches a batch of records to the record processor, and handles any fallout from that. + * + * @param getRecordsResult + * the result of the last call to Kinesis + * @param records + * the records to be dispatched. It's possible the records have been adjusted by KPL deaggregation. + */ + private void callProcessRecords(GetRecordsResult getRecordsResult, List records) { + LOG.debug("Calling application processRecords() with " + records.size() + " records from " + + shardInfo.getShardId()); + final ProcessRecordsInput processRecordsInput = new ProcessRecordsInput().withRecords(records) + .withCheckpointer(recordProcessorCheckpointer) + .withMillisBehindLatest(getRecordsResult.getMillisBehindLatest()); + + final long recordProcessorStartTimeMillis = System.currentTimeMillis(); + try { + recordProcessor.processRecords(processRecordsInput); + } catch (Exception e) { + LOG.error("ShardId " + shardInfo.getShardId() + + ": Application processRecords() threw an exception when processing shard ", e); + LOG.error("ShardId " + shardInfo.getShardId() + ": Skipping over the following data records: " + records); + } finally { + MetricsHelper.addLatencyPerShard(shardInfo.getShardId(), RECORD_PROCESSOR_PROCESS_RECORDS_METRIC, + recordProcessorStartTimeMillis, MetricsLevel.SUMMARY); + } + } + + /** + * Whether we should call process 
records or not + * + * @param records + * the records returned from the call to Kinesis, and/or deaggregation + * @return true if the set of records should be dispatched to the record process, false if they should not. + */ + private boolean shouldCallProcessRecords(List records) { + return (!records.isEmpty()) || streamConfig.shouldCallProcessRecordsEvenForEmptyRecordList(); + } + + /** + * Determines whether to deaggregate the given records, and if they are KPL records dispatches them to deaggregation + * + * @param records + * the records to deaggregate is deaggregation is required. + * @return returns either the deaggregated records, or the original records + */ + @SuppressWarnings("unchecked") + private List deaggregateRecords(List records) { + // We deaggregate if and only if we got actual Kinesis records, i.e. + // not instances of some subclass thereof. + if (!records.isEmpty() && records.get(0).getClass().equals(Record.class)) { + if (this.shard != null) { + return (List) (List) UserRecord.deaggregate(records, + new BigInteger(this.shard.getHashKeyRange().getStartingHashKey()), + new BigInteger(this.shard.getHashKeyRange().getEndingHashKey())); + } else { + return (List) (List) UserRecord.deaggregate(records); + } + } + return records; + } + + /** + * Emits metrics, and sleeps if there are no records available + * + * @param startTimeMillis + * the time when the task started + */ + private void handleNoRecords(long startTimeMillis) { + LOG.debug("Kinesis didn't return any records for shard " + shardInfo.getShardId()); + + long sleepTimeMillis = streamConfig.getIdleTimeInMilliseconds() + - (System.currentTimeMillis() - startTimeMillis); + if (sleepTimeMillis > 0) { + sleepTimeMillis = Math.max(sleepTimeMillis, streamConfig.getIdleTimeInMilliseconds()); + try { + LOG.debug("Sleeping for " + sleepTimeMillis + " ms since there were no new records in shard " + + shardInfo.getShardId()); + Thread.sleep(sleepTimeMillis); + } catch (InterruptedException e) { + 
LOG.debug("ShardId " + shardInfo.getShardId() + ": Sleep was interrupted"); + } + } + } + + @Override + public TaskType getTaskType() { + return taskType; + } + + /** + * Scans a list of records to filter out records up to and including the most recent checkpoint value and to get + * the greatest extended sequence number from the retained records. Also emits metrics about the records. + * + * @param scope metrics scope to emit metrics into + * @param records list of records to scan and change in-place as needed + * @param lastCheckpointValue the most recent checkpoint value + * @param lastLargestPermittedCheckpointValue previous largest permitted checkpoint value + * @return the largest extended sequence number among the retained records + */ + private ExtendedSequenceNumber filterAndGetMaxExtendedSequenceNumber(IMetricsScope scope, List records, + final ExtendedSequenceNumber lastCheckpointValue, + final ExtendedSequenceNumber lastLargestPermittedCheckpointValue) { + ExtendedSequenceNumber largestExtendedSequenceNumber = lastLargestPermittedCheckpointValue; + ListIterator recordIterator = records.listIterator(); + while (recordIterator.hasNext()) { + Record record = recordIterator.next(); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber( + record.getSequenceNumber(), + record instanceof UserRecord + ? 
((UserRecord) record).getSubSequenceNumber() + : null); + + if (extendedSequenceNumber.compareTo(lastCheckpointValue) <= 0) { + recordIterator.remove(); + LOG.debug("removing record with ESN " + extendedSequenceNumber + + " because the ESN is <= checkpoint (" + lastCheckpointValue + ")"); + continue; + } + + if (largestExtendedSequenceNumber == null + || largestExtendedSequenceNumber.compareTo(extendedSequenceNumber) < 0) { + largestExtendedSequenceNumber = extendedSequenceNumber; + } + + scope.addData(DATA_BYTES_PROCESSED_METRIC, record.getData().limit(), StandardUnit.Bytes, + MetricsLevel.SUMMARY); + } + return largestExtendedSequenceNumber; + } + + /** + * Gets records from Kinesis and retries once in the event of an ExpiredIteratorException. + * + * @return list of data records from Kinesis + */ + private GetRecordsResult getRecordsResult() { + try { + return getRecordsResultAndRecordMillisBehindLatest(); + } catch (ExpiredIteratorException e) { + // If we see a ExpiredIteratorException, try once to restart from the greatest remembered sequence number + LOG.info("ShardId " + shardInfo.getShardId() + + ": getRecords threw ExpiredIteratorException - restarting after greatest seqNum " + + "passed to customer", e); + MetricsHelper.getMetricsScope().addData(EXPIRED_ITERATOR_METRIC, 1, StandardUnit.Count, + MetricsLevel.SUMMARY); + + /* + * Advance the iterator to after the greatest processed sequence number (remembered by + * recordProcessorCheckpointer). + */ + dataFetcher.advanceIteratorTo(recordProcessorCheckpointer.getLargestPermittedCheckpointValue() + .getSequenceNumber(), streamConfig.getInitialPositionInStream()); + + // Try a second time - if we fail this time, expose the failure. 
+ try { + return getRecordsResultAndRecordMillisBehindLatest(); + } catch (ExpiredIteratorException ex) { + String msg = + "Shard " + shardInfo.getShardId() + + ": getRecords threw ExpiredIteratorException with a fresh iterator."; + LOG.error(msg, ex); + throw ex; + } + } + } + + /** + * Gets records from Kinesis and records the MillisBehindLatest metric if present. + * + * @return list of data records from Kinesis + */ + private GetRecordsResult getRecordsResultAndRecordMillisBehindLatest() { + final GetRecordsResult getRecordsResult = dataFetcher.getRecords(streamConfig.getMaxRecords()); + + if (getRecordsResult == null) { + // Stream no longer exists + return new GetRecordsResult().withRecords(Collections.emptyList()); + } + + if (getRecordsResult.getMillisBehindLatest() != null) { + MetricsHelper.getMetricsScope().addData(MILLIS_BEHIND_LATEST_METRIC, + getRecordsResult.getMillisBehindLatest(), + StandardUnit.Milliseconds, + MetricsLevel.SUMMARY); + } + + return getRecordsResult; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/RecordProcessorCheckpointer.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/RecordProcessorCheckpointer.java new file mode 100644 index 00000000..69922670 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/RecordProcessorCheckpointer.java @@ -0,0 +1,227 @@ +/* + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord; +import com.amazonaws.services.kinesis.model.Record; + +/** + * This class is used to enable RecordProcessors to checkpoint their progress. + * The Amazon Kinesis Client Library will instantiate an object and provide a reference to the application + * RecordProcessor instance. Amazon Kinesis Client Library will create one instance per shard assignment. + */ +class RecordProcessorCheckpointer implements IRecordProcessorCheckpointer { + + private static final Log LOG = LogFactory.getLog(RecordProcessorCheckpointer.class); + + private ICheckpoint checkpoint; + + private ExtendedSequenceNumber largestPermittedCheckpointValue; + // Set to the last value set via checkpoint(). + // Sample use: verify application shutdown() invoked checkpoint() at the end of a shard. 
+ private ExtendedSequenceNumber lastCheckpointValue; + + private ShardInfo shardInfo; + + private SequenceNumberValidator sequenceNumberValidator; + + private ExtendedSequenceNumber sequenceNumberAtShardEnd; + + /** + * Only has package level access, since only the Amazon Kinesis Client Library should be creating these. + * + * @param checkpoint Used to checkpoint progress of a RecordProcessor + * @param validator Used for validating sequence numbers + */ + RecordProcessorCheckpointer(ShardInfo shardInfo, + ICheckpoint checkpoint, + SequenceNumberValidator validator) { + this.shardInfo = shardInfo; + this.checkpoint = checkpoint; + this.sequenceNumberValidator = validator; + } + + /** + * {@inheritDoc} + */ + @Override + public synchronized void checkpoint() + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + if (LOG.isDebugEnabled()) { + LOG.debug("Checkpointing " + shardInfo.getShardId() + ", " + " token " + shardInfo.getConcurrencyToken() + + " at largest permitted value " + this.largestPermittedCheckpointValue); + } + advancePosition(this.largestPermittedCheckpointValue); + } + + /** + * {@inheritDoc} + */ + @Override + public synchronized void checkpoint(Record record) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { + if (record == null) { + throw new IllegalArgumentException("Could not checkpoint a null record"); + } else if (record instanceof UserRecord) { + checkpoint(record.getSequenceNumber(), ((UserRecord) record).getSubSequenceNumber()); + } else { + checkpoint(record.getSequenceNumber(), 0); + } + } + + /** + * {@inheritDoc} + */ + @Override + public synchronized void checkpoint(String sequenceNumber) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { + checkpoint(sequenceNumber, 0); + } + + /** + * {@inheritDoc} + */ 
+ @Override + public synchronized void checkpoint(String sequenceNumber, long subSequenceNumber) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { + + if (subSequenceNumber < 0) { + throw new IllegalArgumentException("Could not checkpoint at invalid, negative subsequence number " + + subSequenceNumber); + } + + // throws exception if sequence number shouldn't be checkpointed for this shard + sequenceNumberValidator.validateSequenceNumber(sequenceNumber); + if (LOG.isDebugEnabled()) { + LOG.debug("Validated checkpoint sequence number " + sequenceNumber + " for " + shardInfo.getShardId() + + ", token " + shardInfo.getConcurrencyToken()); + } + /* + * If there isn't a last checkpoint value, we only care about checking the upper bound. + * If there is a last checkpoint value, we want to check both the lower and upper bound. + */ + ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber(sequenceNumber, subSequenceNumber); + if ((lastCheckpointValue.compareTo(newCheckpoint) <= 0) + && newCheckpoint.compareTo(largestPermittedCheckpointValue) <= 0) { + + if (LOG.isDebugEnabled()) { + LOG.debug("Checkpointing " + shardInfo.getShardId() + ", token " + shardInfo.getConcurrencyToken() + + " at specific extended sequence number " + newCheckpoint); + } + this.advancePosition(newCheckpoint); + } else { + throw new IllegalArgumentException(String.format( + "Could not checkpoint at extended sequence number %s as it did not fall into acceptable range " + + "between the last checkpoint %s and the greatest extended sequence number passed to this " + + "record processor %s", + newCheckpoint, this.lastCheckpointValue, this.largestPermittedCheckpointValue)); + } + } + + /** + * @return the lastCheckpointValue + */ + ExtendedSequenceNumber getLastCheckpointValue() { + return lastCheckpointValue; + } + + synchronized void setInitialCheckpointValue(ExtendedSequenceNumber initialCheckpoint) { + 
lastCheckpointValue = initialCheckpoint; + } + + /** + * Used for testing. + * + * @return the largest permitted checkpoint + */ + synchronized ExtendedSequenceNumber getLargestPermittedCheckpointValue() { + return largestPermittedCheckpointValue; + } + + /** + * @param checkpoint the checkpoint value to set + */ + synchronized void setLargestPermittedCheckpointValue(ExtendedSequenceNumber largestPermittedCheckpointValue) { + this.largestPermittedCheckpointValue = largestPermittedCheckpointValue; + } + + /** + * Used to remember the last extended sequence number before SHARD_END to allow us to prevent the checkpointer + * from checkpointing at the end of the shard twice (i.e. at the last extended sequence number and then again + * at SHARD_END). + * + * @param extendedSequenceNumber + */ + synchronized void setSequenceNumberAtShardEnd(ExtendedSequenceNumber extendedSequenceNumber) { + this.sequenceNumberAtShardEnd = extendedSequenceNumber; + } + + + /** + * Internal API - has package level access only for testing purposes. 
+ * + * @param sequenceNumber + * + * @throws KinesisClientLibDependencyException + * @throws ThrottlingException + * @throws ShutdownException + * @throws InvalidStateException + */ + void advancePosition(String sequenceNumber) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + advancePosition(new ExtendedSequenceNumber(sequenceNumber)); + } + + void advancePosition(ExtendedSequenceNumber extendedSequenceNumber) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + ExtendedSequenceNumber checkpointToRecord = extendedSequenceNumber; + if (sequenceNumberAtShardEnd != null && sequenceNumberAtShardEnd.equals(extendedSequenceNumber)) { + // If we are about to checkpoint the very last sequence number for this shard, we might as well + // just checkpoint at SHARD_END + checkpointToRecord = ExtendedSequenceNumber.SHARD_END; + } + // Don't checkpoint a value we already successfully checkpointed + if (extendedSequenceNumber != null && !extendedSequenceNumber.equals(lastCheckpointValue)) { + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Setting " + shardInfo.getShardId() + ", token " + shardInfo.getConcurrencyToken() + + " checkpoint to " + checkpointToRecord); + } + checkpoint.setCheckpoint(shardInfo.getShardId(), checkpointToRecord, shardInfo.getConcurrencyToken()); + lastCheckpointValue = checkpointToRecord; + } catch (ThrottlingException | ShutdownException | InvalidStateException + | KinesisClientLibDependencyException e) { + throw e; + } catch (KinesisClientLibException e) { + LOG.warn("Caught exception setting checkpoint.", e); + throw new KinesisClientLibDependencyException("Caught exception while checkpointing", e); + } + } + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/SequenceNumberValidator.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/SequenceNumberValidator.java new 
file mode 100644 index 00000000..96af5f7c --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/SequenceNumberValidator.java @@ -0,0 +1,128 @@ +/* + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.model.InvalidArgumentException; +import com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException; +import com.amazonaws.services.kinesis.model.ShardIteratorType; + +/** + * This class provides some methods for validating sequence numbers. It provides a method + * {@link #validateSequenceNumber(String)} which validates a sequence number by attempting to get an iterator from + * Amazon Kinesis for that sequence number. (e.g. 
Before checkpointing a client provided sequence number in + * {@link RecordProcessorCheckpointer#checkpoint(String)} to prevent invalid sequence numbers from being checkpointed, + * which could prevent another shard consumer instance from processing the shard later on). This class also provides a + * utility function {@link #isDigits(String)} which is used to check whether a string is all digits + */ +public class SequenceNumberValidator { + + private static final Log LOG = LogFactory.getLog(SequenceNumberValidator.class); + + private IKinesisProxy proxy; + private String shardId; + private boolean validateWithGetIterator; + private static final int SERVER_SIDE_ERROR_CODE = 500; + + /** + * Constructor. + * + * @param proxy Kinesis proxy to be used for getIterator call + * @param shardId ShardId to check with sequence numbers + * @param validateWithGetIterator Whether to attempt to get an iterator for this shard id and the sequence numbers + * being validated + */ + SequenceNumberValidator(IKinesisProxy proxy, String shardId, boolean validateWithGetIterator) { + this.proxy = proxy; + this.shardId = shardId; + this.validateWithGetIterator = validateWithGetIterator; + } + + /** + * Validates the sequence number by attempting to get an iterator from Amazon Kinesis. Repackages exceptions from + * Amazon Kinesis into the appropriate KCL exception to allow clients to determine exception handling strategies + * + * @param sequenceNumber The sequence number to be validated. Must be a numeric string + * @throws IllegalArgumentException Thrown when sequence number validation fails. + * @throws ThrottlingException Thrown when GetShardIterator returns a ProvisionedThroughputExceededException which + * indicates that too many getIterator calls are being made for this shard. + * @throws KinesisClientLibDependencyException Thrown when a service side error is received. 
This way clients have + * the option of retrying + */ + void validateSequenceNumber(String sequenceNumber) + throws IllegalArgumentException, ThrottlingException, KinesisClientLibDependencyException { + if (!isDigits(sequenceNumber)) { + LOG.info("Sequence number must be numeric, but was " + sequenceNumber); + throw new IllegalArgumentException("Sequence number must be numeric, but was " + sequenceNumber); + } + try { + if (validateWithGetIterator) { + proxy.getIterator(shardId, ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), sequenceNumber); + LOG.info("Validated sequence number " + sequenceNumber + " with shard id " + shardId); + } + } catch (InvalidArgumentException e) { + LOG.info("Sequence number " + sequenceNumber + " is invalid for shard " + shardId, e); + throw new IllegalArgumentException("Sequence number " + sequenceNumber + " is invalid for shard " + + shardId, e); + } catch (ProvisionedThroughputExceededException e) { + // clients should have back off logic in their checkpoint logic + LOG.info("Exceeded throughput while getting an iterator for shard " + shardId, e); + throw new ThrottlingException("Exceeded throughput while getting an iterator for shard " + shardId, e); + } catch (AmazonServiceException e) { + LOG.info("Encountered service exception while getting an iterator for shard " + shardId, e); + if (e.getStatusCode() >= SERVER_SIDE_ERROR_CODE) { + // clients can choose whether to retry in their checkpoint logic + throw new KinesisClientLibDependencyException("Encountered service exception while getting an iterator" + + " for shard " + shardId, e); + } + // Just throw any other exceptions, e.g. 
400 errors caused by the client + throw e; + } + } + + void validateSequenceNumber(ExtendedSequenceNumber checkpoint) + throws IllegalArgumentException, ThrottlingException, KinesisClientLibDependencyException { + validateSequenceNumber(checkpoint.getSequenceNumber()); + if (checkpoint.getSubSequenceNumber() < 0) { + throw new IllegalArgumentException("SubSequence number must be non-negative, but was " + + checkpoint.getSubSequenceNumber()); + } + } + + /** + * Checks if the string is composed of only digits. + * + * @param string + * @return true for a string of all digits, false otherwise (including false for null and empty string) + */ + static boolean isDigits(String string) { + if (string == null || string.length() == 0) { + return false; + } + for (int i = 0; i < string.length(); i++) { + if (!Character.isDigit(string.charAt(i))) { + return false; + } + } + return true; + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumer.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumer.java new file mode 100644 index 00000000..63cce40d --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumer.java @@ -0,0 +1,367 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.RejectedExecutionException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.BlockedOnParentShardException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; +import com.google.common.annotations.VisibleForTesting; + +/** + * Responsible for consuming data records of a (specified) shard. + * The instance should be shutdown when we lose the primary responsibility for a shard. + * A new instance should be created if the primary responsibility is reassigned back to this process. 
+ */ +class ShardConsumer { + + private static final Log LOG = LogFactory.getLog(ShardConsumer.class); + + private final StreamConfig streamConfig; + private final IRecordProcessor recordProcessor; + private final RecordProcessorCheckpointer recordProcessorCheckpointer; + private final ExecutorService executorService; + private final ShardInfo shardInfo; + private final KinesisDataFetcher dataFetcher; + private final IMetricsFactory metricsFactory; + private final ILeaseManager leaseManager; + private ICheckpoint checkpoint; + // Backoff time when polling to check if application has finished processing parent shards + private final long parentShardPollIntervalMillis; + private final boolean cleanupLeasesOfCompletedShards; + private final long taskBackoffTimeMillis; + private final boolean skipShardSyncAtWorkerInitializationIfLeasesExist; + + private ITask currentTask; + private long currentTaskSubmitTime; + private Future future; + + /* + * Tracks current state. It is only updated via the consumeStream/shutdown APIs. Therefore we don't do + * much coordination/synchronization to handle concurrent reads/updates. + */ + private ConsumerStates.ConsumerState currentState = ConsumerStates.INITIAL_STATE; + /* + * Used to track if we lost the primary responsibility. Once set to true, we will start shutting down. + * If we regain primary responsibility before shutdown is complete, Worker should create a new ShardConsumer object. 
+ */ + private volatile ShutdownReason shutdownReason; + private volatile ShutdownNotification shutdownNotification; + + /** + * @param shardInfo Shard information + * @param streamConfig Stream configuration to use + * @param checkpoint Checkpoint tracker + * @param recordProcessor Record processor used to process the data records for the shard + * @param leaseManager Used to create leases for new shards + * @param parentShardPollIntervalMillis Wait for this long if parent shards are not done (or we get an exception) + * @param executorService ExecutorService used to execute process tasks for this shard + * @param metricsFactory IMetricsFactory used to construct IMetricsScopes for this shard + * @param backoffTimeMillis backoff interval when we encounter exceptions + */ + // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 10 LINES + ShardConsumer(ShardInfo shardInfo, + StreamConfig streamConfig, + ICheckpoint checkpoint, + IRecordProcessor recordProcessor, + ILeaseManager leaseManager, + long parentShardPollIntervalMillis, + boolean cleanupLeasesOfCompletedShards, + ExecutorService executorService, + IMetricsFactory metricsFactory, + long backoffTimeMillis, + boolean skipShardSyncAtWorkerInitializationIfLeasesExist) { + this.streamConfig = streamConfig; + this.recordProcessor = recordProcessor; + this.executorService = executorService; + this.shardInfo = shardInfo; + this.checkpoint = checkpoint; + this.recordProcessorCheckpointer = + new RecordProcessorCheckpointer(shardInfo, + checkpoint, + new SequenceNumberValidator(streamConfig.getStreamProxy(), + shardInfo.getShardId(), + streamConfig.shouldValidateSequenceNumberBeforeCheckpointing())); + this.dataFetcher = new KinesisDataFetcher(streamConfig.getStreamProxy(), shardInfo); + this.leaseManager = leaseManager; + this.metricsFactory = metricsFactory; + this.parentShardPollIntervalMillis = parentShardPollIntervalMillis; + this.cleanupLeasesOfCompletedShards = cleanupLeasesOfCompletedShards; + 
this.taskBackoffTimeMillis = backoffTimeMillis; + this.skipShardSyncAtWorkerInitializationIfLeasesExist = skipShardSyncAtWorkerInitializationIfLeasesExist; + } + + /** + * No-op if current task is pending, otherwise submits next task for this shard. + * This method should NOT be called if the ShardConsumer is already in SHUTDOWN_COMPLETED state. + * + * @return true if a new process task was submitted, false otherwise + */ + synchronized boolean consumeShard() { + return checkAndSubmitNextTask(); + } + + private boolean readyForNextTask() { + return future == null || future.isCancelled() || future.isDone(); + } + + private synchronized boolean checkAndSubmitNextTask() { + boolean submittedNewTask = false; + if (readyForNextTask()) { + TaskOutcome taskOutcome = TaskOutcome.NOT_COMPLETE; + if (future != null && future.isDone()) { + taskOutcome = determineTaskOutcome(); + } + + updateState(taskOutcome); + ITask nextTask = getNextTask(); + if (nextTask != null) { + currentTask = nextTask; + try { + future = executorService.submit(currentTask); + currentTaskSubmitTime = System.currentTimeMillis(); + submittedNewTask = true; + LOG.debug("Submitted new " + currentTask.getTaskType() + + " task for shard " + shardInfo.getShardId()); + } catch (RejectedExecutionException e) { + LOG.info(currentTask.getTaskType() + " task was not accepted for execution.", e); + } catch (RuntimeException e) { + LOG.info(currentTask.getTaskType() + " task encountered exception ", e); + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("No new task to submit for shard %s, currentState %s", + shardInfo.getShardId(), + currentState.toString())); + } + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Previous " + currentTask.getTaskType() + " task still pending for shard " + + shardInfo.getShardId() + " since " + (System.currentTimeMillis() - currentTaskSubmitTime) + + " ms ago" + ". 
Not submitting new task."); + } + } + + return submittedNewTask; + } + + public boolean isSkipShardSyncAtWorkerInitializationIfLeasesExist() { + return skipShardSyncAtWorkerInitializationIfLeasesExist; + } + + private enum TaskOutcome { + SUCCESSFUL, END_OF_SHARD, NOT_COMPLETE, FAILURE + } + + private TaskOutcome determineTaskOutcome() { + try { + TaskResult result = future.get(); + if (result.getException() == null) { + if (result.isShardEndReached()) { + return TaskOutcome.END_OF_SHARD; + } + return TaskOutcome.SUCCESSFUL; + } + logTaskException(result); + } catch (Exception e) { + throw new RuntimeException(e); + } finally { + // Setting future to null so we don't misinterpret task completion status in case of exceptions + future = null; + } + return TaskOutcome.FAILURE; + } + + private void logTaskException(TaskResult taskResult) { + if (LOG.isDebugEnabled()) { + Exception taskException = taskResult.getException(); + if (taskException instanceof BlockedOnParentShardException) { + // No need to log the stack trace for this exception (it is very specific). + LOG.debug("Shard " + shardInfo.getShardId() + " is blocked on completion of parent shard."); + } else { + LOG.debug("Caught exception running " + currentTask.getTaskType() + " task: ", + taskResult.getException()); + } + } + } + + /** + * Requests the shutdown of the this ShardConsumer. This should give the record processor a chance to checkpoint + * before being shutdown. + * + * @param shutdownNotification used to signal that the record processor has been given the chance to shutdown. + */ + void notifyShutdownRequested(ShutdownNotification shutdownNotification) { + this.shutdownNotification = shutdownNotification; + markForShutdown(ShutdownReason.REQUESTED); + } + + /** + * Shutdown this ShardConsumer (including invoking the RecordProcessor shutdown API). + * This is called by Worker when it loses responsibility for a shard. 
+ * + * @return true if shutdown is complete (false if shutdown is still in progress) + */ + synchronized boolean beginShutdown() { + markForShutdown(ShutdownReason.ZOMBIE); + checkAndSubmitNextTask(); + + return isShutdown(); + } + + synchronized void markForShutdown(ShutdownReason reason) { + // ShutdownReason.ZOMBIE takes precedence over TERMINATE (we won't be able to save checkpoint at end of shard) + if (shutdownReason == null || shutdownReason.canTransitionTo(reason)) { + shutdownReason = reason; + } + } + + /** + * Used (by Worker) to check if this ShardConsumer instance has been shutdown + * RecordProcessor shutdown() has been invoked, as appropriate. + * + * @return true if shutdown is complete + */ + boolean isShutdown() { + return currentState.isTerminal(); + } + + /** + * @return the shutdownReason + */ + ShutdownReason getShutdownReason() { + return shutdownReason; + } + + /** + * Figure out next task to run based on current state, task, and shutdown context. + * + * @return Return next task to run + */ + private ITask getNextTask() { + ITask nextTask = currentState.createTask(this); + + if (nextTask == null) { + return null; + } else { + return new MetricsCollectingTaskDecorator(nextTask, metricsFactory); + } + } + + /** + * Note: This is a private/internal method with package level access solely for testing purposes. + * Update state based on information about: task success, current state, and shutdown info. 
+ * + * @param taskOutcome The outcome of the last task + */ + void updateState(TaskOutcome taskOutcome) { + if (taskOutcome == TaskOutcome.END_OF_SHARD) { + markForShutdown(ShutdownReason.TERMINATE); + } + if (isShutdownRequested()) { + currentState = currentState.shutdownTransition(shutdownReason); + } else if (taskOutcome == TaskOutcome.SUCCESSFUL) { + if (currentState.getTaskType() == currentTask.getTaskType()) { + currentState = currentState.successTransition(); + } else { + LOG.error("Current State task type of '" + currentState.getTaskType() + + "' doesn't match the current tasks type of '" + currentTask.getTaskType() + + "'. This shouldn't happen, and indicates a programming error. " + + "Unable to safely transition to the next state."); + } + } + // + // Don't change state otherwise + // + + } + + @VisibleForTesting + boolean isShutdownRequested() { + return shutdownReason != null; + } + + /** + * Private/Internal method - has package level access solely for testing purposes. + * + * @return the currentState + */ + ConsumerStates.ShardConsumerState getCurrentState() { + return currentState.getState(); + } + + StreamConfig getStreamConfig() { + return streamConfig; + } + + IRecordProcessor getRecordProcessor() { + return recordProcessor; + } + + RecordProcessorCheckpointer getRecordProcessorCheckpointer() { + return recordProcessorCheckpointer; + } + + ExecutorService getExecutorService() { + return executorService; + } + + ShardInfo getShardInfo() { + return shardInfo; + } + + KinesisDataFetcher getDataFetcher() { + return dataFetcher; + } + + ILeaseManager getLeaseManager() { + return leaseManager; + } + + ICheckpoint getCheckpoint() { + return checkpoint; + } + + long getParentShardPollIntervalMillis() { + return parentShardPollIntervalMillis; + } + + boolean isCleanupLeasesOfCompletedShards() { + return cleanupLeasesOfCompletedShards; + } + + long getTaskBackoffTimeMillis() { + return taskBackoffTimeMillis; + } + + Future getFuture() { + return future; 
+ } + + ShutdownNotification getShutdownNotification() { + return shutdownNotification; + } +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerShutdownNotification.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumerShutdownNotification.java similarity index 57% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerShutdownNotification.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumerShutdownNotification.java index 99505f17..b3792131 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerShutdownNotification.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumerShutdownNotification.java @@ -1,35 +1,19 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.lifecycle; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; import java.util.concurrent.CountDownLatch; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseCoordinator; -import software.amazon.kinesis.processor.ShutdownNotificationAware; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IShutdownNotificationAware; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.impl.LeaseCoordinator; /** * Contains callbacks for completion of stages in a requested record processor shutdown. * */ -@KinesisClientInternalApi -public class ShardConsumerShutdownNotification implements ShutdownNotification { +class ShardConsumerShutdownNotification implements ShutdownNotification { - private final LeaseCoordinator leaseCoordinator; - private final Lease lease; + private final LeaseCoordinator leaseCoordinator; + private final KinesisClientLease lease; private final CountDownLatch shutdownCompleteLatch; private final CountDownLatch notificationCompleteLatch; @@ -45,15 +29,13 @@ public class ShardConsumerShutdownNotification implements ShutdownNotification { * the lease that this shutdown request will free once initial shutdown is complete * @param notificationCompleteLatch * used to inform the caller once the - * {@link ShutdownNotificationAware} object has been + * {@link IShutdownNotificationAware} object has been * notified of the shutdown request. 
* @param shutdownCompleteLatch * used to inform the caller once the record processor is fully shutdown */ - public ShardConsumerShutdownNotification(final LeaseCoordinator leaseCoordinator, - final Lease lease, - final CountDownLatch notificationCompleteLatch, - final CountDownLatch shutdownCompleteLatch) { + ShardConsumerShutdownNotification(LeaseCoordinator leaseCoordinator, KinesisClientLease lease, + CountDownLatch notificationCompleteLatch, CountDownLatch shutdownCompleteLatch) { this.leaseCoordinator = leaseCoordinator; this.lease = lease; this.notificationCompleteLatch = notificationCompleteLatch; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardInfo.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardInfo.java similarity index 64% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardInfo.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardInfo.java index 998c167c..c339e9f9 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardInfo.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardInfo.java @@ -1,39 +1,32 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.leases; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; import java.util.Collection; import java.util.Collections; import java.util.LinkedList; import java.util.List; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.commons.lang.builder.EqualsBuilder; +import org.apache.commons.lang.builder.HashCodeBuilder; -import lombok.Getter; -import lombok.NonNull; -import lombok.ToString; -import lombok.experimental.Accessors; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; /** * Used to pass shard related info among different classes and as a key to the map of shard consumers. 
*/ -@Getter -@Accessors(fluent = true) -@ToString public class ShardInfo { private final String shardId; @@ -54,14 +47,13 @@ public class ShardInfo { * @param checkpoint * the latest checkpoint from lease */ - // TODO: check what values can be null - public ShardInfo(@NonNull final String shardId, - final String concurrencyToken, - final Collection parentShardIds, - final ExtendedSequenceNumber checkpoint) { + public ShardInfo(String shardId, + String concurrencyToken, + Collection parentShardIds, + ExtendedSequenceNumber checkpoint) { this.shardId = shardId; this.concurrencyToken = concurrencyToken; - this.parentShardIds = new LinkedList<>(); + this.parentShardIds = new LinkedList(); if (parentShardIds != null) { this.parentShardIds.addAll(parentShardIds); } @@ -71,13 +63,31 @@ public class ShardInfo { this.checkpoint = checkpoint; } + /** + * The shardId that this ShardInfo contains data about + * + * @return the shardId + */ + public String getShardId() { + return shardId; + } + + /** + * Concurrency token for the lease that this shard is part of + * + * @return the concurrencyToken + */ + public String getConcurrencyToken() { + return concurrencyToken; + } + /** * A list of shards that are parents of this shard. This may be empty if the shard has no parents. * * @return a list of shardId's that are parents of this shard, or empty if the shard has no parents. 
*/ - public List parentShardIds() { - return new LinkedList<>(parentShardIds); + protected List getParentShardIds() { + return new LinkedList(parentShardIds); } /** @@ -85,7 +95,7 @@ public class ShardInfo { * * @return completion status of the shard */ - public boolean isCompleted() { + protected boolean isCompleted() { return ExtendedSequenceNumber.SHARD_END.equals(checkpoint); } @@ -122,4 +132,13 @@ public class ShardInfo { } + + @Override + public String toString() { + return "ShardInfo [shardId=" + shardId + ", concurrencyToken=" + concurrencyToken + ", parentShardIds=" + + parentShardIds + ", checkpoint=" + checkpoint + "]"; + } + + + } diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardPrioritization.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardPrioritization.java new file mode 100644 index 00000000..54f7517d --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardPrioritization.java @@ -0,0 +1,19 @@ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.List; + +/** + * Provides logic to prioritize or filter shards before their execution. + */ +public interface ShardPrioritization { + + /** + * Returns new list of shards ordered based on their priority. + * Resulted list may have fewer shards compared to original list + * + * @param original + * list of shards needed to be prioritized + * @return new list that contains only shards that should be processed + */ + List prioritize(List original); +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTask.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTask.java new file mode 100644 index 00000000..ddfb8459 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTask.java @@ -0,0 +1,92 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. 
or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; + +/** + * This task syncs leases/activies with shards of the stream. + * It will create new leases/activites when it discovers new shards (e.g. setup/resharding). + * It will clean up leases/activities for shards that have been completely processed (if + * cleanupLeasesUponShardCompletion is true). + */ +class ShardSyncTask implements ITask { + + private static final Log LOG = LogFactory.getLog(ShardSyncTask.class); + + private final IKinesisProxy kinesisProxy; + private final ILeaseManager leaseManager; + private InitialPositionInStreamExtended initialPosition; + private final boolean cleanupLeasesUponShardCompletion; + private final long shardSyncTaskIdleTimeMillis; + private final TaskType taskType = TaskType.SHARDSYNC; + + /** + * @param kinesisProxy Used to fetch information about the stream (e.g. shard list) + * @param leaseManager Used to fetch and create leases + * @param initialPositionInStream One of LATEST, TRIM_HORIZON or AT_TIMESTAMP. 
Amazon Kinesis Client Library will + * start processing records from this point in the stream (when an application starts up for the first time) + * except for shards that already have a checkpoint (and their descendant shards). + */ + ShardSyncTask(IKinesisProxy kinesisProxy, + ILeaseManager leaseManager, + InitialPositionInStreamExtended initialPositionInStream, + boolean cleanupLeasesUponShardCompletion, + long shardSyncTaskIdleTimeMillis) { + this.kinesisProxy = kinesisProxy; + this.leaseManager = leaseManager; + this.initialPosition = initialPositionInStream; + this.cleanupLeasesUponShardCompletion = cleanupLeasesUponShardCompletion; + this.shardSyncTaskIdleTimeMillis = shardSyncTaskIdleTimeMillis; + } + + /* (non-Javadoc) + * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ITask#call() + */ + @Override + public TaskResult call() { + Exception exception = null; + + try { + ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy, + leaseManager, + initialPosition, + cleanupLeasesUponShardCompletion); + if (shardSyncTaskIdleTimeMillis > 0) { + Thread.sleep(shardSyncTaskIdleTimeMillis); + } + } catch (Exception e) { + LOG.error("Caught exception while sync'ing Kinesis shards and leases", e); + exception = e; + } + + return new TaskResult(exception); + } + + + /* (non-Javadoc) + * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ITask#getTaskType() + */ + @Override + public TaskType getTaskType() { + return taskType; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTaskManager.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTaskManager.java new file mode 100644 index 00000000..c1bfae76 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTaskManager.java @@ -0,0 +1,117 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; + +/** + * The ShardSyncTaskManager is used to track the task to sync shards with leases (create leases for new + * Kinesis shards, remove obsolete leases). We'll have at most one outstanding sync task at any time. + * Worker will use this class to kick off a sync task when it finds shards which have been completely processed. + */ +class ShardSyncTaskManager { + + private static final Log LOG = LogFactory.getLog(ShardSyncTaskManager.class); + + private ITask currentTask; + private Future future; + private final IKinesisProxy kinesisProxy; + private final ILeaseManager leaseManager; + private final IMetricsFactory metricsFactory; + private final ExecutorService executorService; + private final InitialPositionInStreamExtended initialPositionInStream; + private boolean cleanupLeasesUponShardCompletion; + private final long shardSyncIdleTimeMillis; + + + /** + * Constructor. 
+ * + * @param kinesisProxy Proxy used to fetch streamInfo (shards) + * @param leaseManager Lease manager (used to list and create leases for shards) + * @param initialPositionInStream Initial position in stream + * @param cleanupLeasesUponShardCompletion Clean up leases for shards that we've finished processing (don't wait + * until they expire) + * @param shardSyncIdleTimeMillis Time between tasks to sync leases and Kinesis shards + * @param metricsFactory Metrics factory + * @param executorService ExecutorService to execute the shard sync tasks + */ + ShardSyncTaskManager(final IKinesisProxy kinesisProxy, + final ILeaseManager leaseManager, + final InitialPositionInStreamExtended initialPositionInStream, + final boolean cleanupLeasesUponShardCompletion, + final long shardSyncIdleTimeMillis, + final IMetricsFactory metricsFactory, + ExecutorService executorService) { + this.kinesisProxy = kinesisProxy; + this.leaseManager = leaseManager; + this.metricsFactory = metricsFactory; + this.cleanupLeasesUponShardCompletion = cleanupLeasesUponShardCompletion; + this.shardSyncIdleTimeMillis = shardSyncIdleTimeMillis; + this.executorService = executorService; + this.initialPositionInStream = initialPositionInStream; + } + + synchronized boolean syncShardAndLeaseInfo(Set closedShardIds) { + return checkAndSubmitNextTask(closedShardIds); + } + + private synchronized boolean checkAndSubmitNextTask(Set closedShardIds) { + boolean submittedNewTask = false; + if ((future == null) || future.isCancelled() || future.isDone()) { + if ((future != null) && future.isDone()) { + try { + TaskResult result = future.get(); + if (result.getException() != null) { + LOG.error("Caught exception running " + currentTask.getTaskType() + " task: ", + result.getException()); + } + } catch (InterruptedException | ExecutionException e) { + LOG.warn(currentTask.getTaskType() + " task encountered exception.", e); + } + } + + currentTask = + new MetricsCollectingTaskDecorator(new 
ShardSyncTask(kinesisProxy, + leaseManager, + initialPositionInStream, + cleanupLeasesUponShardCompletion, + shardSyncIdleTimeMillis), metricsFactory); + future = executorService.submit(currentTask); + submittedNewTask = true; + if (LOG.isDebugEnabled()) { + LOG.debug("Submitted new " + currentTask.getTaskType() + " task."); + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Previous " + currentTask.getTaskType() + " task still pending. Not submitting new task."); + } + } + + return submittedNewTask; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncer.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncer.java new file mode 100644 index 00000000..52944200 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncer.java @@ -0,0 +1,840 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.KinesisClientLibIOException; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; +import com.amazonaws.services.kinesis.model.Shard; + +/** + * Helper class to sync leases with shards of the Kinesis stream. + * It will create new leases/activities when it discovers new Kinesis shards (bootstrap/resharding). + * It deletes leases for shards that have been trimmed from Kinesis, or if we've completed processing it + * and begun processing it's child shards. + */ +class ShardSyncer { + + private static final Log LOG = LogFactory.getLog(ShardSyncer.class); + + /** + * Note constructor is private: We use static synchronized methods - this is a utility class. 
+ */ + private ShardSyncer() { + } + + static synchronized void bootstrapShardLeases(IKinesisProxy kinesisProxy, + ILeaseManager leaseManager, + InitialPositionInStreamExtended initialPositionInStream, + boolean cleanupLeasesOfCompletedShards) + throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException { + syncShardLeases(kinesisProxy, leaseManager, initialPositionInStream, cleanupLeasesOfCompletedShards); + } + + /** + * Check and create leases for any new shards (e.g. following a reshard operation). + * + * @param kinesisProxy + * @param leaseManager + * @param initialPositionInStream + * @param expectedClosedShardId If this is not null, we will assert that the shard list we get from Kinesis + * shows this shard to be closed (e.g. parent shard must be closed after a reshard operation). + * If it is open, we assume this is an race condition around a reshard event and throw + * a KinesisClientLibIOException so client can backoff and retry later. + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws KinesisClientLibIOException + */ + static synchronized void checkAndCreateLeasesForNewShards(IKinesisProxy kinesisProxy, + ILeaseManager leaseManager, + InitialPositionInStreamExtended initialPositionInStream, + boolean cleanupLeasesOfCompletedShards) + throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException { + syncShardLeases(kinesisProxy, leaseManager, initialPositionInStream, cleanupLeasesOfCompletedShards); + } + + /** + * Sync leases with Kinesis shards (e.g. at startup, or when we reach end of a shard). + * + * @param kinesisProxy + * @param leaseManager + * @param expectedClosedShardId If this is not null, we will assert that the shard list we get from Kinesis + * does not show this shard to be open (e.g. parent shard must be closed after a reshard operation). 
+ * If it is still open, we assume this is a race condition around a reshard event and + * throw a KinesisClientLibIOException so client can backoff and retry later. If the shard doesn't exist in + * Kinesis at all, we assume this is an old/expired shard and continue with the sync operation. + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws KinesisClientLibIOException + */ + // CHECKSTYLE:OFF CyclomaticComplexity + private static synchronized void syncShardLeases(IKinesisProxy kinesisProxy, + ILeaseManager leaseManager, + InitialPositionInStreamExtended initialPosition, + boolean cleanupLeasesOfCompletedShards) + throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException { + List shards = getShardList(kinesisProxy); + LOG.debug("Num shards: " + shards.size()); + + Map shardIdToShardMap = constructShardIdToShardMap(shards); + Map> shardIdToChildShardIdsMap = constructShardIdToChildShardIdsMap(shardIdToShardMap); + assertAllParentShardsAreClosed(shardIdToChildShardIdsMap, shardIdToShardMap); + + List currentLeases = leaseManager.listLeases(); + + List newLeasesToCreate = determineNewLeasesToCreate(shards, currentLeases, initialPosition); + LOG.debug("Num new leases to create: " + newLeasesToCreate.size()); + for (KinesisClientLease lease : newLeasesToCreate) { + long startTimeMillis = System.currentTimeMillis(); + boolean success = false; + try { + leaseManager.createLeaseIfNotExists(lease); + success = true; + } finally { + MetricsHelper.addSuccessAndLatency("CreateLease", startTimeMillis, success, MetricsLevel.DETAILED); + } + } + + List trackedLeases = new ArrayList<>(); + if (currentLeases != null) { + trackedLeases.addAll(currentLeases); + } + trackedLeases.addAll(newLeasesToCreate); + cleanupGarbageLeases(shards, trackedLeases, kinesisProxy, leaseManager); + if (cleanupLeasesOfCompletedShards) { + 
cleanupLeasesOfFinishedShards(currentLeases, + shardIdToShardMap, + shardIdToChildShardIdsMap, + trackedLeases, + leaseManager); + } + } + // CHECKSTYLE:ON CyclomaticComplexity + + /** Helper method to detect a race condition between fetching the shards via paginated DescribeStream calls + * and a reshard operation. + * @param shardIdToChildShardIdsMap + * @param shardIdToShardMap + * @throws KinesisClientLibIOException + */ + private static void assertAllParentShardsAreClosed(Map> shardIdToChildShardIdsMap, + Map shardIdToShardMap) throws KinesisClientLibIOException { + for (String parentShardId : shardIdToChildShardIdsMap.keySet()) { + Shard parentShard = shardIdToShardMap.get(parentShardId); + if ((parentShardId == null) || (parentShard.getSequenceNumberRange().getEndingSequenceNumber() == null)) { + throw new KinesisClientLibIOException("Parent shardId " + parentShardId + " is not closed. " + + "This can happen due to a race condition between describeStream and a reshard operation."); + } + } + } + + /** + * Helper method to create a shardId->KinesisClientLease map. + * Note: This has package level access for testing purposes only. + * @param trackedLeaseList + * @return + */ + static Map constructShardIdToKCLLeaseMap(List trackedLeaseList) { + Map trackedLeasesMap = new HashMap<>(); + for (KinesisClientLease lease : trackedLeaseList) { + trackedLeasesMap.put(lease.getLeaseKey(), lease); + } + return trackedLeasesMap; + } + + /** + * Note: this has package level access for testing purposes. + * Useful for asserting that we don't have an incomplete shard list following a reshard operation. + * We verify that if the shard is present in the shard list, it is closed and its hash key range + * is covered by its child shards. 
+ * @param shards List of all Kinesis shards + * @param shardIdsOfClosedShards Id of the shard which is expected to be closed + * @return ShardIds of child shards (children of the expectedClosedShard) + * @throws KinesisClientLibIOException + */ + static synchronized void assertClosedShardsAreCoveredOrAbsent(Map shardIdToShardMap, + Map> shardIdToChildShardIdsMap, + Set shardIdsOfClosedShards) throws KinesisClientLibIOException { + String exceptionMessageSuffix = "This can happen if we constructed the list of shards " + + " while a reshard operation was in progress."; + + for (String shardId : shardIdsOfClosedShards) { + Shard shard = shardIdToShardMap.get(shardId); + if (shard == null) { + LOG.info("Shard " + shardId + " is not present in Kinesis anymore."); + continue; + } + + String endingSequenceNumber = shard.getSequenceNumberRange().getEndingSequenceNumber(); + if (endingSequenceNumber == null) { + throw new KinesisClientLibIOException("Shard " + shardIdsOfClosedShards + + " is not closed. " + exceptionMessageSuffix); + } + + Set childShardIds = shardIdToChildShardIdsMap.get(shardId); + if (childShardIds == null) { + throw new KinesisClientLibIOException("Incomplete shard list: Closed shard " + shardId + + " has no children." 
+ exceptionMessageSuffix); + } + + assertHashRangeOfClosedShardIsCovered(shard, shardIdToShardMap, childShardIds); + } + } + + private static synchronized void assertHashRangeOfClosedShardIsCovered(Shard closedShard, + Map shardIdToShardMap, + Set childShardIds) throws KinesisClientLibIOException { + + BigInteger startingHashKeyOfClosedShard = new BigInteger(closedShard.getHashKeyRange().getStartingHashKey()); + BigInteger endingHashKeyOfClosedShard = new BigInteger(closedShard.getHashKeyRange().getEndingHashKey()); + BigInteger minStartingHashKeyOfChildren = null; + BigInteger maxEndingHashKeyOfChildren = null; + + for (String childShardId : childShardIds) { + Shard childShard = shardIdToShardMap.get(childShardId); + BigInteger startingHashKey = new BigInteger(childShard.getHashKeyRange().getStartingHashKey()); + if ((minStartingHashKeyOfChildren == null) + || (startingHashKey.compareTo(minStartingHashKeyOfChildren) < 0)) { + minStartingHashKeyOfChildren = startingHashKey; + } + BigInteger endingHashKey = new BigInteger(childShard.getHashKeyRange().getEndingHashKey()); + if ((maxEndingHashKeyOfChildren == null) + || (endingHashKey.compareTo(maxEndingHashKeyOfChildren) > 0)) { + maxEndingHashKeyOfChildren = endingHashKey; + } + } + + if ((minStartingHashKeyOfChildren == null) || (maxEndingHashKeyOfChildren == null) + || (minStartingHashKeyOfChildren.compareTo(startingHashKeyOfClosedShard) > 0) + || (maxEndingHashKeyOfChildren.compareTo(endingHashKeyOfClosedShard) < 0)) { + throw new KinesisClientLibIOException("Incomplete shard list: hash key range of shard " + + closedShard.getShardId() + " is not covered by its child shards."); + } + + } + + /** + * Helper method to construct shardId->setOfChildShardIds map. + * Note: This has package access for testing purposes only. 
+ * @param shardIdToShardMap + * @return + */ + static Map> constructShardIdToChildShardIdsMap( + Map shardIdToShardMap) { + Map> shardIdToChildShardIdsMap = new HashMap<>(); + for (Map.Entry entry : shardIdToShardMap.entrySet()) { + String shardId = entry.getKey(); + Shard shard = entry.getValue(); + String parentShardId = shard.getParentShardId(); + if ((parentShardId != null) && (shardIdToShardMap.containsKey(parentShardId))) { + Set childShardIds = shardIdToChildShardIdsMap.get(parentShardId); + if (childShardIds == null) { + childShardIds = new HashSet(); + shardIdToChildShardIdsMap.put(parentShardId, childShardIds); + } + childShardIds.add(shardId); + } + + String adjacentParentShardId = shard.getAdjacentParentShardId(); + if ((adjacentParentShardId != null) && (shardIdToShardMap.containsKey(adjacentParentShardId))) { + Set childShardIds = shardIdToChildShardIdsMap.get(adjacentParentShardId); + if (childShardIds == null) { + childShardIds = new HashSet(); + shardIdToChildShardIdsMap.put(adjacentParentShardId, childShardIds); + } + childShardIds.add(shardId); + } + } + return shardIdToChildShardIdsMap; + } + + private static List getShardList(IKinesisProxy kinesisProxy) throws KinesisClientLibIOException { + List shards = kinesisProxy.getShardList(); + if (shards == null) { + throw new KinesisClientLibIOException( + "Stream is not in ACTIVE OR UPDATING state - will retry getting the shard list."); + } + return shards; + } + + /** + * Determine new leases to create and their initial checkpoint. + * Note: Package level access only for testing purposes. + * + * For each open (no ending sequence number) shard that doesn't already have a lease, + * determine if it is a descendent of any shard which is or will be processed (e.g. for which a lease exists): + * If so, set checkpoint of the shard to TrimHorizon and also create leases for ancestors if needed. + * If not, set checkpoint of the shard to the initial position specified by the client. 
+ * To check if we need to create leases for ancestors, we use the following rules: + * * If we began (or will begin) processing data for a shard, then we must reach end of that shard before + * we begin processing data from any of its descendants. + * * A shard does not start processing data until data from all its parents has been processed. + * Note, if the initial position is LATEST and a shard has two parents and only one is a descendant - we'll create + * leases corresponding to both the parents - the parent shard which is not a descendant will have + * its checkpoint set to Latest. + * + * We assume that if there is an existing lease for a shard, then either: + * * we have previously created a lease for its parent (if it was needed), or + * * the parent shard has expired. + * + * For example: + * Shard structure (each level depicts a stream segment): + * 0 1 2 3 4 5- shards till epoch 102 + * \ / \ / | | + * 6 7 4 5- shards from epoch 103 - 205 + * \ / | /\ + * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) + * Current leases: (3, 4, 5) + * New leases to create: (2, 6, 7, 8, 9, 10) + * + * The leases returned are sorted by the starting sequence number - following the same order + * when persisting the leases in DynamoDB will ensure that we recover gracefully if we fail + * before creating all the leases. + * + * @param shards List of all shards in Kinesis (we'll create new leases based on this set) + * @param currentLeases List of current leases + * @param initialPosition One of LATEST, TRIM_HORIZON, or AT_TIMESTAMP. We'll start fetching records from that + * location in the shard (when an application starts up for the first time - and there are no checkpoints). 
+ * @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard + */ + static List determineNewLeasesToCreate(List shards, + List currentLeases, + InitialPositionInStreamExtended initialPosition) { + Map shardIdToNewLeaseMap = new HashMap(); + Map shardIdToShardMapOfAllKinesisShards = constructShardIdToShardMap(shards); + + Set shardIdsOfCurrentLeases = new HashSet(); + for (KinesisClientLease lease : currentLeases) { + shardIdsOfCurrentLeases.add(lease.getLeaseKey()); + LOG.debug("Existing lease: " + lease); + } + + List openShards = getOpenShards(shards); + Map memoizationContext = new HashMap<>(); + + // Iterate over the open shards and find those that don't have any lease entries. + for (Shard shard : openShards) { + String shardId = shard.getShardId(); + LOG.debug("Evaluating leases for open shard " + shardId + " and its ancestors."); + if (shardIdsOfCurrentLeases.contains(shardId)) { + LOG.debug("Lease for shardId " + shardId + " already exists. Not creating a lease"); + } else { + LOG.debug("Need to create a lease for shardId " + shardId); + KinesisClientLease newLease = newKCLLease(shard); + boolean isDescendant = + checkIfDescendantAndAddNewLeasesForAncestors(shardId, + initialPosition, + shardIdsOfCurrentLeases, + shardIdToShardMapOfAllKinesisShards, + shardIdToNewLeaseMap, + memoizationContext); + + /** + * If the shard is a descendant and the specified initial position is AT_TIMESTAMP, then the + * checkpoint should be set to AT_TIMESTAMP, else to TRIM_HORIZON. For AT_TIMESTAMP, we will add a + * lease just like we do for TRIM_HORIZON. However we will only return back records with server-side + * timestamp at or after the specified initial position timestamp. 
+ * + * Shard structure (each level depicts a stream segment): + * 0 1 2 3 4 5 - shards till epoch 102 + * \ / \ / | | + * 6 7 4 5 - shards from epoch 103 - 205 + * \ / | /\ + * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) + * + * Current leases: empty set + * + * For the above example, suppose the initial position in stream is set to AT_TIMESTAMP with + * timestamp value 206. We will then create new leases for all the shards (with checkpoint set to + * AT_TIMESTAMP), including the ancestor shards with epoch less than 206. However as we begin + * processing the ancestor shards, their checkpoints would be updated to SHARD_END and their leases + * would then be deleted since they won't have records with server-side timestamp at/after 206. And + * after that we will begin processing the descendant shards with epoch at/after 206 and we will + * return the records that meet the timestamp requirement for these shards. + */ + if (isDescendant && !initialPosition.getInitialPositionInStream() + .equals(InitialPositionInStream.AT_TIMESTAMP)) { + newLease.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON); + } else { + newLease.setCheckpoint(convertToCheckpoint(initialPosition)); + } + LOG.debug("Set checkpoint of " + newLease.getLeaseKey() + " to " + newLease.getCheckpoint()); + shardIdToNewLeaseMap.put(shardId, newLease); + } + } + + List newLeasesToCreate = new ArrayList(); + newLeasesToCreate.addAll(shardIdToNewLeaseMap.values()); + Comparator startingSequenceNumberComparator = + new StartingSequenceNumberAndShardIdBasedComparator(shardIdToShardMapOfAllKinesisShards); + Collections.sort(newLeasesToCreate, startingSequenceNumberComparator); + return newLeasesToCreate; + } + + /** + * Note: Package level access for testing purposes only. + * Check if this shard is a descendant of a shard that is (or will be) processed. + * Create leases for the ancestors of this shard as required. + * See javadoc of determineNewLeasesToCreate() for rules and example. 
+ * + * @param shardId The shardId to check. + * @param initialPosition One of LATEST, TRIM_HORIZON, or AT_TIMESTAMP. We'll start fetching records from that + * location in the shard (when an application starts up for the first time - and there are no checkpoints). + * @param shardIdsOfCurrentLeases The shardIds for the current leases. + * @param shardIdToShardMapOfAllKinesisShards ShardId->Shard map containing all shards obtained via DescribeStream. + * @param shardIdToLeaseMapOfNewShards Add lease POJOs corresponding to ancestors to this map. + * @param memoizationContext Memoization of shards that have been evaluated as part of the evaluation + * @return true if the shard is a descendant of any current shard (lease already exists) + */ + // CHECKSTYLE:OFF CyclomaticComplexity + static boolean checkIfDescendantAndAddNewLeasesForAncestors(String shardId, + InitialPositionInStreamExtended initialPosition, + Set shardIdsOfCurrentLeases, + Map shardIdToShardMapOfAllKinesisShards, + Map shardIdToLeaseMapOfNewShards, + Map memoizationContext) { + + Boolean previousValue = memoizationContext.get(shardId); + if (previousValue != null) { + return previousValue; + } + + boolean isDescendant = false; + Shard shard; + Set parentShardIds; + Set descendantParentShardIds = new HashSet(); + + if ((shardId != null) && (shardIdToShardMapOfAllKinesisShards.containsKey(shardId))) { + if (shardIdsOfCurrentLeases.contains(shardId)) { + // This shard is a descendant of a current shard. + isDescendant = true; + // We don't need to add leases of its ancestors, + // because we'd have done it when creating a lease for this shard. + } else { + shard = shardIdToShardMapOfAllKinesisShards.get(shardId); + parentShardIds = getParentShardIds(shard, shardIdToShardMapOfAllKinesisShards); + for (String parentShardId : parentShardIds) { + // Check if the parent is a descendant, and include its ancestors. 
+ if (checkIfDescendantAndAddNewLeasesForAncestors(parentShardId, + initialPosition, + shardIdsOfCurrentLeases, + shardIdToShardMapOfAllKinesisShards, + shardIdToLeaseMapOfNewShards, + memoizationContext)) { + isDescendant = true; + descendantParentShardIds.add(parentShardId); + LOG.debug("Parent shard " + parentShardId + " is a descendant."); + } else { + LOG.debug("Parent shard " + parentShardId + " is NOT a descendant."); + } + } + + // If this is a descendant, create leases for its parent shards (if they don't exist) + if (isDescendant) { + for (String parentShardId : parentShardIds) { + if (!shardIdsOfCurrentLeases.contains(parentShardId)) { + LOG.debug("Need to create a lease for shardId " + parentShardId); + KinesisClientLease lease = shardIdToLeaseMapOfNewShards.get(parentShardId); + if (lease == null) { + lease = newKCLLease(shardIdToShardMapOfAllKinesisShards.get(parentShardId)); + shardIdToLeaseMapOfNewShards.put(parentShardId, lease); + } + + if (descendantParentShardIds.contains(parentShardId) + && !initialPosition.getInitialPositionInStream() + .equals(InitialPositionInStream.AT_TIMESTAMP)) { + lease.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON); + } else { + lease.setCheckpoint(convertToCheckpoint(initialPosition)); + } + } + } + } else { + // This shard should be included, if the customer wants to process all records in the stream or + // if the initial position is AT_TIMESTAMP. For AT_TIMESTAMP, we will add a lease just like we do + // for TRIM_HORIZON. However we will only return back records with server-side timestamp at or + // after the specified initial position timestamp. 
+ if (initialPosition.getInitialPositionInStream().equals(InitialPositionInStream.TRIM_HORIZON) + || initialPosition.getInitialPositionInStream() + .equals(InitialPositionInStream.AT_TIMESTAMP)) { + isDescendant = true; + } + } + + } + } + + memoizationContext.put(shardId, isDescendant); + return isDescendant; + } + // CHECKSTYLE:ON CyclomaticComplexity + + /** + * Helper method to get parent shardIds of the current shard - includes the parent shardIds if: + * a/ they are not null + * b/ if they exist in the current shard map (i.e. haven't expired) + * + * @param shard Will return parents of this shard + * @param shardIdToShardMapOfAllKinesisShards ShardId->Shard map containing all shards obtained via DescribeStream. + * @return Set of parentShardIds + */ + static Set getParentShardIds(Shard shard, Map shardIdToShardMapOfAllKinesisShards) { + Set parentShardIds = new HashSet(2); + String parentShardId = shard.getParentShardId(); + if ((parentShardId != null) && shardIdToShardMapOfAllKinesisShards.containsKey(parentShardId)) { + parentShardIds.add(parentShardId); + } + String adjacentParentShardId = shard.getAdjacentParentShardId(); + if ((adjacentParentShardId != null) && shardIdToShardMapOfAllKinesisShards.containsKey(adjacentParentShardId)) { + parentShardIds.add(adjacentParentShardId); + } + return parentShardIds; + } + + /** + * Delete leases corresponding to shards that no longer exist in the stream. + * Current scheme: Delete a lease if: + * * the corresponding shard is not present in the list of Kinesis shards, AND + * * the parentShardIds listed in the lease are also not present in the list of Kinesis shards. + * @param shards List of all Kinesis shards (assumed to be a consistent snapshot - when stream is in Active state). + * @param trackedLeases List of + * @param kinesisProxy Kinesis proxy (used to get shard list) + * @param leaseManager + * @throws KinesisClientLibIOException Thrown if we couldn't get a fresh shard list from Kinesis. 
+ * @throws ProvisionedThroughputException + * @throws InvalidStateException + * @throws DependencyException + */ + private static void cleanupGarbageLeases(List shards, + List trackedLeases, + IKinesisProxy kinesisProxy, + ILeaseManager leaseManager) + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException { + Set kinesisShards = new HashSet<>(); + for (Shard shard : shards) { + kinesisShards.add(shard.getShardId()); + } + + // Check if there are leases for non-existent shards + List garbageLeases = new ArrayList<>(); + for (KinesisClientLease lease : trackedLeases) { + if (isCandidateForCleanup(lease, kinesisShards)) { + garbageLeases.add(lease); + } + } + + if (!garbageLeases.isEmpty()) { + LOG.info("Found " + garbageLeases.size() + + " candidate leases for cleanup. Refreshing list of" + + " Kinesis shards to pick up recent/latest shards"); + List currentShardList = getShardList(kinesisProxy); + Set currentKinesisShardIds = new HashSet<>(); + for (Shard shard : currentShardList) { + currentKinesisShardIds.add(shard.getShardId()); + } + + for (KinesisClientLease lease : garbageLeases) { + if (isCandidateForCleanup(lease, currentKinesisShardIds)) { + LOG.info("Deleting lease for shard " + lease.getLeaseKey() + + " as it is not present in Kinesis stream."); + leaseManager.deleteLease(lease); + } + } + } + + } + + /** + * Note: This method has package level access, solely for testing purposes. + * + * @param lease Candidate shard we are considering for deletion. + * @param currentKinesisShardIds + * @return true if neither the shard (corresponding to the lease), nor its parents are present in + * currentKinesisShardIds + * @throws KinesisClientLibIOException Thrown if currentKinesisShardIds contains a parent shard but not the child + * shard (we are evaluating for deletion). 
+ */ + static boolean isCandidateForCleanup(KinesisClientLease lease, Set currentKinesisShardIds) + throws KinesisClientLibIOException { + boolean isCandidateForCleanup = true; + + if (currentKinesisShardIds.contains(lease.getLeaseKey())) { + isCandidateForCleanup = false; + } else { + LOG.info("Found lease for non-existent shard: " + lease.getLeaseKey() + ". Checking its parent shards"); + Set parentShardIds = lease.getParentShardIds(); + for (String parentShardId : parentShardIds) { + + // Throw an exception if the parent shard exists (but the child does not). + // This may be a (rare) race condition between fetching the shard list and Kinesis expiring shards. + if (currentKinesisShardIds.contains(parentShardId)) { + String message = + "Parent shard " + parentShardId + " exists but not the child shard " + + lease.getLeaseKey(); + LOG.info(message); + throw new KinesisClientLibIOException(message); + } + } + } + + return isCandidateForCleanup; + } + + /** + * Private helper method. + * Clean up leases for shards that meet the following criteria: + * a/ the shard has been fully processed (checkpoint is set to SHARD_END) + * b/ we've begun processing all the child shards: we have leases for all child shards and their checkpoint is not + * TRIM_HORIZON. + * + * @param currentLeases List of leases we evaluate for clean up + * @param shardIdToShardMap Map of shardId->Shard (assumed to include all Kinesis shards) + * @param shardIdToChildShardIdsMap Map of shardId->childShardIds (assumed to include all Kinesis shards) + * @param trackedLeases List of all leases we are tracking. 
+ * @param leaseManager Lease manager (will be used to delete leases) + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws KinesisClientLibIOException + */ + private static synchronized void cleanupLeasesOfFinishedShards(Collection currentLeases, + Map shardIdToShardMap, + Map> shardIdToChildShardIdsMap, + List trackedLeases, + ILeaseManager leaseManager) + throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException { + Set shardIdsOfClosedShards = new HashSet<>(); + List leasesOfClosedShards = new ArrayList<>(); + for (KinesisClientLease lease : currentLeases) { + if (lease.getCheckpoint().equals(ExtendedSequenceNumber.SHARD_END)) { + shardIdsOfClosedShards.add(lease.getLeaseKey()); + leasesOfClosedShards.add(lease); + } + } + + if (!leasesOfClosedShards.isEmpty()) { + assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, + shardIdToChildShardIdsMap, + shardIdsOfClosedShards); + Comparator startingSequenceNumberComparator = + new StartingSequenceNumberAndShardIdBasedComparator(shardIdToShardMap); + Collections.sort(leasesOfClosedShards, startingSequenceNumberComparator); + Map trackedLeaseMap = constructShardIdToKCLLeaseMap(trackedLeases); + + for (KinesisClientLease leaseOfClosedShard : leasesOfClosedShards) { + String closedShardId = leaseOfClosedShard.getLeaseKey(); + Set childShardIds = shardIdToChildShardIdsMap.get(closedShardId); + if ((closedShardId != null) && (childShardIds != null) && (!childShardIds.isEmpty())) { + cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager); + } + } + } + } + + /** + * Delete lease for the closed shard. Rules for deletion are: + * a/ the checkpoint for the closed shard is SHARD_END, + * b/ there are leases for all the childShardIds and their checkpoint is NOT TRIM_HORIZON + * Note: This method has package level access solely for testing purposes. 
+ * + * @param closedShardId Identifies the closed shard + * @param childShardIds ShardIds of children of the closed shard + * @param trackedLeases shardId->KinesisClientLease map with all leases we are tracking (should not be null) + * @param leaseManager + * @throws ProvisionedThroughputException + * @throws InvalidStateException + * @throws DependencyException + */ + static synchronized void cleanupLeaseForClosedShard(String closedShardId, + Set childShardIds, + Map trackedLeases, + ILeaseManager leaseManager) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + KinesisClientLease leaseForClosedShard = trackedLeases.get(closedShardId); + List childShardLeases = new ArrayList<>(); + + for (String childShardId : childShardIds) { + KinesisClientLease childLease = trackedLeases.get(childShardId); + if (childLease != null) { + childShardLeases.add(childLease); + } + } + + if ((leaseForClosedShard != null) + && (leaseForClosedShard.getCheckpoint().equals(ExtendedSequenceNumber.SHARD_END)) + && (childShardLeases.size() == childShardIds.size())) { + boolean okayToDelete = true; + for (KinesisClientLease lease : childShardLeases) { + if (lease.getCheckpoint().equals(ExtendedSequenceNumber.TRIM_HORIZON)) { + okayToDelete = false; + break; + } + } + + if (okayToDelete) { + LOG.info("Deleting lease for shard " + leaseForClosedShard.getLeaseKey() + + " as it has been completely processed and processing of child shards has begun."); + leaseManager.deleteLease(leaseForClosedShard); + } + } + } + + /** + * Helper method to create a new KinesisClientLease POJO for a shard. 
+ * Note: Package level access only for testing purposes + * + * @param shard + * @return + */ + static KinesisClientLease newKCLLease(Shard shard) { + KinesisClientLease newLease = new KinesisClientLease(); + newLease.setLeaseKey(shard.getShardId()); + List parentShardIds = new ArrayList(2); + if (shard.getParentShardId() != null) { + parentShardIds.add(shard.getParentShardId()); + } + if (shard.getAdjacentParentShardId() != null) { + parentShardIds.add(shard.getAdjacentParentShardId()); + } + newLease.setParentShardIds(parentShardIds); + newLease.setOwnerSwitchesSinceCheckpoint(0L); + + return newLease; + } + + /** + * Helper method to construct a shardId->Shard map for the specified list of shards. + * + * @param shards List of shards + * @return ShardId->Shard map + */ + static Map constructShardIdToShardMap(List shards) { + Map shardIdToShardMap = new HashMap(); + for (Shard shard : shards) { + shardIdToShardMap.put(shard.getShardId(), shard); + } + return shardIdToShardMap; + } + + /** + * Helper method to return all the open shards for a stream. + * Note: Package level access only for testing purposes. + * + * @param allShards All shards returved via DescribeStream. We assume this to represent a consistent shard list. + * @return List of open shards (shards at the tip of the stream) - may include shards that are not yet active. 
+ */ + static List getOpenShards(List allShards) { + List openShards = new ArrayList(); + for (Shard shard : allShards) { + String endingSequenceNumber = shard.getSequenceNumberRange().getEndingSequenceNumber(); + if (endingSequenceNumber == null) { + openShards.add(shard); + LOG.debug("Found open shard: " + shard.getShardId()); + } + } + return openShards; + } + + private static ExtendedSequenceNumber convertToCheckpoint(InitialPositionInStreamExtended position) { + ExtendedSequenceNumber checkpoint = null; + + if (position.getInitialPositionInStream().equals(InitialPositionInStream.TRIM_HORIZON)) { + checkpoint = ExtendedSequenceNumber.TRIM_HORIZON; + } else if (position.getInitialPositionInStream().equals(InitialPositionInStream.LATEST)) { + checkpoint = ExtendedSequenceNumber.LATEST; + } else if (position.getInitialPositionInStream().equals(InitialPositionInStream.AT_TIMESTAMP)) { + checkpoint = ExtendedSequenceNumber.AT_TIMESTAMP; + } + + return checkpoint; + } + + /** Helper class to compare leases based on starting sequence number of the corresponding shards. + * + */ + private static class StartingSequenceNumberAndShardIdBasedComparator implements Comparator, + Serializable { + + private static final long serialVersionUID = 1L; + + private final Map shardIdToShardMap; + + /** + * @param shardIdToShardMapOfAllKinesisShards + */ + public StartingSequenceNumberAndShardIdBasedComparator(Map shardIdToShardMapOfAllKinesisShards) { + shardIdToShardMap = shardIdToShardMapOfAllKinesisShards; + } + + /** + * Compares two leases based on the starting sequence number of corresponding shards. + * If shards are not found in the shardId->shard map supplied, we do a string comparison on the shardIds. 
+ * We assume that lease1 and lease2 are: + * a/ not null, + * b/ shards (if found) have non-null starting sequence numbers + * + * {@inheritDoc} + */ + @Override + public int compare(KinesisClientLease lease1, KinesisClientLease lease2) { + int result = 0; + String shardId1 = lease1.getLeaseKey(); + String shardId2 = lease2.getLeaseKey(); + Shard shard1 = shardIdToShardMap.get(shardId1); + Shard shard2 = shardIdToShardMap.get(shardId2); + + // If we found shards for the two leases, use comparison of the starting sequence numbers + if ((shard1 != null) && (shard2 != null)) { + BigInteger sequenceNumber1 = + new BigInteger(shard1.getSequenceNumberRange().getStartingSequenceNumber()); + BigInteger sequenceNumber2 = + new BigInteger(shard2.getSequenceNumberRange().getStartingSequenceNumber()); + result = sequenceNumber1.compareTo(sequenceNumber2); + } + + if (result == 0) { + result = shardId1.compareTo(shardId2); + } + + return result; + } + + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownFuture.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownFuture.java new file mode 100644 index 00000000..8ee96537 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownFuture.java @@ -0,0 +1,155 @@ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +/** + * Used as a response from the {@link Worker#requestShutdown()} to allow callers to wait until shutdown is complete. 
+ */ +class ShutdownFuture implements Future { + + private static final Log log = LogFactory.getLog(ShutdownFuture.class); + + private final CountDownLatch shutdownCompleteLatch; + private final CountDownLatch notificationCompleteLatch; + private final Worker worker; + + ShutdownFuture(CountDownLatch shutdownCompleteLatch, CountDownLatch notificationCompleteLatch, Worker worker) { + this.shutdownCompleteLatch = shutdownCompleteLatch; + this.notificationCompleteLatch = notificationCompleteLatch; + this.worker = worker; + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + throw new UnsupportedOperationException("Cannot cancel a shutdown process"); + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return isWorkerShutdownComplete(); + } + + private boolean isWorkerShutdownComplete() { + return worker.isShutdownComplete() || worker.getShardInfoShardConsumerMap().isEmpty(); + } + + private long outstandingRecordProcessors(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + + final long startNanos = System.nanoTime(); + + // + // Awaiting for all ShardConsumer/RecordProcessors to be notified that a shutdown has been requested. + // There is the possibility of a race condition where a lease is terminated after the shutdown request + // notification is started, but before the ShardConsumer is sent the notification. In this case the + // ShardConsumer would start the lease loss shutdown, and may never call the notification methods. 
+ // + if (!notificationCompleteLatch.await(timeout, unit)) { + long awaitingNotification = notificationCompleteLatch.getCount(); + long awaitingFinalShutdown = shutdownCompleteLatch.getCount(); + log.info("Awaiting " + awaitingNotification + " record processors to complete shutdown notification, and " + + awaitingFinalShutdown + " awaiting final shutdown"); + if (awaitingFinalShutdown != 0) { + // + // The number of record processor awaiting final shutdown should be a superset of the those awaiting + // notification + // + return checkWorkerShutdownMiss(awaitingFinalShutdown); + } + } + + long remaining = remainingTimeout(timeout, unit, startNanos); + throwTimeoutMessageIfExceeded(remaining, "Notification hasn't completed within timeout time."); + + // + // Once all record processors have been notified of the shutdown it is safe to allow the worker to + // start its shutdown behavior. Once shutdown starts it will stop renewer, and drop any remaining leases. + // + worker.shutdown(); + remaining = remainingTimeout(timeout, unit, startNanos); + throwTimeoutMessageIfExceeded(remaining, "Shutdown hasn't completed within timeout time."); + + // + // Want to wait for all the remaining ShardConsumers/RecordProcessor's to complete their final shutdown + // processing. This should really be a no-op since as part of the notification completion the lease for + // ShardConsumer is terminated. 
+ // + if (!shutdownCompleteLatch.await(remaining, TimeUnit.NANOSECONDS)) { + long outstanding = shutdownCompleteLatch.getCount(); + log.info("Awaiting " + outstanding + " record processors to complete final shutdown"); + + return checkWorkerShutdownMiss(outstanding); + } + return 0; + } + + private long remainingTimeout(long timeout, TimeUnit unit, long startNanos) { + long checkNanos = System.nanoTime() - startNanos; + return unit.toNanos(timeout) - checkNanos; + } + + private void throwTimeoutMessageIfExceeded(long remainingNanos, String message) throws TimeoutException { + if (remainingNanos <= 0) { + throw new TimeoutException(message); + } + } + + /** + * This checks to see if the worker has already hit it's shutdown target, while there is outstanding record + * processors. This maybe a little racy due to when the value of outstanding is retrieved. In general though the + * latch should be decremented before the shutdown completion. + * + * @param outstanding + * the number of record processor still awaiting shutdown. + * @return the number of record processors awaiting shutdown, or 0 if the worker believes it's shutdown already. + */ + private long checkWorkerShutdownMiss(long outstanding) { + if (isWorkerShutdownComplete()) { + if (outstanding != 0) { + log.info("Shutdown completed, but shutdownCompleteLatch still had outstanding " + outstanding + + " with a current value of " + shutdownCompleteLatch.getCount() + ". 
shutdownComplete: " + + worker.isShutdownComplete() + " -- Consumer Map: " + + worker.getShardInfoShardConsumerMap().size()); + } + return 0; + } + return outstanding; + } + + @Override + public Void get() throws InterruptedException, ExecutionException { + boolean complete = false; + do { + try { + long outstanding = outstandingRecordProcessors(1, TimeUnit.SECONDS); + complete = outstanding == 0; + log.info("Awaiting " + outstanding + " consumer(s) to finish shutdown."); + } catch (TimeoutException te) { + log.info("Timeout while waiting for completion: " + te.getMessage()); + } + + } while(!complete); + return null; + } + + @Override + public Void get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + long outstanding = outstandingRecordProcessors(timeout, unit); + if (outstanding != 0) { + throw new TimeoutException("Awaiting " + outstanding + " record processors to shutdown."); + } + return null; + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownNotification.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownNotification.java new file mode 100644 index 00000000..928e6900 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownNotification.java @@ -0,0 +1,22 @@ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput; + +/** + * A shutdown request to the ShardConsumer + */ +public interface ShutdownNotification { + /** + * Used to indicate that the record processor has been notified of a requested shutdown, and given the chance to + * checkpoint. + * + */ + void shutdownNotificationComplete(); + + /** + * Used to indicate that the record processor has completed the call to + * {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#shutdown(ShutdownInput)} has + * completed. 
+ */ + void shutdownComplete(); +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownNotificationTask.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownNotificationTask.java new file mode 100644 index 00000000..a689ee43 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownNotificationTask.java @@ -0,0 +1,45 @@ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IShutdownNotificationAware; + +/** + * Notifies record processor of incoming shutdown request, and gives them a chance to checkpoint. + */ +class ShutdownNotificationTask implements ITask { + + private final IRecordProcessor recordProcessor; + private final IRecordProcessorCheckpointer recordProcessorCheckpointer; + private final ShutdownNotification shutdownNotification; + private final ShardInfo shardInfo; + + ShutdownNotificationTask(IRecordProcessor recordProcessor, IRecordProcessorCheckpointer recordProcessorCheckpointer, ShutdownNotification shutdownNotification, ShardInfo shardInfo) { + this.recordProcessor = recordProcessor; + this.recordProcessorCheckpointer = recordProcessorCheckpointer; + this.shutdownNotification = shutdownNotification; + this.shardInfo = shardInfo; + } + + @Override + public TaskResult call() { + try { + if (recordProcessor instanceof IShutdownNotificationAware) { + IShutdownNotificationAware shutdownNotificationAware = (IShutdownNotificationAware) recordProcessor; + try { + shutdownNotificationAware.shutdownRequested(recordProcessorCheckpointer); + } catch (Exception ex) { + return new TaskResult(ex); + } + } + return new TaskResult(null); + } finally { + 
shutdownNotification.shutdownNotificationComplete(); + } + } + + @Override + public TaskType getTaskType() { + return TaskType.SHUTDOWN_NOTIFICATION; + } +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownReason.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownReason.java similarity index 58% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownReason.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownReason.java index 88058aed..8d0dfc80 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownReason.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownReason.java @@ -1,29 +1,26 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.lifecycle; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; -import lombok.AccessLevel; -import lombok.Getter; -import lombok.experimental.Accessors; -import software.amazon.kinesis.processor.ShardRecordProcessor; - -import static software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState; +import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput; +import static com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerStates.ConsumerState; +import static com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerStates.ShardConsumerState; /** - * Reason the ShardRecordProcessor is being shutdown. + * Reason the RecordProcessor is being shutdown. * Used to distinguish between a fail-over vs. a termination (shard is closed and all records have been delivered). * In case of a fail over, applications should NOT checkpoint as part of shutdown, * since another record processor may have already started processing records for that shard. @@ -36,27 +33,25 @@ public enum ShutdownReason { * Applications SHOULD NOT checkpoint their progress (as another record processor may have already started * processing data). */ - LEASE_LOST(3, ShardConsumerState.SHUTTING_DOWN.consumerState()), + ZOMBIE(3, ShardConsumerState.SHUTTING_DOWN.getConsumerState()), /** - * Terminate processing for this ShardRecordProcessor (resharding use case). + * Terminate processing for this RecordProcessor (resharding use case). * Indicates that the shard is closed and all records from the shard have been delivered to the application. * Applications SHOULD checkpoint their progress to indicate that they have successfully processed all records * from this shard and processing of child shards can be started. 
*/ - SHARD_END(2, ShardConsumerState.SHUTTING_DOWN.consumerState()), + TERMINATE(2, ShardConsumerState.SHUTTING_DOWN.getConsumerState()), /** * Indicates that the entire application is being shutdown, and if desired the record processor will be given a * final chance to checkpoint. This state will not trigger a direct call to - * {@link ShardRecordProcessor#shutdown(ShutdownInput)}, but + * {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#shutdown(ShutdownInput)}, but * instead depend on a different interface for backward compatibility. */ - REQUESTED(1, ShardConsumerState.SHUTDOWN_REQUESTED.consumerState()); + REQUESTED(1, ShardConsumerState.SHUTDOWN_REQUESTED.getConsumerState()); private final int rank; - @Getter(AccessLevel.PACKAGE) - @Accessors(fluent = true) private final ConsumerState shutdownState; ShutdownReason(int rank, ConsumerState shutdownState) { @@ -76,4 +71,8 @@ public enum ShutdownReason { } return reason.rank > this.rank; } + + ConsumerState getShutdownState() { + return shutdownState; + } } diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownTask.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownTask.java new file mode 100644 index 00000000..d40fbb0e --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownTask.java @@ -0,0 +1,163 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; +import com.google.common.annotations.VisibleForTesting; + +/** + * Task for invoking the RecordProcessor shutdown() callback. + */ +class ShutdownTask implements ITask { + + private static final Log LOG = LogFactory.getLog(ShutdownTask.class); + + private static final String RECORD_PROCESSOR_SHUTDOWN_METRIC = "RecordProcessor.shutdown"; + + private final ShardInfo shardInfo; + private final IRecordProcessor recordProcessor; + private final RecordProcessorCheckpointer recordProcessorCheckpointer; + private final ShutdownReason reason; + private final IKinesisProxy kinesisProxy; + private final ILeaseManager leaseManager; + private final InitialPositionInStreamExtended initialPositionInStream; + private final boolean cleanupLeasesOfCompletedShards; + private final TaskType taskType = TaskType.SHUTDOWN; + private final long backoffTimeMillis; + + /** + * Constructor. 
+ */ + // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 10 LINES + ShutdownTask(ShardInfo shardInfo, + IRecordProcessor recordProcessor, + RecordProcessorCheckpointer recordProcessorCheckpointer, + ShutdownReason reason, + IKinesisProxy kinesisProxy, + InitialPositionInStreamExtended initialPositionInStream, + boolean cleanupLeasesOfCompletedShards, + ILeaseManager leaseManager, + long backoffTimeMillis) { + this.shardInfo = shardInfo; + this.recordProcessor = recordProcessor; + this.recordProcessorCheckpointer = recordProcessorCheckpointer; + this.reason = reason; + this.kinesisProxy = kinesisProxy; + this.initialPositionInStream = initialPositionInStream; + this.cleanupLeasesOfCompletedShards = cleanupLeasesOfCompletedShards; + this.leaseManager = leaseManager; + this.backoffTimeMillis = backoffTimeMillis; + } + + /* + * Invokes RecordProcessor shutdown() API. + * (non-Javadoc) + * + * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ITask#call() + */ + @Override + public TaskResult call() { + Exception exception = null; + boolean applicationException = false; + + try { + // If we reached end of the shard, set sequence number to SHARD_END. + if (reason == ShutdownReason.TERMINATE) { + recordProcessorCheckpointer.setSequenceNumberAtShardEnd( + recordProcessorCheckpointer.getLargestPermittedCheckpointValue()); + recordProcessorCheckpointer.setLargestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END); + } + + LOG.debug("Invoking shutdown() for shard " + shardInfo.getShardId() + ", concurrencyToken " + + shardInfo.getConcurrencyToken() + ". 
Shutdown reason: " + reason); + final ShutdownInput shutdownInput = new ShutdownInput() + .withShutdownReason(reason) + .withCheckpointer(recordProcessorCheckpointer); + final long recordProcessorStartTimeMillis = System.currentTimeMillis(); + try { + recordProcessor.shutdown(shutdownInput); + ExtendedSequenceNumber lastCheckpointValue = recordProcessorCheckpointer.getLastCheckpointValue(); + + if (reason == ShutdownReason.TERMINATE) { + if ((lastCheckpointValue == null) + || (!lastCheckpointValue.equals(ExtendedSequenceNumber.SHARD_END))) { + throw new IllegalArgumentException("Application didn't checkpoint at end of shard " + + shardInfo.getShardId()); + } + } + LOG.debug("Record processor completed shutdown() for shard " + shardInfo.getShardId()); + } catch (Exception e) { + applicationException = true; + throw e; + } finally { + MetricsHelper.addLatency(RECORD_PROCESSOR_SHUTDOWN_METRIC, recordProcessorStartTimeMillis, + MetricsLevel.SUMMARY); + } + + if (reason == ShutdownReason.TERMINATE) { + LOG.debug("Looking for child shards of shard " + shardInfo.getShardId()); + // create leases for the child shards + ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy, + leaseManager, + initialPositionInStream, + cleanupLeasesOfCompletedShards); + LOG.debug("Finished checking for child shards of shard " + shardInfo.getShardId()); + } + + return new TaskResult(null); + } catch (Exception e) { + if (applicationException) { + LOG.error("Application exception. ", e); + } else { + LOG.error("Caught exception: ", e); + } + exception = e; + // backoff if we encounter an exception. 
+ try { + Thread.sleep(this.backoffTimeMillis); + } catch (InterruptedException ie) { + LOG.debug("Interrupted sleep", ie); + } + } + + return new TaskResult(exception); + } + + /* + * (non-Javadoc) + * + * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ITask#getTaskType() + */ + @Override + public TaskType getTaskType() { + return taskType; + } + + @VisibleForTesting + ShutdownReason getReason() { + return reason; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/StreamConfig.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/StreamConfig.java new file mode 100644 index 00000000..b5c283fb --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/StreamConfig.java @@ -0,0 +1,95 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; + +/** + * Used to capture stream configuration and pass it along. 
+ */ +class StreamConfig { + + private final IKinesisProxy streamProxy; + private final int maxRecords; + private final long idleTimeInMilliseconds; + private final boolean callProcessRecordsEvenForEmptyRecordList; + private InitialPositionInStreamExtended initialPositionInStream; + private final boolean validateSequenceNumberBeforeCheckpointing; + + /** + * @param proxy Used to fetch records and information about the stream + * @param maxRecords Max records to be fetched in a call + * @param idleTimeInMilliseconds Idle time between get calls to the stream + * @param callProcessRecordsEvenForEmptyRecordList Call the IRecordProcessor::processRecords() API even if + * GetRecords returned an empty record list. + * @param validateSequenceNumberBeforeCheckpointing Whether to call Amazon Kinesis to validate sequence numbers + * @param initialPositionInStream Initial position in stream + */ + StreamConfig(IKinesisProxy proxy, + int maxRecords, + long idleTimeInMilliseconds, + boolean callProcessRecordsEvenForEmptyRecordList, + boolean validateSequenceNumberBeforeCheckpointing, + InitialPositionInStreamExtended initialPositionInStream) { + this.streamProxy = proxy; + this.maxRecords = maxRecords; + this.idleTimeInMilliseconds = idleTimeInMilliseconds; + this.callProcessRecordsEvenForEmptyRecordList = callProcessRecordsEvenForEmptyRecordList; + this.validateSequenceNumberBeforeCheckpointing = validateSequenceNumberBeforeCheckpointing; + this.initialPositionInStream = initialPositionInStream; + } + + /** + * @return the streamProxy + */ + IKinesisProxy getStreamProxy() { + return streamProxy; + } + + /** + * @return the maxRecords + */ + int getMaxRecords() { + return maxRecords; + } + + /** + * @return the idleTimeInMilliseconds + */ + long getIdleTimeInMilliseconds() { + return idleTimeInMilliseconds; + } + + /** + * @return the callProcessRecordsEvenForEmptyRecordList + */ + boolean shouldCallProcessRecordsEvenForEmptyRecordList() { + return 
callProcessRecordsEvenForEmptyRecordList; + } + + /** + * @return the initialPositionInStream + */ + InitialPositionInStreamExtended getInitialPositionInStream() { + return initialPositionInStream; + } + + /** + * @return validateSequenceNumberBeforeCheckpointing + */ + boolean shouldValidateSequenceNumberBeforeCheckpointing() { + return validateSequenceNumberBeforeCheckpointing; + } +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskResult.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TaskResult.java similarity index 67% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskResult.java rename to src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TaskResult.java index 8762f07d..cede1167 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskResult.java +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TaskResult.java @@ -1,24 +1,24 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.lifecycle; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; /** * Used to capture information from a task that we want to communicate back to the higher layer. * E.g. exception thrown when executing the task, if we reach end of a shard. */ -public class TaskResult { +class TaskResult { // Did we reach the end of the shard while processing this task. private boolean shardEndReached; @@ -29,7 +29,7 @@ public class TaskResult { /** * @return the shardEndReached */ - public boolean isShardEndReached() { + protected boolean isShardEndReached() { return shardEndReached; } @@ -50,7 +50,7 @@ public class TaskResult { /** * @param e Any exception encountered when running the process task. */ - public TaskResult(Exception e) { + TaskResult(Exception e) { this(e, false); } diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TaskType.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TaskType.java new file mode 100644 index 00000000..32fd1cd2 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TaskType.java @@ -0,0 +1,49 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +/** + * Enumerates types of tasks executed as part of processing a shard. + */ +public enum TaskType { + /** + * Polls and waits until parent shard(s) have been fully processed. + */ + BLOCK_ON_PARENT_SHARDS, + /** + * Initialization of RecordProcessor (and Amazon Kinesis Client Library internal state for a shard). + */ + INITIALIZE, + /** + * Fetching and processing of records. + */ + PROCESS, + /** + * Shutdown of RecordProcessor. + */ + SHUTDOWN, + /** + * Graceful shutdown has been requested, and notification of the record processor will occur. + */ + SHUTDOWN_NOTIFICATION, + /** + * Occurs once the shutdown has been completed + */ + SHUTDOWN_COMPLETE, + /** + * Sync leases/activities corresponding to Kinesis shards. + */ + SHARDSYNC +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ThrottlingReporter.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ThrottlingReporter.java new file mode 100644 index 00000000..f88f131f --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ThrottlingReporter.java @@ -0,0 +1,38 @@ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import lombok.extern.apachecommons.CommonsLog; +import org.apache.commons.logging.Log; + +@RequiredArgsConstructor +@CommonsLog +class ThrottlingReporter { + + private final int maxConsecutiveWarnThrottles; + private final String shardId; + + private int consecutiveThrottles = 0; + + void throttled() { + consecutiveThrottles++; + String message = "Shard '" + shardId + "' has been throttled " + + consecutiveThrottles + " consecutively"; + + if (consecutiveThrottles > maxConsecutiveWarnThrottles) { + getLog().error(message); + } else { + getLog().warn(message); + } + + } + + void success() { + consecutiveThrottles = 0; + } + + protected Log getLog() { + 
return log; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/V1ToV2RecordProcessorAdapter.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/V1ToV2RecordProcessorAdapter.java new file mode 100644 index 00000000..477acb74 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/V1ToV2RecordProcessorAdapter.java @@ -0,0 +1,51 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput; + +/** + * Adapts a V1 {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor IRecordProcessor} + * to V2 {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor IRecordProcessor}. 
+ */ +class V1ToV2RecordProcessorAdapter implements IRecordProcessor { + + private com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor recordProcessor; + + V1ToV2RecordProcessorAdapter( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor recordProcessor) { + this.recordProcessor = recordProcessor; + } + + @Override + public void initialize(InitializationInput initializationInput) { + recordProcessor.initialize(initializationInput.getShardId()); + } + + @Override + public void processRecords(ProcessRecordsInput processRecordsInput) { + recordProcessor.processRecords(processRecordsInput.getRecords(), processRecordsInput.getCheckpointer()); + + } + + @Override + public void shutdown(ShutdownInput shutdownInput) { + recordProcessor.shutdown(shutdownInput.getCheckpointer(), shutdownInput.getShutdownReason()); + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/V1ToV2RecordProcessorFactoryAdapter.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/V1ToV2RecordProcessorFactoryAdapter.java new file mode 100644 index 00000000..57146e64 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/V1ToV2RecordProcessorFactoryAdapter.java @@ -0,0 +1,38 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; + +/** + * Adapts a V1 {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory + * IRecordProcessorFactory} to V2 + * {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory IRecordProcessorFactory}. + */ +class V1ToV2RecordProcessorFactoryAdapter implements IRecordProcessorFactory { + + private com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory factory; + + V1ToV2RecordProcessorFactoryAdapter( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory factory) { + this.factory = factory; + } + + @Override + public IRecordProcessor createProcessor() { + return new V1ToV2RecordProcessorAdapter(factory.createProcessor()); + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/Worker.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/Worker.java new file mode 100644 index 00000000..bf9f4e7d --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/Worker.java @@ -0,0 +1,1126 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.regions.Region; +import com.amazonaws.regions.RegionUtils; +import com.amazonaws.services.cloudwatch.AmazonCloudWatch; +import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient; +import com.amazonaws.services.kinesis.AmazonKinesis; +import com.amazonaws.services.kinesis.AmazonKinesisClient; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IShutdownNotificationAware; +import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxyFactory; +import com.amazonaws.services.kinesis.leases.exceptions.LeasingException; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager; +import com.amazonaws.services.kinesis.metrics.impl.CWMetricsFactory; +import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; +import 
com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * Worker is the high level class that Kinesis applications use to start + * processing data. It initializes and oversees different components (e.g. + * syncing shard and lease information, tracking shard assignments, and + * processing data from the shards). + */ +public class Worker implements Runnable { + + private static final Log LOG = LogFactory.getLog(Worker.class); + + private static final int MAX_INITIALIZATION_ATTEMPTS = 20; + + private WorkerLog wlog = new WorkerLog(); + + private final String applicationName; + private final IRecordProcessorFactory recordProcessorFactory; + private final StreamConfig streamConfig; + private final InitialPositionInStreamExtended initialPosition; + private final ICheckpoint checkpointTracker; + private final long idleTimeInMilliseconds; + // Backoff time when polling to check if application has finished processing + // parent shards + private final long parentShardPollIntervalMillis; + private final ExecutorService executorService; + private final IMetricsFactory metricsFactory; + // Backoff time when running tasks if they encounter exceptions + private final long taskBackoffTimeMillis; + private final long failoverTimeMillis; + + // private final KinesisClientLeaseManager leaseManager; + private final KinesisClientLibLeaseCoordinator leaseCoordinator; + private final ShardSyncTaskManager controlServer; + + private final ShardPrioritization shardPrioritization; + + private volatile boolean shutdown; + private volatile long shutdownStartTimeMillis; + private volatile boolean shutdownComplete = false; + + // Holds consumers for shards the worker is currently tracking. Key is shard + // info, value is ShardConsumer. 
+ private ConcurrentMap shardInfoShardConsumerMap = + new ConcurrentHashMap(); + private final boolean cleanupLeasesUponShardCompletion; + + private final boolean skipShardSyncAtWorkerInitializationIfLeasesExist; + + /** + * Constructor. + * + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @param config Kinesis Client Library configuration + */ + public Worker( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory recordProcessorFactory, + KinesisClientLibConfiguration config) { + this(recordProcessorFactory, config, getExecutorService()); + } + + /** + * Constructor. + * + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @param config Kinesis Client Library configuration + * @param execService ExecutorService to use for processing records (support for multi-threaded + * consumption) + */ + public Worker( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory recordProcessorFactory, + KinesisClientLibConfiguration config, + ExecutorService execService) { + this(recordProcessorFactory, config, new AmazonKinesisClient(config.getKinesisCredentialsProvider(), + config.getKinesisClientConfiguration()), + new AmazonDynamoDBClient(config.getDynamoDBCredentialsProvider(), + config.getDynamoDBClientConfiguration()), + new AmazonCloudWatchClient(config.getCloudWatchCredentialsProvider(), + config.getCloudWatchClientConfiguration()), execService); + } + + /** + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @param config Kinesis Client Library configuration + * @param metricsFactory Metrics factory used to emit metrics + */ + public Worker( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory recordProcessorFactory, + KinesisClientLibConfiguration config, + IMetricsFactory metricsFactory) { + 
this(recordProcessorFactory, config, metricsFactory, getExecutorService()); + } + + /** + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @param config Kinesis Client Library configuration + * @param metricsFactory Metrics factory used to emit metrics + * @param execService ExecutorService to use for processing records (support for multi-threaded + * consumption) + */ + public Worker( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory recordProcessorFactory, + KinesisClientLibConfiguration config, + IMetricsFactory metricsFactory, + ExecutorService execService) { + this(recordProcessorFactory, config, new AmazonKinesisClient(config.getKinesisCredentialsProvider(), + config.getKinesisClientConfiguration()), + new AmazonDynamoDBClient(config.getDynamoDBCredentialsProvider(), + config.getDynamoDBClientConfiguration()), metricsFactory, execService); + } + + /** + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @param config Kinesis Client Library configuration + * @param kinesisClient Kinesis Client used for fetching data + * @param dynamoDBClient DynamoDB client used for checkpoints and tracking leases + * @param cloudWatchClient CloudWatch Client for publishing metrics + */ + public Worker( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory recordProcessorFactory, + KinesisClientLibConfiguration config, + AmazonKinesis kinesisClient, + AmazonDynamoDB dynamoDBClient, + AmazonCloudWatch cloudWatchClient) { + this(recordProcessorFactory, config, kinesisClient, dynamoDBClient, cloudWatchClient, getExecutorService()); + } + + /** + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @param config Kinesis Client Library configuration + * @param kinesisClient Kinesis Client used for fetching data + * @param dynamoDBClient DynamoDB client used 
for checkpoints and tracking leases + * @param cloudWatchClient CloudWatch Client for publishing metrics + * @param execService ExecutorService to use for processing records (support for multi-threaded + * consumption) + */ + public Worker( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory recordProcessorFactory, + KinesisClientLibConfiguration config, + AmazonKinesis kinesisClient, + AmazonDynamoDB dynamoDBClient, + AmazonCloudWatch cloudWatchClient, + ExecutorService execService) { + this(recordProcessorFactory, config, kinesisClient, dynamoDBClient, + getMetricsFactory(cloudWatchClient, config), execService); + } + + /** + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @param config Kinesis Client Library configuration + * @param kinesisClient Kinesis Client used for fetching data + * @param dynamoDBClient DynamoDB client used for checkpoints and tracking leases + * @param metricsFactory Metrics factory used to emit metrics + * @param execService ExecutorService to use for processing records (support for multi-threaded + * consumption) + */ + public Worker( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory recordProcessorFactory, + KinesisClientLibConfiguration config, + AmazonKinesis kinesisClient, + AmazonDynamoDB dynamoDBClient, + IMetricsFactory metricsFactory, + ExecutorService execService) { + this( + config.getApplicationName(), + new V1ToV2RecordProcessorFactoryAdapter(recordProcessorFactory), + new StreamConfig( + new KinesisProxyFactory(config.getKinesisCredentialsProvider(), kinesisClient) + .getProxy(config.getStreamName()), + config.getMaxRecords(), config.getIdleTimeBetweenReadsInMillis(), + config.shouldCallProcessRecordsEvenForEmptyRecordList(), + config.shouldValidateSequenceNumberBeforeCheckpointing(), + config.getInitialPositionInStreamExtended()), + config.getInitialPositionInStreamExtended(), + 
config.getParentShardPollIntervalMillis(), + config.getShardSyncIntervalMillis(), + config.shouldCleanupLeasesUponShardCompletion(), + null, + new KinesisClientLibLeaseCoordinator( + new KinesisClientLeaseManager(config.getTableName(), dynamoDBClient), + config.getWorkerIdentifier(), + config.getFailoverTimeMillis(), + config.getEpsilonMillis(), + config.getMaxLeasesForWorker(), + config.getMaxLeasesToStealAtOneTime(), + metricsFactory) + .withInitialLeaseTableReadCapacity(config.getInitialLeaseTableReadCapacity()) + .withInitialLeaseTableWriteCapacity(config.getInitialLeaseTableWriteCapacity()), + execService, + metricsFactory, + config.getTaskBackoffTimeMillis(), + config.getFailoverTimeMillis(), + config.getSkipShardSyncAtWorkerInitializationIfLeasesExist(), + config.getShardPrioritizationStrategy()); + + // If a region name was explicitly specified, use it as the region for Amazon Kinesis and Amazon DynamoDB. + if (config.getRegionName() != null) { + Region region = RegionUtils.getRegion(config.getRegionName()); + kinesisClient.setRegion(region); + LOG.debug("The region of Amazon Kinesis client has been set to " + config.getRegionName()); + dynamoDBClient.setRegion(region); + LOG.debug("The region of Amazon DynamoDB client has been set to " + config.getRegionName()); + } + // If a dynamoDB endpoint was explicitly specified, use it to set the DynamoDB endpoint. + if (config.getDynamoDBEndpoint() != null) { + dynamoDBClient.setEndpoint(config.getDynamoDBEndpoint()); + LOG.debug("The endpoint of Amazon DynamoDB client has been set to " + config.getDynamoDBEndpoint()); + } + // If a kinesis endpoint was explicitly specified, use it to set the region of kinesis. + if (config.getKinesisEndpoint() != null) { + kinesisClient.setEndpoint(config.getKinesisEndpoint()); + if (config.getRegionName() != null) { + LOG.warn("Received configuration for both region name as " + config.getRegionName() + + ", and Amazon Kinesis endpoint as " + config.getKinesisEndpoint() + + ". 
Amazon Kinesis endpoint will overwrite region name."); + LOG.debug("The region of Amazon Kinesis client has been overwritten to " + config.getKinesisEndpoint()); + } else { + LOG.debug("The region of Amazon Kinesis client has been set to " + config.getKinesisEndpoint()); + } + } + } + + /** + * @param applicationName Name of the Kinesis application + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @param streamConfig Stream configuration + * @param initialPositionInStream One of LATEST, TRIM_HORIZON, or AT_TIMESTAMP. The KinesisClientLibrary will start + * fetching data from this location in the stream when an application starts up for the first time and + * there are no checkpoints. If there are checkpoints, we start from the checkpoint position. + * @param parentShardPollIntervalMillis Wait for this long between polls to check if parent shards are done + * @param shardSyncIdleTimeMillis Time between tasks to sync leases and Kinesis shards + * @param cleanupLeasesUponShardCompletion Clean up shards we've finished processing (don't wait till they expire in + * Kinesis) + * @param checkpoint Used to get/set checkpoints + * @param leaseCoordinator Lease coordinator (coordinates currently owned leases) + * @param execService ExecutorService to use for processing records (support for multi-threaded + * consumption) + * @param metricsFactory Metrics factory used to emit metrics + * @param taskBackoffTimeMillis Backoff period when tasks encounter an exception + * @param shardPrioritization Provides prioritization logic to decide which available shards process first + */ + // NOTE: This has package level access solely for testing + // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 10 LINES + Worker(String applicationName, + IRecordProcessorFactory recordProcessorFactory, + StreamConfig streamConfig, + InitialPositionInStreamExtended initialPositionInStream, + long parentShardPollIntervalMillis, + long 
shardSyncIdleTimeMillis, + boolean cleanupLeasesUponShardCompletion, + ICheckpoint checkpoint, + KinesisClientLibLeaseCoordinator leaseCoordinator, + ExecutorService execService, + IMetricsFactory metricsFactory, + long taskBackoffTimeMillis, + long failoverTimeMillis, + boolean skipShardSyncAtWorkerInitializationIfLeasesExist, + ShardPrioritization shardPrioritization) { + this.applicationName = applicationName; + this.recordProcessorFactory = recordProcessorFactory; + this.streamConfig = streamConfig; + this.initialPosition = initialPositionInStream; + this.parentShardPollIntervalMillis = parentShardPollIntervalMillis; + this.cleanupLeasesUponShardCompletion = cleanupLeasesUponShardCompletion; + this.checkpointTracker = checkpoint != null ? checkpoint : leaseCoordinator; + this.idleTimeInMilliseconds = streamConfig.getIdleTimeInMilliseconds(); + this.executorService = execService; + this.leaseCoordinator = leaseCoordinator; + this.metricsFactory = metricsFactory; + this.controlServer = + new ShardSyncTaskManager(streamConfig.getStreamProxy(), + leaseCoordinator.getLeaseManager(), + initialPositionInStream, + cleanupLeasesUponShardCompletion, + shardSyncIdleTimeMillis, + metricsFactory, + executorService); + this.taskBackoffTimeMillis = taskBackoffTimeMillis; + this.failoverTimeMillis = failoverTimeMillis; + this.skipShardSyncAtWorkerInitializationIfLeasesExist = skipShardSyncAtWorkerInitializationIfLeasesExist; + this.shardPrioritization = shardPrioritization; + } + + /** + * @return the applicationName + */ + public String getApplicationName() { + return applicationName; + } + + /** + * Start consuming data from the stream, and pass it to the application + * record processors. + */ + public void run() { + if (shutdown) { + return; + } + + try { + initialize(); + LOG.info("Initialization complete. Starting worker loop."); + } catch (RuntimeException e1) { + LOG.error("Unable to initialize after " + MAX_INITIALIZATION_ATTEMPTS + " attempts. 
Shutting down.", e1); + shutdown(); + } + + while (!shouldShutdown()) { + runProcessLoop(); + } + + finalShutdown(); + LOG.info("Worker loop is complete. Exiting from worker."); + } + + @VisibleForTesting + void runProcessLoop() { + try { + boolean foundCompletedShard = false; + Set assignedShards = new HashSet<>(); + for (ShardInfo shardInfo : getShardInfoForAssignments()) { + ShardConsumer shardConsumer = createOrGetShardConsumer(shardInfo, recordProcessorFactory); + if (shardConsumer.isShutdown() && shardConsumer.getShutdownReason().equals(ShutdownReason.TERMINATE)) { + foundCompletedShard = true; + } else { + shardConsumer.consumeShard(); + } + assignedShards.add(shardInfo); + } + + if (foundCompletedShard) { + controlServer.syncShardAndLeaseInfo(null); + } + + // clean up shard consumers for unassigned shards + cleanupShardConsumers(assignedShards); + + wlog.info("Sleeping ..."); + Thread.sleep(idleTimeInMilliseconds); + } catch (Exception e) { + LOG.error(String.format("Worker.run caught exception, sleeping for %s milli seconds!", + String.valueOf(idleTimeInMilliseconds)), e); + try { + Thread.sleep(idleTimeInMilliseconds); + } catch (InterruptedException ex) { + LOG.info("Worker: sleep interrupted after catching exception ", ex); + } + } + wlog.resetInfoLogging(); + } + + private void initialize() { + boolean isDone = false; + Exception lastException = null; + + for (int i = 0; (!isDone) && (i < MAX_INITIALIZATION_ATTEMPTS); i++) { + try { + LOG.info("Initialization attempt " + (i + 1)); + LOG.info("Initializing LeaseCoordinator"); + leaseCoordinator.initialize(); + + TaskResult result = null; + if (!skipShardSyncAtWorkerInitializationIfLeasesExist + || leaseCoordinator.getLeaseManager().isLeaseTableEmpty()) { + LOG.info("Syncing Kinesis shard info"); + ShardSyncTask shardSyncTask = + new ShardSyncTask(streamConfig.getStreamProxy(), + leaseCoordinator.getLeaseManager(), + initialPosition, + cleanupLeasesUponShardCompletion, + 0L); + result = new 
MetricsCollectingTaskDecorator(shardSyncTask, metricsFactory).call(); + } else { + LOG.info("Skipping shard sync per config setting (and lease table is not empty)"); + } + + if (result == null || result.getException() == null) { + if (!leaseCoordinator.isRunning()) { + LOG.info("Starting LeaseCoordinator"); + leaseCoordinator.start(); + } else { + LOG.info("LeaseCoordinator is already running. No need to start it."); + } + isDone = true; + } else { + lastException = result.getException(); + } + } catch (LeasingException e) { + LOG.error("Caught exception when initializing LeaseCoordinator", e); + lastException = e; + } catch (Exception e) { + lastException = e; + } + + try { + Thread.sleep(parentShardPollIntervalMillis); + } catch (InterruptedException e) { + LOG.debug("Sleep interrupted while initializing worker."); + } + } + + if (!isDone) { + throw new RuntimeException(lastException); + } + } + + /** + * NOTE: This method is internal/private to the Worker class. It has package + * access solely for testing. + * + * This method relies on ShardInfo.equals() method returning true for ShardInfo objects which may have been + * instantiated with parentShardIds in a different order (and rest of the fields being the equal). For example + * shardInfo1.equals(shardInfo2) should return true with shardInfo1 and shardInfo2 defined as follows. + * ShardInfo shardInfo1 = new ShardInfo(shardId1, concurrencyToken1, Arrays.asList("parent1", "parent2")); + * ShardInfo shardInfo2 = new ShardInfo(shardId1, concurrencyToken1, Arrays.asList("parent2", "parent1")); + */ + void cleanupShardConsumers(Set assignedShards) { + for (ShardInfo shard : shardInfoShardConsumerMap.keySet()) { + if (!assignedShards.contains(shard)) { + // Shutdown the consumer since we are no longer responsible for + // the shard. 
+ boolean isShutdown = shardInfoShardConsumerMap.get(shard).beginShutdown(); + if (isShutdown) { + shardInfoShardConsumerMap.remove(shard); + } + } + } + } + + private List getShardInfoForAssignments() { + List assignedStreamShards = leaseCoordinator.getCurrentAssignments(); + List prioritizedShards = shardPrioritization.prioritize(assignedStreamShards); + + if ((prioritizedShards != null) && (!prioritizedShards.isEmpty())) { + if (wlog.isInfoEnabled()) { + StringBuilder builder = new StringBuilder(); + boolean firstItem = true; + for (ShardInfo shardInfo : prioritizedShards) { + if (!firstItem) { + builder.append(", "); + } + builder.append(shardInfo.getShardId()); + firstItem = false; + } + wlog.info("Current stream shard assignments: " + builder.toString()); + } + } else { + wlog.info("No activities assigned"); + } + + return prioritizedShards; + } + + /** + * Requests shutdown of the worker, notifying record processors, that implement {@link IShutdownNotificationAware}, + * of the impending shutdown. This gives the record processor a final chance to checkpoint. + * + * It's possible that a record processor won't be notify before being shutdown. This can occur if the lease is + * lost after requesting shutdown, but before the notification is dispatched. + * + *

Requested Shutdown Process

When a shutdown process is requested it operates slightly differently to + * allow the record processors a chance to checkpoint a final time. + *
    + *
  1. Call to request shutdown invoked.
  2. + *
  3. Worker stops attempting to acquire new leases
  4. + *
  5. Record Processor Shutdown Begins + *
      + *
    1. Record processor is notified of the impending shutdown, and given a final chance to checkpoint
    2. + *
    3. The lease for the record processor is then dropped.
    4. + *
    5. The record processor enters into an idle state waiting for the worker to complete final termination
    6. + *
    7. The worker will detect a record processor that has lost its lease, and will terminate the record processor + * with {@link ShutdownReason#ZOMBIE}
    8. + *
    + *
  6. + *
  7. The worker will shutdown all record processors.
  8. + *
  9. Once all record processors have been terminated, the worker will terminate all owned resources.
  10. + *
  11. Once the worker shutdown is complete, the returned future is completed.
  12. + *
+ * + * + * + * @return a Future that will be set once the shutdown is complete. + */ + public Future requestShutdown() { + + // + // Stop accepting new leases. Once we do this we can be sure that + // no more leases will be acquired. + // + leaseCoordinator.stopLeaseTaker(); + + Collection leases = leaseCoordinator.getAssignments(); + if (leases == null || leases.isEmpty()) { + // + // If there are no leases notification is already completed, but we still need to shutdown the worker. + // + this.shutdown(); + return Futures.immediateFuture(null); + } + CountDownLatch shutdownCompleteLatch = new CountDownLatch(leases.size()); + CountDownLatch notificationCompleteLatch = new CountDownLatch(leases.size()); + for (KinesisClientLease lease : leases) { + ShutdownNotification shutdownNotification = new ShardConsumerShutdownNotification(leaseCoordinator, lease, + notificationCompleteLatch, shutdownCompleteLatch); + ShardInfo shardInfo = KinesisClientLibLeaseCoordinator.convertLeaseToAssignment(lease); + ShardConsumer consumer = shardInfoShardConsumerMap.get(shardInfo); + if (consumer != null) { + consumer.notifyShutdownRequested(shutdownNotification); + } else { + // + // There is a race condition between retrieving the current assignments, and creating the + // notification. If the a lease is lost in between these two points, we explicitly decrement the + // notification latches to clear the shutdown. + // + notificationCompleteLatch.countDown(); + shutdownCompleteLatch.countDown(); + } + } + + return new ShutdownFuture(shutdownCompleteLatch, notificationCompleteLatch, this); + + } + + boolean isShutdownComplete() { + return shutdownComplete; + } + + ConcurrentMap getShardInfoShardConsumerMap() { + return shardInfoShardConsumerMap; + } + + /** + * Signals worker to shutdown. Worker will try initiating shutdown of all record processors. Note that if executor + * services were passed to the worker by the user, worker will not attempt to shutdown those resources. + * + *

Shutdown Process

When called this will start shutdown of the record processor, and eventually shutdown + * the worker itself. + *
    + *
  1. Call to start shutdown invoked
  2. + *
  3. Lease coordinator told to stop taking leases, and to drop existing leases.
  4. + *
  5. Worker discovers record processors that no longer have leases.
  6. + *
  7. Worker triggers shutdown with state {@link ShutdownReason#ZOMBIE}.
  8. + *
  9. Once all record processors are shutdown, worker terminates owned resources.
  10. + *
  11. Shutdown complete.
  12. + *
+ */ + public void shutdown() { + if (shutdown) { + LOG.warn("Shutdown requested a second time."); + return; + } + LOG.info("Worker shutdown requested."); + + // Set shutdown flag, so Worker.run can start shutdown process. + shutdown = true; + shutdownStartTimeMillis = System.currentTimeMillis(); + + // Stop lease coordinator, so leases are not renewed or stolen from other workers. + // Lost leases will force Worker to begin shutdown process for all shard consumers in + // Worker.run(). + leaseCoordinator.stop(); + } + + /** + * Perform final shutdown related tasks for the worker including shutting down worker owned + * executor services, threads, etc. + */ + private void finalShutdown() { + LOG.info("Starting worker's final shutdown."); + + if (executorService instanceof WorkerThreadPoolExecutor) { + // This should interrupt all active record processor tasks. + executorService.shutdownNow(); + } + if (metricsFactory instanceof WorkerCWMetricsFactory) { + ((CWMetricsFactory) metricsFactory).shutdown(); + } + shutdownComplete = true; + } + + /** + * Returns whether worker can shutdown immediately. Note that this method is called from Worker's {{@link #run()} + * method before every loop run, so method must do minimum amount of work to not impact shard processing timings. + * + * @return Whether worker should shutdown immediately. + */ + @VisibleForTesting + boolean shouldShutdown() { + if (executorService.isShutdown()) { + LOG.error("Worker executor service has been shutdown, so record processors cannot be shutdown."); + return true; + } + if (shutdown) { + if (shardInfoShardConsumerMap.isEmpty()) { + LOG.info("All record processors have been shutdown successfully."); + return true; + } + if ((System.currentTimeMillis() - shutdownStartTimeMillis) >= failoverTimeMillis) { + LOG.info("Lease failover time is reached, so forcing shutdown."); + return true; + } + } + return false; + } + + /** + * NOTE: This method is internal/private to the Worker class. 
It has package + * access solely for testing. + * + * @param shardInfo Kinesis shard info + * @param factory RecordProcessor factory + * @return ShardConsumer for the shard + */ + ShardConsumer createOrGetShardConsumer(ShardInfo shardInfo, IRecordProcessorFactory factory) { + ShardConsumer consumer = shardInfoShardConsumerMap.get(shardInfo); + // Instantiate a new consumer if we don't have one, or the one we + // had was from an earlier + // lease instance (and was shutdown). Don't need to create another + // one if the shard has been + // completely processed (shutdown reason terminate). + if ((consumer == null) + || (consumer.isShutdown() && consumer.getShutdownReason().equals(ShutdownReason.ZOMBIE))) { + consumer = buildConsumer(shardInfo, factory); + shardInfoShardConsumerMap.put(shardInfo, consumer); + wlog.infoForce("Created new shardConsumer for : " + shardInfo); + } + return consumer; + } + + protected ShardConsumer buildConsumer(ShardInfo shardInfo, IRecordProcessorFactory factory) { + IRecordProcessor recordProcessor = factory.createProcessor(); + + return new ShardConsumer(shardInfo, streamConfig, checkpointTracker, recordProcessor, + leaseCoordinator.getLeaseManager(), parentShardPollIntervalMillis, cleanupLeasesUponShardCompletion, + executorService, metricsFactory, taskBackoffTimeMillis, skipShardSyncAtWorkerInitializationIfLeasesExist); + + } + + /** + * Logger for suppressing too much INFO logging. To avoid too much logging + * information Worker will output logging at INFO level for a single pass + * through the main loop every minute. At DEBUG level it will output all + * INFO logs on every pass. 
+ */ + private static class WorkerLog { + + private long reportIntervalMillis = TimeUnit.MINUTES.toMillis(1); + private long nextReportTime = System.currentTimeMillis() + reportIntervalMillis; + private boolean infoReporting; + + private WorkerLog() { + + } + + @SuppressWarnings("unused") + public void debug(Object message, Throwable t) { + LOG.debug(message, t); + } + + public void info(Object message) { + if (this.isInfoEnabled()) { + LOG.info(message); + } + } + + public void infoForce(Object message) { + LOG.info(message); + } + + @SuppressWarnings("unused") + public void warn(Object message) { + LOG.warn(message); + } + + @SuppressWarnings("unused") + public void error(Object message, Throwable t) { + LOG.error(message, t); + } + + private boolean isInfoEnabled() { + return infoReporting; + } + + private void resetInfoLogging() { + if (infoReporting) { + // We just logged at INFO level for a pass through worker loop + if (LOG.isInfoEnabled()) { + infoReporting = false; + nextReportTime = System.currentTimeMillis() + reportIntervalMillis; + } // else is DEBUG or TRACE so leave reporting true + } else if (nextReportTime <= System.currentTimeMillis()) { + infoReporting = true; + } + } + } + + // Backwards compatible constructors + /** + * This constructor is for binary compatibility with code compiled against + * version of the KCL that only have constructors taking "Client" objects. 
+ * + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @param config Kinesis Client Library configuration + * @param kinesisClient Kinesis Client used for fetching data + * @param dynamoDBClient DynamoDB client used for checkpoints and tracking leases + * @param cloudWatchClient CloudWatch Client for publishing metrics + */ + public Worker( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory recordProcessorFactory, + KinesisClientLibConfiguration config, + AmazonKinesisClient kinesisClient, + AmazonDynamoDBClient dynamoDBClient, + AmazonCloudWatchClient cloudWatchClient) { + this(recordProcessorFactory, + config, + (AmazonKinesis) kinesisClient, + (AmazonDynamoDB) dynamoDBClient, + (AmazonCloudWatch) cloudWatchClient); + } + + /** + * This constructor is for binary compatibility with code compiled against + * version of the KCL that only have constructors taking "Client" objects. + * + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @param config Kinesis Client Library configuration + * @param kinesisClient Kinesis Client used for fetching data + * @param dynamoDBClient DynamoDB client used for checkpoints and tracking leases + * @param cloudWatchClient CloudWatch Client for publishing metrics + * @param execService ExecutorService to use for processing records (support for multi-threaded + * consumption) + */ + public Worker( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory recordProcessorFactory, + KinesisClientLibConfiguration config, + AmazonKinesisClient kinesisClient, + AmazonDynamoDBClient dynamoDBClient, + AmazonCloudWatchClient cloudWatchClient, + ExecutorService execService) { + this(recordProcessorFactory, + config, + (AmazonKinesis) kinesisClient, + (AmazonDynamoDB) dynamoDBClient, + (AmazonCloudWatch) cloudWatchClient, + execService); + } + + /** + * This constructor is for 
binary compatibility with code compiled against + * version of the KCL that only have constructors taking "Client" objects. + * + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @param config Kinesis Client Library configuration + * @param kinesisClient Kinesis Client used for fetching data + * @param dynamoDBClient DynamoDB client used for checkpoints and tracking leases + * @param metricsFactory Metrics factory used to emit metrics + * @param execService ExecutorService to use for processing records (support for multi-threaded + * consumption) + */ + public Worker( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory recordProcessorFactory, + KinesisClientLibConfiguration config, + AmazonKinesisClient kinesisClient, + AmazonDynamoDBClient dynamoDBClient, + IMetricsFactory metricsFactory, + ExecutorService execService) { + this(recordProcessorFactory, + config, + (AmazonKinesis) kinesisClient, + (AmazonDynamoDB) dynamoDBClient, + metricsFactory, + execService); + } + + /** + * Given configuration, returns appropriate metrics factory. + * @param cloudWatchClient Amazon CloudWatch client + * @param config KinesisClientLibConfiguration + * @return Returns metrics factory based on the config. 
+ */ + private static IMetricsFactory getMetricsFactory( + AmazonCloudWatch cloudWatchClient, KinesisClientLibConfiguration config) { + IMetricsFactory metricsFactory; + if (config.getMetricsLevel() == MetricsLevel.NONE) { + metricsFactory = new NullMetricsFactory(); + } else { + if (config.getRegionName() != null) { + Region region = RegionUtils.getRegion(config.getRegionName()); + cloudWatchClient.setRegion(region); + LOG.debug("The region of Amazon CloudWatch client has been set to " + config.getRegionName()); + } + metricsFactory = new WorkerCWMetricsFactory( + cloudWatchClient, + config.getApplicationName(), + config.getMetricsBufferTimeMillis(), + config.getMetricsMaxQueueSize(), + config.getMetricsLevel(), + config.getMetricsEnabledDimensions()); + } + return metricsFactory; + } + + /** + * Returns default executor service that should be used by the worker. + * @return Default executor service that should be used by the worker. + */ + private static ExecutorService getExecutorService() { + ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("RecordProcessor-%04d").build(); + return new WorkerThreadPoolExecutor(threadFactory); + } + + /** + * Extension to CWMetricsFactory, so worker can identify whether it owns the metrics factory instance + * or not. + * Visible and non-final only for testing. + */ + static class WorkerCWMetricsFactory extends CWMetricsFactory { + + WorkerCWMetricsFactory(AmazonCloudWatch cloudWatchClient, + String namespace, + long bufferTimeMillis, + int maxQueueSize, + MetricsLevel metricsLevel, + Set metricsEnabledDimensions) { + super(cloudWatchClient, namespace, bufferTimeMillis, + maxQueueSize, metricsLevel, metricsEnabledDimensions); + } + } + + /** + * Extension to ThreadPoolExecutor, so worker can identify whether it owns the executor service instance + * or not. + * Visible and non-final only for testing. 
+ */ + static class WorkerThreadPoolExecutor extends ThreadPoolExecutor { + private static final long DEFAULT_KEEP_ALIVE_TIME = 60L; + + WorkerThreadPoolExecutor(ThreadFactory threadFactory) { + // Defaults are based on Executors.newCachedThreadPool() + super(0, Integer.MAX_VALUE, DEFAULT_KEEP_ALIVE_TIME, TimeUnit.SECONDS, new SynchronousQueue(), + threadFactory); + } + } + + /** + * Builder to construct a Worker instance. + */ + public static class Builder { + + private IRecordProcessorFactory recordProcessorFactory; + private KinesisClientLibConfiguration config; + private AmazonKinesis kinesisClient; + private AmazonDynamoDB dynamoDBClient; + private AmazonCloudWatch cloudWatchClient; + private IMetricsFactory metricsFactory; + private ExecutorService execService; + private ShardPrioritization shardPrioritization; + + /** + * Default constructor. + */ + public Builder() { + } + + /** + * Provide a V1 + * {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor IRecordProcessor}. + * + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @return A reference to this updated object so that method calls can be chained together. + */ + public Builder recordProcessorFactory( + com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory + recordProcessorFactory) { + this.recordProcessorFactory = new V1ToV2RecordProcessorFactoryAdapter(recordProcessorFactory); + return this; + } + + /** + * Provide a V2 + * {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor IRecordProcessor}. + * + * @param recordProcessorFactory Used to get record processor instances for processing data from shards + * @return A reference to this updated object so that method calls can be chained together. 
+ */ + public Builder recordProcessorFactory(IRecordProcessorFactory recordProcessorFactory) { + this.recordProcessorFactory = recordProcessorFactory; + return this; + } + + /** + * Set the Worker config. + * + * @param config Kinesis Client Library configuration + * @return A reference to this updated object so that method calls can be chained together. + */ + public Builder config(KinesisClientLibConfiguration config) { + this.config = config; + return this; + } + + /** + * Set the Kinesis client. + * + * @param kinesisClient Kinesis Client used for fetching data + * @return A reference to this updated object so that method calls can be chained together. + */ + public Builder kinesisClient(AmazonKinesis kinesisClient) { + this.kinesisClient = kinesisClient; + return this; + } + + /** + * Set the DynamoDB client. + * + * @param dynamoDBClient DynamoDB client used for checkpoints and tracking leases + * @return A reference to this updated object so that method calls can be chained together. + */ + public Builder dynamoDBClient(AmazonDynamoDB dynamoDBClient) { + this.dynamoDBClient = dynamoDBClient; + return this; + } + + /** + * Set the Cloudwatch client. + * + * @param cloudWatchClient CloudWatch Client for publishing metrics + * @return A reference to this updated object so that method calls can be chained together. + */ + public Builder cloudWatchClient(AmazonCloudWatch cloudWatchClient) { + this.cloudWatchClient = cloudWatchClient; + return this; + } + + /** + * Set the metrics factory. + * + * @param metricsFactory Metrics factory used to emit metrics + * @return A reference to this updated object so that method calls can be chained together. + */ + public Builder metricsFactory(IMetricsFactory metricsFactory) { + this.metricsFactory = metricsFactory; + return this; + } + + /** + * Set the executor service for processing records. 
+ * + * @param execService ExecutorService to use for processing records (support for multi-threaded consumption) + * @return A reference to this updated object so that method calls can be chained together. + */ + public Builder execService(ExecutorService execService) { + this.execService = execService; + return this; + } + + /** + * Provides logic how to prioritize shard processing. + * + * @param shardPrioritization + * shardPrioritization is responsible to order shards before processing + * + * @return A reference to this updated object so that method calls can be chained together. + */ + public Builder shardPrioritization(ShardPrioritization shardPrioritization) { + this.shardPrioritization = shardPrioritization; + return this; + } + + /** + * Build the Worker instance. + * + * @return a Worker instance. + */ + // CHECKSTYLE:OFF CyclomaticComplexity + // CHECKSTYLE:OFF NPathComplexity + public Worker build() { + if (config == null) { + throw new IllegalArgumentException( + "Kinesis Client Library configuration needs to be provided to build Worker"); + } + if (recordProcessorFactory == null) { + throw new IllegalArgumentException( + "A Record Processor Factory needs to be provided to build Worker"); + } + + if (execService == null) { + execService = getExecutorService(); + } + if (kinesisClient == null) { + kinesisClient = new AmazonKinesisClient(config.getKinesisCredentialsProvider(), + config.getKinesisClientConfiguration()); + } + if (dynamoDBClient == null) { + dynamoDBClient = new AmazonDynamoDBClient(config.getDynamoDBCredentialsProvider(), + config.getDynamoDBClientConfiguration()); + } + if (cloudWatchClient == null) { + cloudWatchClient = new AmazonCloudWatchClient(config.getCloudWatchCredentialsProvider(), + config.getCloudWatchClientConfiguration()); + } + // If a region name was explicitly specified, use it as the region for Amazon Kinesis and Amazon DynamoDB. 
+ if (config.getRegionName() != null) { + Region region = RegionUtils.getRegion(config.getRegionName()); + cloudWatchClient.setRegion(region); + LOG.debug("The region of Amazon CloudWatch client has been set to " + config.getRegionName()); + kinesisClient.setRegion(region); + LOG.debug("The region of Amazon Kinesis client has been set to " + config.getRegionName()); + dynamoDBClient.setRegion(region); + LOG.debug("The region of Amazon DynamoDB client has been set to " + config.getRegionName()); + } + // If a dynamoDB endpoint was explicitly specified, use it to set the DynamoDB endpoint. + if (config.getDynamoDBEndpoint() != null) { + dynamoDBClient.setEndpoint(config.getDynamoDBEndpoint()); + LOG.debug("The endpoint of Amazon DynamoDB client has been set to " + config.getDynamoDBEndpoint()); + } + // If a kinesis endpoint was explicitly specified, use it to set the region of kinesis. + if (config.getKinesisEndpoint() != null) { + kinesisClient.setEndpoint(config.getKinesisEndpoint()); + if (config.getRegionName() != null) { + LOG.warn("Received configuration for both region name as " + config.getRegionName() + + ", and Amazon Kinesis endpoint as " + config.getKinesisEndpoint() + + ". 
Amazon Kinesis endpoint will overwrite region name."); + LOG.debug("The region of Amazon Kinesis client has been overwritten to " + + config.getKinesisEndpoint()); + } else { + LOG.debug("The region of Amazon Kinesis client has been set to " + config.getKinesisEndpoint()); + } + } + if (metricsFactory == null) { + metricsFactory = getMetricsFactory(cloudWatchClient, config); + } + if (shardPrioritization == null) { + shardPrioritization = new ParentsFirstShardPrioritization(1); + } + + return new Worker(config.getApplicationName(), + recordProcessorFactory, + new StreamConfig(new KinesisProxyFactory(config.getKinesisCredentialsProvider(), + kinesisClient).getProxy(config.getStreamName()), + config.getMaxRecords(), + config.getIdleTimeBetweenReadsInMillis(), + config.shouldCallProcessRecordsEvenForEmptyRecordList(), + config.shouldValidateSequenceNumberBeforeCheckpointing(), + config.getInitialPositionInStreamExtended()), + config.getInitialPositionInStreamExtended(), + config.getParentShardPollIntervalMillis(), + config.getShardSyncIntervalMillis(), + config.shouldCleanupLeasesUponShardCompletion(), + null, + new KinesisClientLibLeaseCoordinator(new KinesisClientLeaseManager(config.getTableName(), + dynamoDBClient), + config.getWorkerIdentifier(), + config.getFailoverTimeMillis(), + config.getEpsilonMillis(), + config.getMaxLeasesForWorker(), + config.getMaxLeasesToStealAtOneTime(), + metricsFactory) + .withInitialLeaseTableReadCapacity(config.getInitialLeaseTableReadCapacity()) + .withInitialLeaseTableWriteCapacity(config.getInitialLeaseTableWriteCapacity()), + execService, + metricsFactory, + config.getTaskBackoffTimeMillis(), + config.getFailoverTimeMillis(), + config.getSkipShardSyncAtWorkerInitializationIfLeasesExist(), + shardPrioritization); + + } + + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxy.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxy.java new file mode 
100644 index 00000000..df7f951d --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxy.java @@ -0,0 +1,135 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.proxies; + +import java.nio.ByteBuffer; +import java.util.Date; +import java.util.List; +import java.util.Set; + +import com.amazonaws.services.kinesis.model.DescribeStreamResult; +import com.amazonaws.services.kinesis.model.ExpiredIteratorException; +import com.amazonaws.services.kinesis.model.GetRecordsResult; +import com.amazonaws.services.kinesis.model.InvalidArgumentException; +import com.amazonaws.services.kinesis.model.PutRecordResult; +import com.amazonaws.services.kinesis.model.ResourceNotFoundException; +import com.amazonaws.services.kinesis.model.Shard; + +/** + * Kinesis proxy interface. Operates on a single stream (set up at initialization). + */ +public interface IKinesisProxy { + + /** + * Get records from stream. + * + * @param shardIterator Fetch data records using this shard iterator + * @param maxRecords Fetch at most this many records + * @return List of data records from Kinesis. 
+ * @throws InvalidArgumentException Invalid input parameters + * @throws ResourceNotFoundException The Kinesis stream or shard was not found + * @throws ExpiredIteratorException The iterator has expired + */ + GetRecordsResult get(String shardIterator, int maxRecords) + throws ResourceNotFoundException, InvalidArgumentException, ExpiredIteratorException; + + /** + * Fetch information about stream. Useful for fetching the list of shards in a stream. + * + * @param startShardId exclusive start shardId - used when paginating the list of shards. + * @return DescribeStreamOutput object containing a description of the stream. + * @throws ResourceNotFoundException The Kinesis stream was not found + */ + DescribeStreamResult getStreamInfo(String startShardId) throws ResourceNotFoundException; + + /** + * Fetch the shardIds of all shards in the stream. + * + * @return Set of all shardIds + * @throws ResourceNotFoundException If the specified Kinesis stream was not found + */ + Set getAllShardIds() throws ResourceNotFoundException; + + /** + * Fetch all the shards defined for the stream (e.g. obtained via calls to the DescribeStream API). + * This can be used to discover new shards and consume data from them. + * + * @return List of all shards in the Kinesis stream. + * @throws ResourceNotFoundException The Kinesis stream was not found. + */ + List getShardList() throws ResourceNotFoundException; + + /** + * Fetch a shard iterator from the specified position in the shard. + * This is to fetch a shard iterator for ShardIteratorType AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER which + * requires the starting sequence number. + * + * NOTE: Currently this method continues to fetch iterators for ShardIteratorTypes TRIM_HORIZON, LATEST, + * AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER. + * But this behavior will change in the next release, after which this method will only serve + * AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER ShardIteratorTypes. 
+ * We recommend users who call this method directly to use the appropriate getIterator method based on the + * ShardIteratorType. + * + * @param shardId Shard id + * @param iteratorEnum one of: TRIM_HORIZON, LATEST, AT_SEQUENCE_NUMBER, AFTER_SEQUENCE_NUMBER + * @param sequenceNumber the sequence number - must be null unless iteratorEnum is AT_SEQUENCE_NUMBER or + * AFTER_SEQUENCE_NUMBER + * @return shard iterator which can be used to read data from Kinesis. + * @throws ResourceNotFoundException The Kinesis stream or shard was not found + * @throws InvalidArgumentException Invalid input parameters + */ + String getIterator(String shardId, String iteratorEnum, String sequenceNumber) + throws ResourceNotFoundException, InvalidArgumentException; + + /** + * Fetch a shard iterator from the specified position in the shard. + * This is to fetch a shard iterator for ShardIteratorType LATEST or TRIM_HORIZON which doesn't require a starting + * sequence number. + * + * @param shardId Shard id + * @param iteratorEnum Either TRIM_HORIZON or LATEST. + * @return shard iterator which can be used to read data from Kinesis. + * @throws ResourceNotFoundException The Kinesis stream or shard was not found + * @throws InvalidArgumentException Invalid input parameters + */ + String getIterator(String shardId, String iteratorEnum) throws ResourceNotFoundException, InvalidArgumentException; + + /** + * Fetch a shard iterator from the specified position in the shard. + * This is to fetch a shard iterator for ShardIteratorType AT_TIMESTAMP which requires the timestamp field. + * + * @param shardId Shard id + * @param timestamp The timestamp. + * @return shard iterator which can be used to read data from Kinesis. 
+ * @throws ResourceNotFoundException The Kinesis stream or shard was not found + * @throws InvalidArgumentException Invalid input parameters + */ + String getIterator(String shardId, Date timestamp) throws ResourceNotFoundException, InvalidArgumentException; + + /** + * @param sequenceNumberForOrdering (optional) used for record ordering + * @param explicitHashKey optionally supplied transformation of partitionkey + * @param partitionKey for this record + * @param data payload + * @return PutRecordResult (contains the Kinesis sequence number of the record). + * @throws ResourceNotFoundException The Kinesis stream was not found. + * @throws InvalidArgumentException InvalidArgumentException. + */ + PutRecordResult put(String sequenceNumberForOrdering, + String explicitHashKey, + String partitionKey, + ByteBuffer data) throws ResourceNotFoundException, InvalidArgumentException; +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxyExtended.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxyExtended.java new file mode 100644 index 00000000..7b3e8cc2 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxyExtended.java @@ -0,0 +1,35 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.proxies; + +import com.amazonaws.services.kinesis.model.Shard; + +/** + * Kinesis proxy interface extended with addition method(s). Operates on a + * single stream (set up at initialization). + * + */ +public interface IKinesisProxyExtended extends IKinesisProxy { + + /** + * Get the Shard corresponding to shardId associated with this + * IKinesisProxy. + * + * @param shardId + * Fetch the Shard with this given shardId + * @return the Shard with the given shardId + */ + Shard getShard(String shardId); +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxyFactory.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxyFactory.java new file mode 100644 index 00000000..0467b8e4 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/IKinesisProxyFactory.java @@ -0,0 +1,30 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.proxies; + +/** + * Interface for a KinesisProxyFactory. + * + */ +public interface IKinesisProxyFactory { + + /** + * Return an IKinesisProxy object for the specified stream. + * @param streamName Stream from which data is consumed. + * @return IKinesisProxy object. 
+ */ + IKinesisProxy getProxy(String streamName); + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxy.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxy.java new file mode 100644 index 00000000..de330dc9 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxy.java @@ -0,0 +1,347 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.proxies; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Date; +import java.util.EnumSet; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.kinesis.AmazonKinesis; +import com.amazonaws.services.kinesis.AmazonKinesisClient; +import com.amazonaws.services.kinesis.model.DescribeStreamRequest; +import com.amazonaws.services.kinesis.model.DescribeStreamResult; +import com.amazonaws.services.kinesis.model.ExpiredIteratorException; +import com.amazonaws.services.kinesis.model.GetRecordsRequest; +import com.amazonaws.services.kinesis.model.GetRecordsResult; +import com.amazonaws.services.kinesis.model.GetShardIteratorRequest; +import 
com.amazonaws.services.kinesis.model.GetShardIteratorResult; +import com.amazonaws.services.kinesis.model.InvalidArgumentException; +import com.amazonaws.services.kinesis.model.LimitExceededException; +import com.amazonaws.services.kinesis.model.PutRecordRequest; +import com.amazonaws.services.kinesis.model.PutRecordResult; +import com.amazonaws.services.kinesis.model.ResourceNotFoundException; +import com.amazonaws.services.kinesis.model.Shard; +import com.amazonaws.services.kinesis.model.ShardIteratorType; +import com.amazonaws.services.kinesis.model.StreamStatus; + +/** + * Kinesis proxy - used to make calls to Amazon Kinesis (e.g. fetch data records and list of shards). + */ +public class KinesisProxy implements IKinesisProxyExtended { + + private static final Log LOG = LogFactory.getLog(KinesisProxy.class); + + private static final EnumSet EXPECTED_ITERATOR_TYPES = EnumSet + .of(ShardIteratorType.AT_SEQUENCE_NUMBER, ShardIteratorType.AFTER_SEQUENCE_NUMBER); + + private static String defaultServiceName = "kinesis"; + private static String defaultRegionId = "us-east-1";; + + private AmazonKinesis client; + private AWSCredentialsProvider credentialsProvider; + private AtomicReference> listOfShardsSinceLastGet = new AtomicReference<>(); + + private final String streamName; + + private static final long DEFAULT_DESCRIBE_STREAM_BACKOFF_MILLIS = 1000L; + private static final int DEFAULT_DESCRIBE_STREAM_RETRY_TIMES = 50; + private final long describeStreamBackoffTimeInMillis; + private final int maxDescribeStreamRetryAttempts; + + /** + * Public constructor. 
+ * + * @param streamName Data records will be fetched from this stream + * @param credentialProvider Provides credentials for signing Kinesis requests + * @param endpoint Kinesis endpoint + */ + + public KinesisProxy(final String streamName, AWSCredentialsProvider credentialProvider, String endpoint) { + this(streamName, credentialProvider, endpoint, defaultServiceName, defaultRegionId, + DEFAULT_DESCRIBE_STREAM_BACKOFF_MILLIS, DEFAULT_DESCRIBE_STREAM_RETRY_TIMES); + } + + /** + * Public constructor. + * + * @param streamName Data records will be fetched from this stream + * @param credentialProvider Provides credentials for signing Kinesis requests + * @param endpoint Kinesis endpoint + * @param serviceName service name + * @param regionId region id + * @param describeStreamBackoffTimeInMillis Backoff time for DescribeStream calls in milliseconds + * @param maxDescribeStreamRetryAttempts Number of retry attempts for DescribeStream calls + */ + public KinesisProxy(final String streamName, + AWSCredentialsProvider credentialProvider, + String endpoint, + String serviceName, + String regionId, + long describeStreamBackoffTimeInMillis, + int maxDescribeStreamRetryAttempts) { + this(streamName, credentialProvider, buildClientSettingEndpoint(credentialProvider, + endpoint, + serviceName, + regionId), describeStreamBackoffTimeInMillis, maxDescribeStreamRetryAttempts); + + + LOG.debug("KinesisProxy has created a kinesisClient"); + } + + private static AmazonKinesisClient buildClientSettingEndpoint(AWSCredentialsProvider credentialProvider, + String endpoint, + String serviceName, + String regionId) { + AmazonKinesisClient client = new AmazonKinesisClient(credentialProvider); + client.setEndpoint(endpoint); + client.setSignerRegionOverride(regionId); + return client; + } + + /** + * Public constructor. 
+ * + * @param streamName Data records will be fetched from this stream + * @param credentialProvider Provides credentials for signing Kinesis requests + * @param kinesisClient Kinesis client (used to fetch data from Kinesis) + * @param describeStreamBackoffTimeInMillis Backoff time for DescribeStream calls in milliseconds + * @param maxDescribeStreamRetryAttempts Number of retry attempts for DescribeStream calls + */ + public KinesisProxy(final String streamName, + AWSCredentialsProvider credentialProvider, + AmazonKinesis kinesisClient, + long describeStreamBackoffTimeInMillis, + int maxDescribeStreamRetryAttempts) { + this.streamName = streamName; + this.credentialsProvider = credentialProvider; + this.describeStreamBackoffTimeInMillis = describeStreamBackoffTimeInMillis; + this.maxDescribeStreamRetryAttempts = maxDescribeStreamRetryAttempts; + this.client = kinesisClient; + + LOG.debug("KinesisProxy( " + streamName + ")"); + } + + /** + * {@inheritDoc} + */ + @Override + public GetRecordsResult get(String shardIterator, int maxRecords) + throws ResourceNotFoundException, InvalidArgumentException, ExpiredIteratorException { + + final GetRecordsRequest getRecordsRequest = new GetRecordsRequest(); + getRecordsRequest.setRequestCredentials(credentialsProvider.getCredentials()); + getRecordsRequest.setShardIterator(shardIterator); + getRecordsRequest.setLimit(maxRecords); + final GetRecordsResult response = client.getRecords(getRecordsRequest); + return response; + + } + + /** + * {@inheritDoc} + */ + @Override + public DescribeStreamResult getStreamInfo(String startShardId) + throws ResourceNotFoundException, LimitExceededException { + final DescribeStreamRequest describeStreamRequest = new DescribeStreamRequest(); + describeStreamRequest.setRequestCredentials(credentialsProvider.getCredentials()); + describeStreamRequest.setStreamName(streamName); + describeStreamRequest.setExclusiveStartShardId(startShardId); + DescribeStreamResult response = null; + int 
remainingRetryTimes = this.maxDescribeStreamRetryAttempts; + // Call DescribeStream, with backoff and retries (if we get LimitExceededException). + while ((remainingRetryTimes >= 0) && (response == null)) { + try { + response = client.describeStream(describeStreamRequest); + } catch (LimitExceededException le) { + LOG.info("Got LimitExceededException when describing stream " + streamName + ". Backing off for " + + this.describeStreamBackoffTimeInMillis + " millis."); + try { + Thread.sleep(this.describeStreamBackoffTimeInMillis); + } catch (InterruptedException ie) { + LOG.debug("Stream " + streamName + " : Sleep was interrupted ", ie); + } + } + remainingRetryTimes--; + } + + if (StreamStatus.ACTIVE.toString().equals(response.getStreamDescription().getStreamStatus()) + || StreamStatus.UPDATING.toString().equals(response.getStreamDescription().getStreamStatus())) { + return response; + } else { + LOG.info("Stream is in status " + response.getStreamDescription().getStreamStatus() + + ", KinesisProxy.DescribeStream returning null (wait until stream is Active or Updating"); + return null; + } + } + + /** + * {@inheritDoc} + */ + @Override + public Shard getShard(String shardId) { + if (this.listOfShardsSinceLastGet.get() == null) { + //Update this.listOfShardsSinceLastGet as needed. + this.getShardList(); + } + + for (Shard shard : listOfShardsSinceLastGet.get()) { + if (shard.getShardId().equals(shardId)) { + return shard; + } + } + + LOG.warn("Cannot find the shard given the shardId " + shardId); + return null; + } + + /** + * {@inheritDoc} + */ + @Override + public List getShardList() { + List result = new ArrayList(); + + DescribeStreamResult response = null; + String lastShardId = null; + + do { + response = getStreamInfo(lastShardId); + + if (response == null) { + /* + * If getStreamInfo ever returns null, we should bail and return null. 
This indicates the stream is not + * in ACTIVE or UPDATING state and we may not have accurate/consistent information about the stream. + */ + return null; + } else { + List shards = response.getStreamDescription().getShards(); + result.addAll(shards); + lastShardId = shards.get(shards.size() - 1).getShardId(); + } + } while (response.getStreamDescription().isHasMoreShards()); + this.listOfShardsSinceLastGet.set(result); + return result; + } + + /** + * {@inheritDoc} + */ + @Override + public Set getAllShardIds() throws ResourceNotFoundException { + List shards = getShardList(); + if (shards == null) { + return null; + } else { + Set shardIds = new HashSet(); + + for (Shard shard : getShardList()) { + shardIds.add(shard.getShardId()); + } + + return shardIds; + } + } + + /** + * {@inheritDoc} + */ + @Override + public String getIterator(String shardId, String iteratorType, String sequenceNumber) { + ShardIteratorType shardIteratorType; + try { + shardIteratorType = ShardIteratorType.fromValue(iteratorType); + } catch (IllegalArgumentException iae) { + LOG.error("Caught illegal argument exception while parsing iteratorType: " + iteratorType, iae); + shardIteratorType = null; + } + + if (!EXPECTED_ITERATOR_TYPES.contains(shardIteratorType)) { + LOG.info("This method should only be used for AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER " + + "ShardIteratorTypes. 
For methods to use with other ShardIteratorTypes, see IKinesisProxy.java"); + } + final GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest(); + getShardIteratorRequest.setRequestCredentials(credentialsProvider.getCredentials()); + getShardIteratorRequest.setStreamName(streamName); + getShardIteratorRequest.setShardId(shardId); + getShardIteratorRequest.setShardIteratorType(iteratorType); + getShardIteratorRequest.setStartingSequenceNumber(sequenceNumber); + getShardIteratorRequest.setTimestamp(null); + final GetShardIteratorResult response = client.getShardIterator(getShardIteratorRequest); + return response.getShardIterator(); + } + + /** + * {@inheritDoc} + */ + @Override + public String getIterator(String shardId, String iteratorType) { + final GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest(); + getShardIteratorRequest.setRequestCredentials(credentialsProvider.getCredentials()); + getShardIteratorRequest.setStreamName(streamName); + getShardIteratorRequest.setShardId(shardId); + getShardIteratorRequest.setShardIteratorType(iteratorType); + getShardIteratorRequest.setStartingSequenceNumber(null); + getShardIteratorRequest.setTimestamp(null); + final GetShardIteratorResult response = client.getShardIterator(getShardIteratorRequest); + return response.getShardIterator(); + } + + /** + * {@inheritDoc} + */ + @Override + public String getIterator(String shardId, Date timestamp) { + final GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest(); + getShardIteratorRequest.setRequestCredentials(credentialsProvider.getCredentials()); + getShardIteratorRequest.setStreamName(streamName); + getShardIteratorRequest.setShardId(shardId); + getShardIteratorRequest.setShardIteratorType(ShardIteratorType.AT_TIMESTAMP); + getShardIteratorRequest.setStartingSequenceNumber(null); + getShardIteratorRequest.setTimestamp(timestamp); + final GetShardIteratorResult response = 
client.getShardIterator(getShardIteratorRequest); + return response.getShardIterator(); + } + + /** + * {@inheritDoc} + */ + @Override + public PutRecordResult put(String exclusiveMinimumSequenceNumber, + String explicitHashKey, + String partitionKey, + ByteBuffer data) throws ResourceNotFoundException, InvalidArgumentException { + final PutRecordRequest putRecordRequest = new PutRecordRequest(); + putRecordRequest.setRequestCredentials(credentialsProvider.getCredentials()); + putRecordRequest.setStreamName(streamName); + putRecordRequest.setSequenceNumberForOrdering(exclusiveMinimumSequenceNumber); + putRecordRequest.setExplicitHashKey(explicitHashKey); + putRecordRequest.setPartitionKey(partitionKey); + putRecordRequest.setData(data); + + final PutRecordResult response = client.putRecord(putRecordRequest); + return response; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxyFactory.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxyFactory.java new file mode 100644 index 00000000..93df67e0 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxyFactory.java @@ -0,0 +1,140 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.proxies; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.kinesis.AmazonKinesis; +import com.amazonaws.services.kinesis.AmazonKinesisClient; + +/** + * Factory used for instantiating KinesisProxy objects (to fetch data from Kinesis). + */ +public class KinesisProxyFactory implements IKinesisProxyFactory { + + private final AWSCredentialsProvider credentialProvider; + private static String defaultServiceName = "kinesis"; + private static String defaultRegionId = "us-east-1"; + private static final long DEFAULT_DESCRIBE_STREAM_BACKOFF_MILLIS = 1000L; + private static final int DEFAULT_DESCRIBE_STREAM_RETRY_TIMES = 50; + private final AmazonKinesis kinesisClient; + private final long describeStreamBackoffTimeInMillis; + private final int maxDescribeStreamRetryAttempts; + + /** + * Constructor for creating a KinesisProxy factory, using the specified credentials provider and endpoint. + * + * @param credentialProvider credentials provider used to sign requests + * @param endpoint Amazon Kinesis endpoint to use + */ + public KinesisProxyFactory(AWSCredentialsProvider credentialProvider, String endpoint) { + this(credentialProvider, new ClientConfiguration(), endpoint, defaultServiceName, defaultRegionId, + DEFAULT_DESCRIBE_STREAM_BACKOFF_MILLIS, DEFAULT_DESCRIBE_STREAM_RETRY_TIMES); + } + + /** + * Constructor for KinesisProxy factory using the client configuration to use when interacting with Kinesis. 
+ * + * @param credentialProvider credentials provider used to sign requests + * @param clientConfig Client Configuration used when instantiating an AmazonKinesisClient + * @param endpoint Amazon Kinesis endpoint to use + */ + public KinesisProxyFactory(AWSCredentialsProvider credentialProvider, + ClientConfiguration clientConfig, + String endpoint) { + this(credentialProvider, clientConfig, endpoint, defaultServiceName, defaultRegionId, + DEFAULT_DESCRIBE_STREAM_BACKOFF_MILLIS, DEFAULT_DESCRIBE_STREAM_RETRY_TIMES); + } + + /** + * This constructor may be used to specify the AmazonKinesisClient to use. + * + * @param credentialProvider credentials provider used to sign requests + * @param client AmazonKinesisClient used to fetch data from Kinesis + */ + public KinesisProxyFactory(AWSCredentialsProvider credentialProvider, AmazonKinesis client) { + this(credentialProvider, client, DEFAULT_DESCRIBE_STREAM_BACKOFF_MILLIS, DEFAULT_DESCRIBE_STREAM_RETRY_TIMES); + } + + /** + * Used internally and for development/testing. 
+ * + * @param credentialProvider credentials provider used to sign requests + * @param clientConfig Client Configuration used when instantiating an AmazonKinesisClient + * @param endpoint Amazon Kinesis endpoint to use + * @param serviceName service name + * @param regionId region id + * @param describeStreamBackoffTimeInMillis backoff time for describing stream in millis + * @param maxDescribeStreamRetryAttempts Number of retry attempts for DescribeStream calls + */ + KinesisProxyFactory(AWSCredentialsProvider credentialProvider, + ClientConfiguration clientConfig, + String endpoint, + String serviceName, + String regionId, + long describeStreamBackoffTimeInMillis, + int maxDescribeStreamRetryAttempts) { + this(credentialProvider, buildClientSettingEndpoint(credentialProvider, + clientConfig, + endpoint, + serviceName, + regionId), + describeStreamBackoffTimeInMillis, maxDescribeStreamRetryAttempts); + + } + + /** + * Used internally in the class (and for development/testing). + * + * @param credentialProvider credentials provider used to sign requests + * @param client AmazonKinesisClient used to fetch data from Kinesis + * @param describeStreamBackoffTimeInMillis backoff time for describing stream in millis + * @param maxDescribeStreamRetryAttempts Number of retry attempts for DescribeStream calls + */ + KinesisProxyFactory(AWSCredentialsProvider credentialProvider, + AmazonKinesis client, + long describeStreamBackoffTimeInMillis, + int maxDescribeStreamRetryAttempts) { + super(); + this.kinesisClient = client; + this.credentialProvider = credentialProvider; + this.describeStreamBackoffTimeInMillis = describeStreamBackoffTimeInMillis; + this.maxDescribeStreamRetryAttempts = maxDescribeStreamRetryAttempts; + } + + /** + * {@inheritDoc} + */ + @Override + public IKinesisProxy getProxy(String streamName) { + return new KinesisProxy(streamName, + credentialProvider, + kinesisClient, + describeStreamBackoffTimeInMillis, + maxDescribeStreamRetryAttempts); + + } + + 
private static AmazonKinesisClient buildClientSettingEndpoint(AWSCredentialsProvider credentialProvider, + ClientConfiguration clientConfig, + String endpoint, + String serviceName, + String regionId) { + AmazonKinesisClient client = new AmazonKinesisClient(credentialProvider, clientConfig); + client.setEndpoint(endpoint); + client.setSignerRegionOverride(regionId); + return client; + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/MetricsCollectingKinesisProxyDecorator.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/MetricsCollectingKinesisProxyDecorator.java new file mode 100644 index 00000000..d27fc6a1 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/proxies/MetricsCollectingKinesisProxyDecorator.java @@ -0,0 +1,200 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
/*
 * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Amazon Software License (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 * http://aws.amazon.com/asl/
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.amazonaws.services.kinesis.clientlibrary.proxies;

import java.nio.ByteBuffer;
import java.util.Date;
import java.util.List;
import java.util.Set;

import com.amazonaws.services.kinesis.model.DescribeStreamResult;
import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
import com.amazonaws.services.kinesis.model.GetRecordsResult;
import com.amazonaws.services.kinesis.model.InvalidArgumentException;
import com.amazonaws.services.kinesis.model.PutRecordResult;
import com.amazonaws.services.kinesis.model.ResourceNotFoundException;
import com.amazonaws.services.kinesis.model.Shard;
import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;

/**
 * IKinesisProxy implementation that wraps another implementation and collects metrics.
 * Every call is delegated unchanged; success and latency are recorded per operation.
 */
public class MetricsCollectingKinesisProxyDecorator implements IKinesisProxy {

    private static final String SEP = ".";

    // Metric names are built once at construction; all fields are effectively immutable
    // (the original left them non-final).
    private final String getIteratorMetric;
    private final String getRecordsMetric;
    private final String getStreamInfoMetric;
    private final String getShardListMetric;
    private final String putRecordMetric;
    private final String getRecordsShardId;

    private final IKinesisProxy other;

    /**
     * Constructor.
     *
     * @param prefix prefix for generated metrics
     * @param other Kinesis proxy to decorate
     * @param shardId shardId will be included in the metrics.
     */
    public MetricsCollectingKinesisProxyDecorator(String prefix, IKinesisProxy other, String shardId) {
        this.other = other;
        getRecordsShardId = shardId;
        getIteratorMetric = prefix + SEP + "getIterator";
        getRecordsMetric = prefix + SEP + "getRecords";
        getStreamInfoMetric = prefix + SEP + "getStreamInfo";
        getShardListMetric = prefix + SEP + "getShardList";
        putRecordMetric = prefix + SEP + "putRecord";
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public GetRecordsResult get(String shardIterator, int maxRecords)
            throws ResourceNotFoundException, InvalidArgumentException, ExpiredIteratorException {
        long startTime = System.currentTimeMillis();
        boolean success = false;
        try {
            GetRecordsResult response = other.get(shardIterator, maxRecords);
            success = true;
            return response;
        } finally {
            // Latency/success are recorded even when the delegate throws (success stays false).
            MetricsHelper.addSuccessAndLatencyPerShard(getRecordsShardId, getRecordsMetric, startTime, success,
                    MetricsLevel.DETAILED);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public DescribeStreamResult getStreamInfo(String startingShardId) throws ResourceNotFoundException {
        long startTime = System.currentTimeMillis();
        boolean success = false;
        try {
            DescribeStreamResult response = other.getStreamInfo(startingShardId);
            success = true;
            return response;
        } finally {
            MetricsHelper.addSuccessAndLatency(getStreamInfoMetric, startTime, success, MetricsLevel.DETAILED);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Set<String> getAllShardIds() throws ResourceNotFoundException {
        long startTime = System.currentTimeMillis();
        boolean success = false;
        try {
            Set<String> response = other.getAllShardIds();
            success = true;
            return response;
        } finally {
            // NOTE(review): this records under the getStreamInfo metric name, not a dedicated
            // getAllShardIds metric. Likely a copy-paste slip, but the emitted metric name is
            // externally observable, so it is preserved here rather than silently renamed.
            MetricsHelper.addSuccessAndLatency(getStreamInfoMetric, startTime, success, MetricsLevel.DETAILED);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getIterator(String shardId, String iteratorEnum, String sequenceNumber)
            throws ResourceNotFoundException, InvalidArgumentException {
        long startTime = System.currentTimeMillis();
        boolean success = false;
        try {
            String response = other.getIterator(shardId, iteratorEnum, sequenceNumber);
            success = true;
            return response;
        } finally {
            MetricsHelper.addSuccessAndLatency(getIteratorMetric, startTime, success, MetricsLevel.DETAILED);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getIterator(String shardId, String iteratorEnum)
            throws ResourceNotFoundException, InvalidArgumentException {
        long startTime = System.currentTimeMillis();
        boolean success = false;
        try {
            String response = other.getIterator(shardId, iteratorEnum);
            success = true;
            return response;
        } finally {
            MetricsHelper.addSuccessAndLatency(getIteratorMetric, startTime, success, MetricsLevel.DETAILED);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getIterator(String shardId, Date timestamp)
            throws ResourceNotFoundException, InvalidArgumentException {
        long startTime = System.currentTimeMillis();
        boolean success = false;
        try {
            String response = other.getIterator(shardId, timestamp);
            success = true;
            return response;
        } finally {
            MetricsHelper.addSuccessAndLatency(getIteratorMetric, startTime, success, MetricsLevel.DETAILED);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<Shard> getShardList() throws ResourceNotFoundException {
        long startTime = System.currentTimeMillis();
        boolean success = false;
        try {
            List<Shard> response = other.getShardList();
            success = true;
            return response;
        } finally {
            MetricsHelper.addSuccessAndLatency(getShardListMetric, startTime, success, MetricsLevel.DETAILED);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public PutRecordResult put(String sequenceNumberForOrdering,
            String explicitHashKey,
            String partitionKey,
            ByteBuffer data) throws ResourceNotFoundException, InvalidArgumentException {
        long startTime = System.currentTimeMillis();
        boolean success = false;
        try {
            PutRecordResult response = other.put(sequenceNumberForOrdering, explicitHashKey, partitionKey, data);
            success = true;
            return response;
        } finally {
            MetricsHelper.addSuccessAndLatency(putRecordMetric, startTime, success, MetricsLevel.DETAILED);
        }
    }
}
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.retrieval.kpl; +package com.amazonaws.services.kinesis.clientlibrary.types; import java.math.BigInteger; //import com.amazonaws.services.kinesis.clientlibrary.lib.worker.String; -import software.amazon.kinesis.checkpoint.SentinelCheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint; /** * Represents a two-part sequence number for records aggregated by the Kinesis @@ -44,7 +44,7 @@ public class ExtendedSequenceNumber implements Comparable= 0) { - sb.append("SubsequenceNumber: " + subSequenceNumber()); + sb.append("SubsequenceNumber: " + getSubSequenceNumber()); } sb.append("}"); return sb.toString(); @@ -181,10 +181,10 @@ public class ExtendedSequenceNumber implements Comparable records; + private IRecordProcessorCheckpointer checkpointer; + private Long millisBehindLatest; + + /** + * Default constructor. + */ + public ProcessRecordsInput() { + } + + /** + * Get records. + * + * @return Data records to be processed + */ + public List getRecords() { + return records; + } + + /** + * Set records. + * + * @param records Data records to be processed + * @return A reference to this updated object so that method calls can be chained together. + */ + public ProcessRecordsInput withRecords(List records) { + this.records = records; + return this; + } + + /** + * Get Checkpointer. + * + * @return RecordProcessor should use this instance to checkpoint their progress. + */ + public IRecordProcessorCheckpointer getCheckpointer() { + return checkpointer; + } + + /** + * Set Checkpointer. + * + * @param checkpointer RecordProcessor should use this instance to checkpoint their progress. 
+ * @return A reference to this updated object so that method calls can be chained together. + */ + public ProcessRecordsInput withCheckpointer(IRecordProcessorCheckpointer checkpointer) { + this.checkpointer = checkpointer; + return this; + } + + /** + * Get milliseconds behind latest. + * + * @return The number of milliseconds this batch of records is from the tip of the stream, + * indicating how far behind current time the record processor is. + */ + public Long getMillisBehindLatest() { + return millisBehindLatest; + } + + /** + * Set milliseconds behind latest. + * + * @param millisBehindLatest The number of milliseconds this batch of records is from the tip of the stream, + * indicating how far behind current time the record processor is. + * @return A reference to this updated object so that method calls can be chained together. + */ + public ProcessRecordsInput withMillisBehindLatest(Long millisBehindLatest) { + this.millisBehindLatest = millisBehindLatest; + return this; + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/types/ShutdownInput.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/types/ShutdownInput.java new file mode 100644 index 00000000..c533a4da --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/types/ShutdownInput.java @@ -0,0 +1,77 @@ +/* + * Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.types; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason; + +/** + * Container for the parameters to the IRecordProcessor's + * {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#shutdown(ShutdownInput + * shutdownInput) shutdown} method. + */ +public class ShutdownInput { + + private ShutdownReason shutdownReason; + private IRecordProcessorCheckpointer checkpointer; + + /** + * Default constructor. + */ + public ShutdownInput() { + } + + /** + * Get shutdown reason. + * + * @return Reason for the shutdown (ShutdownReason.TERMINATE indicates the shard is closed and there are no + * more records to process. Shutdown.ZOMBIE indicates a fail over has occurred). + */ + public ShutdownReason getShutdownReason() { + return shutdownReason; + } + + /** + * Set shutdown reason. + * + * @param shutdownReason Reason for the shutdown + * @return A reference to this updated object so that method calls can be chained together. + */ + public ShutdownInput withShutdownReason(ShutdownReason shutdownReason) { + this.shutdownReason = shutdownReason; + return this; + } + + /** + * Get Checkpointer. + * + * @return The checkpointer object that the record processor should use to checkpoint + */ + public IRecordProcessorCheckpointer getCheckpointer() { + return checkpointer; + } + + /** + * Set the checkpointer. + * + * @param checkpointer The checkpointer object that the record processor should use to checkpoint + * @return A reference to this updated object so that method calls can be chained together. 
+ */ + public ShutdownInput withCheckpointer(IRecordProcessorCheckpointer checkpointer) { + this.checkpointer = checkpointer; + return this; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/types/UserRecord.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/types/UserRecord.java new file mode 100644 index 00000000..2f60671a --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/types/UserRecord.java @@ -0,0 +1,305 @@ +/* + * Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.types; + +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.List; + +import org.apache.commons.lang.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.model.Record; +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * This class represents a KPL user record. 
+ */ +@SuppressWarnings("serial") +public class UserRecord extends Record { + private static final Log LOG = LogFactory.getLog(UserRecord.class); + + private static final byte[] AGGREGATED_RECORD_MAGIC = new byte[] {-13, -119, -102, -62 }; + private static final int DIGEST_SIZE = 16; + private static final BigInteger SMALLEST_HASH_KEY = new BigInteger("0"); + // largest hash key = 2^128-1 + private static final BigInteger LARGEST_HASH_KEY = new BigInteger(StringUtils.repeat("FF", 16), 16); + + private final long subSequenceNumber; + private final String explicitHashKey; + private final boolean aggregated; + + /** + * Create a User Record from a Kinesis Record. + * + * @param record Kinesis record + */ + public UserRecord(Record record) { + this(false, record, 0, null); + } + + /** + * Create a User Record. + * + * @param aggregated whether the record is aggregated + * @param record Kinesis record + * @param subSequenceNumber subsequence number + * @param explicitHashKey explicit hash key + */ + protected UserRecord(boolean aggregated, Record record, long subSequenceNumber, String explicitHashKey) { + if (subSequenceNumber < 0) { + throw new IllegalArgumentException("Cannot have an invalid, negative subsequence number"); + } + + this.aggregated = aggregated; + this.subSequenceNumber = subSequenceNumber; + this.explicitHashKey = explicitHashKey; + + this.setSequenceNumber(record.getSequenceNumber()); + this.setData(record.getData()); + this.setPartitionKey(record.getPartitionKey()); + this.setApproximateArrivalTimestamp(record.getApproximateArrivalTimestamp()); + } + + /** + * @return subSequenceNumber of this UserRecord. + */ + public long getSubSequenceNumber() { + return subSequenceNumber; + } + + /** + * @return explicitHashKey of this UserRecord. + */ + public String getExplicitHashKey() { + return explicitHashKey; + } + + /** + * @return a boolean indicating whether this UserRecord is aggregated. 
+ */ + public boolean isAggregated() { + return aggregated; + } + + /** + * @return the String representation of this UserRecord. + */ + @Override + public String toString() { + return "UserRecord [subSequenceNumber=" + subSequenceNumber + ", explicitHashKey=" + explicitHashKey + + ", aggregated=" + aggregated + ", getSequenceNumber()=" + getSequenceNumber() + ", getData()=" + + getData() + ", getPartitionKey()=" + getPartitionKey() + "]"; + } + + /** + * {@inheritDoc} + */ + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + (aggregated ? 1231 : 1237); + result = prime * result + ((explicitHashKey == null) ? 0 : explicitHashKey.hashCode()); + result = prime * result + (int) (subSequenceNumber ^ (subSequenceNumber >>> 32)); + return result; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!super.equals(obj)) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + UserRecord other = (UserRecord) obj; + if (aggregated != other.aggregated) { + return false; + } + if (explicitHashKey == null) { + if (other.explicitHashKey != null) { + return false; + } + } else if (!explicitHashKey.equals(other.explicitHashKey)) { + return false; + } + if (subSequenceNumber != other.subSequenceNumber) { + return false; + } + return true; + } + + private static byte[] md5(byte[] data) { + try { + MessageDigest d = MessageDigest.getInstance("MD5"); + return d.digest(data); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + } + + /** + * This method deaggregates the given list of Amazon Kinesis records into a + * list of KPL user records. This method will then return the resulting list + * of KPL user records. + * + * @param records + * A list of Amazon Kinesis records, each possibly aggregated. + * @return A resulting list of deaggregated KPL user records. 
+ */ + public static List deaggregate(List records) { + return deaggregate(records, SMALLEST_HASH_KEY, LARGEST_HASH_KEY); + } + + /** + * This method deaggregates the given list of Amazon Kinesis records into a + * list of KPL user records. Any KPL user records whose explicit hash key or + * partition key falls outside the range of the startingHashKey and the + * endingHashKey are discarded from the resulting list. This method will + * then return the resulting list of KPL user records. + * + * @param records + * A list of Amazon Kinesis records, each possibly aggregated. + * @param startingHashKey + * A BigInteger representing the starting hash key that the + * explicit hash keys or partition keys of retained resulting KPL + * user records must be greater than or equal to. + * @param endingHashKey + * A BigInteger representing the ending hash key that the the + * explicit hash keys or partition keys of retained resulting KPL + * user records must be smaller than or equal to. + * @return A resulting list of KPL user records whose explicit hash keys or + * partition keys fall within the range of the startingHashKey and + * the endingHashKey. 
+ */ + // CHECKSTYLE:OFF NPathComplexity + public static List deaggregate(List records, BigInteger startingHashKey, + BigInteger endingHashKey) { + List result = new ArrayList<>(); + byte[] magic = new byte[AGGREGATED_RECORD_MAGIC.length]; + byte[] digest = new byte[DIGEST_SIZE]; + + for (Record r : records) { + boolean isAggregated = true; + long subSeqNum = 0; + ByteBuffer bb = r.getData(); + + if (bb.remaining() >= magic.length) { + bb.get(magic); + } else { + isAggregated = false; + } + + if (!Arrays.equals(AGGREGATED_RECORD_MAGIC, magic) || bb.remaining() <= DIGEST_SIZE) { + isAggregated = false; + } + + if (isAggregated) { + int oldLimit = bb.limit(); + bb.limit(oldLimit - DIGEST_SIZE); + byte[] messageData = new byte[bb.remaining()]; + bb.get(messageData); + bb.limit(oldLimit); + bb.get(digest); + byte[] calculatedDigest = md5(messageData); + + if (!Arrays.equals(digest, calculatedDigest)) { + isAggregated = false; + } else { + try { + Messages.AggregatedRecord ar = Messages.AggregatedRecord.parseFrom(messageData); + List pks = ar.getPartitionKeyTableList(); + List ehks = ar.getExplicitHashKeyTableList(); + long aat = r.getApproximateArrivalTimestamp() == null + ? -1 : r.getApproximateArrivalTimestamp().getTime(); + try { + int recordsInCurrRecord = 0; + for (Messages.Record mr : ar.getRecordsList()) { + String explicitHashKey = null; + String partitionKey = pks.get((int) mr.getPartitionKeyIndex()); + if (mr.hasExplicitHashKeyIndex()) { + explicitHashKey = ehks.get((int) mr.getExplicitHashKeyIndex()); + } + + BigInteger effectiveHashKey = explicitHashKey != null + ? 
new BigInteger(explicitHashKey) + : new BigInteger(1, md5(partitionKey.getBytes("UTF-8"))); + + if (effectiveHashKey.compareTo(startingHashKey) < 0 + || effectiveHashKey.compareTo(endingHashKey) > 0) { + for (int toRemove = 0; toRemove < recordsInCurrRecord; ++toRemove) { + result.remove(result.size() - 1); + } + break; + } + + ++recordsInCurrRecord; + Record record = new Record() + .withData(ByteBuffer.wrap(mr.getData().toByteArray())) + .withPartitionKey(partitionKey) + .withSequenceNumber(r.getSequenceNumber()) + .withApproximateArrivalTimestamp(aat < 0 ? null : new Date(aat)); + result.add(new UserRecord(true, record, subSeqNum++, explicitHashKey)); + } + } catch (Exception e) { + StringBuilder sb = new StringBuilder(); + sb.append("Unexpected exception during deaggregation, record was:\n"); + sb.append("PKS:\n"); + for (String s : pks) { + sb.append(s).append("\n"); + } + sb.append("EHKS: \n"); + for (String s : ehks) { + sb.append(s).append("\n"); + } + for (Messages.Record mr : ar.getRecordsList()) { + sb.append("Record: [hasEhk=").append(mr.hasExplicitHashKeyIndex()).append(", ") + .append("ehkIdx=").append(mr.getExplicitHashKeyIndex()).append(", ") + .append("pkIdx=").append(mr.getPartitionKeyIndex()).append(", ") + .append("dataLen=").append(mr.getData().toByteArray().length).append("]\n"); + } + sb.append("Sequence number: ").append(r.getSequenceNumber()).append("\n") + .append("Raw data: ") + .append(javax.xml.bind.DatatypeConverter.printBase64Binary(messageData)).append("\n"); + LOG.error(sb.toString(), e); + } + } catch (InvalidProtocolBufferException e) { + isAggregated = false; + } + } + } + + if (!isAggregated) { + bb.rewind(); + result.add(new UserRecord(r)); + } + } + return result; + } + // CHECKSTYLE:ON NPathComplexity +} diff --git a/src/main/java/com/amazonaws/services/kinesis/clientlibrary/utils/NamedThreadFactory.java b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/utils/NamedThreadFactory.java new file mode 100644 index 
00000000..29d8a7be --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/clientlibrary/utils/NamedThreadFactory.java @@ -0,0 +1,32 @@ +package com.amazonaws.services.kinesis.clientlibrary.utils; + +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicInteger; + + +/** + * Custom thread factory that sets thread names based on the specified prefix. + */ +public class NamedThreadFactory implements ThreadFactory { + + private String threadPrefix; + private ThreadFactory defaultFactory = Executors.defaultThreadFactory(); + private AtomicInteger counter = new AtomicInteger(0); + + /** + * Construct a thread factory that uses the specified parameter as the thread prefix. + * + * @param threadPrefix the prefix with witch all created threads will be named + */ + public NamedThreadFactory(String threadPrefix) { + this.threadPrefix = threadPrefix; + } + + @Override + public Thread newThread(Runnable r) { + Thread thread = defaultFactory.newThread(r); + thread.setName(threadPrefix + counter.incrementAndGet()); + return thread; + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/DependencyException.java b/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/DependencyException.java new file mode 100644 index 00000000..af5a8fec --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/DependencyException.java @@ -0,0 +1,34 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.exceptions; + +/** + * Indicates that a lease operation has failed because a dependency of the leasing system has failed. This will happen + * if DynamoDB throws an InternalServerException or a generic AmazonClientException (the specific subclasses of + * AmazonClientException are all handled more gracefully). + */ +public class DependencyException extends LeasingException { + + private static final long serialVersionUID = 1L; + + public DependencyException(Throwable e) { + super(e); + } + + public DependencyException(String message, Throwable e) { + super(message, e); + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/InvalidStateException.java b/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/InvalidStateException.java new file mode 100644 index 00000000..2cf44d20 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/InvalidStateException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.exceptions; + +/** + * Indicates that a lease operation has failed because DynamoDB is an invalid state. The most common example is failing + * to create the DynamoDB table before doing any lease operations. 
+ */ +public class InvalidStateException extends LeasingException { + + private static final long serialVersionUID = 1L; + + public InvalidStateException(Throwable e) { + super(e); + } + + public InvalidStateException(String message, Throwable e) { + super(message, e); + } + + public InvalidStateException(String message) { + super(message); + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/LeasingException.java b/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/LeasingException.java new file mode 100644 index 00000000..00b3ea02 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/LeasingException.java @@ -0,0 +1,36 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.exceptions; + +/** + * Top-level exception type for all exceptions thrown by the leasing code. 
+ */ +public class LeasingException extends Exception { + + public LeasingException(Throwable e) { + super(e); + } + + public LeasingException(String message, Throwable e) { + super(message, e); + } + + public LeasingException(String message) { + super(message); + } + + private static final long serialVersionUID = 1L; + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/ProvisionedThroughputException.java b/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/ProvisionedThroughputException.java new file mode 100644 index 00000000..167cb6aa --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/exceptions/ProvisionedThroughputException.java @@ -0,0 +1,32 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.exceptions; + +/** + * Indicates that a lease operation has failed due to lack of provisioned throughput for a DynamoDB table. 
+ */ +public class ProvisionedThroughputException extends LeasingException { + + private static final long serialVersionUID = 1L; + + public ProvisionedThroughputException(Throwable e) { + super(e); + } + + public ProvisionedThroughputException(String message, Throwable e) { + super(message, e); + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLease.java b/src/main/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLease.java new file mode 100644 index 00000000..b3a0ce6c --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLease.java @@ -0,0 +1,181 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; +import java.util.UUID; + +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; + +/** + * A Lease subclass containing KinesisClientLibrary related fields for checkpoints. 
+ */ +public class KinesisClientLease extends Lease { + + private ExtendedSequenceNumber checkpoint; + private Long ownerSwitchesSinceCheckpoint = 0L; + private Set parentShardIds = new HashSet(); + + public KinesisClientLease() { + + } + + public KinesisClientLease(KinesisClientLease other) { + super(other); + this.checkpoint = other.getCheckpoint(); + this.ownerSwitchesSinceCheckpoint = other.getOwnerSwitchesSinceCheckpoint(); + this.parentShardIds.addAll(other.getParentShardIds()); + } + + KinesisClientLease(String leaseKey, String leaseOwner, Long leaseCounter, UUID concurrencyToken, + Long lastCounterIncrementNanos, ExtendedSequenceNumber checkpoint, Long ownerSwitchesSinceCheckpoint, + Set parentShardIds) { + super(leaseKey, leaseOwner, leaseCounter, concurrencyToken, lastCounterIncrementNanos); + + this.checkpoint = checkpoint; + this.ownerSwitchesSinceCheckpoint = ownerSwitchesSinceCheckpoint; + this.parentShardIds.addAll(parentShardIds); + } + + /** + * {@inheritDoc} + */ + @Override + public void update(T other) { + super.update(other); + if (!(other instanceof KinesisClientLease)) { + throw new IllegalArgumentException("Must pass KinesisClientLease object to KinesisClientLease.update(Lease)"); + } + KinesisClientLease casted = (KinesisClientLease) other; + + setOwnerSwitchesSinceCheckpoint(casted.ownerSwitchesSinceCheckpoint); + setCheckpoint(casted.checkpoint); + setParentShardIds(casted.parentShardIds); + } + + /** + * @return most recently application-supplied checkpoint value. During fail over, the new worker will pick up after + * the old worker's last checkpoint. + */ + public ExtendedSequenceNumber getCheckpoint() { + return checkpoint; + } + + /** + * @return count of distinct lease holders between checkpoints. + */ + public Long getOwnerSwitchesSinceCheckpoint() { + return ownerSwitchesSinceCheckpoint; + } + + /** + * @return shardIds that parent this lease. Used for resharding. 
+ */ + public Set getParentShardIds() { + return new HashSet(parentShardIds); + } + + /** + * Sets checkpoint. + * + * @param checkpoint may not be null + */ + public void setCheckpoint(ExtendedSequenceNumber checkpoint) { + verifyNotNull(checkpoint, "Checkpoint should not be null"); + + this.checkpoint = checkpoint; + } + + /** + * Sets ownerSwitchesSinceCheckpoint. + * + * @param ownerSwitchesSinceCheckpoint may not be null + */ + public void setOwnerSwitchesSinceCheckpoint(Long ownerSwitchesSinceCheckpoint) { + verifyNotNull(ownerSwitchesSinceCheckpoint, "ownerSwitchesSinceCheckpoint should not be null"); + + this.ownerSwitchesSinceCheckpoint = ownerSwitchesSinceCheckpoint; + } + + /** + * Sets parentShardIds. + * + * @param parentShardIds may not be null + */ + public void setParentShardIds(Collection parentShardIds) { + verifyNotNull(parentShardIds, "parentShardIds should not be null"); + + this.parentShardIds.clear(); + this.parentShardIds.addAll(parentShardIds); + } + + private void verifyNotNull(Object object, String message) { + if (object == null) { + throw new IllegalArgumentException(message); + } + } + + @Override + public int hashCode() { + final int prime = 31; + int result = super.hashCode(); + result = prime * result + ((checkpoint == null) ? 0 : checkpoint.hashCode()); + result = + prime * result + ((ownerSwitchesSinceCheckpoint == null) ? 0 : ownerSwitchesSinceCheckpoint.hashCode()); + result = prime * result + ((parentShardIds == null) ? 
0 : parentShardIds.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (!super.equals(obj)) + return false; + if (getClass() != obj.getClass()) + return false; + KinesisClientLease other = (KinesisClientLease) obj; + if (checkpoint == null) { + if (other.checkpoint != null) + return false; + } else if (!checkpoint.equals(other.checkpoint)) + return false; + if (ownerSwitchesSinceCheckpoint == null) { + if (other.ownerSwitchesSinceCheckpoint != null) + return false; + } else if (!ownerSwitchesSinceCheckpoint.equals(other.ownerSwitchesSinceCheckpoint)) + return false; + if (parentShardIds == null) { + if (other.parentShardIds != null) + return false; + } else if (!parentShardIds.equals(other.parentShardIds)) + return false; + return true; + } + + /** + * Returns a deep copy of this object. Type-unsafe - there aren't good mechanisms for copy-constructing generics. + * + * @return A deep copy of this object. + */ + @Override + @SuppressWarnings("unchecked") + public T copy() { + return (T) new KinesisClientLease(this); + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLeaseManager.java b/src/main/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLeaseManager.java new file mode 100644 index 00000000..8727d4ce --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLeaseManager.java @@ -0,0 +1,87 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.impl; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.interfaces.IKinesisClientLeaseManager; + +/** + * An implementation of LeaseManager for the KinesisClientLibrary - takeLease updates the ownerSwitchesSinceCheckpoint field. + */ +public class KinesisClientLeaseManager extends LeaseManager implements IKinesisClientLeaseManager { + + @SuppressWarnings("unused") + private static final Log LOG = LogFactory.getLog(KinesisClientLeaseManager.class); + + /** + * Constructor. + * + * @param table Leases table + * @param dynamoDBClient DynamoDB client to use + */ + public KinesisClientLeaseManager(String table, AmazonDynamoDB dynamoDBClient) { + this(table, dynamoDBClient, false); + } + + /** + * Constructor for integration tests - see comment on superclass for documentation on setting the consistentReads + * flag. + * + * @param table leases table + * @param dynamoDBClient DynamoDB client to use + * @param consistentReads true if we want consistent reads for testing purposes. 
+ */ + public KinesisClientLeaseManager(String table, AmazonDynamoDB dynamoDBClient, boolean consistentReads) { + super(table, dynamoDBClient, new KinesisClientLeaseSerializer(), consistentReads); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean takeLease(KinesisClientLease lease, String newOwner) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + String oldOwner = lease.getLeaseOwner(); + + boolean result = super.takeLease(lease, newOwner); + + if (oldOwner != null && !oldOwner.equals(newOwner)) { + lease.setOwnerSwitchesSinceCheckpoint(lease.getOwnerSwitchesSinceCheckpoint() + 1); + } + + return result; + } + + /** + * {@inheritDoc} + */ + @Override + public ExtendedSequenceNumber getCheckpoint(String shardId) + throws ProvisionedThroughputException, InvalidStateException, DependencyException { + ExtendedSequenceNumber checkpoint = null; + KinesisClientLease lease = getLease(shardId); + if (lease != null) { + checkpoint = lease.getCheckpoint(); + } + return checkpoint; + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLeaseSerializer.java b/src/main/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLeaseSerializer.java new file mode 100644 index 00000000..0fad61ea --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLeaseSerializer.java @@ -0,0 +1,144 @@ +/* + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.Collection; +import java.util.Map; + +import com.amazonaws.services.dynamodbv2.model.AttributeAction; +import com.amazonaws.services.dynamodbv2.model.AttributeDefinition; +import com.amazonaws.services.dynamodbv2.model.AttributeValue; +import com.amazonaws.services.dynamodbv2.model.AttributeValueUpdate; +import com.amazonaws.services.dynamodbv2.model.ExpectedAttributeValue; +import com.amazonaws.services.dynamodbv2.model.KeySchemaElement; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseSerializer; +import com.amazonaws.services.kinesis.leases.util.DynamoUtils; + +/** + * An implementation of ILeaseSerializer for KinesisClientLease objects. + */ +public class KinesisClientLeaseSerializer implements ILeaseSerializer { + + private static final String OWNER_SWITCHES_KEY = "ownerSwitchesSinceCheckpoint"; + private static final String CHECKPOINT_SEQUENCE_NUMBER_KEY = "checkpoint"; + private static final String CHECKPOINT_SUBSEQUENCE_NUMBER_KEY = "checkpointSubSequenceNumber"; + public final String PARENT_SHARD_ID_KEY = "parentShardId"; + + private final LeaseSerializer baseSerializer = new LeaseSerializer(KinesisClientLease.class); + + @Override + public Map toDynamoRecord(KinesisClientLease lease) { + Map result = baseSerializer.toDynamoRecord(lease); + + result.put(OWNER_SWITCHES_KEY, DynamoUtils.createAttributeValue(lease.getOwnerSwitchesSinceCheckpoint())); + result.put(CHECKPOINT_SEQUENCE_NUMBER_KEY, DynamoUtils.createAttributeValue(lease.getCheckpoint().getSequenceNumber())); + result.put(CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, DynamoUtils.createAttributeValue(lease.getCheckpoint().getSubSequenceNumber())); + if (lease.getParentShardIds() != null && !lease.getParentShardIds().isEmpty()) { + 
result.put(PARENT_SHARD_ID_KEY, DynamoUtils.createAttributeValue(lease.getParentShardIds())); + } + + return result; + } + + @Override + public KinesisClientLease fromDynamoRecord(Map dynamoRecord) { + KinesisClientLease result = (KinesisClientLease) baseSerializer.fromDynamoRecord(dynamoRecord); + + result.setOwnerSwitchesSinceCheckpoint(DynamoUtils.safeGetLong(dynamoRecord, OWNER_SWITCHES_KEY)); + result.setCheckpoint( + new ExtendedSequenceNumber( + DynamoUtils.safeGetString(dynamoRecord, CHECKPOINT_SEQUENCE_NUMBER_KEY), + DynamoUtils.safeGetLong(dynamoRecord, CHECKPOINT_SUBSEQUENCE_NUMBER_KEY)) + ); + result.setParentShardIds(DynamoUtils.safeGetSS(dynamoRecord, PARENT_SHARD_ID_KEY)); + + return result; + } + + @Override + public Map getDynamoHashKey(KinesisClientLease lease) { + return baseSerializer.getDynamoHashKey(lease); + } + + @Override + public Map getDynamoHashKey(String shardId) { + return baseSerializer.getDynamoHashKey(shardId); + } + + @Override + public Map getDynamoLeaseCounterExpectation(KinesisClientLease lease) { + return baseSerializer.getDynamoLeaseCounterExpectation(lease); + } + + @Override + public Map getDynamoLeaseOwnerExpectation(KinesisClientLease lease) { + return baseSerializer.getDynamoLeaseOwnerExpectation(lease); + } + + @Override + public Map getDynamoNonexistantExpectation() { + return baseSerializer.getDynamoNonexistantExpectation(); + } + + @Override + public Map getDynamoLeaseCounterUpdate(KinesisClientLease lease) { + return baseSerializer.getDynamoLeaseCounterUpdate(lease); + } + + @Override + public Map getDynamoTakeLeaseUpdate(KinesisClientLease lease, String newOwner) { + Map result = baseSerializer.getDynamoTakeLeaseUpdate(lease, newOwner); + + String oldOwner = lease.getLeaseOwner(); + if (oldOwner != null && !oldOwner.equals(newOwner)) { + result.put(OWNER_SWITCHES_KEY, new AttributeValueUpdate(DynamoUtils.createAttributeValue(1L), + AttributeAction.ADD)); + } + + return result; + } + + @Override + public Map 
getDynamoEvictLeaseUpdate(KinesisClientLease lease) { + return baseSerializer.getDynamoEvictLeaseUpdate(lease); + } + + @Override + public Map getDynamoUpdateLeaseUpdate(KinesisClientLease lease) { + Map result = baseSerializer.getDynamoUpdateLeaseUpdate(lease); + + result.put(CHECKPOINT_SEQUENCE_NUMBER_KEY, new AttributeValueUpdate(DynamoUtils.createAttributeValue(lease.getCheckpoint().getSequenceNumber()), + AttributeAction.PUT)); + result.put(CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, new AttributeValueUpdate(DynamoUtils.createAttributeValue(lease.getCheckpoint().getSubSequenceNumber()), + AttributeAction.PUT)); + result.put(OWNER_SWITCHES_KEY, + new AttributeValueUpdate(DynamoUtils.createAttributeValue(lease.getOwnerSwitchesSinceCheckpoint()), + AttributeAction.PUT)); + + return result; + } + + @Override + public Collection getKeySchema() { + return baseSerializer.getKeySchema(); + } + + @Override + public Collection getAttributeDefinitions() { + return baseSerializer.getAttributeDefinitions(); + } + +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/Lease.java b/src/main/java/com/amazonaws/services/kinesis/leases/impl/Lease.java similarity index 50% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/Lease.java rename to src/main/java/com/amazonaws/services/kinesis/leases/impl/Lease.java index b8b50fa1..32234e35 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/Lease.java +++ b/src/main/java/com/amazonaws/services/kinesis/leases/impl/Lease.java @@ -1,34 +1,23 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.leases; +package com.amazonaws.services.kinesis.leases.impl; -import java.util.Collection; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; -import com.google.common.collect.Collections2; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.NoArgsConstructor; -import lombok.NonNull; -import lombok.ToString; -import lombok.experimental.Accessors; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import com.amazonaws.util.json.Jackson; /** * This class contains data pertaining to a Lease. Distributed systems may use leases to partition work across a @@ -37,11 +26,6 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; * processing the corresponding unit of work, or until it fails. When the worker stops holding the lease, another worker will * take and hold the lease. 
*/ -@NoArgsConstructor -@Getter -@Accessors(fluent = true) -@EqualsAndHashCode(exclude = {"concurrencyToken", "lastCounterIncrementNanos"}) -@ToString public class Lease { /* * See javadoc for System.nanoTime - summary: @@ -51,17 +35,8 @@ public class Lease { */ private static final long MAX_ABS_AGE_NANOS = TimeUnit.DAYS.toNanos(365); - /** - * @return leaseKey - identifies the unit of work associated with this lease. - */ private String leaseKey; - /** - * @return current owner of the lease, may be null. - */ private String leaseOwner; - /** - * @return leaseCounter is incremented periodically by the holder of the lease. Used for optimistic locking. - */ private Long leaseCounter = 0L; /* @@ -75,20 +50,12 @@ public class Lease { * deliberately not persisted in DynamoDB and excluded from hashCode and equals. */ private Long lastCounterIncrementNanos; + /** - * @return most recently application-supplied checkpoint value. During fail over, the new worker will pick up after - * the old worker's last checkpoint. + * Constructor. */ - private ExtendedSequenceNumber checkpoint; - /** - * @return pending checkpoint, possibly null. - */ - private ExtendedSequenceNumber pendingCheckpoint; - /** - * @return count of distinct lease holders between checkpoints. - */ - private Long ownerSwitchesSinceCheckpoint = 0L; - private Set parentShardIds = new HashSet<>(); + public Lease() { + } /** * Copy constructor, used by clone(). 
@@ -96,46 +63,62 @@ public class Lease { * @param lease lease to copy */ protected Lease(Lease lease) { - this(lease.leaseKey(), lease.leaseOwner(), lease.leaseCounter(), lease.concurrencyToken(), - lease.lastCounterIncrementNanos(), lease.checkpoint(), lease.pendingCheckpoint(), - lease.ownerSwitchesSinceCheckpoint(), lease.parentShardIds()); + this(lease.getLeaseKey(), lease.getLeaseOwner(), lease.getLeaseCounter(), lease.getConcurrencyToken(), + lease.getLastCounterIncrementNanos()); } - public Lease(final String leaseKey, final String leaseOwner, final Long leaseCounter, - final UUID concurrencyToken, final Long lastCounterIncrementNanos, - final ExtendedSequenceNumber checkpoint, final ExtendedSequenceNumber pendingCheckpoint, - final Long ownerSwitchesSinceCheckpoint, final Set parentShardIds) { + protected Lease(String leaseKey, String leaseOwner, Long leaseCounter, UUID concurrencyToken, + Long lastCounterIncrementNanos) { this.leaseKey = leaseKey; this.leaseOwner = leaseOwner; this.leaseCounter = leaseCounter; this.concurrencyToken = concurrencyToken; this.lastCounterIncrementNanos = lastCounterIncrementNanos; - this.checkpoint = checkpoint; - this.pendingCheckpoint = pendingCheckpoint; - this.ownerSwitchesSinceCheckpoint = ownerSwitchesSinceCheckpoint; - if (parentShardIds != null) { - this.parentShardIds.addAll(parentShardIds); - } - } - - /** - * @return shardIds that parent this lease. Used for resharding. - */ - public Set parentShardIds() { - return new HashSet<>(parentShardIds); } /** * Updates this Lease's mutable, application-specific fields based on the passed-in lease object. Does not update * fields that are internal to the leasing library (leaseKey, leaseOwner, leaseCounter). 
* - * @param lease + * @param other */ - public void update(final Lease lease) { - ownerSwitchesSinceCheckpoint(lease.ownerSwitchesSinceCheckpoint()); - checkpoint(lease.checkpoint); - pendingCheckpoint(lease.pendingCheckpoint); - parentShardIds(lease.parentShardIds); + public void update(T other) { + // The default implementation (no application-specific fields) has nothing to do. + } + + /** + * @return leaseKey - identifies the unit of work associated with this lease. + */ + public String getLeaseKey() { + return leaseKey; + } + + /** + * @return leaseCounter is incremented periodically by the holder of the lease. Used for optimistic locking. + */ + public Long getLeaseCounter() { + return leaseCounter; + } + + /** + * @return current owner of the lease, may be null. + */ + public String getLeaseOwner() { + return leaseOwner; + } + + /** + * @return concurrency token + */ + public UUID getConcurrencyToken() { + return concurrencyToken; + } + + /** + * @return last update in nanoseconds since the epoch + */ + public Long getLastCounterIncrementNanos() { + return lastCounterIncrementNanos; } /** @@ -162,7 +145,7 @@ public class Lease { * * @param lastCounterIncrementNanos last renewal in nanoseconds since the epoch */ - public void lastCounterIncrementNanos(Long lastCounterIncrementNanos) { + public void setLastCounterIncrementNanos(Long lastCounterIncrementNanos) { this.lastCounterIncrementNanos = lastCounterIncrementNanos; } @@ -171,7 +154,8 @@ public class Lease { * * @param concurrencyToken may not be null */ - public void concurrencyToken(@NonNull final UUID concurrencyToken) { + public void setConcurrencyToken(UUID concurrencyToken) { + verifyNotNull(concurrencyToken, "concurencyToken cannot be null"); this.concurrencyToken = concurrencyToken; } @@ -180,10 +164,12 @@ public class Lease { * * @param leaseKey may not be null. 
*/ - public void leaseKey(@NonNull final String leaseKey) { + public void setLeaseKey(String leaseKey) { if (this.leaseKey != null) { throw new IllegalArgumentException("LeaseKey is immutable once set"); } + verifyNotNull(leaseKey, "LeaseKey cannot be set to null"); + this.leaseKey = leaseKey; } @@ -192,62 +178,77 @@ public class Lease { * * @param leaseCounter may not be null */ - public void leaseCounter(@NonNull final Long leaseCounter) { + public void setLeaseCounter(Long leaseCounter) { + verifyNotNull(leaseCounter, "leaseCounter must not be null"); + this.leaseCounter = leaseCounter; } - /** - * Sets checkpoint. - * - * @param checkpoint may not be null - */ - public void checkpoint(@NonNull final ExtendedSequenceNumber checkpoint) { - this.checkpoint = checkpoint; - } - - /** - * Sets pending checkpoint. - * - * @param pendingCheckpoint can be null - */ - public void pendingCheckpoint(ExtendedSequenceNumber pendingCheckpoint) { - this.pendingCheckpoint = pendingCheckpoint; - } - - /** - * Sets ownerSwitchesSinceCheckpoint. - * - * @param ownerSwitchesSinceCheckpoint may not be null - */ - public void ownerSwitchesSinceCheckpoint(@NonNull final Long ownerSwitchesSinceCheckpoint) { - this.ownerSwitchesSinceCheckpoint = ownerSwitchesSinceCheckpoint; - } - - /** - * Sets parentShardIds. - * - * @param parentShardIds may not be null - */ - public void parentShardIds(@NonNull final Collection parentShardIds) { - this.parentShardIds.clear(); - this.parentShardIds.addAll(parentShardIds); - } - /** * Sets leaseOwner. * * @param leaseOwner may be null. */ - public void leaseOwner(String leaseOwner) { + public void setLeaseOwner(String leaseOwner) { this.leaseOwner = leaseOwner; } + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((leaseCounter == null) ? 0 : leaseCounter.hashCode()); + result = prime * result + ((leaseOwner == null) ? 0 : leaseOwner.hashCode()); + result = prime * result + ((leaseKey == null) ? 
0 : leaseKey.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + Lease other = (Lease) obj; + if (leaseCounter == null) { + if (other.leaseCounter != null) + return false; + } else if (!leaseCounter.equals(other.leaseCounter)) + return false; + if (leaseOwner == null) { + if (other.leaseOwner != null) + return false; + } else if (!leaseOwner.equals(other.leaseOwner)) + return false; + if (leaseKey == null) { + if (other.leaseKey != null) + return false; + } else if (!leaseKey.equals(other.leaseKey)) + return false; + return true; + } + + @Override + public String toString() { + return Jackson.toJsonPrettyString(this); + } + /** * Returns a deep copy of this object. Type-unsafe - there aren't good mechanisms for copy-constructing generics. * * @return A deep copy of this object. */ - public Lease copy() { - return new Lease(this); + @SuppressWarnings("unchecked") + public T copy() { + return (T) new Lease(this); } + + private void verifyNotNull(Object object, String message) { + if (object == null) { + throw new IllegalArgumentException(message); + } + } + } diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseCoordinator.java b/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseCoordinator.java new file mode 100644 index 00000000..76cc7e87 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseCoordinator.java @@ -0,0 +1,371 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.Collection; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.utils.NamedThreadFactory; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.LeasingException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseRenewer; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseTaker; +import com.amazonaws.services.kinesis.metrics.impl.LogMetricsFactory; +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; + +/** + * LeaseCoordinator abstracts away LeaseTaker and LeaseRenewer from the application code that's using leasing. It owns + * the scheduling of the two previously mentioned components as well as informing LeaseRenewer when LeaseTaker takes new + * leases. 
+ * + */ +public class LeaseCoordinator { + + /* + * Name of the dimension used when setting worker identifier on IMetricsScopes. Exposed so that users of this class + * can easily create InterceptingMetricsFactories that rename this dimension to suit the destination metrics system. + */ + public static final String WORKER_IDENTIFIER_METRIC = "WorkerIdentifier"; + + private static final Log LOG = LogFactory.getLog(LeaseCoordinator.class); + + // Time to wait for in-flight Runnables to finish when calling .stop(); + private static final long STOP_WAIT_TIME_MILLIS = 2000L; + + private static final int DEFAULT_MAX_LEASES_FOR_WORKER = Integer.MAX_VALUE; + private static final int DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME = 1; + + private static final ThreadFactory LEASE_COORDINATOR_THREAD_FACTORY = new NamedThreadFactory("LeaseCoordinator-"); + private static final ThreadFactory LEASE_RENEWAL_THREAD_FACTORY = new NamedThreadFactory("LeaseRenewer-"); + + // Package level access for testing. + static final int MAX_LEASE_RENEWAL_THREAD_COUNT = 20; + + + private final ILeaseRenewer leaseRenewer; + private final ILeaseTaker leaseTaker; + private final long renewerIntervalMillis; + private final long takerIntervalMillis; + + private final Object shutdownLock = new Object(); + + protected final IMetricsFactory metricsFactory; + + private ScheduledExecutorService leaseCoordinatorThreadPool; + private final ExecutorService leaseRenewalThreadpool; + private volatile boolean running = false; + private ScheduledFuture takerFuture; + + /** + * Constructor. + * + * @param leaseManager LeaseManager instance to use + * @param workerIdentifier Identifies the worker (e.g. 
useful to track lease ownership) + * @param leaseDurationMillis Duration of a lease + * @param epsilonMillis Allow for some variance when calculating lease expirations + */ + public LeaseCoordinator(ILeaseManager leaseManager, + String workerIdentifier, + long leaseDurationMillis, + long epsilonMillis) { + this(leaseManager, workerIdentifier, leaseDurationMillis, epsilonMillis, new LogMetricsFactory()); + } + + /** + * Constructor. + * + * @param leaseManager LeaseManager instance to use + * @param workerIdentifier Identifies the worker (e.g. useful to track lease ownership) + * @param leaseDurationMillis Duration of a lease + * @param epsilonMillis Allow for some variance when calculating lease expirations + * @param metricsFactory Used to publish metrics about lease operations + */ + public LeaseCoordinator(ILeaseManager leaseManager, + String workerIdentifier, + long leaseDurationMillis, + long epsilonMillis, + IMetricsFactory metricsFactory) { + this(leaseManager, workerIdentifier, leaseDurationMillis, epsilonMillis, + DEFAULT_MAX_LEASES_FOR_WORKER, DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME, metricsFactory); + } + + /** + * Constructor. + * + * @param leaseManager LeaseManager instance to use + * @param workerIdentifier Identifies the worker (e.g. 
useful to track lease ownership) + * @param leaseDurationMillis Duration of a lease + * @param epsilonMillis Allow for some variance when calculating lease expirations + * @param maxLeasesForWorker Max leases this Worker can handle at a time + * @param maxLeasesToStealAtOneTime Steal up to these many leases at a time (for load balancing) + * @param metricsFactory Used to publish metrics about lease operations + */ + public LeaseCoordinator(ILeaseManager leaseManager, + String workerIdentifier, + long leaseDurationMillis, + long epsilonMillis, + int maxLeasesForWorker, + int maxLeasesToStealAtOneTime, + IMetricsFactory metricsFactory) { + this.leaseRenewalThreadpool = getLeaseRenewalExecutorService(MAX_LEASE_RENEWAL_THREAD_COUNT); + this.leaseTaker = new LeaseTaker(leaseManager, workerIdentifier, leaseDurationMillis) + .withMaxLeasesForWorker(maxLeasesForWorker) + .withMaxLeasesToStealAtOneTime(maxLeasesToStealAtOneTime); + this.leaseRenewer = new LeaseRenewer( + leaseManager, workerIdentifier, leaseDurationMillis, leaseRenewalThreadpool); + this.renewerIntervalMillis = leaseDurationMillis / 3 - epsilonMillis; + this.takerIntervalMillis = (leaseDurationMillis + epsilonMillis) * 2; + this.metricsFactory = metricsFactory; + + LOG.info(String.format( + "With failover time %d ms and epsilon %d ms, LeaseCoordinator will renew leases every %d ms, take" + + "leases every %d ms, process maximum of %d leases and steal %d lease(s) at a time.", + leaseDurationMillis, + epsilonMillis, + renewerIntervalMillis, + takerIntervalMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime)); + } + + private class TakerRunnable implements Runnable { + + @Override + public void run() { + try { + runTaker(); + } catch (LeasingException e) { + LOG.error("LeasingException encountered in lease taking thread", e); + } catch (Throwable t) { + LOG.error("Throwable encountered in lease taking thread", t); + } + } + + } + + private class RenewerRunnable implements Runnable { + + @Override + 
public void run() { + try { + runRenewer(); + } catch (LeasingException e) { + LOG.error("LeasingException encountered in lease renewing thread", e); + } catch (Throwable t) { + LOG.error("Throwable encountered in lease renewing thread", t); + } + } + + } + + /** + * Start background LeaseHolder and LeaseTaker threads. + * @throws ProvisionedThroughputException If we can't talk to DynamoDB due to insufficient capacity. + * @throws InvalidStateException If the lease table doesn't exist + * @throws DependencyException If we encountered exception taking to DynamoDB + */ + public void start() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + leaseRenewer.initialize(); + + // 2 because we know we'll have at most 2 concurrent tasks at a time. + leaseCoordinatorThreadPool = Executors.newScheduledThreadPool(2, LEASE_COORDINATOR_THREAD_FACTORY); + + // Taker runs with fixed DELAY because we want it to run slower in the event of performance degredation. + takerFuture = leaseCoordinatorThreadPool.scheduleWithFixedDelay(new TakerRunnable(), + 0L, + takerIntervalMillis, + TimeUnit.MILLISECONDS); + // Renewer runs at fixed INTERVAL because we want it to run at the same rate in the event of degredation. + leaseCoordinatorThreadPool.scheduleAtFixedRate(new RenewerRunnable(), + 0L, + renewerIntervalMillis, + TimeUnit.MILLISECONDS); + running = true; + } + + /** + * Runs a single iteration of the lease taker - used by integration tests. + * + * @throws InvalidStateException + * @throws DependencyException + */ + protected void runTaker() throws DependencyException, InvalidStateException { + IMetricsScope scope = MetricsHelper.startScope(metricsFactory, "TakeLeases"); + long startTime = System.currentTimeMillis(); + boolean success = false; + + try { + Map takenLeases = leaseTaker.takeLeases(); + + // Only add taken leases to renewer if coordinator is still running. 
+ synchronized (shutdownLock) { + if (running) { + leaseRenewer.addLeasesToRenew(takenLeases.values()); + } + } + + success = true; + } finally { + scope.addDimension(WORKER_IDENTIFIER_METRIC, getWorkerIdentifier()); + MetricsHelper.addSuccessAndLatency(startTime, success, MetricsLevel.SUMMARY); + MetricsHelper.endScope(); + } + } + + /** + * Runs a single iteration of the lease renewer - used by integration tests. + * + * @throws InvalidStateException + * @throws DependencyException + */ + protected void runRenewer() throws DependencyException, InvalidStateException { + IMetricsScope scope = MetricsHelper.startScope(metricsFactory, "RenewAllLeases"); + long startTime = System.currentTimeMillis(); + boolean success = false; + + try { + leaseRenewer.renewLeases(); + success = true; + } finally { + scope.addDimension(WORKER_IDENTIFIER_METRIC, getWorkerIdentifier()); + MetricsHelper.addSuccessAndLatency(startTime, success, MetricsLevel.SUMMARY); + MetricsHelper.endScope(); + } + } + + /** + * @return currently held leases + */ + public Collection getAssignments() { + return leaseRenewer.getCurrentlyHeldLeases().values(); + } + + /** + * @param leaseKey lease key to fetch currently held lease for + * + * @return deep copy of currently held Lease for given key, or null if we don't hold the lease for that key + */ + public T getCurrentlyHeldLease(String leaseKey) { + return leaseRenewer.getCurrentlyHeldLease(leaseKey); + } + + /** + * @return workerIdentifier + */ + public String getWorkerIdentifier() { + return leaseTaker.getWorkerIdentifier(); + } + + /** + * Stops background threads and waits for {@link #STOP_WAIT_TIME_MILLIS} for all background tasks to complete. + * If tasks are not completed after this time, method will shutdown thread pool forcefully and return. 
+ */ + public void stop() { + if (leaseCoordinatorThreadPool != null) { + leaseCoordinatorThreadPool.shutdown(); + try { + if (leaseCoordinatorThreadPool.awaitTermination(STOP_WAIT_TIME_MILLIS, TimeUnit.MILLISECONDS)) { + LOG.info(String.format("Worker %s has successfully stopped lease-tracking threads", + leaseTaker.getWorkerIdentifier())); + } else { + leaseCoordinatorThreadPool.shutdownNow(); + LOG.info(String.format("Worker %s stopped lease-tracking threads %dms after stop", + leaseTaker.getWorkerIdentifier(), + STOP_WAIT_TIME_MILLIS)); + } + } catch (InterruptedException e) { + LOG.debug("Encountered InterruptedException when awaiting threadpool termination"); + } + } else { + LOG.debug("Threadpool was null, no need to shutdown/terminate threadpool."); + } + + leaseRenewalThreadpool.shutdownNow(); + synchronized (shutdownLock) { + leaseRenewer.clearCurrentlyHeldLeases(); + running = false; + } + } + + /** + * Requests the cancellation of the lease taker. + */ + public void stopLeaseTaker() { + takerFuture.cancel(false); + + } + + /** + * Requests that renewals for the given lease are stopped. + * + * @param lease the lease to stop renewing. + */ + public void dropLease(T lease) { + synchronized (shutdownLock) { + if (lease != null) { + leaseRenewer.dropLease(lease); + } + } + } + + /** + * @return true if this LeaseCoordinator is running + */ + public boolean isRunning() { + return running; + } + + /** + * Updates application-specific lease values in DynamoDB. 
+ * + * @param lease lease object containing updated values + * @param concurrencyToken obtained by calling Lease.getConcurrencyToken for a currently held lease + * + * @return true if update succeeded, false otherwise + * + * @throws InvalidStateException if lease table does not exist + * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity + * @throws DependencyException if DynamoDB update fails in an unexpected way + */ + public boolean updateLease(T lease, UUID concurrencyToken) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + return leaseRenewer.updateLease(lease, concurrencyToken); + } + + /** + * Returns executor service that should be used for lease renewal. + * @param maximumPoolSize Maximum allowed thread pool size + * @return Executor service that should be used for lease renewal. + */ + private static ExecutorService getLeaseRenewalExecutorService(int maximumPoolSize) { + return Executors.newFixedThreadPool(maximumPoolSize, LEASE_RENEWAL_THREAD_FACTORY); + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseManager.java b/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseManager.java new file mode 100644 index 00000000..226756eb --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseManager.java @@ -0,0 +1,587 @@ +/* + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; +import com.amazonaws.services.dynamodbv2.model.AttributeValue; +import com.amazonaws.services.dynamodbv2.model.AttributeValueUpdate; +import com.amazonaws.services.dynamodbv2.model.ConditionalCheckFailedException; +import com.amazonaws.services.dynamodbv2.model.CreateTableRequest; +import com.amazonaws.services.dynamodbv2.model.DeleteItemRequest; +import com.amazonaws.services.dynamodbv2.model.DescribeTableRequest; +import com.amazonaws.services.dynamodbv2.model.DescribeTableResult; +import com.amazonaws.services.dynamodbv2.model.GetItemRequest; +import com.amazonaws.services.dynamodbv2.model.GetItemResult; +import com.amazonaws.services.dynamodbv2.model.LimitExceededException; +import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughput; +import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputExceededException; +import com.amazonaws.services.dynamodbv2.model.PutItemRequest; +import com.amazonaws.services.dynamodbv2.model.ResourceInUseException; +import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException; +import com.amazonaws.services.dynamodbv2.model.ScanRequest; +import com.amazonaws.services.dynamodbv2.model.ScanResult; +import com.amazonaws.services.dynamodbv2.model.TableStatus; +import com.amazonaws.services.dynamodbv2.model.UpdateItemRequest; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; 
+import com.amazonaws.services.kinesis.leases.interfaces.ILeaseSerializer; + +/** + * An implementation of ILeaseManager that uses DynamoDB. + */ +public class LeaseManager implements ILeaseManager { + + private static final Log LOG = LogFactory.getLog(LeaseManager.class); + + protected String table; + protected AmazonDynamoDB dynamoDBClient; + protected ILeaseSerializer serializer; + protected boolean consistentReads; + + /** + * Constructor. + * + * @param table leases table + * @param dynamoDBClient DynamoDB client to use + * @param serializer LeaseSerializer to use to convert to/from DynamoDB objects. + */ + public LeaseManager(String table, AmazonDynamoDB dynamoDBClient, ILeaseSerializer serializer) { + this(table, dynamoDBClient, serializer, false); + } + + /** + * Constructor for test cases - allows control of consistent reads. Consistent reads should only be used for testing + * - our code is meant to be resilient to inconsistent reads. Using consistent reads during testing speeds up + * execution of simple tests (you don't have to wait out the consistency window). Test cases that want to experience + * eventual consistency should not set consistentReads=true. + * + * @param table leases table + * @param dynamoDBClient DynamoDB client to use + * @param serializer lease serializer to use + * @param consistentReads true if we want consistent reads for testing purposes. 
+ */ + public LeaseManager(String table, AmazonDynamoDB dynamoDBClient, ILeaseSerializer serializer, boolean consistentReads) { + verifyNotNull(table, "Table name cannot be null"); + verifyNotNull(dynamoDBClient, "dynamoDBClient cannot be null"); + verifyNotNull(serializer, "ILeaseSerializer cannot be null"); + + this.table = table; + this.dynamoDBClient = dynamoDBClient; + this.consistentReads = consistentReads; + this.serializer = serializer; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean createLeaseTableIfNotExists(Long readCapacity, Long writeCapacity) + throws ProvisionedThroughputException, DependencyException { + verifyNotNull(readCapacity, "readCapacity cannot be null"); + verifyNotNull(writeCapacity, "writeCapacity cannot be null"); + + try { + if (tableStatus() != null) { + return false; + } + } catch (DependencyException de) { + // + // Something went wrong with DynamoDB + // + LOG.error("Failed to get table status for " + table, de); + } + CreateTableRequest request = new CreateTableRequest(); + request.setTableName(table); + request.setKeySchema(serializer.getKeySchema()); + request.setAttributeDefinitions(serializer.getAttributeDefinitions()); + + ProvisionedThroughput throughput = new ProvisionedThroughput(); + throughput.setReadCapacityUnits(readCapacity); + throughput.setWriteCapacityUnits(writeCapacity); + request.setProvisionedThroughput(throughput); + + try { + dynamoDBClient.createTable(request); + } catch (ResourceInUseException e) { + LOG.info("Table " + table + " already exists."); + return false; + } catch (LimitExceededException e) { + throw new ProvisionedThroughputException("Capacity exceeded when creating table " + table, e); + } catch (AmazonClientException e) { + throw new DependencyException(e); + } + return true; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean leaseTableExists() throws DependencyException { + return TableStatus.ACTIVE == tableStatus(); + } + + private TableStatus tableStatus() throws 
DependencyException { + DescribeTableRequest request = new DescribeTableRequest(); + + request.setTableName(table); + + DescribeTableResult result; + try { + result = dynamoDBClient.describeTable(request); + } catch (ResourceNotFoundException e) { + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Got ResourceNotFoundException for table %s in leaseTableExists, returning false.", + table)); + } + return null; + } catch (AmazonClientException e) { + throw new DependencyException(e); + } + + TableStatus tableStatus = TableStatus.fromValue(result.getTable().getTableStatus()); + if (LOG.isDebugEnabled()) { + LOG.debug("Lease table exists and is in status " + tableStatus); + } + + return tableStatus; + } + + @Override + public boolean waitUntilLeaseTableExists(long secondsBetweenPolls, long timeoutSeconds) throws DependencyException { + long sleepTimeRemaining = TimeUnit.SECONDS.toMillis(timeoutSeconds); + + while (!leaseTableExists()) { + if (sleepTimeRemaining <= 0) { + return false; + } + + long timeToSleepMillis = Math.min(TimeUnit.SECONDS.toMillis(secondsBetweenPolls), sleepTimeRemaining); + + sleepTimeRemaining -= sleep(timeToSleepMillis); + } + + return true; + } + + /** + * Exposed for testing purposes. 
+ * + * @param timeToSleepMillis time to sleep in milliseconds + * + * @return actual time slept in millis + */ + long sleep(long timeToSleepMillis) { + long startTime = System.currentTimeMillis(); + + try { + Thread.sleep(timeToSleepMillis); + } catch (InterruptedException e) { + LOG.debug("Interrupted while sleeping"); + } + + return System.currentTimeMillis() - startTime; + } + + /** + * {@inheritDoc} + */ + @Override + public List listLeases() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + return list(null); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean isLeaseTableEmpty() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + return list(1).isEmpty(); + } + + /** + * List with the given page size. Package access for integration testing. + * + * @param limit number of items to consider at a time - used by integration tests to force paging. + * @return list of leases + * @throws InvalidStateException if table does not exist + * @throws DependencyException if DynamoDB scan fail in an unexpected way + * @throws ProvisionedThroughputException if DynamoDB scan fail due to exceeded capacity + */ + List list(Integer limit) throws DependencyException, InvalidStateException, ProvisionedThroughputException { + if (LOG.isDebugEnabled()) { + LOG.debug("Listing leases from table " + table); + } + + ScanRequest scanRequest = new ScanRequest(); + scanRequest.setTableName(table); + if (limit != null) { + scanRequest.setLimit(limit); + } + + try { + ScanResult scanResult = dynamoDBClient.scan(scanRequest); + List result = new ArrayList(); + + while (scanResult != null) { + for (Map item : scanResult.getItems()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Got item " + item.toString() + " from DynamoDB."); + } + + result.add(serializer.fromDynamoRecord(item)); + } + + Map lastEvaluatedKey = scanResult.getLastEvaluatedKey(); + if (lastEvaluatedKey == null) { + // Signify that we're done. 
+ scanResult = null; + if (LOG.isDebugEnabled()) { + LOG.debug("lastEvaluatedKey was null - scan finished."); + } + } else { + // Make another request, picking up where we left off. + scanRequest.setExclusiveStartKey(lastEvaluatedKey); + + if (LOG.isDebugEnabled()) { + LOG.debug("lastEvaluatedKey was " + lastEvaluatedKey + ", continuing scan."); + } + + scanResult = dynamoDBClient.scan(scanRequest); + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Listed " + result.size() + " leases from table " + table); + } + + return result; + } catch (ResourceNotFoundException e) { + throw new InvalidStateException("Cannot scan lease table " + table + " because it does not exist.", e); + } catch (ProvisionedThroughputExceededException e) { + throw new ProvisionedThroughputException(e); + } catch (AmazonClientException e) { + throw new DependencyException(e); + } + } + + /** + * {@inheritDoc} + */ + @Override + public boolean createLeaseIfNotExists(T lease) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + verifyNotNull(lease, "lease cannot be null"); + + if (LOG.isDebugEnabled()) { + LOG.debug("Creating lease " + lease); + } + + PutItemRequest request = new PutItemRequest(); + request.setTableName(table); + request.setItem(serializer.toDynamoRecord(lease)); + request.setExpected(serializer.getDynamoNonexistantExpectation()); + + try { + dynamoDBClient.putItem(request); + } catch (ConditionalCheckFailedException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Did not create lease " + lease + " because it already existed"); + } + + return false; + } catch (AmazonClientException e) { + throw convertAndRethrowExceptions("create", lease.getLeaseKey(), e); + } + + return true; + } + + /** + * {@inheritDoc} + */ + @Override + public T getLease(String leaseKey) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + verifyNotNull(leaseKey, "leaseKey cannot be null"); + + if (LOG.isDebugEnabled()) { + 
LOG.debug("Getting lease with key " + leaseKey); + } + + GetItemRequest request = new GetItemRequest(); + request.setTableName(table); + request.setKey(serializer.getDynamoHashKey(leaseKey)); + request.setConsistentRead(consistentReads); + + try { + GetItemResult result = dynamoDBClient.getItem(request); + + Map dynamoRecord = result.getItem(); + if (dynamoRecord == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("No lease found with key " + leaseKey + ", returning null."); + } + + return null; + } else { + T lease = serializer.fromDynamoRecord(dynamoRecord); + if (LOG.isDebugEnabled()) { + LOG.debug("Got lease " + lease); + } + + return lease; + } + } catch (AmazonClientException e) { + throw convertAndRethrowExceptions("get", leaseKey, e); + } + } + + /** + * {@inheritDoc} + */ + @Override + public boolean renewLease(T lease) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + verifyNotNull(lease, "lease cannot be null"); + + if (LOG.isDebugEnabled()) { + LOG.debug("Renewing lease with key " + lease.getLeaseKey()); + } + + UpdateItemRequest request = new UpdateItemRequest(); + request.setTableName(table); + request.setKey(serializer.getDynamoHashKey(lease)); + request.setExpected(serializer.getDynamoLeaseCounterExpectation(lease)); + request.setAttributeUpdates(serializer.getDynamoLeaseCounterUpdate(lease)); + + try { + dynamoDBClient.updateItem(request); + } catch (ConditionalCheckFailedException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Lease renewal failed for lease with key " + lease.getLeaseKey() + + " because the lease counter was not " + lease.getLeaseCounter()); + } + + return false; + } catch (AmazonClientException e) { + throw convertAndRethrowExceptions("renew", lease.getLeaseKey(), e); + } + + lease.setLeaseCounter(lease.getLeaseCounter() + 1); + return true; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean takeLease(T lease, String owner) + throws DependencyException, InvalidStateException, 
ProvisionedThroughputException { + verifyNotNull(lease, "lease cannot be null"); + verifyNotNull(owner, "owner cannot be null"); + + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Taking lease with leaseKey %s from %s to %s", + lease.getLeaseKey(), + lease.getLeaseOwner() == null ? "nobody" : lease.getLeaseOwner(), + owner)); + } + + UpdateItemRequest request = new UpdateItemRequest(); + request.setTableName(table); + request.setKey(serializer.getDynamoHashKey(lease)); + request.setExpected(serializer.getDynamoLeaseCounterExpectation(lease)); + + Map updates = serializer.getDynamoLeaseCounterUpdate(lease); + updates.putAll(serializer.getDynamoTakeLeaseUpdate(lease, owner)); + request.setAttributeUpdates(updates); + + try { + dynamoDBClient.updateItem(request); + } catch (ConditionalCheckFailedException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Lease renewal failed for lease with key " + lease.getLeaseKey() + + " because the lease counter was not " + lease.getLeaseCounter()); + } + + return false; + } catch (AmazonClientException e) { + throw convertAndRethrowExceptions("take", lease.getLeaseKey(), e); + } + + lease.setLeaseCounter(lease.getLeaseCounter() + 1); + lease.setLeaseOwner(owner); + + return true; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean evictLease(T lease) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + verifyNotNull(lease, "lease cannot be null"); + + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Evicting lease with leaseKey %s owned by %s", + lease.getLeaseKey(), + lease.getLeaseOwner())); + } + + UpdateItemRequest request = new UpdateItemRequest(); + request.setTableName(table); + request.setKey(serializer.getDynamoHashKey(lease)); + request.setExpected(serializer.getDynamoLeaseOwnerExpectation(lease)); + + Map updates = serializer.getDynamoLeaseCounterUpdate(lease); + updates.putAll(serializer.getDynamoEvictLeaseUpdate(lease)); + request.setAttributeUpdates(updates); 
+ + try { + dynamoDBClient.updateItem(request); + } catch (ConditionalCheckFailedException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Lease eviction failed for lease with key " + lease.getLeaseKey() + + " because the lease owner was not " + lease.getLeaseOwner()); + } + + return false; + } catch (AmazonClientException e) { + throw convertAndRethrowExceptions("evict", lease.getLeaseKey(), e); + } + + lease.setLeaseOwner(null); + lease.setLeaseCounter(lease.getLeaseCounter() + 1); + return true; + } + + /** + * {@inheritDoc} + */ + public void deleteAll() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + List allLeases = listLeases(); + + LOG.warn("Deleting " + allLeases.size() + " items from table " + table); + + for (T lease : allLeases) { + DeleteItemRequest deleteRequest = new DeleteItemRequest(); + deleteRequest.setTableName(table); + deleteRequest.setKey(serializer.getDynamoHashKey(lease)); + + dynamoDBClient.deleteItem(deleteRequest); + } + } + + /** + * {@inheritDoc} + */ + @Override + public void deleteLease(T lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException { + verifyNotNull(lease, "lease cannot be null"); + + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Deleting lease with leaseKey %s", lease.getLeaseKey())); + } + + DeleteItemRequest deleteRequest = new DeleteItemRequest(); + deleteRequest.setTableName(table); + deleteRequest.setKey(serializer.getDynamoHashKey(lease)); + + try { + dynamoDBClient.deleteItem(deleteRequest); + } catch (AmazonClientException e) { + throw convertAndRethrowExceptions("delete", lease.getLeaseKey(), e); + } + } + + /** + * {@inheritDoc} + */ + @Override + public boolean updateLease(T lease) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + verifyNotNull(lease, "lease cannot be null"); + + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Updating lease %s", lease)); + } + + UpdateItemRequest 
request = new UpdateItemRequest(); + request.setTableName(table); + request.setKey(serializer.getDynamoHashKey(lease)); + request.setExpected(serializer.getDynamoLeaseCounterExpectation(lease)); + + Map updates = serializer.getDynamoLeaseCounterUpdate(lease); + updates.putAll(serializer.getDynamoUpdateLeaseUpdate(lease)); + request.setAttributeUpdates(updates); + + try { + dynamoDBClient.updateItem(request); + } catch (ConditionalCheckFailedException e) { + if (LOG.isDebugEnabled()) { + LOG.debug("Lease update failed for lease with key " + lease.getLeaseKey() + + " because the lease counter was not " + lease.getLeaseCounter()); + } + + return false; + } catch (AmazonClientException e) { + throw convertAndRethrowExceptions("update", lease.getLeaseKey(), e); + } + + lease.setLeaseCounter(lease.getLeaseCounter() + 1); + return true; + } + + /* + * This method contains boilerplate exception handling - it throws or returns something to be thrown. The + * inconsistency there exists to satisfy the compiler when this method is used at the end of non-void methods. 
+ */ + protected DependencyException convertAndRethrowExceptions(String operation, String leaseKey, AmazonClientException e) + throws ProvisionedThroughputException, InvalidStateException { + if (e instanceof ProvisionedThroughputExceededException) { + throw new ProvisionedThroughputException(e); + } else if (e instanceof ResourceNotFoundException) { + // @formatter:on + throw new InvalidStateException(String.format("Cannot %s lease with key %s because table %s does not exist.", + operation, + leaseKey, + table), + e); + //@formatter:off + } else { + return new DependencyException(e); + } + } + + private void verifyNotNull(Object object, String message) { + if (object == null) { + throw new IllegalArgumentException(message); + } + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseRenewer.java b/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseRenewer.java new file mode 100644 index 00000000..b10ee1a3 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseRenewer.java @@ -0,0 +1,413 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentNavigableMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseRenewer; +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.impl.ThreadSafeMetricsDelegatingScope; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; + +/** + * An implementation of ILeaseRenewer that uses DynamoDB via LeaseManager. + */ +public class LeaseRenewer implements ILeaseRenewer { + + private static final Log LOG = LogFactory.getLog(LeaseRenewer.class); + private static final int RENEWAL_RETRIES = 2; + + private final ILeaseManager leaseManager; + private final ConcurrentNavigableMap ownedLeases = new ConcurrentSkipListMap(); + private final String workerIdentifier; + private final long leaseDurationNanos; + private final ExecutorService executorService; + + /** + * Constructor. 
+ * + * @param leaseManager LeaseManager to use + * @param workerIdentifier identifier of this worker + * @param leaseDurationMillis duration of a lease in milliseconds + * @param executorService ExecutorService to use for renewing leases in parallel + */ + public LeaseRenewer(ILeaseManager leaseManager, String workerIdentifier, long leaseDurationMillis, + ExecutorService executorService) { + this.leaseManager = leaseManager; + this.workerIdentifier = workerIdentifier; + this.leaseDurationNanos = TimeUnit.MILLISECONDS.toNanos(leaseDurationMillis); + this.executorService = executorService; + } + + /** + * {@inheritDoc} + */ + @Override + public void renewLeases() throws DependencyException, InvalidStateException { + if (LOG.isDebugEnabled()) { + // Due to the eventually consistent nature of ConcurrentNavigableMap iterators, this log entry may become + // inaccurate during iteration. + LOG.debug(String.format("Worker %s holding %d leases: %s", + workerIdentifier, + ownedLeases.size(), + ownedLeases)); + } + + /* + * Lease renewals are done in parallel so many leases can be renewed for short lease fail over time + * configuration. In this case, metrics scope is also shared across different threads, so scope must be thread + * safe. + */ + IMetricsScope renewLeaseTaskMetricsScope = new ThreadSafeMetricsDelegatingScope( + MetricsHelper.getMetricsScope()); + + /* + * We iterate in descending order here so that the synchronized(lease) inside renewLease doesn't "lead" calls + * to getCurrentlyHeldLeases. They'll still cross paths, but they won't interleave their executions. 
+ */ + int lostLeases = 0; + List> renewLeaseTasks = new ArrayList>(); + for (T lease : ownedLeases.descendingMap().values()) { + renewLeaseTasks.add(executorService.submit(new RenewLeaseTask(lease, renewLeaseTaskMetricsScope))); + } + int leasesInUnknownState = 0; + Exception lastException = null; + for (Future renewLeaseTask : renewLeaseTasks) { + try { + if (!renewLeaseTask.get()) { + lostLeases++; + } + } catch (InterruptedException e) { + LOG.info("Interrupted while waiting for a lease to renew."); + leasesInUnknownState += 1; + Thread.currentThread().interrupt(); + } catch (ExecutionException e) { + LOG.error("Encountered an exception while renewing a lease.", e.getCause()); + leasesInUnknownState += 1; + lastException = e; + } + } + + renewLeaseTaskMetricsScope.addData( + "LostLeases", lostLeases, StandardUnit.Count, MetricsLevel.SUMMARY); + renewLeaseTaskMetricsScope.addData( + "CurrentLeases", ownedLeases.size(), StandardUnit.Count, MetricsLevel.SUMMARY); + if (leasesInUnknownState > 0) { + throw new DependencyException(String.format("Encountered an exception while renewing leases. 
" + + "The number of leases which might not have been renewed is %d", + leasesInUnknownState), + lastException); + } + } + + private class RenewLeaseTask implements Callable { + + private final T lease; + private final IMetricsScope metricsScope; + + public RenewLeaseTask(T lease, IMetricsScope metricsScope) { + this.lease = lease; + this.metricsScope = metricsScope; + } + + @Override + public Boolean call() throws Exception { + MetricsHelper.setMetricsScope(metricsScope); + try { + return renewLease(lease); + } finally { + MetricsHelper.unsetMetricsScope(); + } + } + } + + private boolean renewLease(T lease) throws DependencyException, InvalidStateException { + return renewLease(lease, false); + } + + private boolean renewLease(T lease, boolean renewEvenIfExpired) throws DependencyException, InvalidStateException { + String leaseKey = lease.getLeaseKey(); + + boolean success = false; + boolean renewedLease = false; + long startTime = System.currentTimeMillis(); + try { + for (int i = 1; i <= RENEWAL_RETRIES; i++) { + try { + synchronized (lease) { + // Don't renew expired lease during regular renewals. getCopyOfHeldLease may have returned null + // triggering the application processing to treat this as a lost lease (fail checkpoint with + // ShutdownException). 
+ if (renewEvenIfExpired || (!lease.isExpired(leaseDurationNanos, System.nanoTime()))) { + renewedLease = leaseManager.renewLease(lease); + } + if (renewedLease) { + lease.setLastCounterIncrementNanos(System.nanoTime()); + } + } + + if (renewedLease) { + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Worker %s successfully renewed lease with key %s", + workerIdentifier, + leaseKey)); + } + } else { + LOG.info(String.format("Worker %s lost lease with key %s", workerIdentifier, leaseKey)); + ownedLeases.remove(leaseKey); + } + + success = true; + break; + } catch (ProvisionedThroughputException e) { + LOG.info(String.format("Worker %s could not renew lease with key %s on try %d out of %d due to capacity", + workerIdentifier, + leaseKey, + i, + RENEWAL_RETRIES)); + } + } + } finally { + MetricsHelper.addSuccessAndLatency("RenewLease", startTime, success, MetricsLevel.DETAILED); + } + + return renewedLease; + } + + /** + * {@inheritDoc} + */ + @Override + public Map getCurrentlyHeldLeases() { + Map result = new HashMap(); + long now = System.nanoTime(); + + for (String leaseKey : ownedLeases.keySet()) { + T copy = getCopyOfHeldLease(leaseKey, now); + if (copy != null) { + result.put(copy.getLeaseKey(), copy); + } + } + + return result; + } + + /** + * {@inheritDoc} + */ + @Override + public T getCurrentlyHeldLease(String leaseKey) { + return getCopyOfHeldLease(leaseKey, System.nanoTime()); + } + + /** + * Internal method to return a lease with a specific lease key only if we currently hold it. 
+ * + * @param leaseKey key of lease to return + * @param now current timestamp for old-ness checking + * @return non-authoritative copy of the held lease, or null if we don't currently hold it + */ + private T getCopyOfHeldLease(String leaseKey, long now) { + T authoritativeLease = ownedLeases.get(leaseKey); + if (authoritativeLease == null) { + return null; + } else { + T copy = null; + synchronized (authoritativeLease) { + copy = authoritativeLease.copy(); + } + + if (copy.isExpired(leaseDurationNanos, now)) { + LOG.info(String.format("getCurrentlyHeldLease not returning lease with key %s because it is expired", + copy.getLeaseKey())); + return null; + } else { + return copy; + } + } + } + + /** + * {@inheritDoc} + */ + @Override + public boolean updateLease(T lease, UUID concurrencyToken) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + verifyNotNull(lease, "lease cannot be null"); + verifyNotNull(lease.getLeaseKey(), "leaseKey cannot be null"); + verifyNotNull(concurrencyToken, "concurrencyToken cannot be null"); + + String leaseKey = lease.getLeaseKey(); + T authoritativeLease = ownedLeases.get(leaseKey); + + if (authoritativeLease == null) { + LOG.info(String.format("Worker %s could not update lease with key %s because it does not hold it", + workerIdentifier, + leaseKey)); + return false; + } + + /* + * If the passed-in concurrency token doesn't match the concurrency token of the authoritative lease, it means + * the lease was lost and regained between when the caller acquired his concurrency token and when the caller + * called update. 
+ */ + if (!authoritativeLease.getConcurrencyToken().equals(concurrencyToken)) { + LOG.info(String.format("Worker %s refusing to update lease with key %s because" + + " concurrency tokens don't match", workerIdentifier, leaseKey)); + return false; + } + + long startTime = System.currentTimeMillis(); + boolean success = false; + try { + synchronized (authoritativeLease) { + authoritativeLease.update(lease); + boolean updatedLease = leaseManager.updateLease(authoritativeLease); + if (updatedLease) { + // Updates increment the counter + authoritativeLease.setLastCounterIncrementNanos(System.nanoTime()); + } else { + /* + * If updateLease returns false, it means someone took the lease from us. Remove the lease + * from our set of owned leases pro-actively rather than waiting for a run of renewLeases(). + */ + LOG.info(String.format("Worker %s lost lease with key %s - discovered during update", + workerIdentifier, + leaseKey)); + + /* + * Remove only if the value currently in the map is the same as the authoritative lease. We're + * guarding against a pause after the concurrency token check above. It plays out like so: + * + * 1) Concurrency token check passes + * 2) Pause. Lose lease, re-acquire lease. This requires at least one lease counter update. + * 3) Unpause. leaseManager.updateLease fails conditional write due to counter updates, returns + * false. + * 4) ownedLeases.remove(key, value) doesn't do anything because authoritativeLease does not + * .equals() the re-acquired version in the map on the basis of lease counter. This is what we want. + * If we just used ownedLease.remove(key), we would have pro-actively removed a lease incorrectly. + * + * Note that there is a subtlety here - Lease.equals() deliberately does not check the concurrency + * token, but it does check the lease counter, so this scheme works. 
+ */ + ownedLeases.remove(leaseKey, authoritativeLease); + } + + success = true; + return updatedLease; + } + } finally { + MetricsHelper.addSuccessAndLatency("UpdateLease", startTime, success, MetricsLevel.DETAILED); + } + } + + /** + * {@inheritDoc} + */ + @Override + public void addLeasesToRenew(Collection newLeases) { + verifyNotNull(newLeases, "newLeases cannot be null"); + + for (T lease : newLeases) { + if (lease.getLastCounterIncrementNanos() == null) { + LOG.info(String.format("addLeasesToRenew ignoring lease with key %s because it does not have lastRenewalNanos set", + lease.getLeaseKey())); + continue; + } + + T authoritativeLease = lease.copy(); + + /* + * Assign a concurrency token when we add this to the set of currently owned leases. This ensures that + * every time we acquire a lease, it gets a new concurrency token. + */ + authoritativeLease.setConcurrencyToken(UUID.randomUUID()); + ownedLeases.put(authoritativeLease.getLeaseKey(), authoritativeLease); + } + } + + /** + * {@inheritDoc} + */ + @Override + public void clearCurrentlyHeldLeases() { + ownedLeases.clear(); + } + + /** + * {@inheritDoc} + * @param lease the lease to drop. + */ + @Override + public void dropLease(T lease) { + ownedLeases.remove(lease.getLeaseKey()); + } + + /** + * {@inheritDoc} + */ + @Override + public void initialize() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + Collection leases = leaseManager.listLeases(); + List myLeases = new LinkedList(); + boolean renewEvenIfExpired = true; + + for (T lease : leases) { + if (workerIdentifier.equals(lease.getLeaseOwner())) { + LOG.info(String.format(" Worker %s found lease %s", workerIdentifier, lease)); + // Okay to renew even if lease is expired, because we start with an empty list and we add the lease to + // our list only after a successful renew. So we don't need to worry about the edge case where we could + // continue renewing a lease after signaling a lease loss to the application. 
+ if (renewLease(lease, renewEvenIfExpired)) { + myLeases.add(lease); + } + } else { + LOG.debug(String.format("Worker %s ignoring lease %s ", workerIdentifier, lease)); + } + } + + addLeasesToRenew(myLeases); + } + + private void verifyNotNull(Object object, String message) { + if (object == null) { + throw new IllegalArgumentException(message); + } + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseSerializer.java b/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseSerializer.java new file mode 100644 index 00000000..42121292 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseSerializer.java @@ -0,0 +1,196 @@ +/* + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import com.amazonaws.services.dynamodbv2.model.AttributeAction; +import com.amazonaws.services.dynamodbv2.model.AttributeDefinition; +import com.amazonaws.services.dynamodbv2.model.AttributeValue; +import com.amazonaws.services.dynamodbv2.model.AttributeValueUpdate; +import com.amazonaws.services.dynamodbv2.model.ExpectedAttributeValue; +import com.amazonaws.services.dynamodbv2.model.KeySchemaElement; +import com.amazonaws.services.dynamodbv2.model.KeyType; +import com.amazonaws.services.dynamodbv2.model.ScalarAttributeType; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseSerializer; +import com.amazonaws.services.kinesis.leases.util.DynamoUtils; + +/** + * An implementation of ILeaseSerializer for basic Lease objects. Can also instantiate subclasses of Lease so that + * LeaseSerializer can be decorated by other classes if you need to add fields to leases. 
+ */ +public class LeaseSerializer implements ILeaseSerializer { + + public final String LEASE_KEY_KEY = "leaseKey"; + public final String LEASE_OWNER_KEY = "leaseOwner"; + public final String LEASE_COUNTER_KEY = "leaseCounter"; + public final Class clazz; + + public LeaseSerializer() { + this.clazz = Lease.class; + } + + public LeaseSerializer(Class clazz) { + this.clazz = clazz; + } + + @Override + public Map toDynamoRecord(Lease lease) { + Map result = new HashMap(); + + result.put(LEASE_KEY_KEY, DynamoUtils.createAttributeValue(lease.getLeaseKey())); + result.put(LEASE_COUNTER_KEY, DynamoUtils.createAttributeValue(lease.getLeaseCounter())); + + if (lease.getLeaseOwner() != null) { + result.put(LEASE_OWNER_KEY, DynamoUtils.createAttributeValue(lease.getLeaseOwner())); + } + + return result; + } + + @Override + public Lease fromDynamoRecord(Map dynamoRecord) { + Lease result; + try { + result = clazz.newInstance(); + } catch (InstantiationException e) { + throw new RuntimeException(e); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + + result.setLeaseKey(DynamoUtils.safeGetString(dynamoRecord, LEASE_KEY_KEY)); + result.setLeaseOwner(DynamoUtils.safeGetString(dynamoRecord, LEASE_OWNER_KEY)); + result.setLeaseCounter(DynamoUtils.safeGetLong(dynamoRecord, LEASE_COUNTER_KEY)); + + return result; + } + + @Override + public Map getDynamoHashKey(String leaseKey) { + Map result = new HashMap(); + + result.put(LEASE_KEY_KEY, DynamoUtils.createAttributeValue(leaseKey)); + + return result; + } + + @Override + public Map getDynamoHashKey(Lease lease) { + return getDynamoHashKey(lease.getLeaseKey()); + } + + @Override + public Map getDynamoLeaseCounterExpectation(Lease lease) { + return getDynamoLeaseCounterExpectation(lease.getLeaseCounter()); + } + + public Map getDynamoLeaseCounterExpectation(Long leaseCounter) { + Map result = new HashMap(); + + ExpectedAttributeValue eav = new 
ExpectedAttributeValue(DynamoUtils.createAttributeValue(leaseCounter)); + result.put(LEASE_COUNTER_KEY, eav); + + return result; + } + + @Override + public Map getDynamoLeaseOwnerExpectation(Lease lease) { + Map result = new HashMap(); + + ExpectedAttributeValue eav = null; + + if (lease.getLeaseOwner() == null) { + eav = new ExpectedAttributeValue(false); + } else { + eav = new ExpectedAttributeValue(DynamoUtils.createAttributeValue(lease.getLeaseOwner())); + } + + result.put(LEASE_OWNER_KEY, eav); + + return result; + } + + @Override + public Map getDynamoNonexistantExpectation() { + Map result = new HashMap(); + + ExpectedAttributeValue expectedAV = new ExpectedAttributeValue(false); + result.put(LEASE_KEY_KEY, expectedAV); + + return result; + } + + @Override + public Map getDynamoLeaseCounterUpdate(Lease lease) { + return getDynamoLeaseCounterUpdate(lease.getLeaseCounter()); + } + + public Map getDynamoLeaseCounterUpdate(Long leaseCounter) { + Map result = new HashMap(); + + AttributeValueUpdate avu = + new AttributeValueUpdate(DynamoUtils.createAttributeValue(leaseCounter + 1), AttributeAction.PUT); + result.put(LEASE_COUNTER_KEY, avu); + + return result; + } + + @Override + public Map getDynamoTakeLeaseUpdate(Lease lease, String owner) { + Map result = new HashMap(); + + result.put(LEASE_OWNER_KEY, new AttributeValueUpdate(DynamoUtils.createAttributeValue(owner), + AttributeAction.PUT)); + + return result; + } + + @Override + public Map getDynamoEvictLeaseUpdate(Lease lease) { + Map result = new HashMap(); + + result.put(LEASE_OWNER_KEY, new AttributeValueUpdate(null, AttributeAction.DELETE)); + + return result; + } + + @Override + public Map getDynamoUpdateLeaseUpdate(Lease lease) { + // There is no application-specific data in Lease - just return a map that increments the counter. 
+ return new HashMap(); + } + + @Override + public Collection getKeySchema() { + List keySchema = new ArrayList(); + keySchema.add(new KeySchemaElement().withAttributeName(LEASE_KEY_KEY).withKeyType(KeyType.HASH)); + + return keySchema; + } + + @Override + public Collection getAttributeDefinitions() { + List definitions = new ArrayList(); + definitions.add(new AttributeDefinition().withAttributeName(LEASE_KEY_KEY) + .withAttributeType(ScalarAttributeType.S)); + + return definitions; + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseTaker.java b/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseTaker.java new file mode 100644 index 00000000..e75fd9c9 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/impl/LeaseTaker.java @@ -0,0 +1,540 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseTaker; +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; + +/** + * An implementation of ILeaseTaker that uses DynamoDB via LeaseManager. + */ +public class LeaseTaker implements ILeaseTaker { + + private static final Log LOG = LogFactory.getLog(LeaseTaker.class); + + private static final int TAKE_RETRIES = 3; + private static final int SCAN_RETRIES = 1; + + // See note on takeLeases(Callable) for why we have this callable. 
+ private static final Callable SYSTEM_CLOCK_CALLABLE = new Callable() { + + @Override + public Long call() { + return System.nanoTime(); + } + }; + + private final ILeaseManager leaseManager; + private final String workerIdentifier; + private final Map allLeases = new HashMap(); + private final long leaseDurationNanos; + private int maxLeasesForWorker = Integer.MAX_VALUE; + private int maxLeasesToStealAtOneTime = 1; + + private long lastScanTimeNanos = 0L; + + public LeaseTaker(ILeaseManager leaseManager, String workerIdentifier, long leaseDurationMillis) { + this.leaseManager = leaseManager; + this.workerIdentifier = workerIdentifier; + this.leaseDurationNanos = TimeUnit.MILLISECONDS.toNanos(leaseDurationMillis); + } + + /** + * Worker will not acquire more than the specified max number of leases even if there are more + * shards that need to be processed. This can be used in scenarios where a worker is resource constrained or + * to prevent lease thrashing when small number of workers pick up all leases for small amount of time during + * deployment. + * Note that setting a low value may cause data loss (e.g. if there aren't enough Workers to make progress on all + * shards). When setting the value for this property, one must ensure enough workers are present to process + * shards and should consider future resharding, child shards that may be blocked on parent shards, some workers + * becoming unhealthy, etc. + * + * @param maxLeasesForWorker Max leases this Worker can handle at a time + * @return LeaseTaker + */ + public LeaseTaker withMaxLeasesForWorker(int maxLeasesForWorker) { + if (maxLeasesForWorker <= 0) { + throw new IllegalArgumentException("maxLeasesForWorker should be >= 1"); + } + this.maxLeasesForWorker = maxLeasesForWorker; + return this; + } + + /** + * Max leases to steal from a more loaded Worker at one time (for load balancing). + * Setting this to a higher number can allow for faster load convergence (e.g. 
during deployments, cold starts), + * but can cause higher churn in the system. + * + * @param maxLeasesToStealAtOneTime Steal up to this many leases at one time (for load balancing) + * @return LeaseTaker + */ + public LeaseTaker withMaxLeasesToStealAtOneTime(int maxLeasesToStealAtOneTime) { + if (maxLeasesToStealAtOneTime <= 0) { + throw new IllegalArgumentException("maxLeasesToStealAtOneTime should be >= 1"); + } + this.maxLeasesToStealAtOneTime = maxLeasesToStealAtOneTime; + return this; + } + + /** + * {@inheritDoc} + */ + @Override + public Map takeLeases() throws DependencyException, InvalidStateException { + return takeLeases(SYSTEM_CLOCK_CALLABLE); + } + + /** + * Internal implementation of takeLeases. Takes a callable that can provide the time to enable test cases without + * Thread.sleep. Takes a callable instead of a raw time value because the time needs to be computed as-of + * immediately after the scan. + * + * @param timeProvider Callable that will supply the time + * + * @return map of lease key to taken lease + * + * @throws DependencyException + * @throws InvalidStateException + */ + synchronized Map takeLeases(Callable timeProvider) + throws DependencyException, InvalidStateException { + // Key is leaseKey + Map takenLeases = new HashMap(); + + long startTime = System.currentTimeMillis(); + boolean success = false; + + ProvisionedThroughputException lastException = null; + + try { + for (int i = 1; i <= SCAN_RETRIES; i++) { + try { + updateAllLeases(timeProvider); + success = true; + } catch (ProvisionedThroughputException e) { + LOG.info(String.format("Worker %s could not find expired leases on try %d out of %d", + workerIdentifier, + i, + TAKE_RETRIES)); + lastException = e; + } + } + } finally { + MetricsHelper.addSuccessAndLatency("ListLeases", startTime, success, MetricsLevel.DETAILED); + } + + if (lastException != null) { + LOG.error("Worker " + workerIdentifier + + " could not scan leases table, aborting takeLeases. 
Exception caught by last retry:", + lastException); + return takenLeases; + } + + List expiredLeases = getExpiredLeases(); + + Set leasesToTake = computeLeasesToTake(expiredLeases); + Set untakenLeaseKeys = new HashSet(); + + for (T lease : leasesToTake) { + String leaseKey = lease.getLeaseKey(); + + startTime = System.currentTimeMillis(); + success = false; + try { + for (int i = 1; i <= TAKE_RETRIES; i++) { + try { + if (leaseManager.takeLease(lease, workerIdentifier)) { + lease.setLastCounterIncrementNanos(System.nanoTime()); + takenLeases.put(leaseKey, lease); + } else { + untakenLeaseKeys.add(leaseKey); + } + + success = true; + break; + } catch (ProvisionedThroughputException e) { + LOG.info(String.format("Could not take lease with key %s for worker %s on try %d out of %d due to capacity", + leaseKey, + workerIdentifier, + i, + TAKE_RETRIES)); + } + } + } finally { + MetricsHelper.addSuccessAndLatency("TakeLease", startTime, success, MetricsLevel.DETAILED); + } + } + + if (takenLeases.size() > 0) { + LOG.info(String.format("Worker %s successfully took %d leases: %s", + workerIdentifier, + takenLeases.size(), + stringJoin(takenLeases.keySet(), ", "))); + } + + if (untakenLeaseKeys.size() > 0) { + LOG.info(String.format("Worker %s failed to take %d leases: %s", + workerIdentifier, + untakenLeaseKeys.size(), + stringJoin(untakenLeaseKeys, ", "))); + } + + MetricsHelper.getMetricsScope().addData( + "TakenLeases", takenLeases.size(), StandardUnit.Count, MetricsLevel.SUMMARY); + + return takenLeases; + } + + /** Package access for testing purposes. + * + * @param strings + * @param delimiter + * @return Joined string. 
+ */ + static String stringJoin(Collection strings, String delimiter) { + StringBuilder builder = new StringBuilder(); + boolean needDelimiter = false; + for (String string : strings) { + if (needDelimiter) { + builder.append(delimiter); + } + builder.append(string); + needDelimiter = true; + } + + return builder.toString(); + } + + /** + * Scan all leases and update lastRenewalTime. Add new leases and delete old leases. + * + * @param timeProvider callable that supplies the current time + * + * @return list of expired leases, possibly empty, never null. + * + * @throws ProvisionedThroughputException if listLeases fails due to lack of provisioned throughput + * @throws InvalidStateException if the lease table does not exist + * @throws DependencyException if listLeases fails in an unexpected way + */ + private void updateAllLeases(Callable timeProvider) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + List freshList = leaseManager.listLeases(); + try { + lastScanTimeNanos = timeProvider.call(); + } catch (Exception e) { + throw new DependencyException("Exception caught from timeProvider", e); + } + + // This set will hold the lease keys not updated by the previous listLeases call. + Set notUpdated = new HashSet(allLeases.keySet()); + + // Iterate over all leases, finding ones to try to acquire that haven't changed since the last iteration + for (T lease : freshList) { + String leaseKey = lease.getLeaseKey(); + + T oldLease = allLeases.get(leaseKey); + allLeases.put(leaseKey, lease); + notUpdated.remove(leaseKey); + + if (oldLease != null) { + // If we've seen this lease before... + if (oldLease.getLeaseCounter().equals(lease.getLeaseCounter())) { + // ...and the counter hasn't changed, propagate the lastRenewalNanos time from the old lease + lease.setLastCounterIncrementNanos(oldLease.getLastCounterIncrementNanos()); + } else { + // ...and the counter has changed, set lastRenewalNanos to the time of the scan. 
+ lease.setLastCounterIncrementNanos(lastScanTimeNanos); + } + } else { + if (lease.getLeaseOwner() == null) { + // if this new lease is unowned, it's never been renewed. + lease.setLastCounterIncrementNanos(0L); + + if (LOG.isDebugEnabled()) { + LOG.debug("Treating new lease with key " + leaseKey + + " as never renewed because it is new and unowned."); + } + } else { + // if this new lease is owned, treat it as renewed as of the scan + lease.setLastCounterIncrementNanos(lastScanTimeNanos); + if (LOG.isDebugEnabled()) { + LOG.debug("Treating new lease with key " + leaseKey + + " as recently renewed because it is new and owned."); + } + } + } + } + + // Remove dead leases from allLeases + for (String key : notUpdated) { + allLeases.remove(key); + } + } + + /** + * @return list of leases that were expired as of our last scan. + */ + private List getExpiredLeases() { + List expiredLeases = new ArrayList(); + + for (T lease : allLeases.values()) { + if (lease.isExpired(leaseDurationNanos, lastScanTimeNanos)) { + expiredLeases.add(lease); + } + } + + return expiredLeases; + } + + /** + * Compute the number of leases I should try to take based on the state of the system. + * + * @param allLeases map of shardId to lease containing all leases + * @param expiredLeases list of leases we determined to be expired + * @return set of leases to take. + */ + private Set computeLeasesToTake(List expiredLeases) { + Map leaseCounts = computeLeaseCounts(expiredLeases); + Set leasesToTake = new HashSet(); + IMetricsScope metrics = MetricsHelper.getMetricsScope(); + + int numLeases = allLeases.size(); + int numWorkers = leaseCounts.size(); + + if (numLeases == 0) { + // If there are no leases, I shouldn't try to take any. + return leasesToTake; + } + + int target; + if (numWorkers >= numLeases) { + // If we have n leases and n or more workers, each worker can have up to 1 lease, including myself. + target = 1; + } else { + /* + * numWorkers must be < numLeases. 
+ * + * Our target for each worker is numLeases / numWorkers (+1 if numWorkers doesn't evenly divide numLeases) + */ + target = numLeases / numWorkers + (numLeases % numWorkers == 0 ? 0 : 1); + + // Spill over is the number of leases this worker should have claimed, but did not because it would + // exceed the max allowed for this worker. + int leaseSpillover = Math.max(0, target - maxLeasesForWorker); + if (target > maxLeasesForWorker) { + LOG.warn(String.format("Worker %s target is %d leases and maxLeasesForWorker is %d." + + " Resetting target to %d, lease spillover is %d. " + + " Note that some shards may not be processed if no other workers are able to pick them up.", + workerIdentifier, + target, + maxLeasesForWorker, + maxLeasesForWorker, + leaseSpillover)); + target = maxLeasesForWorker; + } + metrics.addData("LeaseSpillover", leaseSpillover, StandardUnit.Count, MetricsLevel.SUMMARY); + } + + int myCount = leaseCounts.get(workerIdentifier); + int numLeasesToReachTarget = target - myCount; + + if (numLeasesToReachTarget <= 0) { + // If we don't need anything, return the empty set. + return leasesToTake; + } + + // Shuffle expiredLeases so workers don't all try to contend for the same leases. + Collections.shuffle(expiredLeases); + + int originalExpiredLeasesSize = expiredLeases.size(); + if (expiredLeases.size() > 0) { + // If we have expired leases, get up to leases from expiredLeases + for (; numLeasesToReachTarget > 0 && expiredLeases.size() > 0; numLeasesToReachTarget--) { + leasesToTake.add(expiredLeases.remove(0)); + } + } else { + // If there are no expired leases and we need a lease, consider stealing. 
+ List leasesToSteal = chooseLeasesToSteal(leaseCounts, numLeasesToReachTarget, target); + for (T leaseToSteal : leasesToSteal) { + LOG.info(String.format("Worker %s needed %d leases but none were expired, so it will steal lease %s from %s", + workerIdentifier, + numLeasesToReachTarget, + leaseToSteal.getLeaseKey(), + leaseToSteal.getLeaseOwner())); + leasesToTake.add(leaseToSteal); + } + } + + if (!leasesToTake.isEmpty()) { + LOG.info(String.format("Worker %s saw %d total leases, %d available leases, %d " + + "workers. Target is %d leases, I have %d leases, I will take %d leases", + workerIdentifier, + numLeases, + originalExpiredLeasesSize, + numWorkers, + target, + myCount, + leasesToTake.size())); + } + + metrics.addData("TotalLeases", numLeases, StandardUnit.Count, MetricsLevel.DETAILED); + metrics.addData("ExpiredLeases", originalExpiredLeasesSize, StandardUnit.Count, MetricsLevel.SUMMARY); + metrics.addData("NumWorkers", numWorkers, StandardUnit.Count, MetricsLevel.SUMMARY); + metrics.addData("NeededLeases", numLeasesToReachTarget, StandardUnit.Count, MetricsLevel.DETAILED); + metrics.addData("LeasesToTake", leasesToTake.size(), StandardUnit.Count, MetricsLevel.DETAILED); + + return leasesToTake; + } + + /** + * Choose leases to steal by randomly selecting one or more (up to max) from the most loaded worker. 
+ * Stealing rules: + * + * Steal up to maxLeasesToStealAtOneTime leases from the most loaded worker if + * a) he has > target leases and I need >= 1 leases : steal min(leases needed, maxLeasesToStealAtOneTime) + * b) he has == target leases and I need > 1 leases : steal 1 + * + * @param leaseCounts map of workerIdentifier to lease count + * @param needed # of leases needed to reach the target leases for the worker + * @param target target # of leases per worker + * @return Leases to steal, or empty list if we should not steal + */ + private List chooseLeasesToSteal(Map leaseCounts, int needed, int target) { + List leasesToSteal = new ArrayList<>(); + + Entry mostLoadedWorker = null; + // Find the most loaded worker + for (Entry worker : leaseCounts.entrySet()) { + if (mostLoadedWorker == null || mostLoadedWorker.getValue() < worker.getValue()) { + mostLoadedWorker = worker; + } + } + + int numLeasesToSteal = 0; + if ((mostLoadedWorker.getValue() >= target) && (needed > 0)) { + int leasesOverTarget = mostLoadedWorker.getValue() - target; + numLeasesToSteal = Math.min(needed, leasesOverTarget); + // steal 1 if we need > 1 and max loaded worker has target leases. + if ((needed > 1) && (numLeasesToSteal == 0)) { + numLeasesToSteal = 1; + } + numLeasesToSteal = Math.min(numLeasesToSteal, maxLeasesToStealAtOneTime); + } + + if (numLeasesToSteal <= 0) { + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Worker %s not stealing from most loaded worker %s. He has %d," + + " target is %d, and I need %d", + workerIdentifier, + mostLoadedWorker.getKey(), + mostLoadedWorker.getValue(), + target, + needed)); + } + return leasesToSteal; + } else { + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Worker %s will attempt to steal %d leases from most loaded worker %s. 
" + + " He has %d leases, target is %d, I need %d, maxLeasesToSteatAtOneTime is %d.", + workerIdentifier, + numLeasesToSteal, + mostLoadedWorker.getKey(), + mostLoadedWorker.getValue(), + target, + needed, + maxLeasesToStealAtOneTime)); + } + } + + String mostLoadedWorkerIdentifier = mostLoadedWorker.getKey(); + List candidates = new ArrayList(); + // Collect leases belonging to that worker + for (T lease : allLeases.values()) { + if (mostLoadedWorkerIdentifier.equals(lease.getLeaseOwner())) { + candidates.add(lease); + } + } + + // Return random ones + Collections.shuffle(candidates); + int toIndex = Math.min(candidates.size(), numLeasesToSteal); + leasesToSteal.addAll(candidates.subList(0, toIndex)); + + return leasesToSteal; + } + + /** + * Count leases by host. Always includes myself, but otherwise only includes hosts that are currently holding + * leases. + * + * @param expiredLeases list of leases that are currently expired + * @return map of workerIdentifier to lease count + */ + private Map computeLeaseCounts(List expiredLeases) { + Map leaseCounts = new HashMap(); + + // Compute the number of leases per worker by looking through allLeases and ignoring leases that have expired. + for (T lease : allLeases.values()) { + if (!expiredLeases.contains(lease)) { + String leaseOwner = lease.getLeaseOwner(); + Integer oldCount = leaseCounts.get(leaseOwner); + if (oldCount == null) { + leaseCounts.put(leaseOwner, 1); + } else { + leaseCounts.put(leaseOwner, oldCount + 1); + } + } + } + + // If I have no leases, I wasn't represented in leaseCounts. Let's fix that. 
+ Integer myCount = leaseCounts.get(workerIdentifier); + if (myCount == null) { + myCount = 0; + leaseCounts.put(workerIdentifier, myCount); + } + + return leaseCounts; + } + + /** + * {@inheritDoc} + */ + @Override + public String getWorkerIdentifier() { + return workerIdentifier; + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/IKinesisClientLeaseManager.java b/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/IKinesisClientLeaseManager.java new file mode 100644 index 00000000..2e3daeca --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/IKinesisClientLeaseManager.java @@ -0,0 +1,42 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.interfaces; + +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; + +/** + * A decoration of ILeaseManager that adds methods to get/update checkpoints. + */ +public interface IKinesisClientLeaseManager extends ILeaseManager { + + /** + * Gets the current checkpoint of the shard. 
This is useful in the resharding use case + * where we will wait for the parent shard to complete before starting on the records from a child shard. + * + * @param shardId Checkpoint of this shard will be returned + * @return Checkpoint of this shard, or null if the shard record doesn't exist. + * + * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity + * @throws InvalidStateException if lease table does not exist + * @throws DependencyException if DynamoDB update fails in an unexpected way + */ + public abstract ExtendedSequenceNumber getCheckpoint(String shardId) + throws ProvisionedThroughputException, InvalidStateException, DependencyException; + +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java b/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseManager.java similarity index 71% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java rename to src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseManager.java index bdf69260..ab296cc1 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java +++ b/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseManager.java @@ -1,30 +1,32 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.leases; +package com.amazonaws.services.kinesis.leases.interfaces; import java.util.List; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.impl.Lease; /** * Supports basic CRUD operations for Leases. + * + * @param Lease subclass, possibly Lease itself. */ -public interface LeaseRefresher { +public interface ILeaseManager { /** * Creates the table that will store leases. Succeeds if table already exists. @@ -38,7 +40,7 @@ public interface LeaseRefresher { * restrictions. 
* @throws DependencyException if DynamoDB createTable fails in an unexpected way */ - boolean createLeaseTableIfNotExists(Long readCapacity, Long writeCapacity) + public boolean createLeaseTableIfNotExists(Long readCapacity, Long writeCapacity) throws ProvisionedThroughputException, DependencyException; /** @@ -46,7 +48,7 @@ public interface LeaseRefresher { * * @throws DependencyException if DynamoDB describeTable fails in an unexpected way */ - boolean leaseTableExists() throws DependencyException; + public boolean leaseTableExists() throws DependencyException; /** * Blocks until the lease table exists by polling leaseTableExists. @@ -58,7 +60,7 @@ public interface LeaseRefresher { * * @throws DependencyException if DynamoDB describeTable fails in an unexpected way */ - boolean waitUntilLeaseTableExists(long secondsBetweenPolls, long timeoutSeconds) throws DependencyException; + public boolean waitUntilLeaseTableExists(long secondsBetweenPolls, long timeoutSeconds) throws DependencyException; /** * List all objects in table synchronously. @@ -69,7 +71,7 @@ public interface LeaseRefresher { * * @return list of leases */ - List listLeases() throws DependencyException, InvalidStateException, ProvisionedThroughputException; + public List listLeases() throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Create a new lease. Conditional on a lease not already existing with this shardId. 
@@ -82,7 +84,7 @@ public interface LeaseRefresher { * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB put fails due to lack of capacity */ - boolean createLeaseIfNotExists(Lease lease) + public boolean createLeaseIfNotExists(T lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** @@ -94,7 +96,7 @@ public interface LeaseRefresher { * * @return lease for the specified shardId, or null if one doesn't exist */ - Lease getLease(String shardId) throws DependencyException, InvalidStateException, ProvisionedThroughputException; + public T getLease(String shardId) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Renew a lease by incrementing the lease counter. Conditional on the leaseCounter in DynamoDB matching the leaseCounter @@ -108,7 +110,7 @@ public interface LeaseRefresher { * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ - boolean renewLease(Lease lease) + public boolean renewLease(T lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** @@ -125,7 +127,7 @@ public interface LeaseRefresher { * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ - boolean takeLease(Lease lease, String owner) + public boolean takeLease(T lease, String owner) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** @@ -140,7 +142,7 @@ public interface LeaseRefresher { * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ - boolean evictLease(Lease lease) + public boolean evictLease(T lease) throws DependencyException, 
InvalidStateException, ProvisionedThroughputException; /** @@ -152,7 +154,7 @@ public interface LeaseRefresher { * @throws ProvisionedThroughputException if DynamoDB delete fails due to lack of capacity * @throws DependencyException if DynamoDB delete fails in an unexpected way */ - void deleteLease(Lease lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException; + public void deleteLease(T lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Delete all leases from DynamoDB. Useful for tools/utils and testing. @@ -161,7 +163,7 @@ public interface LeaseRefresher { * @throws ProvisionedThroughputException if DynamoDB scan or delete fail due to lack of capacity * @throws DependencyException if DynamoDB scan or delete fail in an unexpected way */ - void deleteAll() throws DependencyException, InvalidStateException, ProvisionedThroughputException; + public void deleteAll() throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Update application-specific fields of the given lease in DynamoDB. 
Does not update fields managed by the leasing @@ -175,7 +177,7 @@ public interface LeaseRefresher { * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ - boolean updateLease(Lease lease) + public boolean updateLease(T lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** @@ -187,19 +189,6 @@ public interface LeaseRefresher { * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB scan fails due to lack of capacity */ - boolean isLeaseTableEmpty() throws DependencyException, InvalidStateException, ProvisionedThroughputException; + public boolean isLeaseTableEmpty() throws DependencyException, InvalidStateException, ProvisionedThroughputException; - /** - * Gets the current checkpoint of the shard. This is useful in the resharding use case - * where we will wait for the parent shard to complete before starting on the records from a child shard. - * - * @param shardId Checkpoint of this shard will be returned - * @return Checkpoint of this shard, or null if the shard record doesn't exist. 
- * - * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity - * @throws InvalidStateException if lease table does not exist - * @throws DependencyException if DynamoDB update fails in an unexpected way - */ - ExtendedSequenceNumber getCheckpoint(String shardId) - throws ProvisionedThroughputException, InvalidStateException, DependencyException; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRenewer.java b/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseRenewer.java similarity index 58% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRenewer.java rename to src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseRenewer.java index 75c22f74..87e9182a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRenewer.java +++ b/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseRenewer.java @@ -1,41 +1,42 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.leases; +package com.amazonaws.services.kinesis.leases.interfaces; import java.util.Collection; import java.util.Map; import java.util.UUID; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.impl.Lease; /** * ILeaseRenewer objects are used by LeaseCoordinator to renew leases held by the LeaseCoordinator. Each * LeaseCoordinator instance corresponds to one worker, and uses exactly one ILeaseRenewer to manage lease renewal for * that worker. */ -public interface LeaseRenewer { +public interface ILeaseRenewer { /** - * Bootstrap initial set of leases from the {@link LeaseRefresher} (e.g. upon process restart, pick up leases we own) + * Bootstrap initial set of leases from the LeaseManager (e.g. upon process restart, pick up leases we own) * @throws DependencyException on unexpected DynamoDB failures * @throws InvalidStateException if lease table doesn't exist * @throws ProvisionedThroughputException if DynamoDB reads fail due to insufficient capacity */ - void initialize() throws DependencyException, InvalidStateException, ProvisionedThroughputException; + public void initialize() throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Attempt to renew all currently held leases. 
@@ -43,21 +44,21 @@ public interface LeaseRenewer { * @throws DependencyException on unexpected DynamoDB failures * @throws InvalidStateException if lease table does not exist */ - void renewLeases() throws DependencyException, InvalidStateException; + public void renewLeases() throws DependencyException, InvalidStateException; /** * @return currently held leases. Key is shardId, value is corresponding Lease object. A lease is currently held if * we successfully renewed it on the last run of renewLeases(). Lease objects returned are deep copies - * their lease counters will not tick. */ - Map getCurrentlyHeldLeases(); + public Map getCurrentlyHeldLeases(); /** * @param leaseKey key of the lease to retrieve * * @return a deep copy of a currently held lease, or null if we don't hold the lease */ - Lease getCurrentlyHeldLease(String leaseKey); + public T getCurrentlyHeldLease(String leaseKey); /** * Adds leases to this LeaseRenewer's set of currently held leases. Leases must have lastRenewalNanos set to the @@ -65,19 +66,19 @@ public interface LeaseRenewer { * * @param newLeases new leases. */ - void addLeasesToRenew(Collection newLeases); + public void addLeasesToRenew(Collection newLeases); /** * Clears this LeaseRenewer's set of currently held leases. */ - void clearCurrentlyHeldLeases(); + public void clearCurrentlyHeldLeases(); /** * Stops the lease renewer from continunig to maintain the given lease. * * @param lease the lease to drop. */ - void dropLease(Lease lease); + void dropLease(T lease); /** * Update application-specific fields in a currently held lease. Cannot be used to update internal fields such as @@ -85,7 +86,7 @@ public interface LeaseRenewer { * the concurrency token on the internal authoritative copy of the lease (ie, if we lost and re-acquired the lease). 
* * @param lease lease object containing updated data - * @param concurrencyToken obtained by calling Lease.concurrencyToken for a currently held lease + * @param concurrencyToken obtained by calling Lease.getConcurrencyToken for a currently held lease * * @return true if update succeeds, false otherwise * @@ -93,7 +94,7 @@ public interface LeaseRenewer { * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ - boolean updateLease(Lease lease, UUID concurrencyToken, String operation, String shardId) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; + boolean updateLease(T lease, UUID concurrencyToken) + throws DependencyException, InvalidStateException, ProvisionedThroughputException; } diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseSerializer.java b/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseSerializer.java new file mode 100644 index 00000000..2edb8d56 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseSerializer.java @@ -0,0 +1,116 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.leases.interfaces; + +import java.util.Collection; +import java.util.Map; + +import com.amazonaws.services.dynamodbv2.model.AttributeDefinition; +import com.amazonaws.services.dynamodbv2.model.AttributeValue; +import com.amazonaws.services.dynamodbv2.model.AttributeValueUpdate; +import com.amazonaws.services.dynamodbv2.model.ExpectedAttributeValue; +import com.amazonaws.services.dynamodbv2.model.KeySchemaElement; +import com.amazonaws.services.kinesis.leases.impl.Lease; + +/** + * Utility class that manages the mapping of Lease objects/operations to records in DynamoDB. + * + * @param Lease subclass, possibly Lease itself + */ +public interface ILeaseSerializer { + + /** + * Construct a DynamoDB record out of a Lease object + * + * @param lease lease object to serialize + * @return an attribute value map representing the lease object + */ + public Map toDynamoRecord(T lease); + + /** + * Construct a Lease object out of a DynamoDB record. + * + * @param dynamoRecord attribute value map from DynamoDB + * @return a deserialized lease object representing the attribute value map + */ + public T fromDynamoRecord(Map dynamoRecord); + + /** + * @param lease + * @return the attribute value map representing a Lease's hash key given a Lease object. + */ + public Map getDynamoHashKey(T lease); + + /** + * Special getDynamoHashKey implementation used by ILeaseManager.getLease(). + * + * @param leaseKey + * @return the attribute value map representing a Lease's hash key given a string. + */ + public Map getDynamoHashKey(String leaseKey); + + /** + * @param lease + * @return the attribute value map asserting that a lease counter is what we expect. + */ + public Map getDynamoLeaseCounterExpectation(T lease); + + /** + * @param lease + * @return the attribute value map asserting that the lease owner is what we expect. 
+ */ + public Map getDynamoLeaseOwnerExpectation(T lease); + + /** + * @return the attribute value map asserting that a lease does not exist. + */ + public Map getDynamoNonexistantExpectation(); + + /** + * @param lease + * @return the attribute value map that increments a lease counter + */ + public Map getDynamoLeaseCounterUpdate(T lease); + + /** + * @param lease + * @param newOwner + * @return the attribute value map that takes a lease for a new owner + */ + public Map getDynamoTakeLeaseUpdate(T lease, String newOwner); + + /** + * @param lease + * @return the attribute value map that voids a lease + */ + public Map getDynamoEvictLeaseUpdate(T lease); + + /** + * @param lease + * @return the attribute value map that updates application-specific data for a lease and increments the lease + * counter + */ + public Map getDynamoUpdateLeaseUpdate(T lease); + + /** + * @return the key schema for creating a DynamoDB table to store leases + */ + public Collection getKeySchema(); + + /** + * @return attribute definitions for creating a DynamoDB table to store leases + */ + public Collection getAttributeDefinitions(); +} diff --git a/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseTaker.java b/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseTaker.java new file mode 100644 index 00000000..2f8b5caa --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/leases/interfaces/ILeaseTaker.java @@ -0,0 +1,49 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.interfaces; + +import java.util.Map; + +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.impl.Lease; + +/** + * ILeaseTaker is used by LeaseCoordinator to take new leases, or leases that other workers fail to renew. Each + * LeaseCoordinator instance corresponds to one worker and uses exactly one ILeaseTaker to take leases for that worker. + */ +public interface ILeaseTaker { + + /** + * Compute the set of leases available to be taken and attempt to take them. Lease taking rules are: + * + * 1) If a lease's counter hasn't changed in long enough, try to take it. + * 2) If we see a lease we've never seen before, take it only if owner == null. If it's owned, odds are the owner is + * holding it. We can't tell until we see it more than once. + * 3) For load balancing purposes, you may violate rules 1 and 2 for EXACTLY ONE lease per call of takeLeases(). + * + * @return map of shardId to Lease object for leases we just successfully took. 
+ * + * @throws DependencyException on unexpected DynamoDB failures + * @throws InvalidStateException if lease table does not exist + */ + public abstract Map takeLeases() throws DependencyException, InvalidStateException; + + /** + * @return workerIdentifier for this LeaseTaker + */ + public abstract String getWorkerIdentifier(); + +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/DynamoUtils.java b/src/main/java/com/amazonaws/services/kinesis/leases/util/DynamoUtils.java similarity index 61% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/DynamoUtils.java rename to src/main/java/com/amazonaws/services/kinesis/leases/util/DynamoUtils.java index 31bbadbe..9c40394b 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/DynamoUtils.java +++ b/src/main/java/com/amazonaws/services/kinesis/leases/util/DynamoUtils.java @@ -1,31 +1,29 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.leases; - -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; +package com.amazonaws.services.kinesis.leases.util; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; +import com.amazonaws.services.dynamodbv2.model.AttributeValue; + /** * Static utility functions used by our LeaseSerializers. */ -@KinesisClientInternalApi public class DynamoUtils { public static AttributeValue createAttributeValue(Collection collectionValue) { @@ -33,7 +31,7 @@ public class DynamoUtils { throw new IllegalArgumentException("Collection attributeValues cannot be null or empty."); } - return AttributeValue.builder().ss(collectionValue).build(); + return new AttributeValue().withSS(collectionValue); } public static AttributeValue createAttributeValue(String stringValue) { @@ -41,7 +39,7 @@ public class DynamoUtils { throw new IllegalArgumentException("String attributeValues cannot be null or empty."); } - return AttributeValue.builder().s(stringValue).build(); + return new AttributeValue().withS(stringValue); } public static AttributeValue createAttributeValue(Long longValue) { @@ -49,7 +47,7 @@ public class DynamoUtils { throw new IllegalArgumentException("Number AttributeValues cannot be null."); } - return AttributeValue.builder().n(longValue.toString()).build(); + return new AttributeValue().withN(longValue.toString()); } public static Long safeGetLong(Map dynamoRecord, String key) { @@ -57,7 +55,7 @@ public class DynamoUtils { if (av == null) { return null; } else { - return new Long(av.n()); + return new Long(av.getN()); } } @@ -66,7 +64,7 @@ public class DynamoUtils { if (av == null) { return null; } else { - return av.s(); + return av.getS(); } } @@ -76,7 +74,7 @@ public class DynamoUtils { if (av == null) 
{ return new ArrayList(); } else { - return av.ss(); + return av.getSS(); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/MultipleSubscriberException.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/AccumulateByNameMetricsScope.java similarity index 52% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/MultipleSubscriberException.java rename to src/main/java/com/amazonaws/services/kinesis/metrics/impl/AccumulateByNameMetricsScope.java index daf528de..959f889d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/MultipleSubscriberException.java +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/AccumulateByNameMetricsScope.java @@ -1,5 +1,5 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Amazon Software License (the "License"). * You may not use this file except in compliance with the License. @@ -12,8 +12,18 @@ * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ +package com.amazonaws.services.kinesis.metrics.impl; -package software.amazon.kinesis.retrieval.fanout; +/** + * This is a MetricScope with a KeyType of String. It provides the implementation of + * getting the key based off of the String KeyType. 
+ */ + +public abstract class AccumulateByNameMetricsScope extends AccumulatingMetricsScope { + + @Override + protected String getKey(String name) { + return name; + } -public class MultipleSubscriberException extends RuntimeException { } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulatingMetricsScope.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/AccumulatingMetricsScope.java similarity index 53% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulatingMetricsScope.java rename to src/main/java/com/amazonaws/services/kinesis/metrics/impl/AccumulatingMetricsScope.java index 38551838..1e12744f 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulatingMetricsScope.java +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/AccumulatingMetricsScope.java @@ -1,25 +1,26 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.impl; import java.util.HashMap; import java.util.Map; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; -import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; +import com.amazonaws.services.cloudwatch.model.MetricDatum; +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.cloudwatch.model.StatisticSet; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; /** * An IMetricsScope that accumulates data from multiple calls to addData with @@ -41,7 +42,7 @@ import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; */ public abstract class AccumulatingMetricsScope extends EndingMetricsScope { - protected Map data = new HashMap<>(); + protected Map data = new HashMap(); @Override public void addData(String name, double value, StandardUnit unit) { @@ -76,27 +77,25 @@ public abstract class AccumulatingMetricsScope extends EndingMetricsSco public void addData(KeyType key, String name, double value, StandardUnit unit) { super.addData(name, value, unit); - final MetricDatum datum = data.get(key); - final MetricDatum metricDatum; + MetricDatum datum = data.get(key); if (datum == null) { - metricDatum = MetricDatum.builder().metricName(name).unit(unit) - .statisticValues( - StatisticSet.builder().maximum(value).minimum(value).sampleCount(1.0).sum(value).build()) - .build(); + data.put(key, + new MetricDatum().withMetricName(name) + .withUnit(unit) + .withStatisticValues(new StatisticSet().withMaximum(value) + .withMinimum(value) + .withSampleCount(1.0) + .withSum(value))); } else { - if (!datum.unit().equals(unit)) { + if (!datum.getUnit().equals(unit.name())) { throw new IllegalArgumentException("Cannot add to 
existing metric with different unit"); } - final StatisticSet oldStatisticSet = datum.statisticValues(); - final StatisticSet statisticSet = oldStatisticSet.toBuilder() - .maximum(Math.max(value, oldStatisticSet.maximum())) - .minimum(Math.min(value, oldStatisticSet.minimum())).sampleCount(oldStatisticSet.sampleCount() + 1) - .sum(oldStatisticSet.sum() + value).build(); - - metricDatum = datum.toBuilder().statisticValues(statisticSet).build(); + StatisticSet statistics = datum.getStatisticValues(); + statistics.setMaximum(Math.max(value, statistics.getMaximum())); + statistics.setMinimum(Math.min(value, statistics.getMinimum())); + statistics.setSampleCount(statistics.getSampleCount() + 1); + statistics.setSum(statistics.getSum() + value); } - - data.put(key, metricDatum); } } diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWMetricKey.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWMetricKey.java new file mode 100644 index 00000000..26cb151f --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWMetricKey.java @@ -0,0 +1,59 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.metrics.impl; + +import java.util.List; +import java.util.Objects; + +import com.amazonaws.services.cloudwatch.model.Dimension; +import com.amazonaws.services.cloudwatch.model.MetricDatum; + +/* + * A representation of a key of a MetricDatum. 
This class is useful when wanting to compare + * whether 2 keys have the same MetricDatum. This feature will be used in MetricAccumulatingQueue + * where we aggregate metrics across multiple MetricScopes. + */ +public class CWMetricKey { + + private List dimensions; + private String metricName; + + /** + * @param datum data point + */ + + public CWMetricKey(MetricDatum datum) { + this.dimensions = datum.getDimensions(); + this.metricName = datum.getMetricName(); + } + + @Override + public int hashCode() { + return Objects.hash(dimensions, metricName); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + CWMetricKey other = (CWMetricKey) obj; + return Objects.equals(other.dimensions, dimensions) && Objects.equals(other.metricName, metricName); + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWMetricsFactory.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWMetricsFactory.java new file mode 100644 index 00000000..66a977c5 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWMetricsFactory.java @@ -0,0 +1,153 @@ +/* + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.metrics.impl; + +import java.util.Set; + +import com.amazonaws.AbortedException; +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.cloudwatch.AmazonCloudWatch; +import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; +import com.google.common.collect.ImmutableSet; + +/** + * An IMetricsFactory that creates IMetricsScopes that output themselves via CloudWatch. Batches IMetricsScopes together + * to reduce API calls. + */ +public class CWMetricsFactory implements IMetricsFactory { + + /** + * Default metrics level to enable. By default, all metrics levels are emitted. + */ + public static final MetricsLevel DEFAULT_METRICS_LEVEL = MetricsLevel.DETAILED; + /** + * Default metrics dimensions. By default, all dimensions are enabled. + */ + public static final Set DEFAULT_METRICS_ENABLED_DIMENSIONS = ImmutableSet.of( + IMetricsScope.METRICS_DIMENSIONS_ALL); + + /** + * If the CWPublisherRunnable accumulates more than FLUSH_SIZE distinct metrics, it will call CloudWatch + * immediately instead of waiting for the next scheduled call. + */ + private static final int FLUSH_SIZE = 200; + + private final CWPublisherRunnable runnable; + private final Thread publicationThread; + + /** + * Enabled metrics level. All metrics below this level will be dropped. + */ + private final MetricsLevel metricsLevel; + /** + * List of enabled dimensions for metrics. + */ + private final Set metricsEnabledDimensions; + + /** + * Constructor. 
+ * + * @param credentialsProvider client credentials for CloudWatch + * @param namespace the namespace under which the metrics will appear in the CloudWatch console + * @param bufferTimeMillis time to buffer metrics before publishing to CloudWatch + * @param maxQueueSize maximum number of metrics that we can have in a queue + */ + public CWMetricsFactory(AWSCredentialsProvider credentialsProvider, + String namespace, + long bufferTimeMillis, + int maxQueueSize) { + this(new AmazonCloudWatchClient(credentialsProvider), namespace, bufferTimeMillis, maxQueueSize); + } + + /** + * Constructor. + * + * @param credentialsProvider client credentials for CloudWatch + * @param clientConfig Configuration to use with the AmazonCloudWatchClient + * @param namespace the namespace under which the metrics will appear in the CloudWatch console + * @param bufferTimeMillis time to buffer metrics before publishing to CloudWatch + * @param maxQueueSize maximum number of metrics that we can have in a queue + */ + public CWMetricsFactory(AWSCredentialsProvider credentialsProvider, + ClientConfiguration clientConfig, + String namespace, + long bufferTimeMillis, + int maxQueueSize) { + this(new AmazonCloudWatchClient(credentialsProvider, clientConfig), namespace, bufferTimeMillis, maxQueueSize); + } + + /** + * Constructor. + * + * @param cloudWatchClient Client used to make CloudWatch requests + * @param namespace the namespace under which the metrics will appear in the CloudWatch console + * @param bufferTimeMillis time to buffer metrics before publishing to CloudWatch + * @param maxQueueSize maximum number of metrics that we can have in a queue + */ + public CWMetricsFactory(AmazonCloudWatch cloudWatchClient, + String namespace, + long bufferTimeMillis, + int maxQueueSize) { + this(cloudWatchClient, namespace, bufferTimeMillis, maxQueueSize, + DEFAULT_METRICS_LEVEL, DEFAULT_METRICS_ENABLED_DIMENSIONS); + } + + /** + * Constructor. 
+ * + * @param cloudWatchClient Client used to make CloudWatch requests + * @param namespace the namespace under which the metrics will appear in the CloudWatch console + * @param bufferTimeMillis time to buffer metrics before publishing to CloudWatch + * @param maxQueueSize maximum number of metrics that we can have in a queue + * @param metricsLevel metrics level to enable + * @param metricsEnabledDimensions metrics dimensions to allow + */ + public CWMetricsFactory(AmazonCloudWatch cloudWatchClient, + String namespace, + long bufferTimeMillis, + int maxQueueSize, + MetricsLevel metricsLevel, + Set metricsEnabledDimensions) { + this.metricsLevel = (metricsLevel == null ? DEFAULT_METRICS_LEVEL : metricsLevel); + this.metricsEnabledDimensions = (metricsEnabledDimensions == null + ? ImmutableSet.of() : ImmutableSet.copyOf(metricsEnabledDimensions)); + + runnable = new CWPublisherRunnable( + new DefaultCWMetricsPublisher(cloudWatchClient, namespace), + bufferTimeMillis, maxQueueSize, FLUSH_SIZE); + publicationThread = new Thread(runnable); + publicationThread.setName("cw-metrics-publisher"); + publicationThread.start(); + } + + @Override + public IMetricsScope createMetrics() { + return new CWMetricsScope(runnable, metricsLevel, metricsEnabledDimensions); + } + + public void shutdown() { + runnable.shutdown(); + try { + publicationThread.join(); + } catch (InterruptedException e) { + throw new AbortedException(e.getMessage(), e); + } + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWMetricsScope.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWMetricsScope.java new file mode 100644 index 00000000..c301850e --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWMetricsScope.java @@ -0,0 +1,64 @@ +/* + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.metrics.impl; + +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import com.amazonaws.services.cloudwatch.model.MetricDatum; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; + +/** + * Metrics scope for CloudWatch metrics. + */ +public class CWMetricsScope extends FilteringMetricsScope implements IMetricsScope { + + private CWPublisherRunnable publisher; + + /** + * Creates a CloudWatch metrics scope with given metrics level and enabled dimensions. + * @param publisher Publisher that emits CloudWatch metrics periodically. + * @param metricsLevel Metrics level to enable. All data with level below this will be dropped. + * @param metricsEnabledDimensions Enabled dimensions for CloudWatch metrics. + */ + public CWMetricsScope(CWPublisherRunnable publisher, + MetricsLevel metricsLevel, Set metricsEnabledDimensions) { + super(metricsLevel, metricsEnabledDimensions); + this.publisher = publisher; + } + + /** + * Once we call this method, all MetricDatums added to the scope will be enqueued to the publisher runnable. + * We enqueue MetricDatumWithKey because the publisher will aggregate similar metrics (i.e. MetricDatum with the + * same metricName) in the background thread. Hence aggregation using MetricDatumWithKey will be especially useful + * when aggregating across multiple MetricScopes. 
+ */ + @Override + public void end() { + super.end(); + + List> dataWithKeys = new ArrayList>(); + + for (MetricDatum datum : data.values()) { + datum.setDimensions(getDimensions()); + dataWithKeys.add(new MetricDatumWithKey(new CWMetricKey(datum), datum)); + } + + publisher.enqueue(dataWithKeys); + } + +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnable.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWPublisherRunnable.java similarity index 56% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnable.java rename to src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWPublisherRunnable.java index 57e92b42..50371ee4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnable.java +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/CWPublisherRunnable.java @@ -1,32 +1,38 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.impl; import java.util.Collection; import java.util.List; import java.util.Random; -import lombok.extern.slf4j.Slf4j; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; /** - * A CloudWatchPublisherRunnable contains the logic of when to publish metrics. + * A CWPublisherRunnable contains the logic of when to publish metrics. + * + * @param */ -@Slf4j -public class CloudWatchPublisherRunnable implements Runnable { - private final CloudWatchMetricsPublisher metricsPublisher; - private final MetricAccumulatingQueue queue; + +public class CWPublisherRunnable implements Runnable { + + private static final Log LOG = LogFactory.getLog(CWPublisherRunnable.class); + + private final ICWMetricsPublisher metricsPublisher; + private final MetricAccumulatingQueue queue; private final long bufferTimeMillis; /* @@ -50,29 +56,29 @@ public class CloudWatchPublisherRunnable implements Runnable { * @param batchSize size of batch that can be published at a time */ - public CloudWatchPublisherRunnable(CloudWatchMetricsPublisher metricsPublisher, - long bufferTimeMillis, - int maxQueueSize, - int batchSize) { + public CWPublisherRunnable(ICWMetricsPublisher metricsPublisher, + long bufferTimeMillis, + int maxQueueSize, + int batchSize) { this(metricsPublisher, bufferTimeMillis, maxQueueSize, batchSize, 0); } - public CloudWatchPublisherRunnable(CloudWatchMetricsPublisher metricsPublisher, - long bufferTimeMillis, - int maxQueueSize, - int batchSize, - int maxJitter) { - if (log.isDebugEnabled()) { - log.debug("Constructing CloudWatchPublisherRunnable with maxBufferTimeMillis {} maxQueueSize {} batchSize {} maxJitter {}", + public 
CWPublisherRunnable(ICWMetricsPublisher metricsPublisher, + long bufferTimeMillis, + int maxQueueSize, + int batchSize, + int maxJitter) { + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Constructing CWPublisherRunnable with maxBufferTimeMillis %d maxQueueSize %d batchSize %d maxJitter %d", bufferTimeMillis, maxQueueSize, batchSize, - maxJitter); + maxJitter)); } this.metricsPublisher = metricsPublisher; this.bufferTimeMillis = bufferTimeMillis; - this.queue = new MetricAccumulatingQueue<>(maxQueueSize); + this.queue = new MetricAccumulatingQueue(maxQueueSize); this.flushSize = batchSize; this.maxJitter = maxJitter; } @@ -83,18 +89,18 @@ public class CloudWatchPublisherRunnable implements Runnable { try { runOnce(); } catch (Throwable t) { - log.error("Encountered throwable in CWPublisherRunable", t); + LOG.error("Encountered throwable in CWPublisherRunable", t); } } - log.info("CWPublication thread finished."); + LOG.info("CWPublication thread finished."); } /** * Exposed for testing purposes. */ public void runOnce() { - List> dataToPublish = null; + List> dataToPublish = null; synchronized (queue) { /* * We should send if: @@ -106,13 +112,13 @@ public class CloudWatchPublisherRunnable implements Runnable { long timeSinceFlush = Math.max(0, getTime() - lastFlushTime); if (timeSinceFlush >= bufferTimeMillis || queue.size() >= flushSize || shuttingDown) { dataToPublish = queue.drain(flushSize); - if (log.isDebugEnabled()) { - log.debug("Drained {} datums from queue", dataToPublish.size()); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Drained %d datums from queue", dataToPublish.size())); } if (shuttingDown) { - if (log.isDebugEnabled()) { - log.debug("Shutting down with {} datums left on the queue", queue.size()); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Shutting down with %d datums left on the queue", queue.size())); } // If we're shutting down, we successfully shut down only when the queue is empty. 
@@ -120,9 +126,9 @@ public class CloudWatchPublisherRunnable implements Runnable { } } else { long waitTime = bufferTimeMillis - timeSinceFlush; - if (log.isDebugEnabled()) { - log.debug("Waiting up to {} ms for {} more datums to appear.", waitTime, flushSize - - queue.size()); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Waiting up to %dms for %d more datums to appear.", waitTime, flushSize + - queue.size())); } try { @@ -137,7 +143,7 @@ public class CloudWatchPublisherRunnable implements Runnable { try { metricsPublisher.publishMetrics(dataToPublish); } catch (Throwable t) { - log.error("Caught exception thrown by metrics Publisher in CloudWatchPublisherRunnable", t); + LOG.error("Caught exception thrown by metrics Publisher in CWPublisherRunnable", t); } // Changing the value of lastFlushTime will change the time when metrics are flushed next. lastFlushTime = getTime() + nextJitterValueToUse; @@ -156,7 +162,7 @@ public class CloudWatchPublisherRunnable implements Runnable { } public void shutdown() { - log.info("Shutting down CWPublication thread."); + LOG.info("Shutting down CWPublication thread."); synchronized (queue) { shuttingDown = true; queue.notify(); @@ -172,20 +178,20 @@ public class CloudWatchPublisherRunnable implements Runnable { * * @param data collection of MetricDatum to enqueue */ - public void enqueue(Collection> data) { + public void enqueue(Collection> data) { synchronized (queue) { if (shuttingDown) { - log.warn("Dropping metrics {} because CloudWatchPublisherRunnable is shutting down.", data); + LOG.warn(String.format("Dropping metrics %s because CWPublisherRunnable is shutting down.", data)); return; } - if (log.isDebugEnabled()) { - log.debug("Enqueueing {} datums for publication", data.size()); + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("Enqueueing %d datums for publication", data.size())); } - for (MetricDatumWithKey datumWithKey : data) { + for (MetricDatumWithKey datumWithKey : data) { if 
(!queue.offer(datumWithKey.key, datumWithKey.datum)) { - log.warn("Metrics queue full - dropping metric {}", datumWithKey.datum); + LOG.warn("Metrics queue full - dropping metric " + datumWithKey.datum); } } diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/DefaultCWMetricsPublisher.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/DefaultCWMetricsPublisher.java new file mode 100644 index 00000000..76ae7a05 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/DefaultCWMetricsPublisher.java @@ -0,0 +1,71 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.metrics.impl; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.services.cloudwatch.AmazonCloudWatch; +import com.amazonaws.services.cloudwatch.model.MetricDatum; +import com.amazonaws.services.cloudwatch.model.PutMetricDataRequest; + +/** + * Default implementation for publishing metrics to CloudWatch. 
+ */ + +public class DefaultCWMetricsPublisher implements ICWMetricsPublisher { + + private static final Log LOG = LogFactory.getLog(CWPublisherRunnable.class); + + // CloudWatch API has a limit of 20 MetricDatums per request + private static final int BATCH_SIZE = 20; + + private final String namespace; + private final AmazonCloudWatch cloudWatchClient; + + public DefaultCWMetricsPublisher(AmazonCloudWatch cloudWatchClient, String namespace) { + this.cloudWatchClient = cloudWatchClient; + this.namespace = namespace; + } + + @Override + public void publishMetrics(List> dataToPublish) { + for (int startIndex = 0; startIndex < dataToPublish.size(); startIndex += BATCH_SIZE) { + int endIndex = Math.min(dataToPublish.size(), startIndex + BATCH_SIZE); + + PutMetricDataRequest request = new PutMetricDataRequest(); + request.setNamespace(namespace); + + List metricData = new ArrayList(); + for (int i = startIndex; i < endIndex; i++) { + metricData.add(dataToPublish.get(i).datum); + } + + request.setMetricData(metricData); + + try { + cloudWatchClient.putMetricData(request); + + LOG.debug(String.format("Successfully published %d datums.", endIndex - startIndex)); + } catch (AmazonClientException e) { + LOG.warn(String.format("Could not publish %d datums to CloudWatch", endIndex - startIndex), e); + } + } + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/DimensionTrackingMetricsScope.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/DimensionTrackingMetricsScope.java new file mode 100644 index 00000000..d9780977 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/DimensionTrackingMetricsScope.java @@ -0,0 +1,53 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.metrics.impl; + +import java.util.HashSet; +import java.util.Set; + +import com.amazonaws.services.cloudwatch.model.Dimension; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; + +/** + * DimensionTrackingMetricsScope is where we provide functionality for dimensions. + * Dimensions allow the user to be able view their metrics based off of the parameters they specify. + * + * The following examples show how to add dimensions if they would like to view their all metrics + * pertaining to a particular stream or for a specific date. + * + * myScope.addDimension("StreamName", "myStreamName"); + * myScope.addDimension("Date", "Dec012013"); + * + * + */ + +public abstract class DimensionTrackingMetricsScope implements IMetricsScope { + + private Set dimensions = new HashSet(); + + @Override + public void addDimension(String name, String value) { + dimensions.add(new Dimension().withName(name).withValue(value)); + } + + /** + * @return a set of dimensions for an IMetricsScope + */ + + protected Set getDimensions() { + return dimensions; + } + +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/EndingMetricsScope.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/EndingMetricsScope.java similarity index 59% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/EndingMetricsScope.java rename to src/main/java/com/amazonaws/services/kinesis/metrics/impl/EndingMetricsScope.java index c1d474aa..964b3539 100644 --- 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/EndingMetricsScope.java +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/EndingMetricsScope.java @@ -1,20 +1,21 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.impl; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; public abstract class EndingMetricsScope extends DimensionTrackingMetricsScope { diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/FilteringMetricsScope.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/FilteringMetricsScope.java similarity index 81% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/FilteringMetricsScope.java rename to src/main/java/com/amazonaws/services/kinesis/metrics/impl/FilteringMetricsScope.java index e869b2e6..f10142f4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/FilteringMetricsScope.java +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/FilteringMetricsScope.java @@ -1,23 +1,24 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.impl; import java.util.Set; +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; import com.google.common.collect.ImmutableSet; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; /** * An {@code IMetricsScope} that filters {@link #addData} calls based on the provided metrics level. If the provided diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/ICWMetricsPublisher.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/ICWMetricsPublisher.java new file mode 100644 index 00000000..6c6afe17 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/ICWMetricsPublisher.java @@ -0,0 +1,36 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.metrics.impl; + +import java.util.List; + +/** + * An ICWMetricsPublisher is a publisher that contains the logic to publish metrics. + * + * @param is a class that stores information about a MetricDatum. This is useful when wanting + * to compare MetricDatums or aggregate similar MetricDatums. 
+ */ + +public interface ICWMetricsPublisher { + + /** + * Given a list of MetricDatumWithKey, this method extracts the MetricDatum from each + * MetricDatumWithKey and publishes those datums. + * + * @param dataToPublish a list containing all the MetricDatums to publish + */ + + public void publishMetrics(List> dataToPublish); +} diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/InterceptingMetricsFactory.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/InterceptingMetricsFactory.java new file mode 100644 index 00000000..f89f9550 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/InterceptingMetricsFactory.java @@ -0,0 +1,87 @@ +/* + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.metrics.impl; + +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; + +public abstract class InterceptingMetricsFactory implements IMetricsFactory { + + private final IMetricsFactory other; + + public InterceptingMetricsFactory(IMetricsFactory other) { + this.other = other; + } + + @Override + public IMetricsScope createMetrics() { + IMetricsScope otherScope = other.createMetrics(); + interceptCreateMetrics(otherScope); + return new InterceptingMetricsScope(otherScope); + } + + protected void interceptCreateMetrics(IMetricsScope scope) { + // Default implementation does nothing; + } + + protected void interceptAddData(String name, double value, StandardUnit unit, IMetricsScope scope) { + scope.addData(name, value, unit); + } + + protected void interceptAddData(String name, double value, StandardUnit unit, MetricsLevel level, IMetricsScope scope) { + scope.addData(name, value, unit, level); + } + + protected void interceptAddDimension(String name, String value, IMetricsScope scope) { + scope.addDimension(name, value); + } + + protected void interceptEnd(IMetricsScope scope) { + scope.end(); + } + + private class InterceptingMetricsScope implements IMetricsScope { + + private IMetricsScope other; + + public InterceptingMetricsScope(IMetricsScope other) { + this.other = other; + } + + @Override + public void addData(String name, double value, StandardUnit unit) { + interceptAddData(name, value, unit, other); + } + + @Override + public void addData(String name, double value, StandardUnit unit, MetricsLevel level) { + interceptAddData(name, value, unit, level, other); + } + + @Override + public void addDimension(String name, String value) { + interceptAddDimension(name, value, other); + } + + 
@Override + public void end() { + interceptEnd(other); + } + + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/LogMetricsFactory.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/LogMetricsFactory.java new file mode 100644 index 00000000..07986d05 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/LogMetricsFactory.java @@ -0,0 +1,29 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.metrics.impl; + +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; + +/** + * An IMetricsFactory that creates IMetricsScopes that output themselves via log4j. + */ +public class LogMetricsFactory implements IMetricsFactory { + + @Override + public LogMetricsScope createMetrics() { + return new LogMetricsScope(); + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/LogMetricsScope.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/LogMetricsScope.java new file mode 100644 index 00000000..43773fed --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/LogMetricsScope.java @@ -0,0 +1,58 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.metrics.impl; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.cloudwatch.model.Dimension; +import com.amazonaws.services.cloudwatch.model.MetricDatum; +import com.amazonaws.services.cloudwatch.model.StatisticSet; + +/** + * An AccumulatingMetricsScope that outputs via log4j. + */ +public class LogMetricsScope extends AccumulateByNameMetricsScope { + + private static final Log LOG = LogFactory.getLog(LogMetricsScope.class); + + @Override + public void end() { + StringBuilder output = new StringBuilder(); + output.append("Metrics:\n"); + + output.append("Dimensions: "); + boolean needsComma = false; + for (Dimension dimension : getDimensions()) { + output.append(String.format("%s[%s: %s]", needsComma ? 
", " : "", dimension.getName(), dimension.getValue())); + needsComma = true; + } + output.append("\n"); + + for (MetricDatum datum : data.values()) { + StatisticSet statistics = datum.getStatisticValues(); + output.append(String.format("Name=%25s\tMin=%.2f\tMax=%.2f\tCount=%.2f\tSum=%.2f\tAvg=%.2f\tUnit=%s\n", + datum.getMetricName(), + statistics.getMinimum(), + statistics.getMaximum(), + statistics.getSampleCount(), + statistics.getSum(), + statistics.getSum() / statistics.getSampleCount(), + datum.getUnit())); + } + + LOG.info(output.toString()); + } +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricAccumulatingQueue.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/MetricAccumulatingQueue.java similarity index 52% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricAccumulatingQueue.java rename to src/main/java/com/amazonaws/services/kinesis/metrics/impl/MetricAccumulatingQueue.java index ae3598d7..cfd01322 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricAccumulatingQueue.java +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/MetricAccumulatingQueue.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.impl; import java.util.ArrayList; import java.util.HashMap; @@ -21,9 +21,8 @@ import java.util.Map; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; -import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; - +import com.amazonaws.services.cloudwatch.model.MetricDatum; +import com.amazonaws.services.cloudwatch.model.StatisticSet; /** * Helper class for accumulating MetricDatums with the same name and dimensions. @@ -46,11 +45,11 @@ public class MetricAccumulatingQueue { // Queue is for first in first out behavior private BlockingQueue> queue; // Map is for constant time lookup by key - private Map> map; + private Map map; public MetricAccumulatingQueue(int maxQueueSize) { - queue = new LinkedBlockingQueue<>(maxQueueSize); - map = new HashMap<>(); + queue = new LinkedBlockingQueue>(maxQueueSize); + map = new HashMap(); } /** @@ -58,9 +57,14 @@ public class MetricAccumulatingQueue { * @return a list of MetricDatums that are no longer contained within the queue or map. 
*/ public synchronized List> drain(int maxItems) { - List> drainedItems = new ArrayList<>(maxItems); + List> drainedItems = new ArrayList>(maxItems); + queue.drainTo(drainedItems, maxItems); - drainedItems.forEach(datumWithKey -> map.remove(datumWithKey.key)); + + for (MetricDatumWithKey datumWithKey : drainedItems) { + map.remove(datumWithKey.key); + } + return drainedItems; } @@ -81,37 +85,31 @@ public class MetricAccumulatingQueue { * @return a boolean depending on whether the datum was inserted into the queue */ public synchronized boolean offer(KeyType key, MetricDatum datum) { - MetricDatumWithKey metricDatumWithKey = map.get(key); - - if (metricDatumWithKey == null) { - metricDatumWithKey = new MetricDatumWithKey<>(key, datum); - boolean offered = queue.offer(metricDatumWithKey); + MetricDatum old = map.get(key); + if (old == null) { + boolean offered = queue.offer(new MetricDatumWithKey(key, datum)); if (offered) { - map.put(key, metricDatumWithKey); + map.put(key, datum); } return offered; } else { - accumulate(metricDatumWithKey, datum); + accumulate(old, datum); return true; } } - private void accumulate(MetricDatumWithKey metricDatumWithKey, MetricDatum newDatum) { - MetricDatum oldDatum = metricDatumWithKey.datum; - if (!oldDatum.unit().equals(newDatum.unit())) { - throw new IllegalArgumentException("Unit mismatch for datum named " + oldDatum.metricName()); + private void accumulate(MetricDatum oldDatum, MetricDatum newDatum) { + if (!oldDatum.getUnit().equals(newDatum.getUnit())) { + throw new IllegalArgumentException("Unit mismatch for datum named " + oldDatum.getMetricName()); } - StatisticSet oldStats = oldDatum.statisticValues(); - StatisticSet newStats = newDatum.statisticValues(); + StatisticSet oldStats = oldDatum.getStatisticValues(); + StatisticSet newStats = newDatum.getStatisticValues(); - StatisticSet statisticSet = oldStats.toBuilder().sum(oldStats.sum() + newStats.sum()) - .minimum(Math.min(oldStats.minimum(), newStats.minimum())) - 
.maximum(Math.max(oldStats.maximum(), newStats.maximum())) - .sampleCount(oldStats.sampleCount() + newStats.sampleCount()).build(); - - MetricDatum datum = oldDatum.toBuilder().statisticValues(statisticSet).build(); - metricDatumWithKey.datum(datum); + oldStats.setSampleCount(oldStats.getSampleCount() + newStats.getSampleCount()); + oldStats.setMaximum(Math.max(oldStats.getMaximum(), newStats.getMaximum())); + oldStats.setMinimum(Math.min(oldStats.getMinimum(), newStats.getMinimum())); + oldStats.setSum(oldStats.getSum() + newStats.getSum()); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricDatumWithKey.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/MetricDatumWithKey.java similarity index 58% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricDatumWithKey.java rename to src/main/java/com/amazonaws/services/kinesis/metrics/impl/MetricDatumWithKey.java index 25554733..c7066bc6 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricDatumWithKey.java +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/MetricDatumWithKey.java @@ -1,26 +1,21 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.metrics; - -import lombok.AllArgsConstructor; -import lombok.Data; -import lombok.Setter; -import lombok.experimental.Accessors; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; +package com.amazonaws.services.kinesis.metrics.impl; import java.util.Objects; +import com.amazonaws.services.cloudwatch.model.MetricDatum; /** * This class is used to store a MetricDatum as well as KeyType which stores specific information about @@ -38,19 +33,20 @@ import java.util.Objects; * SampleMetricKey(System.currentTimeMillis()), datum) * */ -@AllArgsConstructor -@Setter -@Accessors(fluent = true) public class MetricDatumWithKey { - /** - * An object that stores relevant information about a MetricDatum (e.g. MetricName, accountId, TimeStamp) - */ public KeyType key; + public MetricDatum datum; /** - * Data point + * @param key an object that stores relevant information about a MetricDatum (e.g. MetricName, accountId, + * TimeStamp) + * @param datum data point */ - public MetricDatum datum; + + public MetricDatumWithKey(KeyType key, MetricDatum datum) { + this.key = key; + this.datum = datum; + } @Override public int hashCode() { diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/MetricsHelper.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/MetricsHelper.java new file mode 100644 index 00000000..4599fbaa --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/MetricsHelper.java @@ -0,0 +1,162 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.metrics.impl; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; + +/** + * MetricsHelper assists with common metrics operations, most notably the storage of IMetricsScopes objects in a + * ThreadLocal so we don't have to pass one throughout the whole call stack. + */ +public class MetricsHelper { + + private static final Log LOG = LogFactory.getLog(MetricsHelper.class); + private static final NullMetricsScope NULL_METRICS_SCOPE = new NullMetricsScope(); + + private static final ThreadLocal currentScope = new ThreadLocal(); + private static final ThreadLocal referenceCount = new ThreadLocal(); + + /* + * Constants used to publish metrics. 
+ */ + public static final String OPERATION_DIMENSION_NAME = "Operation"; + public static final String SHARD_ID_DIMENSION_NAME = "ShardId"; + public static final String TIME = "Time"; + public static final String SUCCESS = "Success"; + private static final String SEP = "."; + + public static IMetricsScope startScope(IMetricsFactory factory) { + return startScope(factory, null); + } + + public static IMetricsScope startScope(IMetricsFactory factory, String operation) { + IMetricsScope result = currentScope.get(); + if (result == null) { + result = factory.createMetrics(); + if (operation != null) { + result.addDimension(OPERATION_DIMENSION_NAME, operation); + } + currentScope.set(result); + referenceCount.set(1); + } else { + referenceCount.set(referenceCount.get() + 1); + } + + return result; + } + + /** + * Sets given metrics scope for the current thread. + * + * Method must be used with care. Metrics helper is designed such that separate metrics scopes are associated + * with each thread. However, when sharing metrics scope and setting it explicitly on a thread, thread safety must + * also be taken into account. + * @param scope + */ + public static void setMetricsScope(IMetricsScope scope) { + if (currentScope.get() != null) { + throw new RuntimeException(String.format( + "Metrics scope is already set for the current thread %s", Thread.currentThread().getName())); + } + currentScope.set(scope); + } + + /** + * Unsets the metrics scope for the current thread. 
+ */ + public static void unsetMetricsScope() { + currentScope.remove(); + } + + public static IMetricsScope getMetricsScope() { + IMetricsScope result = currentScope.get(); + if (result == null) { + LOG.warn(String.format("No metrics scope set in thread %s, getMetricsScope returning NullMetricsScope.", + Thread.currentThread().getName())); + + return NULL_METRICS_SCOPE; + } else { + return result; + } + } + + public static void addSuccessAndLatency(long startTimeMillis, boolean success, MetricsLevel level) { + addSuccessAndLatency(null, startTimeMillis, success, level); + } + + public static void addSuccessAndLatency( + String prefix, long startTimeMillis, boolean success, MetricsLevel level) { + addSuccessAndLatencyPerShard(null, prefix, startTimeMillis, success, level); + } + + public static void addSuccessAndLatencyPerShard ( + String shardId, + String prefix, + long startTimeMillis, + boolean success, + MetricsLevel level) { + addSuccessAndLatency(shardId, prefix, startTimeMillis, success, level, true, true); + } + + public static void addLatency(long startTimeMillis, MetricsLevel level) { + addLatency(null, startTimeMillis, level); + } + + public static void addLatency(String prefix, long startTimeMillis, MetricsLevel level) { + addLatencyPerShard(null, prefix, startTimeMillis, level); + } + + public static void addLatencyPerShard(String shardId, String prefix, long startTimeMillis, MetricsLevel level) { + addSuccessAndLatency(shardId, prefix, startTimeMillis, false, level, false, true); + } + + private static void addSuccessAndLatency( + String shardId, String prefix, long startTimeMillis, boolean success, MetricsLevel level, + boolean includeSuccess, boolean includeLatency) { + IMetricsScope scope = getMetricsScope(); + + String realPrefix = prefix == null ? "" : prefix + SEP; + + if (shardId != null) { + scope.addDimension(SHARD_ID_DIMENSION_NAME, shardId); + } + if (includeSuccess) { + scope.addData(realPrefix + MetricsHelper.SUCCESS, success ? 
1 : 0, StandardUnit.Count, level); + } + if (includeLatency) { + scope.addData(realPrefix + MetricsHelper.TIME, + System.currentTimeMillis() - startTimeMillis, StandardUnit.Milliseconds, level); + } + } + + public static void endScope() { + IMetricsScope scope = getMetricsScope(); + if (scope != null) { + referenceCount.set(referenceCount.get() - 1); + + if (referenceCount.get() == 0) { + scope.end(); + currentScope.remove(); + } + } + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/NullMetricsFactory.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/NullMetricsFactory.java new file mode 100644 index 00000000..4169d076 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/NullMetricsFactory.java @@ -0,0 +1,29 @@ +/* + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.metrics.impl; + +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; + +public class NullMetricsFactory implements IMetricsFactory { + + private static final NullMetricsScope SCOPE = new NullMetricsScope(); + + @Override + public IMetricsScope createMetrics() { + return SCOPE; + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/NullMetricsScope.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/NullMetricsScope.java new file mode 100644 index 00000000..7d66dffc --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/NullMetricsScope.java @@ -0,0 +1,42 @@ +/* + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.metrics.impl; + +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; + +public class NullMetricsScope implements IMetricsScope { + + @Override + public void addData(String name, double value, StandardUnit unit) { + + } + + @Override + public void addData(String name, double value, StandardUnit unit, MetricsLevel level) { + + } + + @Override + public void addDimension(String name, String value) { + + } + + @Override + public void end() { + + } +} diff --git a/src/main/java/com/amazonaws/services/kinesis/metrics/impl/ThreadSafeMetricsDelegatingFactory.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/ThreadSafeMetricsDelegatingFactory.java new file mode 100644 index 00000000..ede5b9c5 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/ThreadSafeMetricsDelegatingFactory.java @@ -0,0 +1,44 @@ +/* + * Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.metrics.impl; + +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; + +/** + * Metrics scope factory that delegates metrics scope creation to another factory, but + * returns metrics scope that is thread safe. 
+ */ +public class ThreadSafeMetricsDelegatingFactory implements IMetricsFactory { + + /** Metrics factory to delegate to. */ + private final IMetricsFactory delegate; + + /** + * Creates an instance of the metrics factory. + * @param delegate metrics factory to delegate to + */ + public ThreadSafeMetricsDelegatingFactory(IMetricsFactory delegate) { + this.delegate = delegate; + } + + /** + * {@inheritDoc} + */ + @Override + public IMetricsScope createMetrics() { + return new ThreadSafeMetricsDelegatingScope(delegate.createMetrics()); + } +} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingScope.java b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/ThreadSafeMetricsDelegatingScope.java similarity index 51% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingScope.java rename to src/main/java/com/amazonaws/services/kinesis/metrics/impl/ThreadSafeMetricsDelegatingScope.java index 8d9fb291..5af4fab8 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingScope.java +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/impl/ThreadSafeMetricsDelegatingScope.java @@ -1,36 +1,37 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.impl; - -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; /** * Metrics scope that delegates to another metrics scope and is thread safe to be shared * across different threads. */ -public class ThreadSafeMetricsDelegatingScope implements MetricsScope { +public class ThreadSafeMetricsDelegatingScope implements IMetricsScope { /** Metrics scope to delegate to. */ - private final MetricsScope delegate; + private final IMetricsScope delegate; /** * Creates an instance of the metrics scope. 
* @param delegate metrics scope to delegate to */ - public ThreadSafeMetricsDelegatingScope(MetricsScope delegate) { + public ThreadSafeMetricsDelegatingScope(IMetricsScope delegate) { this.delegate = delegate; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalSpecificConfig.java b/src/main/java/com/amazonaws/services/kinesis/metrics/interfaces/IMetricsFactory.java similarity index 58% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalSpecificConfig.java rename to src/main/java/com/amazonaws/services/kinesis/metrics/interfaces/IMetricsFactory.java index 40077e86..03beda07 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalSpecificConfig.java +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/interfaces/IMetricsFactory.java @@ -1,5 +1,5 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Amazon Software License (the "License"). * You may not use this file except in compliance with the License. @@ -12,14 +12,14 @@ * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ +package com.amazonaws.services.kinesis.metrics.interfaces; -package software.amazon.kinesis.retrieval; - -public interface RetrievalSpecificConfig { +/** + * Factory for MetricsScope objects. + */ +public interface IMetricsFactory { /** - * Creates and returns a retrieval factory for the specific configuration - * - * @return a retrieval factory that can create an appropriate retriever + * @return a new IMetricsScope object of the type constructed by this factory. 
*/ - RetrievalFactory retrievalFactory(); + public IMetricsScope createMetrics(); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsScope.java b/src/main/java/com/amazonaws/services/kinesis/metrics/interfaces/IMetricsScope.java similarity index 55% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsScope.java rename to src/main/java/com/amazonaws/services/kinesis/metrics/interfaces/IMetricsScope.java index 5028e75a..5683b345 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsScope.java +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/interfaces/IMetricsScope.java @@ -1,31 +1,31 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.interfaces; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.cloudwatch.model.StandardUnit; /** * An IMetricsScope represents a set of metric data that share a set of dimensions. IMetricsScopes know how to output * themselves (perhaps to disk, perhaps over service calls, etc). */ -public interface MetricsScope { +public interface IMetricsScope { /** * Value that signifies that all dimensions are allowed for the metrics scope. */ - String METRICS_DIMENSIONS_ALL = "ALL"; + public static final String METRICS_DIMENSIONS_ALL = "ALL"; /** * Adds a data point to this IMetricsScope. Multiple calls against the same IMetricsScope with the same name @@ -35,7 +35,7 @@ public interface MetricsScope { * @param value data point value * @param unit unit of data point */ - void addData(String name, double value, StandardUnit unit); + public void addData(String name, double value, StandardUnit unit); /** * Adds a data point to this IMetricsScope if given metrics level is enabled. Multiple calls against the same @@ -46,7 +46,7 @@ public interface MetricsScope { * @param unit unit of data point * @param level metrics level of this data point */ - void addData(String name, double value, StandardUnit unit, MetricsLevel level); + public void addData(String name, double value, StandardUnit unit, MetricsLevel level); /** * Adds a dimension that applies to all metrics in this IMetricsScope. @@ -54,10 +54,10 @@ public interface MetricsScope { * @param name dimension name * @param value dimension value */ - void addDimension(String name, String value); + public void addDimension(String name, String value); /** * Flushes the data from this IMetricsScope and causes future calls to addData and addDimension to fail. 
*/ - void end(); + public void end(); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsLevel.java b/src/main/java/com/amazonaws/services/kinesis/metrics/interfaces/MetricsLevel.java similarity index 76% rename from amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsLevel.java rename to src/main/java/com/amazonaws/services/kinesis/metrics/interfaces/MetricsLevel.java index 860df187..5ad9ed48 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsLevel.java +++ b/src/main/java/com/amazonaws/services/kinesis/metrics/interfaces/MetricsLevel.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.interfaces; /** * This class defines a set of standard metrics levels that can be used to control which metrics get emitted. diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDERRTask.java b/src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDERRTask.java similarity index 83% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDERRTask.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDERRTask.java index 7276b229..4a43a3d6 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDERRTask.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDERRTask.java @@ -16,19 +16,22 @@ package com.amazonaws.services.kinesis.multilang; import java.io.BufferedReader; -import lombok.extern.slf4j.Slf4j; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; /** * Reads lines off the STDERR of the child process and prints them to this process's (the JVM's) STDERR and log. 
*/ -@Slf4j class DrainChildSTDERRTask extends LineReaderTask { + + private static final Log LOG = LogFactory.getLog(DrainChildSTDERRTask.class); + DrainChildSTDERRTask() { } @Override protected HandleLineResult handleLine(String line) { - log.error("Received error line from subprocess [{}] for shard {}", line, getShardId()); + LOG.error("Received error line from subprocess [" + line + "] for shard " + getShardId()); System.err.println(line); return new HandleLineResult(); } diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDOUTTask.java b/src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDOUTTask.java similarity index 63% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDOUTTask.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDOUTTask.java index 0e95a14e..54985559 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDOUTTask.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/DrainChildSTDOUTTask.java @@ -1,22 +1,23 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ package com.amazonaws.services.kinesis.multilang; import java.io.BufferedReader; -import lombok.extern.slf4j.Slf4j; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; /** * This class is used to drain the STDOUT of the child process. After the child process has been given a shutdown @@ -35,20 +36,22 @@ import lombok.extern.slf4j.Slf4j; * To prevent the child process from becoming blocked in this way, it is the responsibility of the parent process to * drain the child process's STDOUT. We reprint each drained line to our log to permit debugging. */ -@Slf4j class DrainChildSTDOUTTask extends LineReaderTask { + + private static final Log LOG = LogFactory.getLog(DrainChildSTDOUTTask.class); + DrainChildSTDOUTTask() { } @Override protected HandleLineResult handleLine(String line) { - log.info("Drained line for shard {}: {}", getShardId(), line); + LOG.info("Drained line for shard " + getShardId() + ": " + line); return new HandleLineResult(); } @Override protected Boolean returnAfterException(Exception e) { - log.info("Encountered exception while draining STDOUT of child process for shard {}", getShardId(), e); + LOG.info("Encountered exception while draining STDOUT of child process for shard " + getShardId(), e); return false; } diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/GetNextMessageTask.java b/src/main/java/com/amazonaws/services/kinesis/multilang/GetNextMessageTask.java similarity index 75% rename from 
amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/GetNextMessageTask.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/GetNextMessageTask.java index 8177a8d2..7359ff40 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/GetNextMessageTask.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/GetNextMessageTask.java @@ -1,33 +1,37 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ package com.amazonaws.services.kinesis.multilang; import java.io.BufferedReader; import java.io.IOException; -import com.amazonaws.services.kinesis.multilang.messages.Message; -import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; -import lombok.extern.slf4j.Slf4j; +import com.amazonaws.services.kinesis.multilang.messages.Message; + +import com.fasterxml.jackson.databind.ObjectMapper; /** * Gets the next message off the STDOUT of the child process. Throws an exception if a message is not found before the * end of the input stream is reached. */ -@Slf4j class GetNextMessageTask extends LineReaderTask { + + private static final Log LOG = LogFactory.getLog(GetNextMessageTask.class); + private ObjectMapper objectMapper; private static final String EMPTY_LINE = ""; @@ -64,7 +68,7 @@ class GetNextMessageTask extends LineReaderTask { return new HandleLineResult(objectMapper.readValue(line, Message.class)); } } catch (IOException e) { - log.info("Skipping unexpected line on STDOUT for shard {}: {}", getShardId(), line); + LOG.info("Skipping unexpected line on STDOUT for shard " + getShardId() + ": " + line); } return new HandleLineResult(); } diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/LineReaderTask.java b/src/main/java/com/amazonaws/services/kinesis/multilang/LineReaderTask.java similarity index 85% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/LineReaderTask.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/LineReaderTask.java index 650fc0c5..4b6402c3 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/LineReaderTask.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/LineReaderTask.java @@ -1,16 +1,16 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ package com.amazonaws.services.kinesis.multilang; @@ -20,7 +20,8 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.util.concurrent.Callable; -import lombok.extern.slf4j.Slf4j; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; /** * This abstract class captures the process of reading from an input stream. 
Three methods must be provided for @@ -33,8 +34,10 @@ import lombok.extern.slf4j.Slf4j; * * @param */ -@Slf4j abstract class LineReaderTask implements Callable { + + private static final Log LOG = LogFactory.getLog(LineReaderTask.class); + private BufferedReader reader; private String description; @@ -53,7 +56,7 @@ abstract class LineReaderTask implements Callable { public T call() throws Exception { String nextLine = null; try { - log.info("Starting: {}", description); + LOG.info("Starting: " + description); while ((nextLine = reader.readLine()) != null) { HandleLineResult result = handleLine(nextLine); if (result.hasReturnValue()) { @@ -63,7 +66,7 @@ abstract class LineReaderTask implements Callable { } catch (IOException e) { return returnAfterException(e); } - log.info("Stopping: {}", description); + LOG.info("Stopping: " + description); return returnAfterEndOfInput(); } @@ -154,8 +157,8 @@ abstract class LineReaderTask implements Callable { /** * An initialization method allows us to delay setting the attributes of this class. Some of the attributes, stream * and shardId, are not known to the {@link MultiLangRecordProcessorFactory} when it constructs a - * {@link MultiLangShardRecordProcessor} but are later determined when - * {@link MultiLangShardRecordProcessor#initialize(String)} is called. So we follow a pattern where the attributes are + * {@link MultiLangRecordProcessor} but are later determined when + * {@link MultiLangRecordProcessor#initialize(String)} is called. So we follow a pattern where the attributes are * set inside this method instead of the constructor so that this object will be initialized when all its attributes * are known to the record processor. 
* diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MessageReader.java b/src/main/java/com/amazonaws/services/kinesis/multilang/MessageReader.java similarity index 96% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MessageReader.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/MessageReader.java index 6bd3aa93..71fbbd05 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MessageReader.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/MessageReader.java @@ -85,8 +85,8 @@ class MessageReader { /** * An initialization method allows us to delay setting the attributes of this class. Some of the attributes, * stream and shardId, are not known to the {@link MultiLangRecordProcessorFactory} when it constructs a - * {@link MultiLangShardRecordProcessor} but are later determined when - * {@link MultiLangShardRecordProcessor#initialize(String)} is called. So we follow a pattern where the attributes are + * {@link MultiLangRecordProcessor} but are later determined when + * {@link MultiLangRecordProcessor#initialize(String)} is called. So we follow a pattern where the attributes are * set inside this method instead of the constructor so that this object will be initialized when all its attributes * are known to the record processor. 
* diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MessageWriter.java b/src/main/java/com/amazonaws/services/kinesis/multilang/MessageWriter.java similarity index 81% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MessageWriter.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/MessageWriter.java index 164a36bf..b2ddbfe3 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MessageWriter.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/MessageWriter.java @@ -1,16 +1,16 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ package com.amazonaws.services.kinesis.multilang; @@ -22,24 +22,26 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason; +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; import com.amazonaws.services.kinesis.multilang.messages.CheckpointMessage; import com.amazonaws.services.kinesis.multilang.messages.InitializeMessage; import com.amazonaws.services.kinesis.multilang.messages.Message; import com.amazonaws.services.kinesis.multilang.messages.ProcessRecordsMessage; import com.amazonaws.services.kinesis.multilang.messages.ShutdownMessage; -import com.amazonaws.services.kinesis.multilang.messages.ShutdownRequestedMessage; import com.fasterxml.jackson.databind.ObjectMapper; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownReason; - /** * Defines methods for writing {@link Message} objects to the child process's STDIN. 
*/ -@Slf4j class MessageWriter { + + private static final Log LOG = LogFactory.getLog(MessageWriter.class); + private BufferedWriter writer; private volatile boolean open = true; @@ -77,7 +79,7 @@ class MessageWriter { writer.write(System.lineSeparator(), 0, System.lineSeparator().length()); writer.flush(); } - log.info("Message size == {} bytes for shard {}", message.getBytes().length, shardId); + LOG.info("Message size == " + message.getBytes().length + " bytes for shard " + shardId); } catch (IOException e) { open = false; } @@ -89,7 +91,7 @@ class MessageWriter { return this.executorService.submit(writeMessageToOutputTask); } else { String errorMessage = "Cannot write message " + message + " because writer is closed for shard " + shardId; - log.info(errorMessage); + LOG.info(errorMessage); throw new IllegalStateException(errorMessage); } } @@ -101,7 +103,7 @@ class MessageWriter { * @return */ private Future writeMessage(Message message) { - log.info("Writing {} to child process for shard {}", message.getClass().getSimpleName(), shardId); + LOG.info("Writing " + message.getClass().getSimpleName() + " to child process for shard " + shardId); try { String jsonText = objectMapper.writeValueAsString(message); return writeMessageToOutput(jsonText); @@ -109,7 +111,7 @@ class MessageWriter { String errorMessage = String.format("Encountered I/O error while writing %s action to subprocess", message.getClass() .getSimpleName()); - log.error(errorMessage, e); + LOG.error(errorMessage, e); throw new RuntimeException(errorMessage, e); } } @@ -143,13 +145,6 @@ class MessageWriter { return writeMessage(new ShutdownMessage(reason)); } - /** - * Writes a {@link ShutdownRequestedMessage} to the subprocess. - */ - Future writeShutdownRequestedMessage() { - return writeMessage(new ShutdownRequestedMessage()); - } - /** * Writes a {@link CheckpointMessage} to the subprocess. 
* @@ -182,8 +177,8 @@ class MessageWriter { /** * An initialization method allows us to delay setting the attributes of this class. Some of the attributes, * stream and shardId, are not known to the {@link MultiLangRecordProcessorFactory} when it constructs a - * {@link MultiLangShardRecordProcessor} but are later determined when - * {@link MultiLangShardRecordProcessor (String)} is called. So we follow a pattern where the attributes are + * {@link MultiLangRecordProcessor} but are later determined when + * {@link MultiLangRecordProcessor#initialize(String)} is called. So we follow a pattern where the attributes are * set inside this method instead of the constructor so that this object will be initialized when all its attributes * are known to the record processor. * diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemon.java b/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemon.java similarity index 65% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemon.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemon.java index ecb70d22..fdff4dc7 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemon.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemon.java @@ -1,16 +1,16 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ package com.amazonaws.services.kinesis.multilang; @@ -20,16 +20,16 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; -import software.amazon.kinesis.coordinator.Scheduler; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker; /** - * Main app that launches the scheduler that runs the multi-language record processor. + * Main app that launches the worker that runs the multi-language record processor. * * Requires a properties file containing configuration for this daemon and the KCL. 
A properties file should at minimum * define these properties: @@ -55,9 +55,11 @@ import software.amazon.kinesis.processor.ShardRecordProcessorFactory; * AWSCredentialsProvider = DefaultAWSCredentialsProviderChain * */ -@Slf4j public class MultiLangDaemon implements Callable { - private Scheduler scheduler; + + private static final Log LOG = LogFactory.getLog(MultiLangDaemon.class); + + private Worker worker; /** * Constructor. @@ -73,17 +75,18 @@ public class MultiLangDaemon implements Callable { this(buildWorker(recordProcessorFactory, configuration, workerThreadPool)); } - private static Scheduler buildWorker(ShardRecordProcessorFactory recordShardRecordProcessorFactory, + private static Worker buildWorker(IRecordProcessorFactory recordProcessorFactory, KinesisClientLibConfiguration configuration, ExecutorService workerThreadPool) { - return null; + return new Worker.Builder().recordProcessorFactory(recordProcessorFactory).config(configuration) + .execService(workerThreadPool).build(); } /** * - * @param scheduler A scheduler to use instead of the default scheduler. + * @param worker A worker to use instead of the default worker. 
*/ - public MultiLangDaemon(Scheduler scheduler) { - this.scheduler = scheduler; + public MultiLangDaemon(Worker worker) { + this.worker = worker; } /** @@ -105,9 +108,9 @@ public class MultiLangDaemon implements Callable { public Integer call() throws Exception { int exitCode = 0; try { - scheduler.run(); + worker.run(); } catch (Throwable t) { - log.error("Caught throwable while processing data.", t); + LOG.error("Caught throwable while processing data.", t); exitCode = 1; } return exitCode; @@ -137,31 +140,16 @@ public class MultiLangDaemon implements Callable { ExecutorService executorService = config.getExecutorService(); // Daemon - final MultiLangDaemon daemon = new MultiLangDaemon( + MultiLangDaemon daemon = new MultiLangDaemon( config.getKinesisClientLibConfiguration(), config.getRecordProcessorFactory(), executorService); - final long shutdownGraceMillis = config.getKinesisClientLibConfiguration().getShutdownGraceMillis(); - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - log.info("Process terminated, will initiate shutdown."); - try { - Future fut = daemon.scheduler.requestShutdown(); - fut.get(shutdownGraceMillis, TimeUnit.MILLISECONDS); - log.info("Process shutdown is complete."); - } catch (InterruptedException | ExecutionException | TimeoutException e) { - log.error("Encountered an error during shutdown.", e); - } - } - }); - Future future = executorService.submit(daemon); try { System.exit(future.get()); } catch (InterruptedException | ExecutionException e) { - log.error("Encountered an error while running daemon", e); + LOG.error("Encountered an error while running daemon", e); } System.exit(1); } diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfig.java b/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfig.java similarity index 62% rename from 
amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfig.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfig.java index 70f90a06..f191eedc 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfig.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfig.java @@ -1,9 +1,16 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Amazon Software License - * (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at - * http://aws.amazon.com/asl/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific - * language governing permissions and limitations under the License. + * Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ package com.amazonaws.services.kinesis.multilang; @@ -19,18 +26,20 @@ import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; -import com.amazonaws.services.kinesis.multilang.config.KinesisClientLibConfigurator; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.retrieval.RetrievalConfig; +import com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfigurator; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration; +import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * This class captures the configuration needed to run the MultiLangDaemon. */ -@Slf4j public class MultiLangDaemonConfig { + + private static final Log LOG = LogFactory.getLog(MultiLangDaemonConfig.class); + private static final String USER_AGENT = "amazon-kinesis-multi-lang-daemon"; private static final String VERSION = "1.0.1"; @@ -47,12 +56,9 @@ public class MultiLangDaemonConfig { /** * Constructor. * - * @param propertiesFile - * The location of the properties file. - * @throws IOException - * Thrown when the properties file can't be accessed. - * @throws IllegalArgumentException - * Thrown when the contents of the properties file are not as expected. + * @param propertiesFile The location of the properties file. + * @throws IOException Thrown when the properties file can't be accessed. + * @throws IllegalArgumentException Thrown when the contents of the properties file are not as expected. 
*/ public MultiLangDaemonConfig(String propertiesFile) throws IOException, IllegalArgumentException { this(propertiesFile, Thread.currentThread().getContextClassLoader()); @@ -60,39 +66,33 @@ public class MultiLangDaemonConfig { /** * - * @param propertiesFile - * The location of the properties file. - * @param classLoader - * A classloader, useful if trying to programmatically configure with the daemon, such as in a unit test. - * @throws IOException - * Thrown when the properties file can't be accessed. - * @throws IllegalArgumentException - * Thrown when the contents of the properties file are not as expected. + * @param propertiesFile The location of the properties file. + * @param classLoader A classloader, useful if trying to programmatically configure with the daemon, such as in a + * unit test. + * @throws IOException Thrown when the properties file can't be accessed. + * @throws IllegalArgumentException Thrown when the contents of the properties file are not as expected. */ - public MultiLangDaemonConfig(String propertiesFile, ClassLoader classLoader) - throws IOException, IllegalArgumentException { + public MultiLangDaemonConfig(String propertiesFile, ClassLoader classLoader) throws IOException, + IllegalArgumentException { this(propertiesFile, classLoader, new KinesisClientLibConfigurator()); } /** * - * @param propertiesFile - * The location of the properties file. - * @param classLoader - * A classloader, useful if trying to programmatically configure with the daemon, such as in a unit test. - * @param configurator - * A configurator to use. - * @throws IOException - * Thrown when the properties file can't be accessed. - * @throws IllegalArgumentException - * Thrown when the contents of the properties file are not as expected. + * @param propertiesFile The location of the properties file. + * @param classLoader A classloader, useful if trying to programmatically configure with the daemon, such as in a + * unit test. 
+ * @param configurator A configurator to use. + * @throws IOException Thrown when the properties file can't be accessed. + * @throws IllegalArgumentException Thrown when the contents of the properties file are not as expected. */ - public MultiLangDaemonConfig(String propertiesFile, ClassLoader classLoader, + public MultiLangDaemonConfig(String propertiesFile, + ClassLoader classLoader, KinesisClientLibConfigurator configurator) throws IOException, IllegalArgumentException { Properties properties = loadProperties(classLoader, propertiesFile); if (!validateProperties(properties)) { - throw new IllegalArgumentException( - "Must provide an executable name in the properties file, " + "e.g. executableName = sampleapp.py"); + throw new IllegalArgumentException("Must provide an executable name in the properties file, " + + "e.g. executableName = sampleapp.py"); } String executableName = properties.getProperty(PROP_EXECUTABLE_NAME); @@ -100,11 +100,10 @@ public class MultiLangDaemonConfig { kinesisClientLibConfig = configurator.getConfiguration(properties); executorService = buildExecutorService(properties); - recordProcessorFactory = new MultiLangRecordProcessorFactory(executableName, executorService, - kinesisClientLibConfig); + recordProcessorFactory = new MultiLangRecordProcessorFactory(executableName, executorService); - log.info("Running {} to process stream {} with executable {}", kinesisClientLibConfig.getApplicationName(), - kinesisClientLibConfig.getStreamName(), executableName); + LOG.info("Running " + kinesisClientLibConfig.getApplicationName() + " to process stream " + + kinesisClientLibConfig.getStreamName() + " with executable " + executableName); prepare(processingLanguage); } @@ -112,11 +111,11 @@ public class MultiLangDaemonConfig { // Ensure the JVM will refresh the cached IP values of AWS resources (e.g. service endpoints). 
java.security.Security.setProperty("networkaddress.cache.ttl", "60"); - log.info("Using workerId: {}", kinesisClientLibConfig.getWorkerIdentifier()); - log.info("Using credentials with access key id: {}", - kinesisClientLibConfig.getKinesisCredentialsProvider().resolveCredentials().accessKeyId()); + LOG.info("Using workerId: " + kinesisClientLibConfig.getWorkerIdentifier()); + LOG.info("Using credentials with access key id: " + + kinesisClientLibConfig.getKinesisCredentialsProvider().getCredentials().getAWSAccessKeyId()); - StringBuilder userAgent = new StringBuilder(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT); + StringBuilder userAgent = new StringBuilder(KinesisClientLibConfiguration.KINESIS_CLIENT_LIB_USER_AGENT); userAgent.append(" "); userAgent.append(USER_AGENT); userAgent.append("/"); @@ -132,7 +131,8 @@ public class MultiLangDaemonConfig { userAgent.append(recordProcessorFactory.getCommandArray()[0]); } - log.info("MultiLangDaemon is adding the following fields to the User Agent: {}", userAgent.toString()); + LOG.info(String.format("MultiLangDaemon is adding the following fields to the User Agent: %s", + userAgent.toString())); kinesisClientLibConfig.withUserAgent(userAgent.toString()); } @@ -174,13 +174,13 @@ public class MultiLangDaemonConfig { private static ExecutorService buildExecutorService(Properties properties) { int maxActiveThreads = getMaxActiveThreads(properties); ThreadFactoryBuilder builder = new ThreadFactoryBuilder().setNameFormat("multi-lang-daemon-%04d"); - log.debug("Value for {} property is {}", PROP_MAX_ACTIVE_THREADS, maxActiveThreads); + LOG.debug(String.format("Value for %s property is %d", PROP_MAX_ACTIVE_THREADS, maxActiveThreads)); if (maxActiveThreads <= 0) { - log.info("Using a cached thread pool."); + LOG.info("Using a cached thread pool."); return new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue(), builder.build()); } else { - log.info("Using a fixed thread pool with {} max active 
threads.", maxActiveThreads); + LOG.info(String.format("Using a fixed thread pool with %d max active threads.", maxActiveThreads)); return new ThreadPoolExecutor(maxActiveThreads, maxActiveThreads, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue(), builder.build()); } diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocol.java b/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocol.java similarity index 58% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocol.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocol.java index 75e552ce..64c7829f 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocol.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocol.java @@ -1,55 +1,49 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. */ package com.amazonaws.services.kinesis.multilang; -import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import software.amazon.kinesis.exceptions.InvalidStateException; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; -import software.amazon.kinesis.lifecycle.ShutdownReason; -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason; +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; import com.amazonaws.services.kinesis.multilang.messages.CheckpointMessage; import com.amazonaws.services.kinesis.multilang.messages.InitializeMessage; import com.amazonaws.services.kinesis.multilang.messages.Message; import com.amazonaws.services.kinesis.multilang.messages.ProcessRecordsMessage; import com.amazonaws.services.kinesis.multilang.messages.ShutdownMessage; -import com.amazonaws.services.kinesis.multilang.messages.ShutdownRequestedMessage; import com.amazonaws.services.kinesis.multilang.messages.StatusMessage; -import lombok.extern.slf4j.Slf4j; +import lombok.extern.apachecommons.CommonsLog; /** * An implementation of the multi language protocol. 
*/ -@Slf4j +@CommonsLog class MultiLangProtocol { private MessageReader messageReader; private MessageWriter messageWriter; private final InitializationInput initializationInput; - private KinesisClientLibConfiguration configuration; /** * Constructor. - * + * * @param messageReader * A message reader. * @param messageWriter @@ -58,17 +52,16 @@ class MultiLangProtocol { * information about the shard this processor is starting to process */ MultiLangProtocol(MessageReader messageReader, MessageWriter messageWriter, - InitializationInput initializationInput, KinesisClientLibConfiguration configuration) { + InitializationInput initializationInput) { this.messageReader = messageReader; this.messageWriter = messageWriter; this.initializationInput = initializationInput; - this.configuration = configuration; } /** * Writes an {@link InitializeMessage} to the child process's STDIN and waits for the child process to respond with * a {@link StatusMessage} on its STDOUT. - * + * * @return Whether or not this operation succeeded. */ boolean initialize() { @@ -83,49 +76,37 @@ class MultiLangProtocol { /** * Writes a {@link ProcessRecordsMessage} to the child process's STDIN and waits for the child process to respond * with a {@link StatusMessage} on its STDOUT. - * + * * @param processRecordsInput * The records, and associated metadata, to process. * @return Whether or not this operation succeeded. */ boolean processRecords(ProcessRecordsInput processRecordsInput) { Future writeFuture = messageWriter.writeProcessRecordsMessage(processRecordsInput); - return waitForStatusMessage(ProcessRecordsMessage.ACTION, processRecordsInput.checkpointer(), writeFuture); + return waitForStatusMessage(ProcessRecordsMessage.ACTION, processRecordsInput.getCheckpointer(), writeFuture); } /** * Writes a {@link ShutdownMessage} to the child process's STDIN and waits for the child process to respond with a * {@link StatusMessage} on its STDOUT. - * + * * @param checkpointer A checkpointer. 
* @param reason Why this processor is being shutdown. * @return Whether or not this operation succeeded. */ - boolean shutdown(RecordProcessorCheckpointer checkpointer, ShutdownReason reason) { + boolean shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) { Future writeFuture = messageWriter.writeShutdownMessage(reason); return waitForStatusMessage(ShutdownMessage.ACTION, checkpointer, writeFuture); } - /** - * Writes a {@link ShutdownRequestedMessage} to the child process's STDIN and waits for the child process to respond with a - * {@link StatusMessage} on its STDOUT. - * - * @param checkpointer A checkpointer. - * @return Whether or not this operation succeeded. - */ - boolean shutdownRequested(RecordProcessorCheckpointer checkpointer) { - Future writeFuture = messageWriter.writeShutdownRequestedMessage(); - return waitForStatusMessage(ShutdownRequestedMessage.ACTION, checkpointer, writeFuture); - } - /** * Waits for a {@link StatusMessage} for a particular action. If a {@link CheckpointMessage} is received, then this - * method will attempt to checkpoint with the provided {@link RecordProcessorCheckpointer}. This method returns + * method will attempt to checkpoint with the provided {@link IRecordProcessorCheckpointer}. This method returns * true if writing to the child process succeeds and the status message received back was for the correct action and * all communications with the child process regarding checkpointing were successful. Note that whether or not the * checkpointing itself was successful is not the concern of this method. This method simply cares whether it was * able to successfully communicate the results of its attempts to checkpoint. - * + * * @param action * What action is being waited on. * @param checkpointer @@ -134,7 +115,7 @@ class MultiLangProtocol { * The writing task. * @return Whether or not this operation succeeded. 
*/ - private boolean waitForStatusMessage(String action, RecordProcessorCheckpointer checkpointer, + private boolean waitForStatusMessage(String action, IRecordProcessorCheckpointer checkpointer, Future writeFuture) { boolean statusWasCorrect = waitForStatusMessage(action, checkpointer); @@ -143,108 +124,80 @@ class MultiLangProtocol { boolean writerIsStillOpen = writeFuture.get(); return statusWasCorrect && writerIsStillOpen; } catch (InterruptedException e) { - log.error("Interrupted while writing {} message for shard {}", action, initializationInput.shardId()); + log.error(String.format("Interrupted while writing %s message for shard %s", action, + initializationInput.getShardId())); return false; } catch (ExecutionException e) { - log.error("Failed to write {} message for shard {}", action, initializationInput.shardId(), e); + log.error( + String.format("Failed to write %s message for shard %s", action, initializationInput.getShardId()), + e); return false; } } /** * Waits for status message and verifies it against the expectation - * + * * @param action * What action is being waited on. * @param checkpointer * the original process records request * @return Whether or not this operation succeeded. 
*/ - boolean waitForStatusMessage(String action, RecordProcessorCheckpointer checkpointer) { - Optional statusMessage = Optional.empty(); - while (!statusMessage.isPresent()) { + private boolean waitForStatusMessage(String action, IRecordProcessorCheckpointer checkpointer) { + StatusMessage statusMessage = null; + while (statusMessage == null) { Future future = this.messageReader.getNextMessageFromSTDOUT(); - Optional message = configuration.getTimeoutInSeconds() - .map(second -> futureMethod(() -> future.get(second, TimeUnit.SECONDS), action)) - .orElse(futureMethod(future::get, action)); - - if (!message.isPresent()) { + try { + Message message = future.get(); + // Note that instanceof doubles as a check against a value being null + if (message instanceof CheckpointMessage) { + boolean checkpointWriteSucceeded = checkpoint((CheckpointMessage) message, checkpointer).get(); + if (!checkpointWriteSucceeded) { + return false; + } + } else if (message instanceof StatusMessage) { + statusMessage = (StatusMessage) message; + } + } catch (InterruptedException e) { + log.error(String.format("Interrupted while waiting for %s message for shard %s", action, + initializationInput.getShardId())); + return false; + } catch (ExecutionException e) { + log.error(String.format("Failed to get status message for %s action for shard %s", action, + initializationInput.getShardId()), e); return false; } - - Optional checkpointFailed = message.filter(m -> m instanceof CheckpointMessage ) - .map(m -> (CheckpointMessage) m) - .flatMap(m -> futureMethod(() -> checkpoint(m, checkpointer).get(), "Checkpoint")) - .map(checkpointSuccess -> !checkpointSuccess); - - if (checkpointFailed.orElse(false)) { - return false; - } - - statusMessage = message.filter(m -> m instanceof StatusMessage).map(m -> (StatusMessage) m ); } - return this.validateStatusMessage(statusMessage.get(), action); - } - - private interface FutureMethod { - T get() throws InterruptedException, TimeoutException, 
ExecutionException; - } - - private Optional futureMethod(FutureMethod fm, String action) { - try { - return Optional.of(fm.get()); - } catch (InterruptedException e) { - log.error("Interrupted while waiting for {} message for shard {}", action, - initializationInput.shardId(), e); - } catch (ExecutionException e) { - log.error("Failed to get status message for {} action for shard {}", action, - initializationInput.shardId(), e); - } catch (TimeoutException e) { - log.error("Timedout to get status message for {} action for shard {}. Terminating...", - action, - initializationInput.shardId(), - e); - haltJvm(1); - } - return Optional.empty(); - } - - /** - * This method is used to halt the JVM. Use this method with utmost caution, since this method will kill the JVM - * without calling the Shutdown hooks. - * - * @param exitStatus The exit status with which the JVM is to be halted. - */ - protected void haltJvm(int exitStatus) { - Runtime.getRuntime().halt(exitStatus); + return this.validateStatusMessage(statusMessage, action); } /** * Utility for confirming that the status message is for the provided action. - * + * * @param statusMessage The status of the child process. * @param action The action that was being waited on. * @return Whether or not this operation succeeded. 
*/ private boolean validateStatusMessage(StatusMessage statusMessage, String action) { - log.info("Received response {} from subprocess while waiting for {}" - + " while processing shard {}", statusMessage, action, initializationInput.shardId()); + log.info("Received response " + statusMessage + " from subprocess while waiting for " + action + + " while processing shard " + initializationInput.getShardId()); return !(statusMessage == null || statusMessage.getResponseFor() == null || !statusMessage.getResponseFor() .equals(action)); } /** - * Attempts to checkpoint with the provided {@link RecordProcessorCheckpointer} at the sequence number in the + * Attempts to checkpoint with the provided {@link IRecordProcessorCheckpointer} at the sequence number in the * provided {@link CheckpointMessage}. If no sequence number is provided, i.e. the sequence number is null, then - * this method will call {@link RecordProcessorCheckpointer#checkpoint()}. The method returns a future representing + * this method will call {@link IRecordProcessorCheckpointer#checkpoint()}. The method returns a future representing * the attempt to write the result of this checkpoint attempt to the child process. - * + * * @param checkpointMessage A checkpoint message. * @param checkpointer A checkpointer. * @return Whether or not this operation succeeded. 
*/ - private Future checkpoint(CheckpointMessage checkpointMessage, RecordProcessorCheckpointer checkpointer) { + private Future checkpoint(CheckpointMessage checkpointMessage, IRecordProcessorCheckpointer checkpointer) { String sequenceNumber = checkpointMessage.getSequenceNumber(); Long subSequenceNumber = checkpointMessage.getSubSequenceNumber(); try { @@ -263,7 +216,7 @@ class MultiLangProtocol { } else { String message = String.format("Was asked to checkpoint at %s but no checkpointer was provided for shard %s", - sequenceNumber, initializationInput.shardId()); + sequenceNumber, initializationInput.getShardId()); log.error(message); return this.messageWriter.writeCheckpointMessageWithError(sequenceNumber, subSequenceNumber, new InvalidStateException( @@ -276,7 +229,7 @@ class MultiLangProtocol { private String logCheckpointMessage(String sequenceNumber, Long subSequenceNumber) { return String.format("Attempting to checkpoint shard %s @ sequence number %s, and sub sequence number %s", - initializationInput.shardId(), sequenceNumber, subSequenceNumber); + initializationInput.getShardId(), sequenceNumber, subSequenceNumber); } } diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangShardRecordProcessor.java b/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangRecordProcessor.java similarity index 71% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangShardRecordProcessor.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangRecordProcessor.java index 94df3c36..9d76af54 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangShardRecordProcessor.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangRecordProcessor.java @@ -1,16 +1,16 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ package com.amazonaws.services.kinesis.multilang; @@ -20,28 +20,24 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; -import software.amazon.kinesis.lifecycle.ShutdownReason; -import software.amazon.kinesis.lifecycle.events.LeaseLostInput; -import software.amazon.kinesis.lifecycle.events.ShardEndedInput; -import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput; -import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownInput; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput; import com.fasterxml.jackson.databind.ObjectMapper; -import lombok.extern.slf4j.Slf4j; - - /** * A record processor that manages creating a child process that implements the multi language protocol and connecting * that child process's input and outputs to a {@link MultiLangProtocol} object and calling the appropriate methods on * that object when its corresponding {@link #initialize}, {@link #processRecords}, and {@link #shutdown} methods are * called. 
*/ -@Slf4j -public class MultiLangShardRecordProcessor implements ShardRecordProcessor { +public class MultiLangRecordProcessor implements IRecordProcessor { + + private static final Log LOG = LogFactory.getLog(MultiLangRecordProcessor.class); private static final int EXIT_VALUE = 1; /** Whether or not record processor initialization is successful. Defaults to false. */ @@ -64,12 +60,10 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { private MultiLangProtocol protocol; - private KinesisClientLibConfiguration configuration; - @Override public void initialize(InitializationInput initializationInput) { try { - this.shardId = initializationInput.shardId(); + this.shardId = initializationInput.getShardId(); try { this.process = startProcess(); } catch (IOException e) { @@ -88,7 +82,7 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { // Submit the error reader for execution stderrReadTask = executorService.submit(readSTDERRTask); - protocol = new MultiLangProtocol(messageReader, messageWriter, initializationInput, configuration); + protocol = new MultiLangProtocol(messageReader, messageWriter, initializationInput); if (!protocol.initialize()) { throw new RuntimeException("Failed to initialize child process"); } @@ -112,33 +106,11 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { } @Override - public void leaseLost(LeaseLostInput leaseLostInput) { - shutdown(ShutdownInput.builder().shutdownReason(ShutdownReason.LEASE_LOST).build()); - } - - @Override - public void shardEnded(ShardEndedInput shardEndedInput) { - shutdown(ShutdownInput.builder().shutdownReason(ShutdownReason.SHARD_END).checkpointer(shardEndedInput.checkpointer()).build()); - } - - @Override - public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) { - log.info("Shutdown is requested."); - if (!initialized) { - log.info("Record processor was not initialized so no need to initiate a final 
checkpoint."); - return; - } - log.info("Requesting a checkpoint on shutdown notification."); - if (!protocol.shutdownRequested(shutdownRequestedInput.checkpointer())) { - log.error("Child process failed to complete shutdown notification."); - } - } - - void shutdown(ShutdownInput shutdownInput) { + public void shutdown(ShutdownInput shutdownInput) { // In cases where KCL loses lease for the shard after creating record processor instance but before // record processor initialize() is called, then shutdown() may be called directly before initialize(). if (!initialized) { - log.info("Record processor was not initialized and will not have a child process, " + LOG.info("Record processor was not initialized and will not have a child process, " + "so not invoking child process shutdown."); this.state = ProcessState.SHUTDOWN; return; @@ -146,13 +118,13 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { try { if (ProcessState.ACTIVE.equals(this.state)) { - if (!protocol.shutdown(shutdownInput.checkpointer(), shutdownInput.shutdownReason())) { + if (!protocol.shutdown(shutdownInput.getCheckpointer(), shutdownInput.getShutdownReason())) { throw new RuntimeException("Child process failed to shutdown"); } childProcessShutdownSequence(); } else { - log.warn("Shutdown was called but this processor is already shutdown. Not doing anything."); + LOG.warn("Shutdown was called but this processor is already shutdown. Not doing anything."); } } catch (Throwable t) { if (ProcessState.ACTIVE.equals(this.state)) { @@ -181,10 +153,10 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { * @param objectMapper * An obejct mapper. 
*/ - MultiLangShardRecordProcessor(ProcessBuilder processBuilder, ExecutorService executorService, - ObjectMapper objectMapper, KinesisClientLibConfiguration configuration) { + MultiLangRecordProcessor(ProcessBuilder processBuilder, ExecutorService executorService, + ObjectMapper objectMapper) { this(processBuilder, executorService, objectMapper, new MessageWriter(), new MessageReader(), - new DrainChildSTDERRTask(), configuration); + new DrainChildSTDERRTask()); } /** @@ -203,17 +175,14 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { * @param readSTDERRTask * Error reader to read from child process's stderr */ - MultiLangShardRecordProcessor(ProcessBuilder processBuilder, ExecutorService executorService, ObjectMapper objectMapper, - MessageWriter messageWriter, MessageReader messageReader, DrainChildSTDERRTask readSTDERRTask, - KinesisClientLibConfiguration configuration) { + MultiLangRecordProcessor(ProcessBuilder processBuilder, ExecutorService executorService, ObjectMapper objectMapper, + MessageWriter messageWriter, MessageReader messageReader, DrainChildSTDERRTask readSTDERRTask) { this.executorService = executorService; this.processBuilder = processBuilder; this.objectMapper = objectMapper; this.messageWriter = messageWriter; this.messageReader = messageReader; this.readSTDERRTask = readSTDERRTask; - this.configuration = configuration; - this.state = ProcessState.ACTIVE; } @@ -234,7 +203,7 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { messageWriter.close(); } } catch (IOException e) { - log.error("Encountered exception while trying to close output stream.", e); + LOG.error("Encountered exception while trying to close output stream.", e); } // We should drain the STDOUT and STDERR of the child process. If we don't, the child process might remain @@ -251,9 +220,9 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { * sure that it exits before we finish. 
*/ try { - log.info("Child process exited with value: {}", process.waitFor()); + LOG.info("Child process exited with value: " + process.waitFor()); } catch (InterruptedException e) { - log.error("Interrupted before process finished exiting. Attempting to kill process."); + LOG.error("Interrupted before process finished exiting. Attempting to kill process."); process.destroy(); } @@ -264,7 +233,7 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { try { inputStream.close(); } catch (IOException e) { - log.error("Encountered exception while trying to close {} stream.", name, e); + LOG.error("Encountered exception while trying to close " + name + " stream.", e); } } @@ -279,7 +248,7 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { try { future.get(); } catch (InterruptedException | ExecutionException e) { - log.error("Encountered error while {} for shard {}", whatThisFutureIsDoing, shardId, e); + LOG.error("Encountered error while " + whatThisFutureIsDoing + " for shard " + shardId, e); } } @@ -292,12 +261,12 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { */ private void stopProcessing(String message, Throwable reason) { try { - log.error(message, reason); + LOG.error(message, reason); if (!state.equals(ProcessState.SHUTDOWN)) { childProcessShutdownSequence(); } } catch (Throwable t) { - log.error("Encountered error while trying to shutdown", t); + LOG.error("Encountered error while trying to shutdown", t); } exit(); } diff --git a/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangRecordProcessorFactory.java b/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangRecordProcessorFactory.java new file mode 100644 index 00000000..e55217a6 --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/MultiLangRecordProcessorFactory.java @@ -0,0 +1,74 @@ +/* + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.multilang; + +import java.util.concurrent.ExecutorService; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory; +import com.fasterxml.jackson.databind.ObjectMapper; + +/** + * Creates {@link MultiLangRecordProcessor}'s. + */ +public class MultiLangRecordProcessorFactory implements IRecordProcessorFactory { + + private static final Log LOG = LogFactory.getLog(MultiLangRecordProcessorFactory.class); + + private static final String COMMAND_DELIMETER_REGEX = " +"; + + private final String command; + private final String[] commandArray; + + private final ObjectMapper objectMapper; + + private final ExecutorService executorService; + + /** + * @param command The command that will do processing for this factory's record processors. + * @param executorService An executor service to use while processing inputs and outputs of the child process. + */ + public MultiLangRecordProcessorFactory(String command, ExecutorService executorService) { + this(command, executorService, new ObjectMapper()); + } + + /** + * @param command The command that will do processing for this factory's record processors. + * @param executorService An executor service to use while processing inputs and outputs of the child process. 
+ * @param objectMapper An object mapper used to convert messages to json to be written to the child process + */ + public MultiLangRecordProcessorFactory(String command, ExecutorService executorService, ObjectMapper objectMapper) { + this.command = command; + this.commandArray = command.split(COMMAND_DELIMETER_REGEX); + this.executorService = executorService; + this.objectMapper = objectMapper; + } + + @Override + public IRecordProcessor createProcessor() { + LOG.debug(String.format("Creating new record processor for client executable: %s", command)); + /* + * Giving ProcessBuilder the command as an array of Strings allows users to specify command line arguments. + */ + return new MultiLangRecordProcessor(new ProcessBuilder(commandArray), executorService, this.objectMapper); + } + + String[] getCommandArray() { + return commandArray; + } +} diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/CheckpointMessage.java b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/CheckpointMessage.java similarity index 72% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/CheckpointMessage.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/messages/CheckpointMessage.java index 51159fc6..5cdc02bd 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/CheckpointMessage.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/CheckpointMessage.java @@ -1,30 +1,27 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ package com.amazonaws.services.kinesis.multilang.messages; import lombok.Getter; -import lombok.NoArgsConstructor; import lombok.Setter; -import lombok.experimental.Accessors; /** * A checkpoint message is sent by the client's subprocess to indicate to the kcl processor that it should attempt to * checkpoint. The processor sends back a checkpoint message as an acknowledgement that it attempted to checkpoint along * with an error message which corresponds to the names of exceptions that a checkpointer can throw. */ -@NoArgsConstructor @Getter @Setter public class CheckpointMessage extends Message { @@ -44,6 +41,12 @@ public class CheckpointMessage extends Message { */ private String error; + /** + * Default constructor. + */ + public CheckpointMessage() { + } + /** * Convenience constructor. * diff --git a/src/main/java/com/amazonaws/services/kinesis/multilang/messages/InitializeMessage.java b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/InitializeMessage.java new file mode 100644 index 00000000..3795e57e --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/InitializeMessage.java @@ -0,0 +1,62 @@ +/* + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.multilang.messages; + +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import lombok.Getter; +import lombok.Setter; + +/** + * An initialize message is sent to the client's subprocess to indicate that it should perform its initialization steps. + */ +@Getter +@Setter +public class InitializeMessage extends Message { + /** + * The name used for the action field in {@link Message}. + */ + public static final String ACTION = "initialize"; + + /** + * The shard id that this processor is getting initialized for. + */ + private String shardId; + private String sequenceNumber; + private Long subSequenceNumber; + + /** + * Default constructor. + */ + public InitializeMessage() { + } + + /** + * Convenience constructor. + * + * @param shardId The shard id. 
+ */ + public InitializeMessage(InitializationInput initializationInput) { + this.shardId = initializationInput.getShardId(); + if (initializationInput.getExtendedSequenceNumber() != null) { + this.sequenceNumber = initializationInput.getExtendedSequenceNumber().getSequenceNumber(); + this.subSequenceNumber = initializationInput.getExtendedSequenceNumber().getSubSequenceNumber(); + } else { + this.sequenceNumber = null; + this.subSequenceNumber = null; + } + + } + +} diff --git a/src/main/java/com/amazonaws/services/kinesis/multilang/messages/JsonFriendlyRecord.java b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/JsonFriendlyRecord.java new file mode 100644 index 00000000..600489fe --- /dev/null +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/JsonFriendlyRecord.java @@ -0,0 +1,69 @@ +/* + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.multilang.messages; + +import java.util.Date; + +import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord; +import com.amazonaws.services.kinesis.model.Record; +import com.fasterxml.jackson.annotation.JsonProperty; + +import lombok.Getter; +import lombok.Setter; + +/** + * Class for encoding Record objects to json. Needed because Records have byte buffers for their data field which causes + * problems for the json library we're using. 
+ */ +@Getter +@Setter +public class JsonFriendlyRecord { + private byte[] data; + private String partitionKey; + private String sequenceNumber; + private Date approximateArrivalTimestamp; + private Long subSequenceNumber; + + public static String ACTION = "record"; + + /** + * Default Constructor. + */ + public JsonFriendlyRecord() { + } + + /** + * Convenience constructor. + * + * @param record The record that this message will represent. + */ + public JsonFriendlyRecord(Record record) { + this.data = record.getData() == null ? null : record.getData().array(); + this.partitionKey = record.getPartitionKey(); + this.sequenceNumber = record.getSequenceNumber(); + this.approximateArrivalTimestamp = record.getApproximateArrivalTimestamp(); + if (record instanceof UserRecord) { + this.subSequenceNumber = ((UserRecord) record).getSubSequenceNumber(); + } else { + this.subSequenceNumber = null; + } + } + + @JsonProperty + public String getAction() { + return ACTION; + } + +} diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/Message.java b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/Message.java similarity index 64% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/Message.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/messages/Message.java index 3c312b0b..766cdac0 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/Message.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/Message.java @@ -1,36 +1,33 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ package com.amazonaws.services.kinesis.multilang.messages; import com.fasterxml.jackson.annotation.JsonSubTypes; -import com.fasterxml.jackson.annotation.JsonSubTypes.Type; import com.fasterxml.jackson.annotation.JsonTypeInfo; +import com.fasterxml.jackson.annotation.JsonSubTypes.Type; import com.fasterxml.jackson.databind.ObjectMapper; /** * Abstract class for all messages that are sent to the client's process. 
*/ @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "action") -@JsonSubTypes({ - @Type(value = CheckpointMessage.class, name = CheckpointMessage.ACTION), +@JsonSubTypes({ @Type(value = CheckpointMessage.class, name = CheckpointMessage.ACTION), @Type(value = InitializeMessage.class, name = InitializeMessage.ACTION), @Type(value = ProcessRecordsMessage.class, name = ProcessRecordsMessage.ACTION), @Type(value = ShutdownMessage.class, name = ShutdownMessage.ACTION), - @Type(value = StatusMessage.class, name = StatusMessage.ACTION), - @Type(value = ShutdownRequestedMessage.class, name = ShutdownRequestedMessage.ACTION), -}) + @Type(value = StatusMessage.class, name = StatusMessage.ACTION), }) public abstract class Message { private ObjectMapper mapper = new ObjectMapper();; diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ProcessRecordsMessage.java b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ProcessRecordsMessage.java similarity index 53% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ProcessRecordsMessage.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/messages/ProcessRecordsMessage.java index e63672ff..9e382b93 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ProcessRecordsMessage.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ProcessRecordsMessage.java @@ -1,26 +1,26 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ package com.amazonaws.services.kinesis.multilang.messages; import java.util.ArrayList; import java.util.List; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; +import com.amazonaws.services.kinesis.model.Record; import lombok.Getter; import lombok.Setter; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.retrieval.KinesisClientRecord; /** * A message to indicate to the client's process that it should process a list of records. 
@@ -52,10 +52,10 @@ public class ProcessRecordsMessage extends Message { * the process records input to be sent to the child */ public ProcessRecordsMessage(ProcessRecordsInput processRecordsInput) { - this.millisBehindLatest = processRecordsInput.millisBehindLatest(); - List recordMessages = new ArrayList<>(); - for (KinesisClientRecord record : processRecordsInput.records()) { - recordMessages.add(JsonFriendlyRecord.fromKinesisClientRecord(record)); + this.millisBehindLatest = processRecordsInput.getMillisBehindLatest(); + List recordMessages = new ArrayList(); + for (Record record : processRecordsInput.getRecords()) { + recordMessages.add(new JsonFriendlyRecord(record)); } this.setRecords(recordMessages); } diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ShutdownMessage.java b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ShutdownMessage.java similarity index 57% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ShutdownMessage.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/messages/ShutdownMessage.java index b2b49e3c..82ed5458 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ShutdownMessage.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/ShutdownMessage.java @@ -14,18 +14,11 @@ */ package com.amazonaws.services.kinesis.multilang.messages; -import lombok.Getter; -import lombok.NoArgsConstructor; -import lombok.Setter; -import lombok.experimental.Accessors; -import software.amazon.kinesis.lifecycle.ShutdownReason; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason; /** * A message to indicate to the client's process that it should shutdown and then terminate. 
*/ -@NoArgsConstructor -@Getter -@Setter public class ShutdownMessage extends Message { /** * The name used for the action field in {@link Message}. @@ -33,13 +26,40 @@ public class ShutdownMessage extends Message { public static final String ACTION = "shutdown"; /** - * The reason for shutdown, e.g. SHARD_END or LEASE_LOST + * The reason for shutdown, e.g. TERMINATE or ZOMBIE */ private String reason; - public ShutdownMessage(final ShutdownReason reason) { - if (reason != null) { - this.reason = String.valueOf(reason); + /** + * Default constructor. + */ + public ShutdownMessage() { + } + + /** + * Convenience constructor. + * + * @param reason The reason. + */ + public ShutdownMessage(ShutdownReason reason) { + if (reason == null) { + this.setReason(null); + } else { + this.setReason(String.valueOf(reason)); } } + + /** + * @return reason The reason. + */ + public String getReason() { + return reason; + } + + /** + * @param reason The reason. + */ + public void setReason(String reason) { + this.reason = reason; + } } diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/StatusMessage.java b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/StatusMessage.java similarity index 64% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/StatusMessage.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/messages/StatusMessage.java index 921cca1b..5ea5aa75 100644 --- a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/messages/StatusMessage.java +++ b/src/main/java/com/amazonaws/services/kinesis/multilang/messages/StatusMessage.java @@ -14,19 +14,9 @@ */ package com.amazonaws.services.kinesis.multilang.messages; -import lombok.AllArgsConstructor; -import lombok.Getter; -import lombok.NoArgsConstructor; -import lombok.Setter; -import lombok.experimental.Accessors; - /** * A message sent by the 
client's process to indicate to the record processor that it completed a particular action. */ -@NoArgsConstructor -@AllArgsConstructor -@Getter -@Setter public class StatusMessage extends Message { /** * The name used for the action field in {@link Message}. @@ -37,4 +27,35 @@ public class StatusMessage extends Message { * The name of the most recently received action. */ private String responseFor; + + /** + * Default constructor. + */ + public StatusMessage() { + } + + /** + * Convenience constructor. + * + * @param responseFor The response for. + */ + public StatusMessage(String responseFor) { + this.setResponseFor(responseFor); + } + + /** + * + * @return The response for. + */ + public String getResponseFor() { + return responseFor; + } + + /** + * + * @param responseFor The response for. + */ + public void setResponseFor(String responseFor) { + this.responseFor = responseFor; + } } diff --git a/amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/package-info.java b/src/main/java/com/amazonaws/services/kinesis/multilang/package-info.java similarity index 100% rename from amazon-kinesis-client-multilang/src/main/java/com/amazonaws/services/kinesis/multilang/package-info.java rename to src/main/java/com/amazonaws/services/kinesis/multilang/package-info.java diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/config/AWSCredentialsProviderPropertyValueDecoderTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/config/AWSCredentialsProviderPropertyValueDecoderTest.java new file mode 100644 index 00000000..cddd837a --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/config/AWSCredentialsProviderPropertyValueDecoderTest.java @@ -0,0 +1,115 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.config; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.BasicAWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSCredentialsProviderChain; +import com.amazonaws.services.kinesis.clientlibrary.config.AWSCredentialsProviderPropertyValueDecoder; + +public class AWSCredentialsProviderPropertyValueDecoderTest { + + private static final String TEST_ACCESS_KEY_ID = "123"; + private static final String TEST_SECRET_KEY = "456"; + + private String credentialName1 = + "com.amazonaws.services.kinesis.clientlibrary.config.AWSCredentialsProviderPropertyValueDecoderTest$AlwaysSucceedCredentialsProvider"; + private String credentialName2 = + "com.amazonaws.services.kinesis.clientlibrary.config.AWSCredentialsProviderPropertyValueDecoderTest$ConstructorCredentialsProvider"; + private AWSCredentialsProviderPropertyValueDecoder decoder = new AWSCredentialsProviderPropertyValueDecoder(); + + @Test + public void testSingleProvider() { + AWSCredentialsProvider provider = decoder.decodeValue(credentialName1); + assertEquals(provider.getClass(), AWSCredentialsProviderChain.class); + assertEquals(provider.getCredentials().getAWSAccessKeyId(), TEST_ACCESS_KEY_ID); + assertEquals(provider.getCredentials().getAWSSecretKey(), TEST_SECRET_KEY); + } + + @Test + public void testTwoProviders() { + AWSCredentialsProvider provider = decoder.decodeValue(credentialName1 + "," + credentialName1); + assertEquals(provider.getClass(), 
AWSCredentialsProviderChain.class); + assertEquals(provider.getCredentials().getAWSAccessKeyId(), TEST_ACCESS_KEY_ID); + assertEquals(provider.getCredentials().getAWSSecretKey(), TEST_SECRET_KEY); + } + + @Test + public void testProfileProviderWithOneArg() { + AWSCredentialsProvider provider = decoder.decodeValue(credentialName2 + "|arg"); + assertEquals(provider.getClass(), AWSCredentialsProviderChain.class); + assertEquals(provider.getCredentials().getAWSAccessKeyId(), "arg"); + assertEquals(provider.getCredentials().getAWSSecretKey(), "blank"); + } + + @Test + public void testProfileProviderWithTwoArgs() { + AWSCredentialsProvider provider = decoder.decodeValue(credentialName2 + + "|arg1|arg2"); + assertEquals(provider.getClass(), AWSCredentialsProviderChain.class); + assertEquals(provider.getCredentials().getAWSAccessKeyId(), "arg1"); + assertEquals(provider.getCredentials().getAWSSecretKey(), "arg2"); + } + + /** + * This credentials provider will always succeed + */ + public static class AlwaysSucceedCredentialsProvider implements AWSCredentialsProvider { + + @Override + public AWSCredentials getCredentials() { + return new BasicAWSCredentials(TEST_ACCESS_KEY_ID, TEST_SECRET_KEY); + } + + @Override + public void refresh() { + } + + } + + /** + * This credentials provider needs a constructor call to instantiate it + */ + public static class ConstructorCredentialsProvider implements AWSCredentialsProvider { + + private String arg1; + private String arg2; + + public ConstructorCredentialsProvider(String arg1) { + this.arg1 = arg1; + this.arg2 = "blank"; + } + + public ConstructorCredentialsProvider(String arg1, String arg2) { + this.arg1 = arg1; + this.arg2 = arg2; + } + + @Override + public AWSCredentials getCredentials() { + return new BasicAWSCredentials(arg1, arg2); + } + + @Override + public void refresh() { + } + + } +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/config/KinesisClientLibConfiguratorTest.java 
b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/config/KinesisClientLibConfiguratorTest.java new file mode 100644 index 00000000..cbdd0a2d --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/config/KinesisClientLibConfiguratorTest.java @@ -0,0 +1,521 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.config; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.ByteArrayInputStream; +import java.io.InputStream; +import java.util.Set; + +import org.apache.commons.lang.StringUtils; +import org.junit.Test; + +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; +import com.google.common.collect.ImmutableSet; + +public class KinesisClientLibConfiguratorTest { + + private String credentialName1 = + "com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProvider"; + private String credentialName2 = + 
"com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfiguratorTest$AlwaysFailCredentialsProvider"; + private String credentialNameKinesis = + "com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderKinesis"; + private String credentialNameDynamoDB = + "com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderDynamoDB"; + private String credentialNameCloudWatch = + "com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfiguratorTest$AlwaysSucceedCredentialsProviderCloudWatch"; + private KinesisClientLibConfigurator configurator = new KinesisClientLibConfigurator(); + + @Test + public void testWithBasicSetup() { + KinesisClientLibConfiguration config = + getConfiguration(StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123" + }, '\n')); + assertEquals(config.getApplicationName(), "b"); + assertEquals(config.getStreamName(), "a"); + assertEquals(config.getWorkerIdentifier(), "123"); + } + + @Test + public void testWithLongVariables() { + KinesisClientLibConfiguration config = + getConfiguration(StringUtils.join(new String[] { + "applicationName = app", + "streamName = 123", + "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, + "workerId = 123", + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, '\n')); + + assertEquals(config.getApplicationName(), "app"); + assertEquals(config.getStreamName(), "123"); + assertEquals(config.getWorkerIdentifier(), "123"); + assertEquals(config.getFailoverTimeMillis(), 100); + assertEquals(config.getShardSyncIntervalMillis(), 500); + } + + @Test + public void testWithUnsupportedClientConfigurationVariables() { + KinesisClientLibConfiguration config = + getConfiguration(StringUtils.join(new String[] { + "AWSCredentialsProvider = " + credentialName1 + ", " 
+ credentialName2, + "workerId = id", + "kinesisClientConfig = {}", + "streamName = stream", + "applicationName = b" + }, '\n')); + + assertEquals(config.getApplicationName(), "b"); + assertEquals(config.getStreamName(), "stream"); + assertEquals(config.getWorkerIdentifier(), "id"); + // by setting the configuration there is no effect on kinesisClientConfiguration variable. + } + + @Test + public void testWithIntVariables() { + KinesisClientLibConfiguration config = + getConfiguration(StringUtils.join(new String[] { + "streamName = kinesis", + "AWSCredentialsProvider = " + credentialName2 + ", " + credentialName1, + "workerId = w123", + "maxRecords = 10", + "metricsMaxQueueSize = 20", + "applicationName = kinesis" + }, '\n')); + + assertEquals(config.getApplicationName(), "kinesis"); + assertEquals(config.getStreamName(), "kinesis"); + assertEquals(config.getWorkerIdentifier(), "w123"); + assertEquals(config.getMaxRecords(), 10); + assertEquals(config.getMetricsMaxQueueSize(), 20); + } + + @Test + public void testWithBooleanVariables() { + KinesisClientLibConfiguration config = + getConfiguration(StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD, " + credentialName1, + "workerId = 0", + "cleanupLeasesUponShardCompletion = false", + "validateSequenceNumberBeforeCheckpointing = true" + }, '\n')); + + assertEquals(config.getApplicationName(), "b"); + assertEquals(config.getStreamName(), "a"); + assertEquals(config.getWorkerIdentifier(), "0"); + assertFalse(config.shouldCleanupLeasesUponShardCompletion()); + assertTrue(config.shouldValidateSequenceNumberBeforeCheckpointing()); + } + + @Test + public void testWithStringVariables() { + KinesisClientLibConfiguration config = + getConfiguration(StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 1", + "kinesisEndpoint = https://kinesis", + "metricsLevel = SUMMARY" + }, 
'\n')); + + assertEquals(config.getWorkerIdentifier(), "1"); + assertEquals(config.getKinesisEndpoint(), "https://kinesis"); + assertEquals(config.getMetricsLevel(), MetricsLevel.SUMMARY); + } + + @Test + public void testWithSetVariables() { + KinesisClientLibConfiguration config = + getConfiguration(StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 1", + "metricsEnabledDimensions = ShardId, WorkerIdentifier" + }, '\n')); + + Set expectedMetricsEnabledDimensions = ImmutableSet.builder().add( + "ShardId", "WorkerIdentifier").addAll( + KinesisClientLibConfiguration.METRICS_ALWAYS_ENABLED_DIMENSIONS).build(); + assertEquals(config.getMetricsEnabledDimensions(), expectedMetricsEnabledDimensions); + } + + @Test + public void testWithInitialPositionInStreamVariables() { + KinesisClientLibConfiguration config = + getConfiguration(StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = TriM_Horizon" + }, '\n')); + + assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON); + } + + @Test + public void testSkippingNonKCLVariables() { + KinesisClientLibConfiguration config = + getConfiguration(StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = TriM_Horizon", + "abc = 1" + }, '\n')); + + assertEquals(config.getApplicationName(), "b"); + assertEquals(config.getStreamName(), "a"); + assertEquals(config.getWorkerIdentifier(), "123"); + assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON); + } + + @Test + public void testWithInvalidIntValue() { + String test = StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + 
credentialName1, + "workerId = 123", + "failoverTimeMillis = 100nf" + }, '\n'); + InputStream input = new ByteArrayInputStream(test.getBytes()); + + try { + configurator.getConfiguration(input); + } catch (Exception e) { + fail("Don't expect to fail on invalid variable value"); + } + } + + @Test + public void testWithNegativeIntValue() { + String test = StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = -12" + }, '\n'); + InputStream input = new ByteArrayInputStream(test.getBytes()); + + // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement + try { + configurator.getConfiguration(input); + } catch (Exception e) { + fail("Don't expect to fail on invalid variable value"); + } + } + + @Test + public void testWithMissingCredentialsProvider() { + String test = StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "workerId = 123", + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, '\n'); + InputStream input = new ByteArrayInputStream(test.getBytes()); + + // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement + try { + configurator.getConfiguration(input); + fail("expect failure with no credentials provider variables"); + } catch (Exception e) { + // succeed + } + } + + @Test + public void testWithMissingWorkerId() { + String test = StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, '\n'); + InputStream input = new ByteArrayInputStream(test.getBytes()); + KinesisClientLibConfiguration config = configurator.getConfiguration(input); + + // if workerId is not provided, configurator should assign one for it automatically + 
assertNotNull(config.getWorkerIdentifier()); + assertFalse(config.getWorkerIdentifier().isEmpty()); + } + + @Test + public void testWithMissingStreamName() { + String test = StringUtils.join(new String[] { + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = 100" + }, '\n'); + InputStream input = new ByteArrayInputStream(test.getBytes()); + + // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement + try { + configurator.getConfiguration(input); + fail("expect failure with no stream name variables"); + } catch (Exception e) { + // succeed + } + } + + @Test + public void testWithMissingApplicationName() { + String test = StringUtils.join(new String[] { + "streamName = a", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = 100" + }, '\n'); + InputStream input = new ByteArrayInputStream(test.getBytes()); + + // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement + try { + configurator.getConfiguration(input); + fail("expect failure with no application variables"); + } catch (Exception e) { + // succeed + } + } + + @Test + public void testWithAWSCredentialsFailed() { + String test = StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName2, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, '\n'); + InputStream input = new ByteArrayInputStream(test.getBytes()); + + // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement + try { + KinesisClientLibConfiguration config = configurator.getConfiguration(input); + config.getKinesisCredentialsProvider().getCredentials(); + fail("expect failure with wrong credentials provider"); + } catch (Exception e) { + // succeed + } + } + + @Test + public void 
testWithDifferentAWSCredentialsForDynamoDBAndCloudWatch() { + String test = StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialNameKinesis, + "AWSCredentialsProviderDynamoDB = " + credentialNameDynamoDB, + "AWSCredentialsProviderCloudWatch = " + credentialNameCloudWatch, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, '\n'); + InputStream input = new ByteArrayInputStream(test.getBytes()); + + // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement + KinesisClientLibConfiguration config = configurator.getConfiguration(input); + try { + config.getKinesisCredentialsProvider().getCredentials(); + } catch (Exception e) { + fail("Kinesis credential providers should not fail."); + } + try { + config.getDynamoDBCredentialsProvider().getCredentials(); + } catch (Exception e) { + fail("DynamoDB credential providers should not fail."); + } + try { + config.getCloudWatchCredentialsProvider().getCredentials(); + } catch (Exception e) { + fail("CloudWatch credential providers should not fail."); + } + } + + @Test + public void testWithDifferentAWSCredentialsForDynamoDBAndCloudWatchFailed() { + String test = StringUtils.join(new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialNameKinesis, + "AWSCredentialsProviderDynamoDB = " + credentialName1, + "AWSCredentialsProviderCloudWatch = " + credentialName1, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, '\n'); + InputStream input = new ByteArrayInputStream(test.getBytes()); + + // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement + + // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement + KinesisClientLibConfiguration config = configurator.getConfiguration(input); + try { + 
config.getKinesisCredentialsProvider().getCredentials(); + } catch (Exception e) { + fail("Kinesis credential providers should not fail."); + } + try { + config.getDynamoDBCredentialsProvider().getCredentials(); + fail("DynamoDB credential providers should fail."); + } catch (Exception e) { + // succeed + } + try { + config.getCloudWatchCredentialsProvider().getCredentials(); + fail("CloudWatch credential providers should fail."); + } catch (Exception e) { + // succeed + } + } + + /** + * This credentials provider will always succeed + */ + public static class AlwaysSucceedCredentialsProvider implements AWSCredentialsProvider { + + @Override + public AWSCredentials getCredentials() { + return null; + } + + @Override + public void refresh() { + } + + } + + /** + * This credentials provider will always succeed + */ + public static class AlwaysSucceedCredentialsProviderKinesis implements AWSCredentialsProvider { + + @Override + public AWSCredentials getCredentials() { + return new AWSCredentials() { + @Override + public String getAWSAccessKeyId() { + return ""; + } + + @Override + public String getAWSSecretKey() { + return ""; + } + }; + } + + @Override + public void refresh() { + } + + } + + /** + * This credentials provider will always succeed + */ + public static class AlwaysSucceedCredentialsProviderDynamoDB implements AWSCredentialsProvider { + + @Override + public AWSCredentials getCredentials() { + return new AWSCredentials() { + @Override + public String getAWSAccessKeyId() { + return ""; + } + + @Override + public String getAWSSecretKey() { + return ""; + } + }; + } + + @Override + public void refresh() { + } + + } + + /** + * This credentials provider will always succeed + */ + public static class AlwaysSucceedCredentialsProviderCloudWatch implements AWSCredentialsProvider { + + @Override + public AWSCredentials getCredentials() { + return new AWSCredentials() { + @Override + public String getAWSAccessKeyId() { + return ""; + } + + @Override + public String 
getAWSSecretKey() { + return ""; + } + }; + } + + @Override + public void refresh() { + } + + } + + /** + * This credentials provider will always fail + */ + public static class AlwaysFailCredentialsProvider implements AWSCredentialsProvider { + + @Override + public AWSCredentials getCredentials() { + throw new IllegalArgumentException(); + } + + @Override + public void refresh() { + } + + } + + private KinesisClientLibConfiguration getConfiguration(String configString) { + InputStream input = new ByteArrayInputStream(configString.getBytes()); + KinesisClientLibConfiguration config = configurator.getConfiguration(input); + return config; + } +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/CheckpointImplTestBase.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/CheckpointImplTestBase.java new file mode 100644 index 00000000..6e93a296 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/CheckpointImplTestBase.java @@ -0,0 +1,112 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory; + +/** + * Base class for unit testing checkpoint implementations. + * This class has tests common to InMemory and FileBased implementations. + */ +public abstract class CheckpointImplTestBase { + + protected final String startingSequenceNumber = "0001000"; + protected final String testConcurrencyToken = "testToken"; + protected ICheckpoint checkpoint; + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + MetricsHelper.startScope(new NullMetricsFactory(), "CheckpointImplTestBase"); + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + } + + /** + * Constructor. 
+ */ + public CheckpointImplTestBase() { + super(); + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + } + + @Test + public final void testInitialSetCheckpoint() throws Exception { + String sequenceNumber = "1"; + String shardId = "myShardId"; + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); + checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); + ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId); + Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint); + } + + @Test + public final void testAdvancingSetCheckpoint() throws Exception { + String shardId = "myShardId"; + for (Integer i = 0; i < 10; i++) { + String sequenceNumber = i.toString(); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); + checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); + ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId); + Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint); + } + } + + /** + * Test method to verify setCheckpoint and getCheckpoint methods. 
+ * + * @throws Exception + */ + @Test + public final void testSetAndGetCheckpoint() throws Exception { + String checkpointValue = "12345"; + String shardId = "testShardId-1"; + String concurrencyToken = "token-1"; + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(checkpointValue); + checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken); + Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId)); + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImpl.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImpl.java new file mode 100644 index 00000000..ad761ef5 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImpl.java @@ -0,0 +1,123 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; + +/** + * Everything is stored in memory and there is no fault-tolerance. + */ +public class InMemoryCheckpointImpl implements ICheckpoint { + + private static final Log LOG = LogFactory.getLog(InMemoryCheckpointImpl.class); + + private Map checkpoints = new HashMap<>(); + private Map flushpoints = new HashMap<>(); + private final String startingSequenceNumber; + + /** + * Constructor. + * + * @param startingSequenceNumber Initial checkpoint will be set to this sequenceNumber (for all shards). + */ + public InMemoryCheckpointImpl(String startingSequenceNumber) { + super(); + this.startingSequenceNumber = startingSequenceNumber; + } + + ExtendedSequenceNumber getLastCheckpoint(String shardId) { + ExtendedSequenceNumber checkpoint = checkpoints.get(shardId); + if (checkpoint == null) { + checkpoint = new ExtendedSequenceNumber(startingSequenceNumber); + } + LOG.debug("getLastCheckpoint shardId: " + shardId + " checkpoint: " + checkpoint); + return checkpoint; + } + + ExtendedSequenceNumber getLastFlushpoint(String shardId) { + ExtendedSequenceNumber flushpoint = flushpoints.get(shardId); + LOG.debug("getLastFlushpoint shardId: " + shardId + " flushpoint: " + flushpoint); + return flushpoint; + } + + void resetCheckpointToLastFlushpoint(String shardId) throws KinesisClientLibException { + ExtendedSequenceNumber currentFlushpoint = flushpoints.get(shardId); + if (currentFlushpoint == null) { + checkpoints.put(shardId, new ExtendedSequenceNumber(startingSequenceNumber)); + } else { + checkpoints.put(shardId, 
currentFlushpoint); + } + } + + ExtendedSequenceNumber getGreatestPrimaryFlushpoint(String shardId) throws KinesisClientLibException { + verifyNotEmpty(shardId, "shardId must not be null."); + ExtendedSequenceNumber greatestFlushpoint = getLastFlushpoint(shardId); + if (LOG.isDebugEnabled()) { + LOG.debug("getGreatestPrimaryFlushpoint value for shardId " + shardId + " = " + greatestFlushpoint); + } + return greatestFlushpoint; + }; + + ExtendedSequenceNumber getRestartPoint(String shardId) { + verifyNotEmpty(shardId, "shardId must not be null."); + ExtendedSequenceNumber restartPoint = getLastCheckpoint(shardId); + if (LOG.isDebugEnabled()) { + LOG.debug("getRestartPoint value for shardId " + shardId + " = " + restartPoint); + } + return restartPoint; + } + + /** + * {@inheritDoc} + */ + @Override + public void setCheckpoint(String shardId, ExtendedSequenceNumber checkpointValue, String concurrencyToken) + throws KinesisClientLibException { + checkpoints.put(shardId, checkpointValue); + flushpoints.put(shardId, checkpointValue); + + if (LOG.isDebugEnabled()) { + LOG.debug("shardId: " + shardId + " checkpoint: " + checkpointValue); + } + + } + + /** + * {@inheritDoc} + */ + @Override + public ExtendedSequenceNumber getCheckpoint(String shardId) throws KinesisClientLibException { + ExtendedSequenceNumber checkpoint = flushpoints.get(shardId); + LOG.debug("getCheckpoint shardId: " + shardId + " checkpoint: " + checkpoint); + return checkpoint; + } + + /** Check that string is neither null nor empty. 
+ */ + static void verifyNotEmpty(String string, String message) { + if ((string == null) || (string.isEmpty())) { + throw new IllegalArgumentException(message); + } + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImplTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImplTest.java new file mode 100644 index 00000000..04408b36 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/checkpoint/InMemoryCheckpointImplTest.java @@ -0,0 +1,38 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint; + +import org.junit.Before; + + +/** + * Test the InMemoryCheckpointImplTest class. + */ +public class InMemoryCheckpointImplTest extends CheckpointImplTestBase { + /** + * Constructor. 
+ */ + public InMemoryCheckpointImplTest() { + super(); + } + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + checkpoint = new InMemoryCheckpointImpl(startingSequenceNumber); + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/BlockOnParentShardTaskTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/BlockOnParentShardTaskTest.java new file mode 100644 index 00000000..a42e0683 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/BlockOnParentShardTaskTest.java @@ -0,0 +1,221 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; + +/** + * + */ +public class BlockOnParentShardTaskTest { + + private static final Log LOG = LogFactory.getLog(BlockOnParentShardTaskTest.class); + private final long backoffTimeInMillis = 50L; + private final String shardId = "shardId-97"; + private final String concurrencyToken = "testToken"; + private final List emptyParentShardIds = new ArrayList(); + ShardInfo defaultShardInfo = new ShardInfo(shardId, concurrencyToken, emptyParentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + } + + /** + * Test call() when there are no parent shards. 
+ * @throws ProvisionedThroughputException + * @throws InvalidStateException + * @throws DependencyException + */ + @Test + public final void testCallNoParents() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + ILeaseManager leaseManager = mock(ILeaseManager.class); + when(leaseManager.getLease(shardId)).thenReturn(null); + + BlockOnParentShardTask task = new BlockOnParentShardTask(defaultShardInfo, leaseManager, backoffTimeInMillis); + TaskResult result = task.call(); + Assert.assertNull(result.getException()); + } + + /** + * Test call() when there are 1-2 parent shards that have been fully processed. + * @throws ProvisionedThroughputException + * @throws InvalidStateException + * @throws DependencyException + */ + @Test + public final void testCallWhenParentsHaveFinished() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + + ShardInfo shardInfo = null; + BlockOnParentShardTask task = null; + String parent1ShardId = "shardId-1"; + String parent2ShardId = "shardId-2"; + List parentShardIds = new ArrayList<>(); + TaskResult result = null; + + KinesisClientLease parent1Lease = new KinesisClientLease(); + parent1Lease.setCheckpoint(ExtendedSequenceNumber.SHARD_END); + KinesisClientLease parent2Lease = new KinesisClientLease(); + parent2Lease.setCheckpoint(ExtendedSequenceNumber.SHARD_END); + + ILeaseManager leaseManager = mock(ILeaseManager.class); + when(leaseManager.getLease(parent1ShardId)).thenReturn(parent1Lease); + when(leaseManager.getLease(parent2ShardId)).thenReturn(parent2Lease); + + // test single parent + parentShardIds.add(parent1ShardId); + shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); + task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis); + result = task.call(); + Assert.assertNull(result.getException()); + + // test two parents + parentShardIds.add(parent2ShardId); + shardInfo = new 
ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); + task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis); + result = task.call(); + Assert.assertNull(result.getException()); + } + + /** + * Test call() when there are 1-2 parent shards that have NOT been fully processed. + * @throws ProvisionedThroughputException + * @throws InvalidStateException + * @throws DependencyException + */ + @Test + public final void testCallWhenParentsHaveNotFinished() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + + ShardInfo shardInfo = null; + BlockOnParentShardTask task = null; + String parent1ShardId = "shardId-1"; + String parent2ShardId = "shardId-2"; + List parentShardIds = new ArrayList<>(); + TaskResult result = null; + + KinesisClientLease parent1Lease = new KinesisClientLease(); + parent1Lease.setCheckpoint(ExtendedSequenceNumber.LATEST); + KinesisClientLease parent2Lease = new KinesisClientLease(); + // mock a sequence number checkpoint + parent2Lease.setCheckpoint(new ExtendedSequenceNumber("98182584034")); + + ILeaseManager leaseManager = mock(ILeaseManager.class); + when(leaseManager.getLease(parent1ShardId)).thenReturn(parent1Lease); + when(leaseManager.getLease(parent2ShardId)).thenReturn(parent2Lease); + + // test single parent + parentShardIds.add(parent1ShardId); + shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); + task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis); + result = task.call(); + Assert.assertNotNull(result.getException()); + + // test two parents + parentShardIds.add(parent2ShardId); + shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); + task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis); + result = task.call(); + Assert.assertNotNull(result.getException()); + } + + /** + * Test 
call() with 1 parent shard before and after it is completely processed. + * @throws ProvisionedThroughputException + * @throws InvalidStateException + * @throws DependencyException + */ + @Test + public final void testCallBeforeAndAfterAParentFinishes() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + + BlockOnParentShardTask task = null; + String parentShardId = "shardId-1"; + List parentShardIds = new ArrayList<>(); + parentShardIds.add(parentShardId); + ShardInfo shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); + TaskResult result = null; + KinesisClientLease parentLease = new KinesisClientLease(); + ILeaseManager leaseManager = mock(ILeaseManager.class); + when(leaseManager.getLease(parentShardId)).thenReturn(parentLease); + + // test when parent shard has not yet been fully processed + parentLease.setCheckpoint(new ExtendedSequenceNumber("98182584034")); + task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis); + result = task.call(); + Assert.assertNotNull(result.getException()); + + // test when parent has been fully processed + parentLease.setCheckpoint(ExtendedSequenceNumber.SHARD_END); + task = new BlockOnParentShardTask(shardInfo, leaseManager, backoffTimeInMillis); + result = task.call(); + Assert.assertNull(result.getException()); + } + + /** + * Test to verify we return the right task type. 
+ */ + @Test + public final void testGetTaskType() { + BlockOnParentShardTask task = new BlockOnParentShardTask(defaultShardInfo, null, backoffTimeInMillis); + Assert.assertEquals(TaskType.BLOCK_ON_PARENT_SHARDS, task.getTaskType()); + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/CheckpointValueComparatorTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/CheckpointValueComparatorTest.java new file mode 100644 index 00000000..7abe7c52 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/CheckpointValueComparatorTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import junit.framework.Assert; + +import org.junit.Test; + +import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint; + +public class CheckpointValueComparatorTest { + @Test + public final void testCheckpointValueComparator() { + CheckpointValueComparator comparator = new CheckpointValueComparator(); + final String trimHorizon = SentinelCheckpoint.TRIM_HORIZON.toString(); + final String latest = SentinelCheckpoint.LATEST.toString(); + final String shardEnd = SentinelCheckpoint.SHARD_END.toString(); + final String lesser = "17"; + final String greater = "123"; + final String notASentinelCheckpointValue = "just-some-string"; + + String[][] equalValues = + { { trimHorizon, trimHorizon }, { latest, latest }, { greater, greater }, { shardEnd, shardEnd } }; + + // Check equal values + for (String[] pair : equalValues) { + Assert.assertTrue("Expected: " + pair[0] + " and " + pair[1] + " to be equal", + comparator.compare(pair[0], pair[1]) == 0 && comparator.compare(pair[1], pair[0]) == 0); + + } + + // Check non-equal values + String[][] lessThanValues = + { { latest, lesser }, { trimHorizon, greater }, { lesser, greater }, + { trimHorizon, shardEnd }, { latest, shardEnd }, { lesser, shardEnd }, { trimHorizon, latest } }; + for (String[] pair : lessThanValues) { + Assert.assertTrue("Expected: " + pair[0] + " < " + pair[1], + comparator.compare(pair[0], pair[1]) < 0); + Assert.assertTrue("Expected: " + pair[1] + " > " + pair[0], + comparator.compare(pair[1], pair[0]) > 0); + } + + // Check bad values + String[][] badValues = + { { null, null }, { latest, null }, { null, trimHorizon }, { null, shardEnd }, { null, lesser }, + { null, notASentinelCheckpointValue }, { latest, notASentinelCheckpointValue }, + { notASentinelCheckpointValue, trimHorizon }, { shardEnd, notASentinelCheckpointValue }, + { notASentinelCheckpointValue, lesser } }; + for (String[] pair : badValues) { 
+ try { + comparator.compare(pair[0], pair[1]); + Assert.fail("Compare should have thrown an exception when one of its parameters is not a sequence " + + "number and not a sentinel checkpoint value but didn't when comparing " + pair[0] + " and " + + pair[1]); + } catch (Exception e1) { + try { + comparator.compare(pair[1], pair[0]); + Assert.fail("Compare should have thrown an exception when one of its parameters is not a sequence " + + "number and not a sentinel checkpoint value but didn't when comparing " + pair[1] + + " and " + pair[0]); + } catch (Exception e2) { + continue; + } + } + } + } +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ConsumerStatesTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ConsumerStatesTest.java new file mode 100644 index 00000000..c0a778e9 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ConsumerStatesTest.java @@ -0,0 +1,385 @@ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import static com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerStates.ConsumerState; +import static com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerStates.ShardConsumerState; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; + +import org.hamcrest.Condition; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeDiagnosingMatcher; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import 
org.mockito.runners.MockitoJUnitRunner; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; + +@RunWith(MockitoJUnitRunner.class) +public class ConsumerStatesTest { + + @Mock + private ShardConsumer consumer; + @Mock + private StreamConfig streamConfig; + @Mock + private IRecordProcessor recordProcessor; + @Mock + private RecordProcessorCheckpointer recordProcessorCheckpointer; + @Mock + private ExecutorService executorService; + @Mock + private ShardInfo shardInfo; + @Mock + private KinesisDataFetcher dataFetcher; + @Mock + private ILeaseManager leaseManager; + @Mock + private ICheckpoint checkpoint; + @Mock + private Future future; + @Mock + private ShutdownNotification shutdownNotification; + @Mock + private IKinesisProxy kinesisProxy; + @Mock + private InitialPositionInStreamExtended initialPositionInStream; + + private long parentShardPollIntervalMillis = 0xCAFE; + private boolean cleanupLeasesOfCompletedShards = true; + private long taskBackoffTimeMillis = 0xBEEF; + private ShutdownReason reason = ShutdownReason.TERMINATE; + + @Before + public void setup() { + when(consumer.getStreamConfig()).thenReturn(streamConfig); + when(consumer.getRecordProcessor()).thenReturn(recordProcessor); + when(consumer.getRecordProcessorCheckpointer()).thenReturn(recordProcessorCheckpointer); + when(consumer.getExecutorService()).thenReturn(executorService); + when(consumer.getShardInfo()).thenReturn(shardInfo); + when(consumer.getDataFetcher()).thenReturn(dataFetcher); + when(consumer.getLeaseManager()).thenReturn(leaseManager); + 
when(consumer.getCheckpoint()).thenReturn(checkpoint); + when(consumer.getFuture()).thenReturn(future); + when(consumer.getShutdownNotification()).thenReturn(shutdownNotification); + when(consumer.getParentShardPollIntervalMillis()).thenReturn(parentShardPollIntervalMillis); + when(consumer.isCleanupLeasesOfCompletedShards()).thenReturn(cleanupLeasesOfCompletedShards); + when(consumer.getTaskBackoffTimeMillis()).thenReturn(taskBackoffTimeMillis); + when(consumer.getShutdownReason()).thenReturn(reason); + + } + + private static final Class> LEASE_MANAGER_CLASS = (Class>) (Class) ILeaseManager.class; + + @Test + public void blockOnParentStateTest() { + ConsumerState state = ShardConsumerState.WAITING_ON_PARENT_SHARDS.getConsumerState(); + + ITask task = state.createTask(consumer); + + assertThat(task, taskWith(BlockOnParentShardTask.class, ShardInfo.class, "shardInfo", equalTo(shardInfo))); + assertThat(task, + taskWith(BlockOnParentShardTask.class, LEASE_MANAGER_CLASS, "leaseManager", equalTo(leaseManager))); + assertThat(task, taskWith(BlockOnParentShardTask.class, Long.class, "parentShardPollIntervalMillis", + equalTo(parentShardPollIntervalMillis))); + + assertThat(state.successTransition(), equalTo(ShardConsumerState.INITIALIZING.getConsumerState())); + for (ShutdownReason shutdownReason : ShutdownReason.values()) { + assertThat(state.shutdownTransition(shutdownReason), + equalTo(ShardConsumerState.SHUTDOWN_COMPLETE.getConsumerState())); + } + + assertThat(state.getState(), equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)); + assertThat(state.getTaskType(), equalTo(TaskType.BLOCK_ON_PARENT_SHARDS)); + + } + + @Test + public void initializingStateTest() { + ConsumerState state = ShardConsumerState.INITIALIZING.getConsumerState(); + ITask task = state.createTask(consumer); + + assertThat(task, initTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); + assertThat(task, initTask(IRecordProcessor.class, "recordProcessor", equalTo(recordProcessor))); + 
assertThat(task, initTask(KinesisDataFetcher.class, "dataFetcher", equalTo(dataFetcher))); + assertThat(task, initTask(ICheckpoint.class, "checkpoint", equalTo(checkpoint))); + assertThat(task, initTask(RecordProcessorCheckpointer.class, "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); + assertThat(task, initTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); + assertThat(task, initTask(StreamConfig.class, "streamConfig", equalTo(streamConfig))); + + assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.getConsumerState())); + + assertThat(state.shutdownTransition(ShutdownReason.ZOMBIE), + equalTo(ShardConsumerState.SHUTTING_DOWN.getConsumerState())); + assertThat(state.shutdownTransition(ShutdownReason.TERMINATE), + equalTo(ShardConsumerState.SHUTTING_DOWN.getConsumerState())); + assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.getConsumerState())); + + assertThat(state.getState(), equalTo(ShardConsumerState.INITIALIZING)); + assertThat(state.getTaskType(), equalTo(TaskType.INITIALIZE)); + } + + @Test + public void processingStateTest() { + ConsumerState state = ShardConsumerState.PROCESSING.getConsumerState(); + ITask task = state.createTask(consumer); + + assertThat(task, procTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); + assertThat(task, procTask(IRecordProcessor.class, "recordProcessor", equalTo(recordProcessor))); + assertThat(task, procTask(RecordProcessorCheckpointer.class, "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); + assertThat(task, procTask(KinesisDataFetcher.class, "dataFetcher", equalTo(dataFetcher))); + assertThat(task, procTask(StreamConfig.class, "streamConfig", equalTo(streamConfig))); + assertThat(task, procTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); + + assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.getConsumerState())); + 
+ assertThat(state.shutdownTransition(ShutdownReason.ZOMBIE), + equalTo(ShardConsumerState.SHUTTING_DOWN.getConsumerState())); + assertThat(state.shutdownTransition(ShutdownReason.TERMINATE), + equalTo(ShardConsumerState.SHUTTING_DOWN.getConsumerState())); + assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.getConsumerState())); + + assertThat(state.getState(), equalTo(ShardConsumerState.PROCESSING)); + assertThat(state.getTaskType(), equalTo(TaskType.PROCESS)); + + } + + @Test + public void shutdownRequestState() { + ConsumerState state = ShardConsumerState.SHUTDOWN_REQUESTED.getConsumerState(); + + ITask task = state.createTask(consumer); + + assertThat(task, shutdownReqTask(IRecordProcessor.class, "recordProcessor", equalTo(recordProcessor))); + assertThat(task, shutdownReqTask(IRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", + equalTo((IRecordProcessorCheckpointer) recordProcessorCheckpointer))); + assertThat(task, shutdownReqTask(ShutdownNotification.class, "shutdownNotification", equalTo(shutdownNotification))); + + assertThat(state.successTransition(), equalTo(ConsumerStates.SHUTDOWN_REQUEST_COMPLETION_STATE)); + assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + equalTo(ConsumerStates.SHUTDOWN_REQUEST_COMPLETION_STATE)); + assertThat(state.shutdownTransition(ShutdownReason.ZOMBIE), + equalTo(ShardConsumerState.SHUTTING_DOWN.getConsumerState())); + assertThat(state.shutdownTransition(ShutdownReason.TERMINATE), + equalTo(ShardConsumerState.SHUTTING_DOWN.getConsumerState())); + + assertThat(state.getState(), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED)); + assertThat(state.getTaskType(), equalTo(TaskType.SHUTDOWN_NOTIFICATION)); + + } + + @Test + public void shutdownRequestCompleteStateTest() { + ConsumerState state = ConsumerStates.SHUTDOWN_REQUEST_COMPLETION_STATE; + + assertThat(state.createTask(consumer), nullValue()); + + assertThat(state.successTransition(), 
equalTo(state)); + + assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(state)); + assertThat(state.shutdownTransition(ShutdownReason.ZOMBIE), + equalTo(ShardConsumerState.SHUTTING_DOWN.getConsumerState())); + assertThat(state.shutdownTransition(ShutdownReason.TERMINATE), + equalTo(ShardConsumerState.SHUTTING_DOWN.getConsumerState())); + + assertThat(state.getState(), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED)); + assertThat(state.getTaskType(), equalTo(TaskType.SHUTDOWN_NOTIFICATION)); + + } + + @Test + public void shuttingDownStateTest() { + ConsumerState state = ShardConsumerState.SHUTTING_DOWN.getConsumerState(); + + when(streamConfig.getStreamProxy()).thenReturn(kinesisProxy); + when(streamConfig.getInitialPositionInStream()).thenReturn(initialPositionInStream); + + ITask task = state.createTask(consumer); + + assertThat(task, shutdownTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); + assertThat(task, shutdownTask(IRecordProcessor.class, "recordProcessor", equalTo(recordProcessor))); + assertThat(task, shutdownTask(RecordProcessorCheckpointer.class, "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); + assertThat(task, shutdownTask(ShutdownReason.class, "reason", equalTo(reason))); + assertThat(task, shutdownTask(IKinesisProxy.class, "kinesisProxy", equalTo(kinesisProxy))); + assertThat(task, shutdownTask(LEASE_MANAGER_CLASS, "leaseManager", equalTo(leaseManager))); + assertThat(task, shutdownTask(InitialPositionInStreamExtended.class, "initialPositionInStream", + equalTo(initialPositionInStream))); + assertThat(task, + shutdownTask(Boolean.class, "cleanupLeasesOfCompletedShards", equalTo(cleanupLeasesOfCompletedShards))); + assertThat(task, shutdownTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); + + assertThat(state.successTransition(), equalTo(ShardConsumerState.SHUTDOWN_COMPLETE.getConsumerState())); + + for (ShutdownReason reason : ShutdownReason.values()) { + 
assertThat(state.shutdownTransition(reason), + equalTo(ShardConsumerState.SHUTDOWN_COMPLETE.getConsumerState())); + } + + assertThat(state.getState(), equalTo(ShardConsumerState.SHUTTING_DOWN)); + assertThat(state.getTaskType(), equalTo(TaskType.SHUTDOWN)); + + } + + @Test + public void shutdownCompleteStateTest() { + ConsumerState state = ShardConsumerState.SHUTDOWN_COMPLETE.getConsumerState(); + + assertThat(state.createTask(consumer), nullValue()); + verify(consumer, times(2)).getShutdownNotification(); + verify(shutdownNotification).shutdownComplete(); + + assertThat(state.successTransition(), equalTo(state)); + for(ShutdownReason reason : ShutdownReason.values()) { + assertThat(state.shutdownTransition(reason), equalTo(state)); + } + + assertThat(state.getState(), equalTo(ShardConsumerState.SHUTDOWN_COMPLETE)); + assertThat(state.getTaskType(), equalTo(TaskType.SHUTDOWN_COMPLETE)); + } + + @Test + public void shutdownCompleteStateNullNotificationTest() { + ConsumerState state = ShardConsumerState.SHUTDOWN_COMPLETE.getConsumerState(); + + when(consumer.getShutdownNotification()).thenReturn(null); + assertThat(state.createTask(consumer), nullValue()); + + verify(consumer).getShutdownNotification(); + verify(shutdownNotification, never()).shutdownComplete(); + } + + static ReflectionPropertyMatcher shutdownTask(Class valueTypeClass, + String propertyName, Matcher matcher) { + return taskWith(ShutdownTask.class, valueTypeClass, propertyName, matcher); + } + + static ReflectionPropertyMatcher shutdownReqTask( + Class valueTypeClass, String propertyName, Matcher matcher) { + return taskWith(ShutdownNotificationTask.class, valueTypeClass, propertyName, matcher); + } + + static ReflectionPropertyMatcher procTask(Class valueTypeClass, + String propertyName, Matcher matcher) { + return taskWith(ProcessTask.class, valueTypeClass, propertyName, matcher); + } + + static ReflectionPropertyMatcher initTask(Class valueTypeClass, + String propertyName, Matcher matcher) { + 
return taskWith(InitializeTask.class, valueTypeClass, propertyName, matcher); + } + + static ReflectionPropertyMatcher taskWith(Class taskTypeClass, + Class valueTypeClass, String propertyName, Matcher matcher) { + return new ReflectionPropertyMatcher<>(taskTypeClass, valueTypeClass, matcher, propertyName); + } + + private static class ReflectionPropertyMatcher extends TypeSafeDiagnosingMatcher { + + private final Class taskTypeClass; + private final Class valueTypeClazz; + private final Matcher matcher; + private final String propertyName; + private final Field matchingField; + + private ReflectionPropertyMatcher(Class taskTypeClass, Class valueTypeClass, + Matcher matcher, String propertyName) { + this.taskTypeClass = taskTypeClass; + this.valueTypeClazz = valueTypeClass; + this.matcher = matcher; + this.propertyName = propertyName; + + Field[] fields = taskTypeClass.getDeclaredFields(); + Field matching = null; + for (Field field : fields) { + if (propertyName.equals(field.getName())) { + matching = field; + } + } + this.matchingField = matching; + + } + + @Override + protected boolean matchesSafely(ITask item, Description mismatchDescription) { + + return Condition.matched(item, mismatchDescription).and(new Condition.Step() { + @Override + public Condition apply(ITask value, Description mismatch) { + if (taskTypeClass.equals(value.getClass())) { + return Condition.matched(taskTypeClass.cast(value), mismatch); + } + mismatch.appendText("Expected task type of ").appendText(taskTypeClass.getName()) + .appendText(" but was ").appendText(value.getClass().getName()); + return Condition.notMatched(); + } + }).and(new Condition.Step() { + @Override + public Condition apply(TaskType value, Description mismatch) { + if (matchingField == null) { + mismatch.appendText("Field ").appendText(propertyName).appendText(" not present in ") + .appendText(taskTypeClass.getName()); + return Condition.notMatched(); + } + + try { + return Condition.matched(getValue(value), mismatch); 
+ } catch (RuntimeException re) { + mismatch.appendText("Failure while retrieving value for ").appendText(propertyName); + return Condition.notMatched(); + } + + } + }).and(new Condition.Step() { + @Override + public Condition apply(Object value, Description mismatch) { + if (value != null && !valueTypeClazz.isAssignableFrom(value.getClass())) { + mismatch.appendText("Expected a value of type ").appendText(valueTypeClazz.getName()) + .appendText(" but was ").appendText(value.getClass().getName()); + return Condition.notMatched(); + } + return Condition.matched(valueTypeClazz.cast(value), mismatch); + } + }).matching(matcher); + } + + @Override + public void describeTo(Description description) { + description + .appendText( + "A " + taskTypeClass.getName() + " task with the property " + propertyName + " matching ") + .appendDescriptionOf(matcher); + } + + private Object getValue(TaskType task) { + + matchingField.setAccessible(true); + try { + return matchingField.get(task); + } catch (IllegalAccessException e) { + throw new RuntimeException("Failed to retrieve the value for " + matchingField.getName()); + } + } + } + +} \ No newline at end of file diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ExceptionThrowingLeaseManager.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ExceptionThrowingLeaseManager.java new file mode 100644 index 00000000..2a07d1ed --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ExceptionThrowingLeaseManager.java @@ -0,0 +1,221 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.Arrays; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; + +/** + * Mock Lease Manager by randomly throwing Leasing Exceptions. + * + */ +class ExceptionThrowingLeaseManager implements ILeaseManager { + private static final Log LOG = LogFactory.getLog(ExceptionThrowingLeaseManager.class); + private static final Throwable EXCEPTION_MSG = new Throwable("Test Exception"); + + // Use array below to control in what situations we want to throw exceptions. + private int[] leaseManagerMethodCallingCount; + + /** + * Methods which we support (simulate exceptions). + */ + enum ExceptionThrowingLeaseManagerMethods { + CREATELEASETABLEIFNOTEXISTS(0), + LEASETABLEEXISTS(1), + WAITUNTILLEASETABLEEXISTS(2), + LISTLEASES(3), + CREATELEASEIFNOTEXISTS(4), + GETLEASE(5), + RENEWLEASE(6), + TAKELEASE(7), + EVICTLEASE(8), + DELETELEASE(9), + DELETEALL(10), + UPDATELEASE(11), + NONE(Integer.MIN_VALUE); + + private Integer index; + + ExceptionThrowingLeaseManagerMethods(Integer index) { + this.index = index; + } + + Integer getIndex() { + return this.index; + } + } + + // Define which method should throw exception and when it should throw exception. 
+ private ExceptionThrowingLeaseManagerMethods methodThrowingException = ExceptionThrowingLeaseManagerMethods.NONE; + private int timeThrowingException = Integer.MAX_VALUE; + + // The real local lease manager which would do the real implementations. + private final ILeaseManager leaseManager; + + /** + * Constructor accepts lease manager as only argument. + * + * @param leaseManager which will do the real implementations + */ + ExceptionThrowingLeaseManager(ILeaseManager leaseManager) { + this.leaseManager = leaseManager; + this.leaseManagerMethodCallingCount = new int[ExceptionThrowingLeaseManagerMethods.values().length]; + } + + /** + * Set parameters used for throwing exception. + * + * @param method which would throw exception + * @param throwingTime defines what time to throw exception + */ + void setLeaseLeaseManagerThrowingExceptionScenario(ExceptionThrowingLeaseManagerMethods method, int throwingTime) { + this.methodThrowingException = method; + this.timeThrowingException = throwingTime; + } + + /** + * Reset all parameters used for throwing exception. + */ + void clearLeaseManagerThrowingExceptionScenario() { + Arrays.fill(leaseManagerMethodCallingCount, 0); + this.methodThrowingException = ExceptionThrowingLeaseManagerMethods.NONE; + this.timeThrowingException = Integer.MAX_VALUE; + } + + // Throw exception when the conditions are satisfied : + // 1). method equals to methodThrowingException + // 2). method calling count equals to what we want + private void throwExceptions(String methodName, ExceptionThrowingLeaseManagerMethods method) + throws DependencyException { + // Increase calling count for this method + leaseManagerMethodCallingCount[method.getIndex()]++; + if (method.equals(methodThrowingException) + && (leaseManagerMethodCallingCount[method.getIndex()] == timeThrowingException)) { + // Throw Dependency Exception if all conditions are satisfied. 
+ LOG.debug("Throwing DependencyException in " + methodName); + throw new DependencyException(EXCEPTION_MSG); + } + } + + @Override + public boolean createLeaseTableIfNotExists(Long readCapacity, Long writeCapacity) + throws ProvisionedThroughputException, DependencyException { + throwExceptions("createLeaseTableIfNotExists", + ExceptionThrowingLeaseManagerMethods.CREATELEASETABLEIFNOTEXISTS); + + return leaseManager.createLeaseTableIfNotExists(readCapacity, writeCapacity); + } + + @Override + public boolean leaseTableExists() throws DependencyException { + throwExceptions("leaseTableExists", ExceptionThrowingLeaseManagerMethods.LEASETABLEEXISTS); + + return leaseManager.leaseTableExists(); + } + + @Override + public boolean waitUntilLeaseTableExists(long secondsBetweenPolls, long timeoutSeconds) throws DependencyException { + throwExceptions("waitUntilLeaseTableExists", ExceptionThrowingLeaseManagerMethods.WAITUNTILLEASETABLEEXISTS); + + return leaseManager.waitUntilLeaseTableExists(secondsBetweenPolls, timeoutSeconds); + } + + @Override + public List listLeases() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throwExceptions("listLeases", ExceptionThrowingLeaseManagerMethods.LISTLEASES); + + return leaseManager.listLeases(); + } + + @Override + public boolean createLeaseIfNotExists(KinesisClientLease lease) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throwExceptions("createLeaseIfNotExists", ExceptionThrowingLeaseManagerMethods.CREATELEASEIFNOTEXISTS); + + return leaseManager.createLeaseIfNotExists(lease); + } + + @Override + public boolean renewLease(KinesisClientLease lease) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throwExceptions("renewLease", ExceptionThrowingLeaseManagerMethods.RENEWLEASE); + + return leaseManager.renewLease(lease); + } + + @Override + public boolean takeLease(KinesisClientLease lease, String owner) + throws 
DependencyException, InvalidStateException, ProvisionedThroughputException { + throwExceptions("takeLease", ExceptionThrowingLeaseManagerMethods.TAKELEASE); + + return leaseManager.takeLease(lease, owner); + } + + @Override + public boolean evictLease(KinesisClientLease lease) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throwExceptions("evictLease", ExceptionThrowingLeaseManagerMethods.EVICTLEASE); + + return leaseManager.evictLease(lease); + } + + @Override + public void deleteLease(KinesisClientLease lease) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throwExceptions("deleteLease", ExceptionThrowingLeaseManagerMethods.DELETELEASE); + + leaseManager.deleteLease(lease); + } + + @Override + public boolean updateLease(KinesisClientLease lease) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throwExceptions("updateLease", ExceptionThrowingLeaseManagerMethods.UPDATELEASE); + + return leaseManager.updateLease(lease); + } + + @Override + public KinesisClientLease getLease(String shardId) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throwExceptions("getLease", ExceptionThrowingLeaseManagerMethods.GETLEASE); + + return leaseManager.getLease(shardId); + } + + @Override + public void deleteAll() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throwExceptions("deleteAll", ExceptionThrowingLeaseManagerMethods.DELETEALL); + + leaseManager.deleteAll(); + } + + @Override + public boolean isLeaseTableEmpty() throws DependencyException, + InvalidStateException, ProvisionedThroughputException { + return false; + } + +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/KinesisClientLibConfigurationTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfigurationTest.java similarity index 79% rename 
from amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/KinesisClientLibConfigurationTest.java rename to src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfigurationTest.java index 12b63042..4874a164 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/KinesisClientLibConfigurationTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibConfigurationTest.java @@ -1,22 +1,44 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.coordinator; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import junit.framework.Assert; + +import org.junit.Test; +import org.mockito.Mockito; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.regions.Region; +import com.amazonaws.regions.RegionUtils; +import com.amazonaws.services.cloudwatch.AmazonCloudWatchClient; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient; +import com.amazonaws.services.kinesis.AmazonKinesisClient; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; +import com.google.common.collect.ImmutableSet; + +import java.util.Date; -// TODO: Remove this test class public class KinesisClientLibConfigurationTest { - /*private static final long INVALID_LONG = 0L; + private static final long INVALID_LONG = 0L; private static final int INVALID_INT = 0; private static final long TEST_VALUE_LONG = 1000L; @@ -62,8 +84,7 @@ public class KinesisClientLibConfigurationTest { TEST_VALUE_LONG, TEST_VALUE_INT, skipCheckpointValidationValue, - null, - TEST_VALUE_LONG); + null); } @Test @@ -73,8 +94,7 @@ public class KinesisClientLibConfigurationTest { // Try each argument at one time. 
KinesisClientLibConfiguration config = null; long[] longValues = - { TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, - TEST_VALUE_LONG }; + { TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG, TEST_VALUE_LONG }; for (int i = 0; i < PARAMETER_COUNT; i++) { longValues[i] = INVALID_LONG; try { @@ -102,8 +122,7 @@ public class KinesisClientLibConfigurationTest { longValues[5], TEST_VALUE_INT, skipCheckpointValidationValue, - null, - longValues[6]); + null); } catch (IllegalArgumentException e) { System.out.println(e.getMessage()); } @@ -137,8 +156,7 @@ public class KinesisClientLibConfigurationTest { TEST_VALUE_LONG, intValues[1], skipCheckpointValidationValue, - null, - TEST_VALUE_LONG); + null); } catch (IllegalArgumentException e) { System.out.println(e.getMessage()); } @@ -245,8 +263,8 @@ public class KinesisClientLibConfigurationTest { IRecordProcessorFactory processorFactory = Mockito.mock(IRecordProcessorFactory.class); new Worker(processorFactory, kclConfig); - Mockito.verify(kclConfig, Mockito.times(5)).getRegionName(); - Mockito.verify(kclConfig, Mockito.times(2)).getKinesisEndpoint(); + Mockito.verify(kclConfig, Mockito.times(9)).getRegionName(); + Mockito.verify(kclConfig, Mockito.times(4)).getKinesisEndpoint(); kclConfig = Mockito.spy( new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0") @@ -254,11 +272,59 @@ public class KinesisClientLibConfigurationTest { new Worker(processorFactory, kclConfig); - Mockito.verify(kclConfig, Mockito.times(2)).getRegionName(); - Mockito.verify(kclConfig, Mockito.times(2)).getKinesisEndpoint(); + Mockito.verify(kclConfig, Mockito.times(3)).getRegionName(); + Mockito.verify(kclConfig, Mockito.times(3)).getKinesisEndpoint(); } + @Test + public void testKCLConfigurationWithMultiRegionWithIlligalRegionName() { + // test with illegal region name + AWSCredentialsProvider credentialsProvider = 
Mockito.mock(AWSCredentialsProvider.class); + KinesisClientLibConfiguration kclConfig = + new KinesisClientLibConfiguration("Test", "Test", credentialsProvider, "0"); + try { + kclConfig = kclConfig.withRegionName("abcd"); + Assert.fail("No expected Exception is thrown."); + } catch (IllegalArgumentException e) { + System.out.println(e.getMessage()); + } + } + + @Test + public void testKCLConfigurationWithMultiRegionWithIlligalRegionNameInFullConstructor() { + // test with illegal region name + Mockito.mock(AWSCredentialsProvider.class); + try { + new KinesisClientLibConfiguration(TEST_STRING, + TEST_STRING, + TEST_STRING, + TEST_STRING, + null, + null, + null, + null, + TEST_VALUE_LONG, + TEST_STRING, + 3, + TEST_VALUE_LONG, + false, + TEST_VALUE_LONG, + TEST_VALUE_LONG, + true, + new ClientConfiguration(), + new ClientConfiguration(), + new ClientConfiguration(), + TEST_VALUE_LONG, + TEST_VALUE_LONG, + 1, + skipCheckpointValidationValue, + "abcd"); + Assert.fail("No expected Exception is thrown."); + } catch(IllegalArgumentException e) { + System.out.println(e.getMessage()); + } + } @Test public void testKCLConfigurationMetricsDefaults() { @@ -336,14 +402,4 @@ public class KinesisClientLibConfigurationTest { fail("Should not have thrown"); } } - - @Test - public void testKCLConfigurationWithIgnoreUnexpectedChildShards() { - KinesisClientLibConfiguration config = - new KinesisClientLibConfiguration("TestApplication", "TestStream", null, "TestWorker"); - // By default, unexpected child shards should not be ignored. 
- assertFalse(config.shouldIgnoreUnexpectedChildShards()); - config = config.withIgnoreUnexpectedChildShards(true); - assertTrue(config.shouldIgnoreUnexpectedChildShards()); - }*/ } diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorIntegrationTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorIntegrationTest.java new file mode 100644 index 00000000..00c1310d --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorIntegrationTest.java @@ -0,0 +1,253 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.Callable; + +import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; +import com.amazonaws.auth.SystemPropertiesCredentialsProvider; +import junit.framework.Assert; + +import org.junit.Before; +import org.junit.Test; + + +import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.LeasingException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager; +import com.amazonaws.services.kinesis.leases.impl.Lease; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseRenewer; + +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.MatcherAssert.assertThat; + +public class KinesisClientLibLeaseCoordinatorIntegrationTest { + + private static KinesisClientLeaseManager leaseManager; + private KinesisClientLibLeaseCoordinator coordinator; + private static final String TABLE_NAME = KinesisClientLibLeaseCoordinatorIntegrationTest.class.getSimpleName(); + private static final String WORKER_ID = UUID.randomUUID().toString(); + private final String leaseKey = "shd-1"; + + + @Before + public void setUp() throws ProvisionedThroughputException, DependencyException, InvalidStateException { + final boolean useConsistentReads = true; + if (leaseManager == null) { + AmazonDynamoDBClient ddb = new 
AmazonDynamoDBClient(new DefaultAWSCredentialsProviderChain()); + leaseManager = + new KinesisClientLeaseManager(TABLE_NAME, ddb, useConsistentReads); + } + leaseManager.createLeaseTableIfNotExists(10L, 10L); + leaseManager.deleteAll(); + coordinator = new KinesisClientLibLeaseCoordinator(leaseManager, WORKER_ID, 5000L, 50L); + coordinator.start(); + } + + /** + * Tests update checkpoint success. + */ + @Test + public void testUpdateCheckpoint() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(); + builder.withLease(leaseKey, null).build(); + + // Run the taker and renewer in-between getting the Lease object and calling setCheckpoint + coordinator.runLeaseTaker(); + coordinator.runLeaseRenewer(); + + KinesisClientLease lease = coordinator.getCurrentlyHeldLease(leaseKey); + if (lease == null) { + List leases = leaseManager.listLeases(); + for (KinesisClientLease kinesisClientLease : leases) { + System.out.println(kinesisClientLease); + } + } + + assertThat(lease, notNullValue()); + ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint"); + // lease's leaseCounter is wrong at this point, but it shouldn't matter. + Assert.assertTrue(coordinator.setCheckpoint(lease.getLeaseKey(), newCheckpoint, lease.getConcurrencyToken())); + + Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey()); + + lease.setLeaseCounter(lease.getLeaseCounter() + 1); + lease.setCheckpoint(newCheckpoint); + lease.setLeaseOwner(coordinator.getWorkerIdentifier()); + Assert.assertEquals(lease, fromDynamo); + } + + /** + * Tests updateCheckpoint when the lease has changed out from under us. 
+ */ + @Test + public void testUpdateCheckpointLeaseUpdated() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(); + builder.withLease(leaseKey, null).build(); + + coordinator.runLeaseTaker(); + coordinator.runLeaseRenewer(); + KinesisClientLease lease = coordinator.getCurrentlyHeldLease(leaseKey); + + assertThat(lease, notNullValue()); + leaseManager.renewLease(coordinator.getCurrentlyHeldLease(leaseKey)); + + ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint"); + Assert.assertFalse(coordinator.setCheckpoint(lease.getLeaseKey(), newCheckpoint, lease.getConcurrencyToken())); + + Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey()); + + lease.setLeaseCounter(lease.getLeaseCounter() + 1); + // Counter and owner changed, but checkpoint did not. + lease.setLeaseOwner(coordinator.getWorkerIdentifier()); + Assert.assertEquals(lease, fromDynamo); + } + + /** + * Tests updateCheckpoint with a bad concurrency token. + */ + @Test + public void testUpdateCheckpointBadConcurrencyToken() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(); + builder.withLease(leaseKey, null).build(); + + coordinator.runLeaseTaker(); + coordinator.runLeaseRenewer(); + KinesisClientLease lease = coordinator.getCurrentlyHeldLease(leaseKey); + + assertThat(lease, notNullValue()); + + ExtendedSequenceNumber newCheckpoint = new ExtendedSequenceNumber("newCheckpoint"); + Assert.assertFalse(coordinator.setCheckpoint(lease.getLeaseKey(), newCheckpoint, UUID.randomUUID())); + + Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey()); + + // Owner should be the only thing that changed. 
+ lease.setLeaseOwner(coordinator.getWorkerIdentifier()); + Assert.assertEquals(lease, fromDynamo); + } + + public static class TestHarnessBuilder { + + private long currentTimeNanos; + + private Map leases = new HashMap(); + + private Callable timeProvider = new Callable() { + + @Override + public Long call() throws Exception { + return currentTimeNanos; + } + + }; + + public TestHarnessBuilder withLease(String shardId) { + return withLease(shardId, "leaseOwner"); + } + + public TestHarnessBuilder withLease(String shardId, String owner) { + KinesisClientLease lease = new KinesisClientLease(); + lease.setCheckpoint(new ExtendedSequenceNumber("checkpoint")); + lease.setOwnerSwitchesSinceCheckpoint(0L); + lease.setLeaseCounter(0L); + lease.setLeaseOwner(owner); + lease.setParentShardIds(Collections.singleton("parentShardId")); + lease.setLeaseKey(shardId); + + leases.put(shardId, lease); + return this; + } + + public Map build() throws LeasingException { + for (KinesisClientLease lease : leases.values()) { + leaseManager.createLeaseIfNotExists(lease); + if (lease.getLeaseOwner() != null) { + lease.setLastCounterIncrementNanos(System.nanoTime()); + } + } + + currentTimeNanos = System.nanoTime(); + + return leases; + } + + public void passTime(long millis) { + currentTimeNanos += millis * 1000000; + } + + private void mutateAssert(String newWorkerIdentifier, KinesisClientLease original, KinesisClientLease actual) { + original.setLeaseCounter(original.getLeaseCounter() + 1); + if (original.getLeaseOwner() != null && !newWorkerIdentifier.equals(original.getLeaseOwner())) { + original.setOwnerSwitchesSinceCheckpoint(original.getOwnerSwitchesSinceCheckpoint() + 1); + } + original.setLeaseOwner(newWorkerIdentifier); + + Assert.assertEquals(original, actual); // Assert the contents of the lease + } + + public void addLeasesToRenew(ILeaseRenewer renewer, String... 
shardIds) + throws DependencyException, InvalidStateException { + List leasesToRenew = new ArrayList(); + + for (String shardId : shardIds) { + KinesisClientLease lease = leases.get(shardId); + Assert.assertNotNull(lease); + leasesToRenew.add(lease); + } + + renewer.addLeasesToRenew(leasesToRenew); + } + + public Map renewMutateAssert(ILeaseRenewer renewer, + String... renewedShardIds) throws DependencyException, InvalidStateException { + renewer.renewLeases(); + + Map heldLeases = renewer.getCurrentlyHeldLeases(); + Assert.assertEquals(renewedShardIds.length, heldLeases.size()); + + for (String shardId : renewedShardIds) { + KinesisClientLease original = leases.get(shardId); + Assert.assertNotNull(original); + + KinesisClientLease actual = heldLeases.get(shardId); + Assert.assertNotNull(actual); + + original.setLeaseCounter(original.getLeaseCounter() + 1); + Assert.assertEquals(original, actual); + } + + return heldLeases; + } + + public void renewAllLeases() throws LeasingException { + for (KinesisClientLease lease : leases.values()) { + leaseManager.renewLease(lease); + } + } + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorTest.java new file mode 100644 index 00000000..11962d8f --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisClientLibLeaseCoordinatorTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.doReturn; + +import java.util.UUID; + +import junit.framework.Assert; + +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; + +public class KinesisClientLibLeaseCoordinatorTest { + private static final String SHARD_ID = "shardId-test"; + private static final String WORK_ID = "workId-test"; + private static final long TEST_LONG = 1000L; + private static final ExtendedSequenceNumber TEST_CHKPT = new ExtendedSequenceNumber("string-test"); + private static final UUID TEST_UUID = UUID.randomUUID(); + + @SuppressWarnings("rawtypes") + @Mock + private ILeaseManager mockLeaseManager; + + private KinesisClientLibLeaseCoordinator leaseCoordinator; + + @SuppressWarnings("unchecked") + @Before + public void setUpLeaseCoordinator() throws ProvisionedThroughputException, DependencyException { + // Initialize the annotation + MockitoAnnotations.initMocks(this); + // Set up lease coordinator + doReturn(true).when(mockLeaseManager).createLeaseTableIfNotExists(anyLong(), 
anyLong()); + leaseCoordinator = new KinesisClientLibLeaseCoordinator(mockLeaseManager, WORK_ID, TEST_LONG, TEST_LONG); + } + + @Test(expected = ShutdownException.class) + public void testSetCheckpointWithUnownedShardId() + throws KinesisClientLibException, DependencyException, InvalidStateException, ProvisionedThroughputException { + final boolean succeess = leaseCoordinator.setCheckpoint(SHARD_ID, TEST_CHKPT, TEST_UUID); + Assert.assertFalse("Set Checkpoint should return failure", succeess); + leaseCoordinator.setCheckpoint(SHARD_ID, TEST_CHKPT, TEST_UUID.toString()); + } + + @Test(expected = DependencyException.class) + public void testWaitLeaseTableTimeout() + throws DependencyException, ProvisionedThroughputException, IllegalStateException { + // Set mock lease manager to return false in waiting + doReturn(false).when(mockLeaseManager).waitUntilLeaseTableExists(anyLong(), anyLong()); + leaseCoordinator.initialize(); + } +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisDataFetcherTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisDataFetcherTest.java new file mode 100644 index 00000000..dd56a256 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/KinesisDataFetcherTest.java @@ -0,0 +1,215 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.amazonaws.services.kinesis.model.GetRecordsResult; +import com.amazonaws.services.kinesis.model.Record; +import com.amazonaws.services.kinesis.model.ResourceNotFoundException; +import com.amazonaws.services.kinesis.model.ShardIteratorType; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory; + +/** + * Unit tests for KinesisDataFetcher. 
+ */ +public class KinesisDataFetcherTest { + + private static final int MAX_RECORDS = 1; + private static final String SHARD_ID = "shardId-1"; + private static final String AT_SEQUENCE_NUMBER = ShardIteratorType.AT_SEQUENCE_NUMBER.toString(); + private static final ShardInfo SHARD_INFO = new ShardInfo(SHARD_ID, null, null, null); + private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); + private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP = + InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(1000)); + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + MetricsHelper.startScope(new NullMetricsFactory(), "KinesisDataFetcherTest"); + } + + /** + * Test initialize() with the LATEST iterator instruction + */ + @Test + public final void testInitializeLatest() throws Exception { + testInitializeAndFetch(ShardIteratorType.LATEST.toString(), + ShardIteratorType.LATEST.toString(), + INITIAL_POSITION_LATEST); + } + + /** + * Test initialize() with the TIME_ZERO iterator instruction + */ + @Test + public final void testInitializeTimeZero() throws Exception { + testInitializeAndFetch(ShardIteratorType.TRIM_HORIZON.toString(), + ShardIteratorType.TRIM_HORIZON.toString(), + INITIAL_POSITION_TRIM_HORIZON); + } + + /** + * Test initialize() with the AT_TIMESTAMP iterator instruction + */ + @Test + public final void testInitializeAtTimestamp() throws Exception { + testInitializeAndFetch(ShardIteratorType.AT_TIMESTAMP.toString(), + ShardIteratorType.AT_TIMESTAMP.toString(), + INITIAL_POSITION_AT_TIMESTAMP); + } + + + /** + * Test initialize() when a flushpoint exists. 
+ */ + @Test + public final void testInitializeFlushpoint() throws Exception { + testInitializeAndFetch("foo", "123", INITIAL_POSITION_LATEST); + } + + /** + * Test initialize() with an invalid iterator instruction + */ + @Test(expected = IllegalArgumentException.class) + public final void testInitializeInvalid() throws Exception { + testInitializeAndFetch("foo", null, INITIAL_POSITION_LATEST); + } + + @Test + public void testadvanceIteratorTo() throws KinesisClientLibException { + IKinesisProxy kinesis = mock(IKinesisProxy.class); + ICheckpoint checkpoint = mock(ICheckpoint.class); + + KinesisDataFetcher fetcher = new KinesisDataFetcher(kinesis, SHARD_INFO); + + String iteratorA = "foo"; + String iteratorB = "bar"; + String seqA = "123"; + String seqB = "456"; + GetRecordsResult outputA = new GetRecordsResult(); + List recordsA = new ArrayList(); + outputA.setRecords(recordsA); + GetRecordsResult outputB = new GetRecordsResult(); + List recordsB = new ArrayList(); + outputB.setRecords(recordsB); + + when(kinesis.getIterator(SHARD_ID, AT_SEQUENCE_NUMBER, seqA)).thenReturn(iteratorA); + when(kinesis.getIterator(SHARD_ID, AT_SEQUENCE_NUMBER, seqB)).thenReturn(iteratorB); + when(kinesis.get(iteratorA, MAX_RECORDS)).thenReturn(outputA); + when(kinesis.get(iteratorB, MAX_RECORDS)).thenReturn(outputB); + + when(checkpoint.getCheckpoint(SHARD_ID)).thenReturn(new ExtendedSequenceNumber(seqA)); + fetcher.initialize(seqA, null); + + fetcher.advanceIteratorTo(seqA, null); + Assert.assertEquals(recordsA, fetcher.getRecords(MAX_RECORDS).getRecords()); + + fetcher.advanceIteratorTo(seqB, null); + Assert.assertEquals(recordsB, fetcher.getRecords(MAX_RECORDS).getRecords()); + } + + @Test + public void testadvanceIteratorToTrimHorizonLatestAndAtTimestamp() { + IKinesisProxy kinesis = mock(IKinesisProxy.class); + + KinesisDataFetcher fetcher = new KinesisDataFetcher(kinesis, SHARD_INFO); + + String iteratorHorizon = "horizon"; + when(kinesis.getIterator(SHARD_ID, 
ShardIteratorType.TRIM_HORIZON.toString())).thenReturn(iteratorHorizon); + fetcher.advanceIteratorTo(ShardIteratorType.TRIM_HORIZON.toString(), INITIAL_POSITION_TRIM_HORIZON); + Assert.assertEquals(iteratorHorizon, fetcher.getNextIterator()); + + String iteratorLatest = "latest"; + when(kinesis.getIterator(SHARD_ID, ShardIteratorType.LATEST.toString())).thenReturn(iteratorLatest); + fetcher.advanceIteratorTo(ShardIteratorType.LATEST.toString(), INITIAL_POSITION_LATEST); + Assert.assertEquals(iteratorLatest, fetcher.getNextIterator()); + + Date timestamp = new Date(1000L); + String iteratorAtTimestamp = "AT_TIMESTAMP"; + when(kinesis.getIterator(SHARD_ID, timestamp)).thenReturn(iteratorAtTimestamp); + fetcher.advanceIteratorTo(ShardIteratorType.AT_TIMESTAMP.toString(), INITIAL_POSITION_AT_TIMESTAMP); + Assert.assertEquals(iteratorAtTimestamp, fetcher.getNextIterator()); + } + + @Test + public void testGetRecordsWithResourceNotFoundException() { + // Set up arguments used by proxy + String nextIterator = "TestShardIterator"; + int maxRecords = 100; + + // Set up proxy mock methods + KinesisProxy mockProxy = mock(KinesisProxy.class); + doReturn(nextIterator).when(mockProxy).getIterator(SHARD_ID, ShardIteratorType.LATEST.toString()); + doThrow(new ResourceNotFoundException("Test Exception")).when(mockProxy).get(nextIterator, maxRecords); + + // Create data fetcher and initialize it with latest type checkpoint + KinesisDataFetcher dataFetcher = new KinesisDataFetcher(mockProxy, SHARD_INFO); + dataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST); + // Call getRecords of dataFetcher which will throw an exception + dataFetcher.getRecords(maxRecords); + + // Test shard has reached the end + Assert.assertTrue("Shard should reach the end", dataFetcher.isShardEndReached()); + } + + private void testInitializeAndFetch(String iteratorType, + String seqNo, + InitialPositionInStreamExtended initialPositionInStream) throws Exception { + 
IKinesisProxy kinesis = mock(IKinesisProxy.class); + String iterator = "foo"; + List expectedRecords = new ArrayList(); + GetRecordsResult response = new GetRecordsResult(); + response.setRecords(expectedRecords); + + when(kinesis.getIterator(SHARD_ID, initialPositionInStream.getTimestamp())).thenReturn(iterator); + when(kinesis.getIterator(SHARD_ID, AT_SEQUENCE_NUMBER, seqNo)).thenReturn(iterator); + when(kinesis.getIterator(SHARD_ID, iteratorType)).thenReturn(iterator); + when(kinesis.get(iterator, MAX_RECORDS)).thenReturn(response); + + ICheckpoint checkpoint = mock(ICheckpoint.class); + when(checkpoint.getCheckpoint(SHARD_ID)).thenReturn(new ExtendedSequenceNumber(seqNo)); + + KinesisDataFetcher fetcher = new KinesisDataFetcher(kinesis, SHARD_INFO); + fetcher.initialize(seqNo, initialPositionInStream); + List actualRecords = fetcher.getRecords(MAX_RECORDS).getRecords(); + + Assert.assertEquals(expectedRecords, actualRecords); + } + +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritizationUnitTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ParentsFirstShardPrioritizationUnitTest.java similarity index 88% rename from amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritizationUnitTest.java rename to src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ParentsFirstShardPrioritizationUnitTest.java index 3815f179..7ba0753d 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritizationUnitTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ParentsFirstShardPrioritizationUnitTest.java @@ -1,18 +1,4 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. 
- * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ -package software.amazon.kinesis.leases; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; @@ -25,7 +11,7 @@ import java.util.Random; import org.junit.Test; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; public class ParentsFirstShardPrioritizationUnitTest { @@ -67,7 +53,7 @@ public class ParentsFirstShardPrioritizationUnitTest { assertEquals(numberOfShards, ordered.size()); for (int shardNumber = 0; shardNumber < numberOfShards; shardNumber++) { String shardId = shardId(shardNumber); - assertEquals(shardId, ordered.get(shardNumber).shardId()); + assertEquals(shardId, ordered.get(shardNumber).getShardId()); } } @@ -97,7 +83,7 @@ public class ParentsFirstShardPrioritizationUnitTest { for (int shardNumber = 0; shardNumber < maxDepth; shardNumber++) { String shardId = shardId(shardNumber); - assertEquals(shardId, ordered.get(shardNumber).shardId()); + assertEquals(shardId, ordered.get(shardNumber).getShardId()); } } @@ -122,7 +108,7 @@ public class ParentsFirstShardPrioritizationUnitTest { assertEquals(numberOfShards, ordered.size()); for (int shardNumber = 0; shardNumber < numberOfShards; shardNumber++) { String shardId = shardId(shardNumber); - assertEquals(shardId, ordered.get(shardNumber).shardId()); + assertEquals(shardId, ordered.get(shardNumber).getShardId()); } } diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ProcessTaskTest.java 
b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ProcessTaskTest.java new file mode 100644 index 00000000..e95aef50 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ProcessTaskTest.java @@ -0,0 +1,377 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.anyInt; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Random; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import 
com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.Messages.AggregatedRecord; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; +import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord; +import com.amazonaws.services.kinesis.model.GetRecordsResult; +import com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException; +import com.amazonaws.services.kinesis.model.Record; +import com.google.protobuf.ByteString; + +public class ProcessTaskTest { + + @SuppressWarnings("serial") + private static class RecordSubclass extends Record {} + + private static final byte[] TEST_DATA = new byte[] { 1, 2, 3, 4 }; + + private final int maxRecords = 100; + private final String shardId = "shard-test"; + private final long idleTimeMillis = 1000L; + private final long taskBackoffTimeMillis = 1L; + private final boolean callProcessRecordsForEmptyRecordList = true; + // We don't want any of these tests to run checkpoint validation + private final boolean skipCheckpointValidationValue = false; + private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); + + private @Mock KinesisDataFetcher mockDataFetcher; + private @Mock IRecordProcessor mockRecordProcessor; + private @Mock RecordProcessorCheckpointer mockCheckpointer; + @Mock + private ThrottlingReporter throttlingReporter; + + private List processedRecords; + private ExtendedSequenceNumber newLargestPermittedCheckpointValue; + + private ProcessTask processTask; + + @Before + public void setUpProcessTask() { + // Initialize the annotation + MockitoAnnotations.initMocks(this); + // Set up process task + final StreamConfig config = + new StreamConfig(null, maxRecords, idleTimeMillis, callProcessRecordsForEmptyRecordList, + skipCheckpointValidationValue, + INITIAL_POSITION_LATEST); + final 
ShardInfo shardInfo = new ShardInfo(shardId, null, null, null); + processTask = new ProcessTask( + shardInfo, config, mockRecordProcessor, mockCheckpointer, mockDataFetcher, taskBackoffTimeMillis, + KinesisClientLibConfiguration.DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST, throttlingReporter); + } + + @Test + public void testProcessTaskWithProvisionedThroughputExceededException() { + // Set data fetcher to throw exception + doReturn(false).when(mockDataFetcher).isShardEndReached(); + doThrow(new ProvisionedThroughputExceededException("Test Exception")).when(mockDataFetcher) + .getRecords(maxRecords); + + TaskResult result = processTask.call(); + verify(throttlingReporter).throttled(); + verify(throttlingReporter, never()).success(); + assertTrue("Result should contain ProvisionedThroughputExceededException", + result.getException() instanceof ProvisionedThroughputExceededException); + } + + @Test + public void testProcessTaskWithNonExistentStream() { + // Data fetcher returns a null Result when the stream does not exist + doReturn(null).when(mockDataFetcher).getRecords(maxRecords); + + TaskResult result = processTask.call(); + assertNull("Task should not throw an exception", result.getException()); + } + + @Test + public void testProcessTaskWithShardEndReached() { + // Set data fetcher to return true for shard end reached + doReturn(true).when(mockDataFetcher).isShardEndReached(); + + TaskResult result = processTask.call(); + assertTrue("Result should contain shardEndReached true", result.isShardEndReached()); + } + + @Test + public void testNonAggregatedKinesisRecord() { + final String sqn = new BigInteger(128, new Random()).toString(); + final String pk = UUID.randomUUID().toString(); + final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS)); + final Record r = new Record() + .withPartitionKey(pk) + .withData(ByteBuffer.wrap(TEST_DATA)) + .withSequenceNumber(sqn) + .withApproximateArrivalTimestamp(ts); + + 
testWithRecord(r); + + assertEquals(1, processedRecords.size()); + + Record pr = processedRecords.get(0); + assertEquals(pk, pr.getPartitionKey()); + assertEquals(ts, pr.getApproximateArrivalTimestamp()); + byte[] b = new byte[pr.getData().remaining()]; + pr.getData().get(b); + assertTrue(Arrays.equals(TEST_DATA, b)); + + assertEquals(sqn, newLargestPermittedCheckpointValue.getSequenceNumber()); + assertEquals(0, newLargestPermittedCheckpointValue.getSubSequenceNumber()); + } + + @Test + public void testDoesNotDeaggregateSubclassOfRecord() { + final String sqn = new BigInteger(128, new Random()).toString(); + final Record r = new RecordSubclass() + .withSequenceNumber(sqn) + .withData(ByteBuffer.wrap(new byte[0])); + + testWithRecord(r); + + assertEquals(1, processedRecords.size(), 1); + assertSame(r, processedRecords.get(0)); + + assertEquals(sqn, newLargestPermittedCheckpointValue.getSequenceNumber()); + assertEquals(0, newLargestPermittedCheckpointValue.getSubSequenceNumber()); + } + + @Test + public void testDeaggregatesRecord() { + final String sqn = new BigInteger(128, new Random()).toString(); + final String pk = UUID.randomUUID().toString(); + final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS)); + final Record r = new Record() + .withPartitionKey("-") + .withData(generateAggregatedRecord(pk)) + .withSequenceNumber(sqn) + .withApproximateArrivalTimestamp(ts); + + testWithRecord(r); + + assertEquals(3, processedRecords.size()); + for (Record pr : processedRecords) { + assertTrue(pr instanceof UserRecord); + assertEquals(pk, pr.getPartitionKey()); + assertEquals(ts, pr.getApproximateArrivalTimestamp()); + byte[] b = new byte[pr.getData().remaining()]; + pr.getData().get(b); + assertTrue(Arrays.equals(TEST_DATA, b)); + } + + assertEquals(sqn, newLargestPermittedCheckpointValue.getSequenceNumber()); + assertEquals(processedRecords.size() - 1, newLargestPermittedCheckpointValue.getSubSequenceNumber()); + } + + 
@Test + public void testDeaggregatesRecordWithNoArrivalTimestamp() { + final String sqn = new BigInteger(128, new Random()).toString(); + final String pk = UUID.randomUUID().toString(); + final Record r = new Record() + .withPartitionKey("-") + .withData(generateAggregatedRecord(pk)) + .withSequenceNumber(sqn); + + testWithRecord(r); + + assertEquals(3, processedRecords.size()); + for (Record pr : processedRecords) { + assertTrue(pr instanceof UserRecord); + assertEquals(pk, pr.getPartitionKey()); + assertNull(pr.getApproximateArrivalTimestamp()); + } + } + + @Test + public void testLargestPermittedCheckpointValue() { + // Some sequence number value from previous processRecords call to mock. + final BigInteger previousCheckpointSqn = new BigInteger(128, new Random()); + + // Values for this processRecords call. + final int numberOfRecords = 104; + // Start this batch of records at a sequence number greater than the previous checkpoint value. + final BigInteger startingSqn = previousCheckpointSqn.add(BigInteger.valueOf(10)); + final List records = generateConsecutiveRecords( + numberOfRecords, "-", ByteBuffer.wrap(TEST_DATA), new Date(), startingSqn); + + testWithRecords(records, new ExtendedSequenceNumber(previousCheckpointSqn.toString()), + new ExtendedSequenceNumber(previousCheckpointSqn.toString())); + + final ExtendedSequenceNumber expectedLargestPermittedEsqn = new ExtendedSequenceNumber( + startingSqn.add(BigInteger.valueOf(numberOfRecords - 1)).toString()); + assertEquals(expectedLargestPermittedEsqn, newLargestPermittedCheckpointValue); + } + + @Test + public void testLargestPermittedCheckpointValueWithEmptyRecords() { + // Some sequence number value from previous processRecords call. 
+ final BigInteger baseSqn = new BigInteger(128, new Random()); + final ExtendedSequenceNumber lastCheckpointEspn = new ExtendedSequenceNumber(baseSqn.toString()); + final ExtendedSequenceNumber largestPermittedEsqn = new ExtendedSequenceNumber( + baseSqn.add(BigInteger.valueOf(100)).toString()); + + testWithRecords(Collections.emptyList(), lastCheckpointEspn, largestPermittedEsqn); + + // Make sure that even with empty records, largest permitted sequence number does not change. + assertEquals(largestPermittedEsqn, newLargestPermittedCheckpointValue); + } + + @Test + public void testFilterBasedOnLastCheckpointValue() { + // Explanation of setup: + // * Assume in previous processRecord call, user got 3 sub-records that all belonged to one + // Kinesis record. So sequence number was X, and sub-sequence numbers were 0, 1, 2. + // * 2nd sub-record was checkpointed (extended sequence number X.1). + // * Worker crashed and restarted. So now DDB has checkpoint value of X.1. + // Test: + // * Now in the subsequent processRecords call, KCL should filter out X.0 and X.1. + final BigInteger previousCheckpointSqn = new BigInteger(128, new Random()); + final long previousCheckpointSsqn = 1; + + // Values for this processRecords call. + final String startingSqn = previousCheckpointSqn.toString(); + final String pk = UUID.randomUUID().toString(); + final Record r = new Record() + .withPartitionKey("-") + .withData(generateAggregatedRecord(pk)) + .withSequenceNumber(startingSqn); + + testWithRecords(Collections.singletonList(r), + new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn), + new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn)); + + // First two records should be dropped - and only 1 remaining record should be there. + assertEquals(1, processedRecords.size()); + assertTrue(processedRecords.get(0) instanceof UserRecord); + + // Verify user record's extended sequence number and other fields. 
+ final UserRecord pr = (UserRecord)processedRecords.get(0); + assertEquals(pk, pr.getPartitionKey()); + assertEquals(startingSqn, pr.getSequenceNumber()); + assertEquals(previousCheckpointSsqn + 1, pr.getSubSequenceNumber()); + assertNull(pr.getApproximateArrivalTimestamp()); + + // Expected largest permitted sequence number will be last sub-record sequence number. + final ExtendedSequenceNumber expectedLargestPermittedEsqn = new ExtendedSequenceNumber( + previousCheckpointSqn.toString(), 2L); + assertEquals(expectedLargestPermittedEsqn, newLargestPermittedCheckpointValue); + } + + private void testWithRecord(Record record) { + testWithRecords(Collections.singletonList(record), + ExtendedSequenceNumber.TRIM_HORIZON, ExtendedSequenceNumber.TRIM_HORIZON); + } + + private void testWithRecords(List records, + ExtendedSequenceNumber lastCheckpointValue, + ExtendedSequenceNumber largestPermittedCheckpointValue) { + when(mockDataFetcher.getRecords(anyInt())).thenReturn( + new GetRecordsResult().withRecords(records)); + when(mockCheckpointer.getLastCheckpointValue()).thenReturn(lastCheckpointValue); + when(mockCheckpointer.getLargestPermittedCheckpointValue()).thenReturn(largestPermittedCheckpointValue); + processTask.call(); + verify(throttlingReporter).success(); + verify(throttlingReporter, never()).throttled(); + + ArgumentCaptor priCaptor = ArgumentCaptor.forClass(ProcessRecordsInput.class); + verify(mockRecordProcessor).processRecords(priCaptor.capture()); + processedRecords = priCaptor.getValue().getRecords(); + + ArgumentCaptor esnCaptor = ArgumentCaptor.forClass(ExtendedSequenceNumber.class); + verify(mockCheckpointer).setLargestPermittedCheckpointValue(esnCaptor.capture()); + newLargestPermittedCheckpointValue = esnCaptor.getValue(); + } + + /** + * See the KPL documentation on GitHub for more details about the binary + * format. + * + * @param pk + * Partition key to use. All the records will have the same + * partition key. 
+ * @return ByteBuffer containing the serialized form of the aggregated + * record, along with the necessary header and footer. + */ + private static ByteBuffer generateAggregatedRecord(String pk) { + ByteBuffer bb = ByteBuffer.allocate(1024); + bb.put(new byte[] {-13, -119, -102, -62 }); + + com.amazonaws.services.kinesis.clientlibrary.types.Messages.Record r = + com.amazonaws.services.kinesis.clientlibrary.types.Messages.Record.newBuilder() + .setData(ByteString.copyFrom(TEST_DATA)) + .setPartitionKeyIndex(0) + .build(); + + byte[] payload = AggregatedRecord.newBuilder() + .addPartitionKeyTable(pk) + .addRecords(r) + .addRecords(r) + .addRecords(r) + .build() + .toByteArray(); + + bb.put(payload); + bb.put(md5(payload)); + bb.limit(bb.position()); + bb.rewind(); + return bb; + } + + private static List generateConsecutiveRecords( + int numberOfRecords, String partitionKey, ByteBuffer data, + Date arrivalTimestamp, BigInteger startSequenceNumber) { + List records = new ArrayList<>(); + for (int i = 0 ; i < numberOfRecords ; ++i) { + records.add(new Record() + .withPartitionKey(partitionKey) + .withData(data) + .withSequenceNumber(startSequenceNumber.add(BigInteger.valueOf(i)).toString()) + .withApproximateArrivalTimestamp(arrivalTimestamp)); + } + return records; + } + + private static byte[] md5(byte[] b) { + try { + MessageDigest md = MessageDigest.getInstance("MD5"); + return md.digest(b); + } catch (Exception e) { + throw new RuntimeException(e); + } + } +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/RecordProcessorCheckpointerTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/RecordProcessorCheckpointerTest.java new file mode 100644 index 00000000..d5f6b53f --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/RecordProcessorCheckpointerTest.java @@ -0,0 +1,418 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map.Entry; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.InMemoryCheckpointImpl; +import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord; +import com.amazonaws.services.kinesis.model.Record; + +import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; +import static org.mockito.Matchers.anyString; + +/** + * + */ +public class RecordProcessorCheckpointerTest { + private String startingSequenceNumber = "13"; + private 
ExtendedSequenceNumber startingExtendedSequenceNumber = new ExtendedSequenceNumber(startingSequenceNumber); + private String testConcurrencyToken = "testToken"; + private ICheckpoint checkpoint; + private String shardId = "shardId-123"; + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + checkpoint = new InMemoryCheckpointImpl(startingSequenceNumber); + // A real checkpoint will return a checkpoint value after it is initialized. + checkpoint.setCheckpoint(shardId, startingExtendedSequenceNumber, testConcurrencyToken); + Assert.assertEquals(this.startingExtendedSequenceNumber, checkpoint.getCheckpoint(shardId)); + } + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + } + + /** + * Test method for + * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint()}. + */ + @Test + public final void testCheckpoint() throws Exception { + ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); + + // First call to checkpoint + RecordProcessorCheckpointer processingCheckpointer = + new RecordProcessorCheckpointer(shardInfo, checkpoint, null); + processingCheckpointer.setLargestPermittedCheckpointValue(startingExtendedSequenceNumber); + processingCheckpointer.checkpoint(); + Assert.assertEquals(startingExtendedSequenceNumber, checkpoint.getCheckpoint(shardId)); + + // Advance checkpoint + ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("5019"); + + processingCheckpointer.setLargestPermittedCheckpointValue(sequenceNumber); + processingCheckpointer.checkpoint(); + Assert.assertEquals(sequenceNumber, checkpoint.getCheckpoint(shardId)); + } + + /** + * Test method for + * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(Record record)}. 
+ */ + @Test + public final void testCheckpointRecord() throws Exception { + ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); + SequenceNumberValidator sequenceNumberValidator = + new SequenceNumberValidator(null, shardId, false); + RecordProcessorCheckpointer processingCheckpointer = + new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator); + processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5025"); + Record record = new Record().withSequenceNumber("5025"); + processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber); + processingCheckpointer.checkpoint(record); + Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId)); + } + + /** + * Test method for + * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(UserRecord record)}. 
+ */ + @Test + public final void testCheckpointSubRecord() throws Exception { + ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); + SequenceNumberValidator sequenceNumberValidator = + new SequenceNumberValidator(null, shardId, false); + RecordProcessorCheckpointer processingCheckpointer = + new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator); + processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5030"); + Record record = new Record().withSequenceNumber("5030"); + UserRecord subRecord = new UserRecord(record); + processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber); + processingCheckpointer.checkpoint(subRecord); + Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId)); + } + + /** + * Test method for + * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(String sequenceNumber)}. 
+ */ + @Test + public final void testCheckpointSequenceNumber() throws Exception { + ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); + SequenceNumberValidator sequenceNumberValidator = + new SequenceNumberValidator(null, shardId, false); + RecordProcessorCheckpointer processingCheckpointer = + new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator); + processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5035"); + processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber); + processingCheckpointer.checkpoint("5035"); + Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId)); + } + + /** + * Test method for + * {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.RecordProcessorCheckpointer#checkpoint(String sequenceNumber, long subSequenceNumber)}. 
+ */ + @Test + public final void testCheckpointExtendedSequenceNumber() throws Exception { + ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); + SequenceNumberValidator sequenceNumberValidator = + new SequenceNumberValidator(null, shardId, false); + RecordProcessorCheckpointer processingCheckpointer = + new RecordProcessorCheckpointer(shardInfo, checkpoint, sequenceNumberValidator); + processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); + ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5040"); + processingCheckpointer.setLargestPermittedCheckpointValue(extendedSequenceNumber); + processingCheckpointer.checkpoint("5040", 0); + Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId)); + } + + /** + * Test method for update() + * + */ + @Test + public final void testUpdate() throws Exception { + ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); + + RecordProcessorCheckpointer checkpointer = new RecordProcessorCheckpointer(shardInfo, checkpoint, null); + + ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("10"); + checkpointer.setLargestPermittedCheckpointValue(sequenceNumber); + Assert.assertEquals(sequenceNumber, checkpointer.getLargestPermittedCheckpointValue()); + + sequenceNumber = new ExtendedSequenceNumber("90259185948592875928375908214918273491783097"); + checkpointer.setLargestPermittedCheckpointValue(sequenceNumber); + Assert.assertEquals(sequenceNumber, checkpointer.getLargestPermittedCheckpointValue()); + } + + /* + * This test is a mixed test of checking some basic functionality of checkpointing at a sequence number and making + * sure certain bounds checks and validations are being performed inside the checkpointer to prevent clients from + * checkpointing out of order/too big/non-numeric values that aren't valid strings for them to be 
checkpointing + */ + @Test + public final void testClientSpecifiedCheckpoint() throws Exception { + ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); + + SequenceNumberValidator validator = mock(SequenceNumberValidator.class); + Mockito.doNothing().when(validator).validateSequenceNumber(anyString()); + RecordProcessorCheckpointer processingCheckpointer = + new RecordProcessorCheckpointer(shardInfo, checkpoint, validator); + + // Several checkpoints we're gonna hit + ExtendedSequenceNumber tooSmall = new ExtendedSequenceNumber("2"); + ExtendedSequenceNumber firstSequenceNumber = checkpoint.getCheckpoint(shardId); // 13 + ExtendedSequenceNumber secondSequenceNumber = new ExtendedSequenceNumber("127"); + ExtendedSequenceNumber thirdSequenceNumber = new ExtendedSequenceNumber("5019"); + ExtendedSequenceNumber lastSequenceNumberOfShard = new ExtendedSequenceNumber("6789"); + ExtendedSequenceNumber tooBigSequenceNumber = new ExtendedSequenceNumber("9000"); + + processingCheckpointer.setInitialCheckpointValue(firstSequenceNumber); + processingCheckpointer.setLargestPermittedCheckpointValue(thirdSequenceNumber); + + // confirm that we cannot move backward + try { + processingCheckpointer.checkpoint(tooSmall.getSequenceNumber(), tooSmall.getSubSequenceNumber()); + Assert.fail("You shouldn't be able to checkpoint earlier than the initial checkpoint."); + } catch (IllegalArgumentException e) { + // yay! 
+ } + + // advance to first + processingCheckpointer.checkpoint(firstSequenceNumber.getSequenceNumber(), firstSequenceNumber.getSubSequenceNumber()); + Assert.assertEquals(firstSequenceNumber, checkpoint.getCheckpoint(shardId)); + processingCheckpointer.checkpoint(firstSequenceNumber.getSequenceNumber(), firstSequenceNumber.getSubSequenceNumber()); + Assert.assertEquals(firstSequenceNumber, checkpoint.getCheckpoint(shardId)); + + // advance to second + processingCheckpointer.checkpoint(secondSequenceNumber.getSequenceNumber(), secondSequenceNumber.getSubSequenceNumber()); + Assert.assertEquals(secondSequenceNumber, checkpoint.getCheckpoint(shardId)); + + ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt = + { tooSmall, // Shouldn't be able to move before the first value we ever checkpointed + firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number + tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer + lastSequenceNumberOfShard, // Just another big value that we will use later + null, // Not a valid sequence number + new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string + ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max + ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value + ExtendedSequenceNumber.LATEST // Can't go back to an initial sentinel value + }; + for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) { + try { + processingCheckpointer.checkpoint(badCheckpointValue.getSequenceNumber(), badCheckpointValue.getSubSequenceNumber()); + fail("checkpointing at bad or out of order sequence didn't throw exception"); + } catch (IllegalArgumentException e) { + + } catch (NullPointerException e) { + + } + Assert.assertEquals("Checkpoint value should not have changed", + secondSequenceNumber, + checkpoint.getCheckpoint(shardId)); + 
Assert.assertEquals("Last checkpoint value should not have changed", + secondSequenceNumber, + processingCheckpointer.getLastCheckpointValue()); + Assert.assertEquals("Largest sequence number should not have changed", + thirdSequenceNumber, + processingCheckpointer.getLargestPermittedCheckpointValue()); + } + + // advance to third number + processingCheckpointer.checkpoint(thirdSequenceNumber.getSequenceNumber(), thirdSequenceNumber.getSubSequenceNumber()); + Assert.assertEquals(thirdSequenceNumber, checkpoint.getCheckpoint(shardId)); + + // Testing a feature that prevents checkpointing at SHARD_END twice + processingCheckpointer.setLargestPermittedCheckpointValue(lastSequenceNumberOfShard); + processingCheckpointer.setSequenceNumberAtShardEnd(processingCheckpointer.getLargestPermittedCheckpointValue()); + processingCheckpointer.setLargestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END); + processingCheckpointer.checkpoint(lastSequenceNumberOfShard.getSequenceNumber(), lastSequenceNumberOfShard.getSubSequenceNumber()); + Assert.assertEquals("Checkpoing at the sequence number at the end of a shard should be the same as " + + "checkpointing at SHARD_END", + ExtendedSequenceNumber.SHARD_END, + processingCheckpointer.getLastCheckpointValue()); + } + + private enum CheckpointAction { + NONE, NO_SEQUENCE_NUMBER, WITH_SEQUENCE_NUMBER; + } + + /** + * Tests a bunch of mixed calls between checkpoint() and checkpoint(sequenceNumber) using a helper function. 
+ * + * Also covers an edge case scenario where a shard consumer is started on a shard that never receives any records + * and is then shutdown + * + * @throws Exception + */ + @SuppressWarnings("serial") + @Test + public final void testMixedCheckpointCalls() throws Exception { + ShardInfo shardInfo = new ShardInfo(shardId, testConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); + + SequenceNumberValidator validator = mock(SequenceNumberValidator.class); + Mockito.doNothing().when(validator).validateSequenceNumber(anyString()); + + RecordProcessorCheckpointer processingCheckpointer = + new RecordProcessorCheckpointer(shardInfo, checkpoint, validator); + + List> testPlans = + new ArrayList>(); + + /* + * Simulate a scenario where the checkpointer is created at "latest". + * + * Then the processor is called with no records (e.g. no more records are added, but the processor might be + * called just to allow checkpointing). + * + * Then the processor is shutdown. + */ + testPlans.add(new LinkedHashMap() { + { + put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); + put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); + } + }); + // Nearly the same as the previous test, but we don't call checkpoint after LATEST + testPlans.add(new LinkedHashMap() { + { + put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NONE); + put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); + } + }); + + // Start with TRIM_HORIZON + testPlans.add(new LinkedHashMap() { + { + put(SentinelCheckpoint.TRIM_HORIZON.toString(), CheckpointAction.NONE); + put("1", CheckpointAction.NONE); + put("2", CheckpointAction.NO_SEQUENCE_NUMBER); + put("3", CheckpointAction.NONE); + put("4", CheckpointAction.WITH_SEQUENCE_NUMBER); + put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); + } + }); + + // Start with LATEST and a bit more complexity + testPlans.add(new LinkedHashMap() { 
+ { + put(SentinelCheckpoint.LATEST.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); + put("30", CheckpointAction.NONE); + put("332", CheckpointAction.WITH_SEQUENCE_NUMBER); + put("349", CheckpointAction.NONE); + put("4332", CheckpointAction.NO_SEQUENCE_NUMBER); + put("4338", CheckpointAction.NONE); + put("5349", CheckpointAction.WITH_SEQUENCE_NUMBER); + put("5358", CheckpointAction.NONE); + put("64332", CheckpointAction.NO_SEQUENCE_NUMBER); + put("64338", CheckpointAction.NO_SEQUENCE_NUMBER); + put("65358", CheckpointAction.WITH_SEQUENCE_NUMBER); + put("764338", CheckpointAction.WITH_SEQUENCE_NUMBER); + put("765349", CheckpointAction.NO_SEQUENCE_NUMBER); + put("765358", CheckpointAction.NONE); + put(SentinelCheckpoint.SHARD_END.toString(), CheckpointAction.NO_SEQUENCE_NUMBER); + } + }); + + for (LinkedHashMap testPlan : testPlans) { + processingCheckpointer = + new RecordProcessorCheckpointer(shardInfo, checkpoint, validator); + testMixedCheckpointCalls(processingCheckpointer, testPlan); + } + } + + /** + * A utility function to simplify various sequences of intermixed updates to the checkpointer, and calls to + * checpoint() and checkpoint(sequenceNumber). 
Takes a map where the key is a new sequence number to set in the + * checkpointer and the value is a CheckpointAction indicating an action to take: NONE -> Set the sequence number, + * don't do anything else NO_SEQUENCE_NUMBER -> Set the sequence number and call checkpoint() WITH_SEQUENCE_NUMBER + * -> Set the sequence number and call checkpoint(sequenceNumber) with that sequence number + * + * @param processingCheckpointer + * @param checkpointValueAndAction + * A map describing which checkpoint value to set in the checkpointer, and what action to take + * @throws Exception + */ + private void testMixedCheckpointCalls(RecordProcessorCheckpointer processingCheckpointer, + LinkedHashMap checkpointValueAndAction) throws Exception { + + for (Entry entry : checkpointValueAndAction.entrySet()) { + ExtendedSequenceNumber lastCheckpointValue = processingCheckpointer.getLastCheckpointValue(); + + if (SentinelCheckpoint.SHARD_END.toString().equals(entry.getKey())) { + // Before shard end, we will pretend to do what we expect the shutdown task to do + processingCheckpointer.setSequenceNumberAtShardEnd(processingCheckpointer + .getLargestPermittedCheckpointValue()); + } + // Advance the largest checkpoint and check that it is updated. 
+ processingCheckpointer.setLargestPermittedCheckpointValue(new ExtendedSequenceNumber(entry.getKey())); + Assert.assertEquals("Expected the largest checkpoint value to be updated after setting it", + new ExtendedSequenceNumber(entry.getKey()), + processingCheckpointer.getLargestPermittedCheckpointValue()); + switch (entry.getValue()) { + case NONE: + // We were told to not checkpoint, so lets just make sure the last checkpoint value is the same as + // when this block started then continue to the next instruction + Assert.assertEquals("Expected the last checkpoint value to stay the same if we didn't checkpoint", + lastCheckpointValue, + processingCheckpointer.getLastCheckpointValue()); + continue; + case NO_SEQUENCE_NUMBER: + processingCheckpointer.checkpoint(); + break; + case WITH_SEQUENCE_NUMBER: + processingCheckpointer.checkpoint(entry.getKey()); + break; + } + // We must have checkpointed to get here, so let's make sure our last checkpoint value is up to date + Assert.assertEquals("Expected the last checkpoint value to change after checkpointing", + new ExtendedSequenceNumber(entry.getKey()), + processingCheckpointer.getLastCheckpointValue()); + Assert.assertEquals("Expected the largest checkpoint value to remain the same since the last set", + new ExtendedSequenceNumber(entry.getKey()), + processingCheckpointer.getLargestPermittedCheckpointValue()); + } + } +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/SequenceNumberValidatorTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/SequenceNumberValidatorTest.java new file mode 100644 index 00000000..aae93f29 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/SequenceNumberValidatorTest.java @@ -0,0 +1,139 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). 
+ * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import junit.framework.Assert; + +import org.junit.Test; +import org.mockito.Mockito; + +import static org.junit.Assert.fail; + +import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.model.InvalidArgumentException; +import com.amazonaws.services.kinesis.model.ShardIteratorType; + +public class SequenceNumberValidatorTest { + + private final boolean validateWithGetIterator = true; + private final String shardId = "shardid-123"; + + @Test + public final void testSequenceNumberValidator() { + + IKinesisProxy proxy = Mockito.mock(IKinesisProxy.class); + + SequenceNumberValidator validator = new SequenceNumberValidator(proxy, shardId, validateWithGetIterator); + + String goodSequence = "456"; + String iterator = "happyiterator"; + String badSequence = "789"; + Mockito.doReturn(iterator) + .when(proxy) + .getIterator(shardId, ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), goodSequence); + Mockito.doThrow(new InvalidArgumentException("")) + .when(proxy) + .getIterator(shardId, ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), badSequence); + + validator.validateSequenceNumber(goodSequence); + Mockito.verify(proxy, Mockito.times(1)).getIterator(shardId, + ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), + goodSequence); + + try { + validator.validateSequenceNumber(badSequence); + fail("Bad sequence number did not cause the 
validator to throw an exception"); + } catch (IllegalArgumentException e) { + Mockito.verify(proxy, Mockito.times(1)).getIterator(shardId, + ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), + badSequence); + } + + nonNumericValueValidationTest(validator, proxy, validateWithGetIterator); + } + + @Test + public final void testNoValidation() { + IKinesisProxy proxy = Mockito.mock(IKinesisProxy.class); + String shardId = "shardid-123"; + SequenceNumberValidator validator = new SequenceNumberValidator(proxy, shardId, !validateWithGetIterator); + String goodSequence = "456"; + + // Just checking that the false flag for validating against getIterator is honored + validator.validateSequenceNumber(goodSequence); + Mockito.verify(proxy, Mockito.times(0)).getIterator(shardId, + ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), + goodSequence); + + // Validator should still validate sentinel values + nonNumericValueValidationTest(validator, proxy, !validateWithGetIterator); + } + + private void nonNumericValueValidationTest(SequenceNumberValidator validator, + IKinesisProxy proxy, + boolean validateWithGetIterator) { + + String[] nonNumericStrings = { null, "bogus-sequence-number", SentinelCheckpoint.LATEST.toString(), + SentinelCheckpoint.SHARD_END.toString(), SentinelCheckpoint.TRIM_HORIZON.toString(), + SentinelCheckpoint.AT_TIMESTAMP.toString() }; + + for (String nonNumericString : nonNumericStrings) { + try { + validator.validateSequenceNumber(nonNumericString); + fail("Validator should not consider " + nonNumericString + " a valid sequence number"); + } catch (IllegalArgumentException e) { + // Non-numeric strings should always be rejected by the validator before the proxy can be called so we + // check that the proxy was not called at all + Mockito.verify(proxy, Mockito.times(0)).getIterator(shardId, + ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), + nonNumericString); + } + } + } + + @Test + public final void testIsDigits() { + // Check things that are all 
digits + String[] stringsOfDigits = { + "0", + "12", + "07897803434", + "12324456576788", + }; + for (String digits : stringsOfDigits) { + Assert.assertTrue("Expected that " + digits + " would be considered a string of digits.", + SequenceNumberValidator.isDigits(digits)); + } + // Check things that are not all digits + String[] stringsWithNonDigits = { + null, + "", + " ", // white spaces + "6 4", + "\t45", + "5242354235234\n", + "7\n6\n5\n", + "12s", // last character + "c07897803434", // first character + "1232445wef6576788", // interior + "no-digits", + }; + for (String notAllDigits : stringsWithNonDigits) { + Assert.assertFalse("Expected that " + notAllDigits + " would not be considered a string of digits.", + SequenceNumberValidator.isDigits(notAllDigits)); + } + } +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumerTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumerTest.java new file mode 100644 index 00000000..893f64ed --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardConsumerTest.java @@ -0,0 +1,472 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.File; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.ListIterator; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.InMemoryCheckpointImpl; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisLocalFileProxy; +import com.amazonaws.services.kinesis.clientlibrary.proxies.util.KinesisLocalFileDataCreator; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import 
com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; +import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; +import com.amazonaws.services.kinesis.model.Record; +import com.amazonaws.services.kinesis.model.ShardIteratorType; + +/** + * Unit tests of {@link ShardConsumer}. + */ +@RunWith(MockitoJUnitRunner.class) +public class ShardConsumerTest { + + private static final Log LOG = LogFactory.getLog(ShardConsumerTest.class); + + private final IMetricsFactory metricsFactory = new NullMetricsFactory(); + private final boolean callProcessRecordsForEmptyRecordList = false; + private final long taskBackoffTimeMillis = 500L; + private final long parentShardPollIntervalMillis = 50L; + private final boolean cleanupLeasesOfCompletedShards = true; + // We don't want any of these tests to run checkpoint validation + private final boolean skipCheckpointValidationValue = false; + private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); + + // Use Executors.newFixedThreadPool since it returns ThreadPoolExecutor, which is + // ... a non-final public class, and so can be mocked and spied. + private final ExecutorService executorService = Executors.newFixedThreadPool(1); + + @Mock + private IRecordProcessor processor; + @Mock + private IKinesisProxy streamProxy; + @Mock + private ILeaseManager leaseManager; + @Mock + private ICheckpoint checkpoint; + @Mock + private ShutdownNotification shutdownNotification; + + /** + * Test method to verify consumer stays in INITIALIZING state when InitializationTask fails. 
+ */ + @SuppressWarnings("unchecked") + @Test + public final void testInitializationStateUponFailure() throws Exception { + ShardInfo shardInfo = new ShardInfo("s-0-0", "testToken", null, ExtendedSequenceNumber.TRIM_HORIZON); + + when(checkpoint.getCheckpoint(anyString())).thenThrow(NullPointerException.class); + + when(leaseManager.getLease(anyString())).thenReturn(null); + StreamConfig streamConfig = + new StreamConfig(streamProxy, + 1, + 10, + callProcessRecordsForEmptyRecordList, + skipCheckpointValidationValue, INITIAL_POSITION_LATEST); + + ShardConsumer consumer = + new ShardConsumer(shardInfo, + streamConfig, + checkpoint, + processor, + null, + parentShardPollIntervalMillis, + cleanupLeasesOfCompletedShards, + executorService, + metricsFactory, + taskBackoffTimeMillis, + KinesisClientLibConfiguration.DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST); + + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.WAITING_ON_PARENT_SHARDS))); + consumer.consumeShard(); // initialize + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.WAITING_ON_PARENT_SHARDS))); + consumer.consumeShard(); // initialize + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.INITIALIZING))); + consumer.consumeShard(); // initialize + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.INITIALIZING))); + consumer.consumeShard(); // initialize + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.INITIALIZING))); + } + + + /** + * Test method to verify consumer stays in INITIALIZING state when InitializationTask fails. 
+ */ + @SuppressWarnings("unchecked") + @Test + public final void testInitializationStateUponSubmissionFailure() throws Exception { + ShardInfo shardInfo = new ShardInfo("s-0-0", "testToken", null, ExtendedSequenceNumber.TRIM_HORIZON); + ExecutorService spyExecutorService = spy(executorService); + + when(checkpoint.getCheckpoint(anyString())).thenThrow(NullPointerException.class); + when(leaseManager.getLease(anyString())).thenReturn(null); + StreamConfig streamConfig = + new StreamConfig(streamProxy, + 1, + 10, + callProcessRecordsForEmptyRecordList, + skipCheckpointValidationValue, INITIAL_POSITION_LATEST); + + ShardConsumer consumer = + new ShardConsumer(shardInfo, + streamConfig, + checkpoint, + processor, + null, + parentShardPollIntervalMillis, + cleanupLeasesOfCompletedShards, + spyExecutorService, + metricsFactory, + taskBackoffTimeMillis, + KinesisClientLibConfiguration.DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST); + + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.WAITING_ON_PARENT_SHARDS))); + consumer.consumeShard(); // initialize + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.WAITING_ON_PARENT_SHARDS))); + + doThrow(new RejectedExecutionException()).when(spyExecutorService).submit(any(InitializeTask.class)); + consumer.consumeShard(); // initialize + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.INITIALIZING))); + consumer.consumeShard(); // initialize + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.INITIALIZING))); + consumer.consumeShard(); // initialize + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.INITIALIZING))); + } + + @SuppressWarnings("unchecked") + @Test + public final void testRecordProcessorThrowable() throws Exception { + ShardInfo shardInfo = new 
ShardInfo("s-0-0", "testToken", null, ExtendedSequenceNumber.TRIM_HORIZON); + StreamConfig streamConfig = + new StreamConfig(streamProxy, + 1, + 10, + callProcessRecordsForEmptyRecordList, + skipCheckpointValidationValue, INITIAL_POSITION_LATEST); + + ShardConsumer consumer = + new ShardConsumer(shardInfo, + streamConfig, + checkpoint, + processor, + null, + parentShardPollIntervalMillis, + cleanupLeasesOfCompletedShards, + executorService, + metricsFactory, + taskBackoffTimeMillis, + KinesisClientLibConfiguration.DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST); + + when(leaseManager.getLease(anyString())).thenReturn(null); + when(checkpoint.getCheckpoint(anyString())).thenReturn(new ExtendedSequenceNumber("123")); + + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.WAITING_ON_PARENT_SHARDS))); + consumer.consumeShard(); // submit BlockOnParentShardTask + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.WAITING_ON_PARENT_SHARDS))); + verify(processor, times(0)).initialize(any(InitializationInput.class)); + + // Throw Error when IRecordProcessor.initialize() is invoked. + doThrow(new Error("ThrowableTest")).when(processor).initialize(any(InitializationInput.class)); + + consumer.consumeShard(); // submit InitializeTask + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.INITIALIZING))); + verify(processor, times(1)).initialize(any(InitializationInput.class)); + + try { + // Checking the status of submitted InitializeTask from above should throw exception. 
+ consumer.consumeShard(); + fail("ShardConsumer should have thrown exception."); + } catch (RuntimeException e) { + assertThat(e.getCause(), instanceOf(ExecutionException.class)); + } + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.INITIALIZING))); + verify(processor, times(1)).initialize(any(InitializationInput.class)); + + doNothing().when(processor).initialize(any(InitializationInput.class)); + + consumer.consumeShard(); // submit InitializeTask again. + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.INITIALIZING))); + verify(processor, times(2)).initialize(any(InitializationInput.class)); + + // Checking the status of submitted InitializeTask from above should pass. + consumer.consumeShard(); + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.PROCESSING))); + } + + /** + * Test method for {@link ShardConsumer#consumeShard()} + */ + @Test + public final void testConsumeShard() throws Exception { + int numRecs = 10; + BigInteger startSeqNum = BigInteger.ONE; + String streamShardId = "kinesis-0-0"; + String testConcurrencyToken = "testToken"; + File file = + KinesisLocalFileDataCreator.generateTempDataFile(1, + "kinesis-0-", + numRecs, + startSeqNum, + "unitTestSCT001"); + + IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath()); + + final int maxRecords = 2; + final int idleTimeMS = 0; // keep unit tests fast + ICheckpoint checkpoint = new InMemoryCheckpointImpl(startSeqNum.toString()); + checkpoint.setCheckpoint(streamShardId, ExtendedSequenceNumber.TRIM_HORIZON, testConcurrencyToken); + when(leaseManager.getLease(anyString())).thenReturn(null); + + TestStreamlet processor = new TestStreamlet(); + + StreamConfig streamConfig = + new StreamConfig(fileBasedProxy, + maxRecords, + idleTimeMS, + callProcessRecordsForEmptyRecordList, + skipCheckpointValidationValue, 
INITIAL_POSITION_LATEST); + + ShardInfo shardInfo = new ShardInfo(streamShardId, testConcurrencyToken, null, null); + ShardConsumer consumer = + new ShardConsumer(shardInfo, + streamConfig, + checkpoint, + processor, + leaseManager, + parentShardPollIntervalMillis, + cleanupLeasesOfCompletedShards, + executorService, + metricsFactory, + taskBackoffTimeMillis, + KinesisClientLibConfiguration.DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST); + + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.WAITING_ON_PARENT_SHARDS))); + consumer.consumeShard(); // check on parent shards + Thread.sleep(50L); + consumer.consumeShard(); // start initialization + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.INITIALIZING))); + consumer.consumeShard(); // initialize + processor.getInitializeLatch().await(5, TimeUnit.SECONDS); + + // We expect to process all records in numRecs calls + for (int i = 0; i < numRecs;) { + boolean newTaskSubmitted = consumer.consumeShard(); + if (newTaskSubmitted) { + LOG.debug("New processing task was submitted, call # " + i); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.PROCESSING))); + // CHECKSTYLE:IGNORE ModifiedControlVariable FOR NEXT 1 LINES + i += maxRecords; + } + Thread.sleep(50L); + } + + assertThat(processor.getShutdownReason(), nullValue()); + consumer.notifyShutdownRequested(shutdownNotification); + consumer.consumeShard(); + assertThat(processor.getNotifyShutdownLatch().await(1, TimeUnit.SECONDS), is(true)); + Thread.sleep(50); + assertThat(consumer.getShutdownReason(), equalTo(ShutdownReason.REQUESTED)); + assertThat(consumer.getCurrentState(), equalTo(ConsumerStates.ShardConsumerState.SHUTDOWN_REQUESTED)); + verify(shutdownNotification).shutdownNotificationComplete(); + assertThat(processor.isShutdownNotificationCalled(), equalTo(true)); + consumer.consumeShard(); + Thread.sleep(50); + assertThat(consumer.getCurrentState(), 
equalTo(ConsumerStates.ShardConsumerState.SHUTDOWN_REQUESTED)); + + consumer.beginShutdown(); + Thread.sleep(50L); + assertThat(consumer.getShutdownReason(), equalTo(ShutdownReason.ZOMBIE)); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.SHUTTING_DOWN))); + consumer.beginShutdown(); + consumer.consumeShard(); + verify(shutdownNotification, atLeastOnce()).shutdownComplete(); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.SHUTDOWN_COMPLETE))); + assertThat(processor.getShutdownReason(), is(equalTo(ShutdownReason.ZOMBIE))); + + executorService.shutdown(); + executorService.awaitTermination(60, TimeUnit.SECONDS); + + String iterator = fileBasedProxy.getIterator(streamShardId, ShardIteratorType.TRIM_HORIZON.toString()); + List expectedRecords = toUserRecords(fileBasedProxy.get(iterator, numRecs).getRecords()); + verifyConsumedRecords(expectedRecords, processor.getProcessedRecords()); + file.delete(); + } + + /** + * Test method for {@link ShardConsumer#consumeShard()} that starts from initial position of type AT_TIMESTAMP. 
+ */ + @Test + public final void testConsumeShardWithInitialPositionAtTimestamp() throws Exception { + int numRecs = 7; + BigInteger startSeqNum = BigInteger.ONE; + Date timestamp = new Date(KinesisLocalFileDataCreator.STARTING_TIMESTAMP + 3); + InitialPositionInStreamExtended atTimestamp = + InitialPositionInStreamExtended.newInitialPositionAtTimestamp(timestamp); + String streamShardId = "kinesis-0-0"; + String testConcurrencyToken = "testToken"; + File file = + KinesisLocalFileDataCreator.generateTempDataFile(1, + "kinesis-0-", + numRecs, + startSeqNum, + "unitTestSCT002"); + + IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath()); + + final int maxRecords = 2; + final int idleTimeMS = 0; // keep unit tests fast + ICheckpoint checkpoint = new InMemoryCheckpointImpl(startSeqNum.toString()); + checkpoint.setCheckpoint(streamShardId, ExtendedSequenceNumber.AT_TIMESTAMP, testConcurrencyToken); + when(leaseManager.getLease(anyString())).thenReturn(null); + + TestStreamlet processor = new TestStreamlet(); + + StreamConfig streamConfig = + new StreamConfig(fileBasedProxy, + maxRecords, + idleTimeMS, + callProcessRecordsForEmptyRecordList, + skipCheckpointValidationValue, + atTimestamp); + + ShardInfo shardInfo = new ShardInfo(streamShardId, testConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); + ShardConsumer consumer = + new ShardConsumer(shardInfo, + streamConfig, + checkpoint, + processor, + leaseManager, + parentShardPollIntervalMillis, + cleanupLeasesOfCompletedShards, + executorService, + metricsFactory, + taskBackoffTimeMillis, + KinesisClientLibConfiguration.DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST); + + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.WAITING_ON_PARENT_SHARDS))); + consumer.consumeShard(); // check on parent shards + Thread.sleep(50L); + consumer.consumeShard(); // start initialization + assertThat(consumer.getCurrentState(), 
is(equalTo(ConsumerStates.ShardConsumerState.INITIALIZING))); + consumer.consumeShard(); // initialize + Thread.sleep(50L); + + // We expect to process all records in numRecs calls + for (int i = 0; i < numRecs;) { + boolean newTaskSubmitted = consumer.consumeShard(); + if (newTaskSubmitted) { + LOG.debug("New processing task was submitted, call # " + i); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.PROCESSING))); + // CHECKSTYLE:IGNORE ModifiedControlVariable FOR NEXT 1 LINES + i += maxRecords; + } + Thread.sleep(50L); + } + + assertThat(processor.getShutdownReason(), nullValue()); + consumer.beginShutdown(); + Thread.sleep(50L); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.SHUTTING_DOWN))); + consumer.beginShutdown(); + assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.SHUTDOWN_COMPLETE))); + assertThat(processor.getShutdownReason(), is(equalTo(ShutdownReason.ZOMBIE))); + + executorService.shutdown(); + executorService.awaitTermination(60, TimeUnit.SECONDS); + + String iterator = fileBasedProxy.getIterator(streamShardId, timestamp); + List expectedRecords = toUserRecords(fileBasedProxy.get(iterator, numRecs).getRecords()); + verifyConsumedRecords(expectedRecords, processor.getProcessedRecords()); + assertEquals(4, processor.getProcessedRecords().size()); + file.delete(); + } + + //@formatter:off (gets the formatting wrong) + private void verifyConsumedRecords(List expectedRecords, + List actualRecords) { + //@formatter:on + assertThat(actualRecords.size(), is(equalTo(expectedRecords.size()))); + ListIterator expectedIter = expectedRecords.listIterator(); + ListIterator actualIter = actualRecords.listIterator(); + for (int i = 0; i < expectedRecords.size(); ++i) { + assertThat(actualIter.next(), is(equalTo(expectedIter.next()))); + } + } + + private List toUserRecords(List records) { + if (records == null || records.isEmpty()) { + return records; + } 
+ List userRecords = new ArrayList(); + for (Record record : records) { + userRecords.add(new UserRecord(record)); + } + return userRecords; + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardInfoTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardInfoTest.java similarity index 64% rename from amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardInfoTest.java rename to src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardInfoTest.java index 5e46efa5..511b5a1b 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardInfoTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardInfoTest.java @@ -1,24 +1,22 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.leases; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; import java.util.ArrayList; import java.util.HashSet; @@ -30,8 +28,7 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; public class ShardInfoTest { private static final String CONCURRENCY_TOKEN = UUID.randomUUID().toString(); @@ -51,21 +48,29 @@ public class ShardInfoTest { @Test public void testPacboyShardInfoEqualsWithSameArgs() { ShardInfo equalShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); - assertTrue("Equal should return true for arguments all the same", testShardInfo.equals(equalShardInfo)); + Assert.assertTrue("Equal should return true for arguments all the same", testShardInfo.equals(equalShardInfo)); } @Test public void testPacboyShardInfoEqualsWithNull() { - assertFalse("Equal should return false when object is null", testShardInfo.equals(null)); + Assert.assertFalse("Equal should return false when object is null", testShardInfo.equals(null)); + } + + @Test + public void testPacboyShardInfoEqualsForShardId() { + ShardInfo diffShardInfo = new ShardInfo("shardId-diff", CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); + Assert.assertFalse("Equal should return false with different shard id", diffShardInfo.equals(testShardInfo)); + diffShardInfo = new ShardInfo(null, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); + Assert.assertFalse("Equal should return false with null shard id", 
diffShardInfo.equals(testShardInfo)); } @Test public void testPacboyShardInfoEqualsForfToken() { ShardInfo diffShardInfo = new ShardInfo(SHARD_ID, UUID.randomUUID().toString(), parentShardIds, ExtendedSequenceNumber.LATEST); - assertFalse("Equal should return false with different concurrency token", + Assert.assertFalse("Equal should return false with different concurrency token", diffShardInfo.equals(testShardInfo)); diffShardInfo = new ShardInfo(SHARD_ID, null, parentShardIds, ExtendedSequenceNumber.LATEST); - assertFalse("Equal should return false for null concurrency token", diffShardInfo.equals(testShardInfo)); + Assert.assertFalse("Equal should return false for null concurrency token", diffShardInfo.equals(testShardInfo)); } @Test @@ -75,7 +80,7 @@ public class ShardInfoTest { differentlyOrderedParentShardIds.add("shard-1"); ShardInfo shardInfoWithDifferentlyOrderedParentShardIds = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, differentlyOrderedParentShardIds, ExtendedSequenceNumber.LATEST); - assertTrue("Equal should return true even with parent shard Ids reordered", + Assert.assertTrue("Equal should return true even with parent shard Ids reordered", shardInfoWithDifferentlyOrderedParentShardIds.equals(testShardInfo)); } @@ -85,10 +90,10 @@ public class ShardInfoTest { diffParentIds.add("shard-3"); diffParentIds.add("shard-4"); ShardInfo diffShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, diffParentIds, ExtendedSequenceNumber.LATEST); - assertFalse("Equal should return false with different parent shard Ids", + Assert.assertFalse("Equal should return false with different parent shard Ids", diffShardInfo.equals(testShardInfo)); diffShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, null, ExtendedSequenceNumber.LATEST); - assertFalse("Equal should return false with null parent shard Ids", diffShardInfo.equals(testShardInfo)); + Assert.assertFalse("Equal should return false with null parent shard Ids", diffShardInfo.equals(testShardInfo)); } @Test @@ 
-111,7 +116,7 @@ public class ShardInfoTest { @Test public void testPacboyShardInfoSameHashCode() { ShardInfo equalShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); - assertTrue("Shard info objects should have same hashCode for the same arguments", + Assert.assertTrue("Shard info objects should have same hashCode for the same arguments", equalShardInfo.hashCode() == testShardInfo.hashCode()); } } diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardObjectHelper.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardObjectHelper.java new file mode 100644 index 00000000..f154119a --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardObjectHelper.java @@ -0,0 +1,132 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.List; + +import com.amazonaws.services.kinesis.model.HashKeyRange; +import com.amazonaws.services.kinesis.model.SequenceNumberRange; +import com.amazonaws.services.kinesis.model.Shard; + +/** + * Helper class to create Shard, SequenceRange and related objects. + */ +class ShardObjectHelper { + + private static final int EXPONENT = 128; + + /** + * Max value of a sequence number (2^128 -1). 
Useful for defining sequence number range for a shard. + */ + static final String MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); + + /** + * Min value of a sequence number (0). Useful for defining sequence number range for a shard. + */ + static final String MIN_SEQUENCE_NUMBER = BigInteger.ZERO.toString(); + + /** + * Max value of a hash key (2^128 -1). Useful for defining hash key range for a shard. + */ + static final String MAX_HASH_KEY = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); + + /** + * Min value of a hash key (0). Useful for defining sequence number range for a shard. + */ + public static final String MIN_HASH_KEY = BigInteger.ZERO.toString(); + + /** + * + */ + private ShardObjectHelper() { + } + + + /** Helper method to create a new shard object. + * @param shardId + * @param parentShardId + * @param adjacentParentShardId + * @param sequenceNumberRange + * @return + */ + static Shard newShard(String shardId, + String parentShardId, + String adjacentParentShardId, + SequenceNumberRange sequenceNumberRange) { + return newShard(shardId, parentShardId, adjacentParentShardId, sequenceNumberRange, null); + } + + /** Helper method to create a new shard object. + * @param shardId + * @param parentShardId + * @param adjacentParentShardId + * @param sequenceNumberRange + * @param hashKeyRange + * @return + */ + static Shard newShard(String shardId, + String parentShardId, + String adjacentParentShardId, + SequenceNumberRange sequenceNumberRange, + HashKeyRange hashKeyRange) { + Shard shard = new Shard(); + shard.setShardId(shardId); + shard.setParentShardId(parentShardId); + shard.setAdjacentParentShardId(adjacentParentShardId); + shard.setSequenceNumberRange(sequenceNumberRange); + shard.setHashKeyRange(hashKeyRange); + + return shard; + } + + /** Helper method. 
+ * @param startingSequenceNumber + * @param endingSequenceNumber + * @return + */ + static SequenceNumberRange newSequenceNumberRange(String startingSequenceNumber, String endingSequenceNumber) { + SequenceNumberRange range = new SequenceNumberRange(); + range.setStartingSequenceNumber(startingSequenceNumber); + range.setEndingSequenceNumber(endingSequenceNumber); + return range; + } + + /** Helper method. + * @param startingHashKey + * @param endingHashKey + * @return + */ + static HashKeyRange newHashKeyRange(String startingHashKey, String endingHashKey) { + HashKeyRange range = new HashKeyRange(); + range.setStartingHashKey(startingHashKey); + range.setEndingHashKey(endingHashKey); + return range; + } + + static List getParentShardIds(Shard shard) { + List parentShardIds = new ArrayList<>(2); + if (shard.getAdjacentParentShardId() != null) { + parentShardIds.add(shard.getAdjacentParentShardId()); + } + if (shard.getParentShardId() != null) { + parentShardIds.add(shard.getParentShardId()); + } + return parentShardIds; + } + + +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSequenceVerifier.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSequenceVerifier.java similarity index 57% rename from amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSequenceVerifier.java rename to src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSequenceVerifier.java index 4cd19a81..314974b0 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSequenceVerifier.java +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSequenceVerifier.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.leases; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; import java.util.ArrayList; import java.util.Collections; @@ -21,18 +21,20 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentSkipListSet; - import junit.framework.Assert; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.kinesis.model.Shard; -import software.amazon.kinesis.lifecycle.ShutdownReason; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.model.Shard; /** * Helper class to verify shard lineage in unit tests that use TestStreamlet. * Verifies that parent shard processors were shutdown before child shard processor was initialized. 
*/ -@Slf4j -public class ShardSequenceVerifier { +class ShardSequenceVerifier { + + private static final Log LOG = LogFactory.getLog(ShardSequenceVerifier.class); private Map shardIdToShards = new HashMap(); private ConcurrentSkipListSet initializedShards = new ConcurrentSkipListSet<>(); private ConcurrentSkipListSet shutdownShards = new ConcurrentSkipListSet<>(); @@ -41,20 +43,20 @@ public class ShardSequenceVerifier { /** * Constructor with the shard list for the stream. */ - public ShardSequenceVerifier(List shardList) { + ShardSequenceVerifier(List shardList) { for (Shard shard : shardList) { - shardIdToShards.put(shard.shardId(), shard); + shardIdToShards.put(shard.getShardId(), shard); } } - public void registerInitialization(String shardId) { + void registerInitialization(String shardId) { List parentShardIds = ShardObjectHelper.getParentShardIds(shardIdToShards.get(shardId)); for (String parentShardId : parentShardIds) { if (initializedShards.contains(parentShardId)) { if (!shutdownShards.contains(parentShardId)) { String message = "Parent shard " + parentShardId + " was not shutdown before shard " + shardId + " was initialized."; - log.error(message); + LOG.error(message); validationFailures.add(message); } } @@ -62,15 +64,15 @@ public class ShardSequenceVerifier { initializedShards.add(shardId); } - public void registerShutdown(String shardId, ShutdownReason reason) { - if (reason.equals(ShutdownReason.SHARD_END)) { + void registerShutdown(String shardId, ShutdownReason reason) { + if (reason.equals(ShutdownReason.TERMINATE)) { shutdownShards.add(shardId); } } - public void verify() { + void verify() { for (String message : validationFailures) { - log.error(message); + LOG.error(message); } Assert.assertTrue(validationFailures.isEmpty()); } diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTaskIntegrationTest.java 
b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTaskIntegrationTest.java new file mode 100644 index 00000000..307596e3 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncTaskIntegrationTest.java @@ -0,0 +1,141 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient; +import com.amazonaws.services.kinesis.AmazonKinesis; +import com.amazonaws.services.kinesis.AmazonKinesisClient; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; 
+import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager; +import com.amazonaws.services.kinesis.leases.interfaces.IKinesisClientLeaseManager; +import com.amazonaws.services.kinesis.model.StreamStatus; + +/** + * WARN: to run this integration test you'll have to provide a AwsCredentials.properties file on the classpath. + */ +public class ShardSyncTaskIntegrationTest { + + private static final String STREAM_NAME = "IntegrationTestStream02"; + private static final String KINESIS_ENDPOINT = "https://kinesis.us-east-1.amazonaws.com"; + + private static AWSCredentialsProvider credentialsProvider; + private IKinesisClientLeaseManager leaseManager; + private IKinesisProxy kinesisProxy; + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + credentialsProvider = new DefaultAWSCredentialsProviderChain(); + AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider); + + try { + kinesis.createStream(STREAM_NAME, 1); + } catch (AmazonServiceException ase) { + + } + StreamStatus status; + do { + status = StreamStatus.fromValue(kinesis.describeStream(STREAM_NAME).getStreamDescription().getStreamStatus()); + } while (status != StreamStatus.ACTIVE); + + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + boolean useConsistentReads = true; + leaseManager = + new KinesisClientLeaseManager("ShardSyncTaskIntegrationTest", + new AmazonDynamoDBClient(credentialsProvider), + useConsistentReads); + kinesisProxy = + new KinesisProxy(STREAM_NAME, + new DefaultAWSCredentialsProviderChain(), + KINESIS_ENDPOINT); + } + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + } + + /** + * Test method for call(). 
+ * + * @throws CapacityExceededException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + */ + @Test + public final void testCall() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + if (!leaseManager.leaseTableExists()) { + final Long readCapacity = 10L; + final Long writeCapacity = 10L; + leaseManager.createLeaseTableIfNotExists(readCapacity, writeCapacity); + } + leaseManager.deleteAll(); + Set shardIds = kinesisProxy.getAllShardIds(); + ShardSyncTask syncTask = new ShardSyncTask(kinesisProxy, + leaseManager, + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST), + false, + 0L); + syncTask.call(); + List leases = leaseManager.listLeases(); + Set leaseKeys = new HashSet(); + for (KinesisClientLease lease : leases) { + leaseKeys.add(lease.getLeaseKey()); + } + + // Verify that all shardIds had leases for them + Assert.assertEquals(shardIds.size(), leases.size()); + shardIds.removeAll(leaseKeys); + Assert.assertTrue(shardIds.isEmpty()); + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncerTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncerTest.java new file mode 100644 index 00000000..b8f6ae56 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShardSyncerTest.java @@ -0,0 +1,1638 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.io.File; +import java.io.IOException; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.amazonaws.services.dynamodbv2.local.embedded.DynamoDBEmbedded; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.KinesisClientLibIOException; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ExceptionThrowingLeaseManager.ExceptionThrowingLeaseManagerMethods; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisLocalFileProxy; +import com.amazonaws.services.kinesis.clientlibrary.proxies.util.KinesisLocalFileDataCreator; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.LeasingException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager; +import com.amazonaws.services.kinesis.leases.impl.LeaseManager; +import com.amazonaws.services.kinesis.model.HashKeyRange; +import 
com.amazonaws.services.kinesis.model.SequenceNumberRange; +import com.amazonaws.services.kinesis.model.Shard; + +import junit.framework.Assert; + +/** + * + */ +// CHECKSTYLE:IGNORE JavaNCSS FOR NEXT 800 LINES +public class ShardSyncerTest { + private static final Log LOG = LogFactory.getLog(ShardSyncer.class); + private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); + private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP = + InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(1000L)); + private final boolean cleanupLeasesOfCompletedShards = true; + AmazonDynamoDB ddbClient = DynamoDBEmbedded.create().amazonDynamoDB(); + LeaseManager leaseManager = new KinesisClientLeaseManager("tempTestTable", ddbClient); + private static final int EXPONENT = 128; + /** + * Old/Obsolete max value of a sequence number (2^128 -1). 
+ */ + public static final BigInteger MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE); + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + boolean created = leaseManager.createLeaseTableIfNotExists(1L, 1L); + if (created) { + LOG.info("New table created."); + } + leaseManager.deleteAll(); + } + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + leaseManager.deleteAll(); + } + + /** + * Test determineNewLeasesToCreate() where there are no shards + */ + @Test + public final void testDetermineNewLeasesToCreateNoShards() { + List shards = new ArrayList(); + List leases = new ArrayList(); + + Assert.assertTrue(ShardSyncer.determineNewLeasesToCreate(shards, leases, INITIAL_POSITION_LATEST).isEmpty()); + } + + /** + * Test determineNewLeasesToCreate() where there are no leases and no resharding operations have been performed + */ + @Test + public final void testDetermineNewLeasesToCreate0Leases0Reshards() { + List shards = new ArrayList(); + List currentLeases = new ArrayList(); + SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null); + + String shardId0 = "shardId-0"; + shards.add(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange)); + + String shardId1 = "shardId-1"; + shards.add(ShardObjectHelper.newShard(shardId1, null, null, sequenceRange)); + + List newLeases = + ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, INITIAL_POSITION_LATEST); + Assert.assertEquals(2, newLeases.size()); + Set expectedLeaseShardIds = new HashSet(); + expectedLeaseShardIds.add(shardId0); + expectedLeaseShardIds.add(shardId1); + for (KinesisClientLease lease : 
newLeases) { + Assert.assertTrue(expectedLeaseShardIds.contains(lease.getLeaseKey())); + } + } + + /** + * Test bootstrapShardLeases() starting at TRIM_HORIZON ("beginning" of stream) + * + * @throws ProvisionedThroughputException + * @throws InvalidStateException + * @throws DependencyException + * @throws IOException + * @throws KinesisClientLibIOException + */ + @Test + public final void testBootstrapShardLeasesAtTrimHorizon() + throws DependencyException, InvalidStateException, ProvisionedThroughputException, IOException, + KinesisClientLibIOException { + testBootstrapShardLeasesAtStartingPosition(INITIAL_POSITION_TRIM_HORIZON); + } + + /** + * Test bootstrapShardLeases() starting at LATEST (tip of stream) + * + * @throws ProvisionedThroughputException + * @throws InvalidStateException + * @throws DependencyException + * @throws IOException + * @throws KinesisClientLibIOException + */ + @Test + public final void testBootstrapShardLeasesAtLatest() + throws DependencyException, InvalidStateException, ProvisionedThroughputException, IOException, + KinesisClientLibIOException { + testBootstrapShardLeasesAtStartingPosition(INITIAL_POSITION_LATEST); + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException + */ + @Test + public final void testCheckAndCreateLeasesForNewShardsAtLatest() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException, + IOException { + List shards = constructShardListForGraphA(); + File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 2, "testBootstrap1"); + dataFile.deleteOnExit(); + IKinesisProxy kinesisProxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath()); + + ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy, leaseManager, INITIAL_POSITION_LATEST, + cleanupLeasesOfCompletedShards); + List newLeases = 
leaseManager.listLeases(); + Set expectedLeaseShardIds = new HashSet(); + expectedLeaseShardIds.add("shardId-4"); + expectedLeaseShardIds.add("shardId-8"); + expectedLeaseShardIds.add("shardId-9"); + expectedLeaseShardIds.add("shardId-10"); + Assert.assertEquals(expectedLeaseShardIds.size(), newLeases.size()); + for (KinesisClientLease lease1 : newLeases) { + Assert.assertTrue(expectedLeaseShardIds.contains(lease1.getLeaseKey())); + Assert.assertEquals(ExtendedSequenceNumber.LATEST, lease1.getCheckpoint()); + } + dataFile.delete(); + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException + */ + @Test + public final void testCheckAndCreateLeasesForNewShardsAtTrimHorizon() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException, + IOException { + List shards = constructShardListForGraphA(); + File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 2, "testBootstrap1"); + dataFile.deleteOnExit(); + IKinesisProxy kinesisProxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath()); + + ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy, leaseManager, INITIAL_POSITION_TRIM_HORIZON, + cleanupLeasesOfCompletedShards); + List newLeases = leaseManager.listLeases(); + Set expectedLeaseShardIds = new HashSet(); + for (int i = 0; i < 11; i++) { + expectedLeaseShardIds.add("shardId-" + i); + } + Assert.assertEquals(expectedLeaseShardIds.size(), newLeases.size()); + for (KinesisClientLease lease1 : newLeases) { + Assert.assertTrue(expectedLeaseShardIds.contains(lease1.getLeaseKey())); + Assert.assertEquals(ExtendedSequenceNumber.TRIM_HORIZON, lease1.getCheckpoint()); + } + dataFile.delete(); + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException 
+ */ + @Test + public final void testCheckAndCreateLeasesForNewShardsAtTimestamp() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, + ProvisionedThroughputException, IOException { + List shards = constructShardListForGraphA(); + File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 1, "testBootstrap1"); + dataFile.deleteOnExit(); + IKinesisProxy kinesisProxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath()); + + ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy, leaseManager, INITIAL_POSITION_AT_TIMESTAMP, + cleanupLeasesOfCompletedShards); + List newLeases = leaseManager.listLeases(); + Set expectedLeaseShardIds = new HashSet(); + for (int i = 0; i < 11; i++) { + expectedLeaseShardIds.add("shardId-" + i); + } + Assert.assertEquals(expectedLeaseShardIds.size(), newLeases.size()); + for (KinesisClientLease lease1 : newLeases) { + Assert.assertTrue(expectedLeaseShardIds.contains(lease1.getLeaseKey())); + Assert.assertEquals(ExtendedSequenceNumber.AT_TIMESTAMP, lease1.getCheckpoint()); + } + dataFile.delete(); + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException + */ + @Test(expected = KinesisClientLibIOException.class) + public final void testCheckAndCreateLeasesForNewShardsWhenParentIsOpen() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException, + IOException { + List shards = constructShardListForGraphA(); + SequenceNumberRange range = shards.get(0).getSequenceNumberRange(); + range.setEndingSequenceNumber(null); + shards.get(3).setSequenceNumberRange(range); + File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 2, "testBootstrap1"); + dataFile.deleteOnExit(); + IKinesisProxy kinesisProxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath()); + + 
ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy, leaseManager, INITIAL_POSITION_TRIM_HORIZON, + cleanupLeasesOfCompletedShards); + dataFile.delete(); + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException + */ + @Test + public final void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShard() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, + ProvisionedThroughputException, IOException { + testCheckAndCreateLeasesForNewShardsAtSpecifiedPositionAndClosedShardImpl(null, + Integer.MAX_VALUE, INITIAL_POSITION_TRIM_HORIZON); + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException + */ + @Test + public final void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithDeleteLeaseExceptions() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException, + IOException { + // Define the max calling count for lease manager methods. 
+ // From the Shard Graph, the max count of calling could be 10 + int maxCallingCount = 10; + for (int c = 1; c <= maxCallingCount; c = c + 2) { + testCheckAndCreateLeasesForNewShardsAtSpecifiedPositionAndClosedShardImpl( + ExceptionThrowingLeaseManagerMethods.DELETELEASE, c, INITIAL_POSITION_TRIM_HORIZON); + // Need to clean up lease manager every time after calling ShardSyncer + leaseManager.deleteAll(); + } + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException + */ + @Test + public final void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithListLeasesExceptions() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException, + IOException { + // Define the max calling count for lease manager methods. + // From the Shard Graph, the max count of calling could be 10 + int maxCallingCount = 10; + for (int c = 1; c <= maxCallingCount; c = c + 2) { + testCheckAndCreateLeasesForNewShardsAtSpecifiedPositionAndClosedShardImpl( + ExceptionThrowingLeaseManagerMethods.LISTLEASES, c, INITIAL_POSITION_TRIM_HORIZON); + // Need to clean up lease manager every time after calling ShardSyncer + leaseManager.deleteAll(); + } + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException + */ + @Test + public final void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithCreateLeaseExceptions() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException, + IOException { + // Define the max calling count for lease manager methods. 
+ // From the Shard Graph, the max count of calling could be 10 + int maxCallingCount = 5; + for (int c = 1; c <= maxCallingCount; c = c + 2) { + testCheckAndCreateLeasesForNewShardsAtSpecifiedPositionAndClosedShardImpl( + ExceptionThrowingLeaseManagerMethods.CREATELEASEIFNOTEXISTS, c,INITIAL_POSITION_TRIM_HORIZON); + // Need to clean up lease manager every time after calling ShardSyncer + leaseManager.deleteAll(); + } + } + + // Try catch leaseException for different lease manager methods and eventually let it succeed. + // This would not throw any exceptions if: + // 1). exceptionMethod equals to null or NONE. + // 2). exceptionTime is a very big or negative value. + private void retryCheckAndCreateLeaseForNewShards(IKinesisProxy kinesisProxy, + ExceptionThrowingLeaseManagerMethods exceptionMethod, + int exceptionTime, InitialPositionInStreamExtended position) + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException { + if (exceptionMethod != null) { + ExceptionThrowingLeaseManager exceptionThrowingLeaseManager = + new ExceptionThrowingLeaseManager(leaseManager); + // Set exception and throwing time for exceptionThrowingManager. + exceptionThrowingLeaseManager.setLeaseLeaseManagerThrowingExceptionScenario(exceptionMethod, exceptionTime); + // Only need to try two times. 
+ for (int i = 1; i <= 2; i++) { + try { + ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy, + exceptionThrowingLeaseManager, + position, + cleanupLeasesOfCompletedShards); + return; + } catch (LeasingException e) { + LOG.debug("Catch leasing exception", e); + } + // Clear throwing exception scenario every time after calling ShardSyncer + exceptionThrowingLeaseManager.clearLeaseManagerThrowingExceptionScenario(); + } + } else { + ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy, + leaseManager, + position, + cleanupLeasesOfCompletedShards); + } + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException + */ + @Test + public final void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShard() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, + ProvisionedThroughputException, IOException { + testCheckAndCreateLeasesForNewShardsAtSpecifiedPositionAndClosedShardImpl(null, + Integer.MAX_VALUE, INITIAL_POSITION_AT_TIMESTAMP); + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException + */ + @Test + public final void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShardWithDeleteLeaseExceptions() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException, + IOException { + // Define the max calling count for lease manager methods. 
+ // From the Shard Graph, the max count of calling could be 10 + int maxCallingCount = 10; + for (int c = 1; c <= maxCallingCount; c = c + 2) { + testCheckAndCreateLeasesForNewShardsAtSpecifiedPositionAndClosedShardImpl( + ExceptionThrowingLeaseManagerMethods.DELETELEASE, + c, INITIAL_POSITION_AT_TIMESTAMP); + // Need to clean up lease manager every time after calling ShardSyncer + leaseManager.deleteAll(); + } + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException + */ + @Test + public final void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShardWithListLeasesExceptions() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException, + IOException { + // Define the max calling count for lease manager methods. + // From the Shard Graph, the max count of calling could be 10 + int maxCallingCount = 10; + for (int c = 1; c <= maxCallingCount; c = c + 2) { + testCheckAndCreateLeasesForNewShardsAtSpecifiedPositionAndClosedShardImpl( + ExceptionThrowingLeaseManagerMethods.LISTLEASES, + c, INITIAL_POSITION_AT_TIMESTAMP); + // Need to clean up lease manager every time after calling ShardSyncer + leaseManager.deleteAll(); + } + } + + /** + * @throws KinesisClientLibIOException + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + * @throws IOException + */ + @Test + public final void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShardWithCreateLeaseExceptions() + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException, + IOException { + // Define the max calling count for lease manager methods. 
+ // From the Shard Graph, the max count of calling could be 10 + int maxCallingCount = 5; + for (int c = 1; c <= maxCallingCount; c = c + 2) { + testCheckAndCreateLeasesForNewShardsAtSpecifiedPositionAndClosedShardImpl( + ExceptionThrowingLeaseManagerMethods.CREATELEASEIFNOTEXISTS, + c, INITIAL_POSITION_AT_TIMESTAMP); + // Need to clean up lease manager every time after calling ShardSyncer + leaseManager.deleteAll(); + } + } + + // Real implementation of testing CheckAndCreateLeasesForNewShards with different leaseManager types. + private void testCheckAndCreateLeasesForNewShardsAtSpecifiedPositionAndClosedShardImpl( + ExceptionThrowingLeaseManagerMethods exceptionMethod, + int exceptionTime, + InitialPositionInStreamExtended position) + throws KinesisClientLibIOException, DependencyException, InvalidStateException, ProvisionedThroughputException, + IOException { + ExtendedSequenceNumber extendedSequenceNumber = + new ExtendedSequenceNumber(position.getInitialPositionInStream().toString()); + List shards = constructShardListForGraphA(); + File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 2, "testBootstrap1"); + dataFile.deleteOnExit(); + IKinesisProxy kinesisProxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath()); + + retryCheckAndCreateLeaseForNewShards(kinesisProxy, exceptionMethod, exceptionTime, position); + + List newLeases = leaseManager.listLeases(); + Map expectedShardIdToCheckpointMap = + new HashMap(); + for (int i = 0; i < 11; i++) { + expectedShardIdToCheckpointMap.put("shardId-" + i, extendedSequenceNumber); + } + Assert.assertEquals(expectedShardIdToCheckpointMap.size(), newLeases.size()); + for (KinesisClientLease lease1 : newLeases) { + ExtendedSequenceNumber expectedCheckpoint = expectedShardIdToCheckpointMap.get(lease1.getLeaseKey()); + Assert.assertNotNull(expectedCheckpoint); + Assert.assertEquals(expectedCheckpoint, lease1.getCheckpoint()); + } + + KinesisClientLease closedShardLease = 
leaseManager.getLease("shardId-0"); + closedShardLease.setCheckpoint(ExtendedSequenceNumber.SHARD_END); + leaseManager.updateLease(closedShardLease); + expectedShardIdToCheckpointMap.remove(closedShardLease.getLeaseKey()); + KinesisClientLease childShardLease = leaseManager.getLease("shardId-6"); + childShardLease.setCheckpoint(new ExtendedSequenceNumber("34290")); + leaseManager.updateLease(childShardLease); + expectedShardIdToCheckpointMap.put(childShardLease.getLeaseKey(), new ExtendedSequenceNumber("34290")); + + retryCheckAndCreateLeaseForNewShards(kinesisProxy, exceptionMethod, exceptionTime, position); + + newLeases = leaseManager.listLeases(); + Assert.assertEquals(expectedShardIdToCheckpointMap.size(), newLeases.size()); + for (KinesisClientLease lease1 : newLeases) { + ExtendedSequenceNumber expectedCheckpoint = expectedShardIdToCheckpointMap.get(lease1.getLeaseKey()); + Assert.assertNotNull(expectedCheckpoint); + Assert.assertEquals(expectedCheckpoint, lease1.getCheckpoint()); + } + + dataFile.delete(); + } + + /** + * Test bootstrapShardLeases() - cleanup garbage leases. 
+ * + * @throws ProvisionedThroughputException + * @throws InvalidStateException + * @throws DependencyException + * @throws IOException + * @throws KinesisClientLibIOException + */ + @Test + public final void testBootstrapShardLeasesCleanupGarbage() + throws DependencyException, InvalidStateException, ProvisionedThroughputException, IOException, + KinesisClientLibIOException { + String garbageShardId = "shardId-garbage-001"; + KinesisClientLease garbageLease = ShardSyncer.newKCLLease(ShardObjectHelper.newShard(garbageShardId, + null, + null, + ShardObjectHelper.newSequenceNumberRange("101", null))); + garbageLease.setCheckpoint(new ExtendedSequenceNumber("999")); + leaseManager.createLeaseIfNotExists(garbageLease); + Assert.assertEquals(garbageShardId, leaseManager.getLease(garbageShardId).getLeaseKey()); + testBootstrapShardLeasesAtStartingPosition(INITIAL_POSITION_LATEST); + Assert.assertNull(leaseManager.getLease(garbageShardId)); + } + + private void testBootstrapShardLeasesAtStartingPosition(InitialPositionInStreamExtended initialPosition) + throws DependencyException, InvalidStateException, ProvisionedThroughputException, IOException, + KinesisClientLibIOException { + List shards = new ArrayList(); + SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null); + + String shardId0 = "shardId-0"; + shards.add(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange)); + String shardId1 = "shardId-1"; + shards.add(ShardObjectHelper.newShard(shardId1, null, null, sequenceRange)); + File dataFile = KinesisLocalFileDataCreator.generateTempDataFile(shards, 2, "testBootstrap1"); + dataFile.deleteOnExit(); + IKinesisProxy kinesisProxy = new KinesisLocalFileProxy(dataFile.getAbsolutePath()); + + ShardSyncer.bootstrapShardLeases(kinesisProxy, leaseManager, initialPosition, cleanupLeasesOfCompletedShards); + List newLeases = leaseManager.listLeases(); + Assert.assertEquals(2, newLeases.size()); + Set expectedLeaseShardIds = new 
HashSet(); + expectedLeaseShardIds.add(shardId0); + expectedLeaseShardIds.add(shardId1); + for (KinesisClientLease lease1 : newLeases) { + Assert.assertTrue(expectedLeaseShardIds.contains(lease1.getLeaseKey())); + Assert.assertEquals(new ExtendedSequenceNumber(initialPosition.getInitialPositionInStream().toString()), + lease1.getCheckpoint()); + } + dataFile.delete(); + } + + /** + * Test determineNewLeasesToCreate() starting at latest and at trim horizon ("beginning" of shard) + */ + @Test + public final void testDetermineNewLeasesToCreateStartingPosition() { + List shards = new ArrayList(); + List currentLeases = new ArrayList(); + SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null); + + String shardId0 = "shardId-0"; + shards.add(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange)); + + String shardId1 = "shardId-1"; + shards.add(ShardObjectHelper.newShard(shardId1, null, null, sequenceRange)); + + Set initialPositions = new HashSet(); + initialPositions.add(INITIAL_POSITION_LATEST); + initialPositions.add(INITIAL_POSITION_TRIM_HORIZON); + + for (InitialPositionInStreamExtended initialPosition : initialPositions) { + List newLeases = + ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, initialPosition); + Assert.assertEquals(2, newLeases.size()); + Set expectedLeaseShardIds = new HashSet(); + expectedLeaseShardIds.add(shardId0); + expectedLeaseShardIds.add(shardId1); + for (KinesisClientLease lease : newLeases) { + Assert.assertTrue(expectedLeaseShardIds.contains(lease.getLeaseKey())); + Assert.assertEquals(new ExtendedSequenceNumber(initialPosition.getInitialPositionInStream().toString()), + lease.getCheckpoint()); + } + } + } + + /** + * Test determineNewLeasesToCreate() - 1 closed and 1 open shard (ignore closed shard) + */ + @Test + public final void testDetermineNewLeasesToCreateIgnoreClosedShard() { + List shards = new ArrayList(); + List currentLeases = new ArrayList(); + + 
shards.add(ShardObjectHelper.newShard("shardId-0", + null, + null, + ShardObjectHelper.newSequenceNumberRange("303", "404"))); + String lastShardId = "shardId-1"; + shards.add(ShardObjectHelper.newShard(lastShardId, + null, + null, + ShardObjectHelper.newSequenceNumberRange("405", null))); + + List newLeases = + ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, INITIAL_POSITION_LATEST); + Assert.assertEquals(1, newLeases.size()); + Assert.assertEquals(lastShardId, newLeases.get(0).getLeaseKey()); + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position Latest) + * Shard structure (each level depicts a stream segment): + * 0 1 2 3 4 5- shards till epoch 102 + * \ / \ / | | + * 6 7 4 5- shards from epoch 103 - 205 + * \ / | /\ + * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) + * Current leases: (3, 4, 5) + */ + @Test + public final void testDetermineNewLeasesToCreateSplitMergeLatest1() { + List shards = constructShardListForGraphA(); + List currentLeases = new ArrayList(); + + currentLeases.add(newLease("shardId-3")); + currentLeases.add(newLease("shardId-4")); + currentLeases.add(newLease("shardId-5")); + + List newLeases = + ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, INITIAL_POSITION_LATEST); + Map expectedShardIdCheckpointMap = + new HashMap(); + expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST); + expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.LATEST); + expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.TRIM_HORIZON); + + Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size()); + for (KinesisClientLease lease : newLeases) { + 
Assert.assertTrue("Unexpected lease: " + lease, + expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey())); + Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint()); + } + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position Latest) + * Shard structure (each level depicts a stream segment): + * 0 1 2 3 4 5- shards till epoch 102 + * \ / \ / | | + * 6 7 4 5- shards from epoch 103 - 205 + * \ / | /\ + * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) + * Current leases: (4, 5, 7) + */ + @Test + public final void testDetermineNewLeasesToCreateSplitMergeLatest2() { + List shards = constructShardListForGraphA(); + List currentLeases = new ArrayList(); + + currentLeases.add(newLease("shardId-4")); + currentLeases.add(newLease("shardId-5")); + currentLeases.add(newLease("shardId-7")); + + List newLeases = + ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, INITIAL_POSITION_LATEST); + Map expectedShardIdCheckpointMap = + new HashMap(); + expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST); + + Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size()); + for (KinesisClientLease lease : newLeases) { + Assert.assertTrue("Unexpected lease: " + lease, + expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey())); + Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint()); + } + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position TrimHorizon) + * Shard structure (each level depicts a stream segment): + * 0 1 2 3 4 5- shards till epoch 102 + * \ / \ / | | + * 6 7 4 5- shards from epoch 103 - 205 + * \ / | 
/\ + * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) + * Current leases: (3, 4, 5) + */ + @Test + public final void testDetermineNewLeasesToCreateSplitMergeHorizon1() { + List shards = constructShardListForGraphA(); + List currentLeases = new ArrayList(); + + currentLeases.add(newLease("shardId-3")); + currentLeases.add(newLease("shardId-4")); + currentLeases.add(newLease("shardId-5")); + + List newLeases = + ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, INITIAL_POSITION_TRIM_HORIZON); + Map expectedShardIdCheckpointMap = + new HashMap(); + expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON); + + Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size()); + for (KinesisClientLease lease : newLeases) { + Assert.assertTrue("Unexpected lease: " + lease, + expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey())); + Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint()); + } + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position TrimHorizon) + * Shard structure (each level depicts a stream segment): + * 0 1 2 3 4 5- shards till epoch 102 + * \ / \ / | | + * 6 7 4 5- shards from epoch 103 - 205 + * \ / | /\ + * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) + * Current leases: (4, 5, 7) + */ + @Test + 
public final void testDetermineNewLeasesToCreateSplitMergeHorizon2() { + List shards = constructShardListForGraphA(); + List currentLeases = new ArrayList(); + + currentLeases.add(newLease("shardId-4")); + currentLeases.add(newLease("shardId-5")); + currentLeases.add(newLease("shardId-7")); + + List newLeases = + ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, INITIAL_POSITION_TRIM_HORIZON); + Map expectedShardIdCheckpointMap = + new HashMap(); + expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON); + expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON); + + Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size()); + for (KinesisClientLease lease : newLeases) { + Assert.assertTrue("Unexpected lease: " + lease, + expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey())); + Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint()); + } + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position TrimHorizon) + * For shard graph B (see the construct method doc for structure). 
+ * + * Current leases: empty set + */ + @Test + public final void testDetermineNewLeasesToCreateGraphBNoInitialLeasesTrim() { + List shards = constructShardListForGraphB(); + List currentLeases = new ArrayList(); + List newLeases = + ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, INITIAL_POSITION_TRIM_HORIZON); + Map expectedShardIdCheckpointMap = + new HashMap(); + for (int i = 0; i < 11; i++) { + String expectedShardId = "shardId-" + i; + expectedShardIdCheckpointMap.put(expectedShardId, ExtendedSequenceNumber.TRIM_HORIZON); + } + + Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size()); + for (KinesisClientLease lease : newLeases) { + Assert.assertTrue("Unexpected lease: " + lease, + expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey())); + Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint()); + } + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position AT_TIMESTAMP) + * Shard structure (each level depicts a stream segment): + * 0 1 2 3 4 5- shards till epoch 102 + * \ / \ / | | + * 6 7 4 5- shards from epoch 103 - 205 + * \ / | /\ + * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) + * Current leases: (3, 4, 5) + */ + @Test + public final void testDetermineNewLeasesToCreateSplitMergeAtTimestamp1() { + List shards = constructShardListForGraphA(); + List currentLeases = new ArrayList(); + + + currentLeases.add(newLease("shardId-3")); + currentLeases.add(newLease("shardId-4")); + currentLeases.add(newLease("shardId-5")); + + List newLeases = + ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, INITIAL_POSITION_AT_TIMESTAMP); + Map expectedShardIdCheckpointMap = new HashMap(); + expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-10", 
ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.AT_TIMESTAMP); + + Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size()); + for (KinesisClientLease lease : newLeases) { + Assert.assertTrue("Unexpected lease: " + lease, + expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey())); + Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint()); + } + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position AT_TIMESTAMP) + * Shard structure (each level depicts a stream segment): + * 0 1 2 3 4 5- shards till epoch 102 + * \ / \ / | | + * 6 7 4 5- shards from epoch 103 - 205 + * \ / | /\ + * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) + * Current leases: (4, 5, 7) + */ + @Test + public final void testDetermineNewLeasesToCreateSplitMergeAtTimestamp2() { + List shards = constructShardListForGraphA(); + List currentLeases = new ArrayList(); + + currentLeases.add(newLease("shardId-4")); + currentLeases.add(newLease("shardId-5")); + currentLeases.add(newLease("shardId-7")); + + List newLeases = + ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, INITIAL_POSITION_AT_TIMESTAMP); + Map expectedShardIdCheckpointMap = new HashMap(); + expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-6", 
ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.AT_TIMESTAMP); + expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.AT_TIMESTAMP); + + Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size()); + for (KinesisClientLease lease : newLeases) { + Assert.assertTrue("Unexpected lease: " + lease, + expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey())); + Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint()); + } + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors (initial position AT_TIMESTAMP) + * For shard graph B (see the construct method doc for structure). + * Current leases: empty set + */ + @Test + public final void testDetermineNewLeasesToCreateGraphBNoInitialLeasesAtTimestamp() { + List shards = constructShardListForGraphB(); + List currentLeases = new ArrayList(); + List newLeases = + ShardSyncer.determineNewLeasesToCreate(shards, currentLeases, INITIAL_POSITION_AT_TIMESTAMP); + Map expectedShardIdCheckpointMap = + new HashMap(); + for (int i = 0; i < shards.size(); i++) { + String expectedShardId = "shardId-" + i; + expectedShardIdCheckpointMap.put(expectedShardId, ExtendedSequenceNumber.AT_TIMESTAMP); + } + + Assert.assertEquals(expectedShardIdCheckpointMap.size(), newLeases.size()); + for (KinesisClientLease lease : newLeases) { + Assert.assertTrue("Unexpected lease: " + lease, + expectedShardIdCheckpointMap.containsKey(lease.getLeaseKey())); + Assert.assertEquals(expectedShardIdCheckpointMap.get(lease.getLeaseKey()), lease.getCheckpoint()); + } + } + + + /* + * Helper method to construct a shard list for graph A. Graph A is defined below. 
+ * Shard structure (y-axis is epochs): + * 0 1 2 3 4 5- shards till epoch 102 + * \ / \ / | | + * 6 7 4 5- shards from epoch 103 - 205 + * \ / | /\ + * 8 4 9 10 - shards from epoch 206 (open - no ending sequenceNumber) + */ + List constructShardListForGraphA() { + List shards = new ArrayList(); + + SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("11", "102"); + SequenceNumberRange range1 = ShardObjectHelper.newSequenceNumberRange("11", null); + SequenceNumberRange range2 = ShardObjectHelper.newSequenceNumberRange("11", "205"); + SequenceNumberRange range3 = ShardObjectHelper.newSequenceNumberRange("103", "205"); + SequenceNumberRange range4 = ShardObjectHelper.newSequenceNumberRange("206", null); + + HashKeyRange hashRange0 = ShardObjectHelper.newHashKeyRange("0", "99"); + HashKeyRange hashRange1 = ShardObjectHelper.newHashKeyRange("100", "199"); + HashKeyRange hashRange2 = ShardObjectHelper.newHashKeyRange("200", "299"); + HashKeyRange hashRange3 = ShardObjectHelper.newHashKeyRange("300", "399"); + HashKeyRange hashRange4 = ShardObjectHelper.newHashKeyRange("400", "499"); + HashKeyRange hashRange5 = ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY); + HashKeyRange hashRange6 = ShardObjectHelper.newHashKeyRange("0", "199"); + HashKeyRange hashRange7 = ShardObjectHelper.newHashKeyRange("200", "399"); + HashKeyRange hashRange8 = ShardObjectHelper.newHashKeyRange("0", "399"); + HashKeyRange hashRange9 = ShardObjectHelper.newHashKeyRange("500", "799"); + HashKeyRange hashRange10 = ShardObjectHelper.newHashKeyRange("800", ShardObjectHelper.MAX_HASH_KEY); + + shards.add(ShardObjectHelper.newShard("shardId-0", null, null, range0, hashRange0)); + shards.add(ShardObjectHelper.newShard("shardId-1", null, null, range0, hashRange1)); + shards.add(ShardObjectHelper.newShard("shardId-2", null, null, range0, hashRange2)); + shards.add(ShardObjectHelper.newShard("shardId-3", null, null, range0, hashRange3)); + 
shards.add(ShardObjectHelper.newShard("shardId-4", null, null, range1, hashRange4)); + shards.add(ShardObjectHelper.newShard("shardId-5", null, null, range2, hashRange5)); + + shards.add(ShardObjectHelper.newShard("shardId-6", "shardId-0", "shardId-1", range3, hashRange6)); + shards.add(ShardObjectHelper.newShard("shardId-7", "shardId-2", "shardId-3", range3, hashRange7)); + + shards.add(ShardObjectHelper.newShard("shardId-8", "shardId-6", "shardId-7", range4, hashRange8)); + shards.add(ShardObjectHelper.newShard("shardId-9", "shardId-5", null, range4, hashRange9)); + shards.add(ShardObjectHelper.newShard("shardId-10", null, "shardId-5", range4, hashRange10)); + + return shards; + } + + /* + * Helper method to construct a shard list for graph B. Graph B is defined below. + * Shard structure (x-axis is epochs): + * 0 3 6 9 + * \ / \ / \ / + * 2 5 8 + * / \ / \ / \ + * 1 4 7 10 + */ + List constructShardListForGraphB() { + List shards = new ArrayList(); + + SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("1000", "1049"); + SequenceNumberRange range1 = ShardObjectHelper.newSequenceNumberRange("1050", "1099"); + SequenceNumberRange range2 = ShardObjectHelper.newSequenceNumberRange("1100", "1149"); + SequenceNumberRange range3 = ShardObjectHelper.newSequenceNumberRange("1150", "1199"); + SequenceNumberRange range4 = ShardObjectHelper.newSequenceNumberRange("1200", "1249"); + SequenceNumberRange range5 = ShardObjectHelper.newSequenceNumberRange("1250", "1299"); + SequenceNumberRange range6 = ShardObjectHelper.newSequenceNumberRange("1300", null); + + HashKeyRange hashRange0 = ShardObjectHelper.newHashKeyRange("0", "499"); + HashKeyRange hashRange1 = ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY); + HashKeyRange hashRange2 = ShardObjectHelper.newHashKeyRange("0", ShardObjectHelper.MAX_HASH_KEY); + + shards.add(ShardObjectHelper.newShard("shardId-0", null, null, range0, hashRange0)); + 
shards.add(ShardObjectHelper.newShard("shardId-1", null, null, range0, hashRange1)); + shards.add(ShardObjectHelper.newShard("shardId-2", "shardId-0", "shardId-1", range1, hashRange2)); + shards.add(ShardObjectHelper.newShard("shardId-3", "shardId-2", null, range2, hashRange0)); + shards.add(ShardObjectHelper.newShard("shardId-4", "shardId-2", null, range2, hashRange1)); + shards.add(ShardObjectHelper.newShard("shardId-5", "shardId-3", "shardId-4", range3, hashRange2)); + shards.add(ShardObjectHelper.newShard("shardId-6", "shardId-5", null, range4, hashRange0)); + shards.add(ShardObjectHelper.newShard("shardId-7", "shardId-5", null, range4, hashRange1)); + shards.add(ShardObjectHelper.newShard("shardId-8", "shardId-6", "shardId-7", range5, hashRange2)); + shards.add(ShardObjectHelper.newShard("shardId-9", "shardId-8", null, range6, hashRange0)); + shards.add(ShardObjectHelper.newShard("shardId-10", null, "shardId-8", range6, hashRange1)); + + return shards; + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors when shardId is null + */ + @Test + public final void testCheckIfDescendantAndAddNewLeasesForAncestorsNullShardId() { + Map memoizationContext = new HashMap<>(); + Assert.assertFalse(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(null, INITIAL_POSITION_LATEST, + null, + null, + null, + memoizationContext)); + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors when shard has been trimmed + */ + @Test + public final void testCheckIfDescendantAndAddNewLeasesForAncestorsTrimmedShard() { + String shardId = "shardId-trimmed"; + Map kinesisShards = new HashMap(); + Map memoizationContext = new HashMap<>(); + Assert.assertFalse(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, + null, + kinesisShards, + null, + memoizationContext)); + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors when there is a current lease for the shard + */ + @Test + public final void 
testCheckIfDescendantAndAddNewLeasesForAncestorsForShardWithCurrentLease() { + String shardId = "shardId-current"; + Map kinesisShards = new HashMap(); + kinesisShards.put(shardId, ShardObjectHelper.newShard(shardId, null, null, null)); + Set shardIdsOfCurrentLeases = new HashSet(); + shardIdsOfCurrentLeases.add(shardId); + Map newLeaseMap = new HashMap(); + Map memoizationContext = new HashMap<>(); + Assert.assertTrue(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, + shardIdsOfCurrentLeases, + kinesisShards, + newLeaseMap, + memoizationContext)); + Assert.assertTrue(newLeaseMap.isEmpty()); + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors - two parents, two ancestors, not descendant + */ + @Test + public final void testCheckIfDescendantAndAddNewLeasesForAncestors2P2ANotDescendant() { + Set shardIdsOfCurrentLeases = new HashSet(); + Map newLeaseMap = new HashMap(); + Map kinesisShards = new HashMap(); + + String parentShardId = "shardId-parent"; + kinesisShards.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); + + String adjacentParentShardId = "shardId-adjacentParent"; + kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null)); + + String shardId = "shardId-9-1"; + kinesisShards.put(shardId, ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null)); + + Map memoizationContext = new HashMap<>(); + Assert.assertFalse(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, + shardIdsOfCurrentLeases, + kinesisShards, + newLeaseMap, + memoizationContext)); + Assert.assertTrue(newLeaseMap.isEmpty()); + } + + /** + * Test CheckIfDescendantAndAddNewLeasesForAncestors - two parents, there is a lease for one parent. 
+ */ + @Test + public final void testCheckIfDescendantAndAddNewLeasesForAncestors2P2A1PDescendant() { + Set shardIdsOfCurrentLeases = new HashSet(); + Map newLeaseMap = new HashMap(); + Map kinesisShards = new HashMap(); + + String parentShardId = "shardId-parent"; + kinesisShards.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); + shardIdsOfCurrentLeases.add(parentShardId); + + String adjacentParentShardId = "shardId-adjacentParent"; + kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null)); + + String shardId = "shardId-9-1"; + Shard shard = ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null); + kinesisShards.put(shardId, shard); + + Map memoizationContext = new HashMap<>(); + Assert.assertTrue(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, + shardIdsOfCurrentLeases, + kinesisShards, + newLeaseMap, + memoizationContext)); + Assert.assertEquals(1, newLeaseMap.size()); + Assert.assertTrue(newLeaseMap.containsKey(adjacentParentShardId)); + KinesisClientLease adjacentParentLease = newLeaseMap.get(adjacentParentShardId); + Assert.assertEquals(ExtendedSequenceNumber.LATEST, adjacentParentLease.getCheckpoint()); + } + + /** + * Test getParentShardIds() when the shard has no parents. + */ + @Test + public final void testGetParentShardIdsNoParents() { + Shard shard = new Shard(); + Assert.assertTrue(ShardSyncer.getParentShardIds(shard, null).isEmpty()); + } + + /** + * Test getParentShardIds() when the shard's parents have been trimmed (absent from the shard map). + */ + @Test + public final void testGetParentShardIdsTrimmedParents() { + Map shardMap = new HashMap(); + Shard shard = ShardObjectHelper.newShard("shardId-test", "foo", "bar", null); + Assert.assertTrue(ShardSyncer.getParentShardIds(shard, shardMap).isEmpty()); + } + + /** + * Test getParentShardIds() when the shard has a single parent. 
+ */ + @Test + public final void testGetParentShardIdsSingleParent() { + Map shardMap = new HashMap(); + + String parentShardId = "shardId-parent"; + shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); + + Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, null, null); + Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + Assert.assertEquals(1, parentShardIds.size()); + Assert.assertTrue(parentShardIds.contains(parentShardId)); + + shard.setParentShardId(null); + parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + Assert.assertTrue(parentShardIds.isEmpty()); + + shard.setAdjacentParentShardId(parentShardId); + parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + Assert.assertEquals(1, parentShardIds.size()); + Assert.assertTrue(parentShardIds.contains(parentShardId)); + } + + /** + * Test getParentShardIds() when the shard has two parents, one is trimmed. + */ + @Test + public final void testGetParentShardIdsOneTrimmedParent() { + Map shardMap = new HashMap(); + + String parentShardId = "shardId-parent"; + Shard parent = ShardObjectHelper.newShard(parentShardId, null, null, null); + + String adjacentParentShardId = "shardId-adjacentParent"; + Shard adjacentParent = ShardObjectHelper.newShard(adjacentParentShardId, null, null, null); + + Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null); + + shardMap.put(parentShardId, parent); + Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + Assert.assertEquals(1, parentShardIds.size()); + Assert.assertTrue(parentShardIds.contains(parentShardId)); + + shardMap.remove(parentShardId); + parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + Assert.assertTrue(parentShardIds.isEmpty()); + + shardMap.put(adjacentParentShardId, adjacentParent); + parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + Assert.assertEquals(1, 
parentShardIds.size()); + Assert.assertTrue(parentShardIds.contains(adjacentParentShardId)); + } + + /** + * Test getParentShardIds() when the shard has two parents. + */ + @Test + public final void testGetParentShardIdsTwoParents() { + Map shardMap = new HashMap(); + + String parentShardId = "shardId-parent"; + shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); + + String adjacentParentShardId = "shardId-adjacentParent"; + shardMap.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null)); + + Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null); + + Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + Assert.assertEquals(2, parentShardIds.size()); + Assert.assertTrue(parentShardIds.contains(parentShardId)); + Assert.assertTrue(parentShardIds.contains(adjacentParentShardId)); + } + + /** + */ + @Test + public final void testNewLease() { + Shard shard = new Shard(); + String shardId = "shardId-95"; + shard.setShardId(shardId); + String parentShardId = "shardId-parent"; + String adjacentParentShardId = "shardId-adjacentParent"; + shard.setParentShardId(parentShardId); + shard.setAdjacentParentShardId(adjacentParentShardId); + + KinesisClientLease lease = ShardSyncer.newKCLLease(shard); + Assert.assertEquals(shardId, lease.getLeaseKey()); + Assert.assertNull(lease.getCheckpoint()); + Set parentIds = lease.getParentShardIds(); + Assert.assertEquals(2, parentIds.size()); + Assert.assertTrue(parentIds.contains(parentShardId)); + Assert.assertTrue(parentIds.contains(adjacentParentShardId)); + } + + /** + * Test method for constructShardIdToShardMap. + * + * . 
+ */ + @Test + public final void testConstructShardIdToShardMap() { + List shards = new ArrayList(2); + shards.add(ShardObjectHelper.newShard("shardId-0", null, null, null)); + shards.add(ShardObjectHelper.newShard("shardId-1", null, null, null)); + + Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); + Assert.assertEquals(shards.size(), shardIdToShardMap.size()); + for (Shard shard : shards) { + Assert.assertSame(shard, shardIdToShardMap.get(shard.getShardId())); + } + } + + /** + * Test getOpenShards() - no shards are open. + */ + @Test + public final void testGetOpenShardsNoneOpen() { + List shards = new ArrayList(); + shards.add(ShardObjectHelper.newShard("shardId-9384", + null, + null, + ShardObjectHelper.newSequenceNumberRange("123", "345"))); + Assert.assertTrue(ShardSyncer.getOpenShards(shards).isEmpty()); + } + + /** + * Test getOpenShards() - test null and max end sequence number. + */ + @Test + public final void testGetOpenShardsNullAndMaxEndSeqNum() { + List shards = new ArrayList(); + String shardId = "shardId-2738"; + SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("123", null); + shards.add(ShardObjectHelper.newShard(shardId, null, null, sequenceNumberRange)); + + // Verify shard is considered open when it has a null end sequence number + List openShards = ShardSyncer.getOpenShards(shards); + Assert.assertEquals(1, openShards.size()); + Assert.assertEquals(shardId, openShards.get(0).getShardId()); + + // Close shard before testing for max sequence number + sequenceNumberRange.setEndingSequenceNumber("1000"); + openShards = ShardSyncer.getOpenShards(shards); + Assert.assertTrue(openShards.isEmpty()); + + // Verify shard is considered closed when the end sequence number is set to max allowed sequence number + sequenceNumberRange.setEndingSequenceNumber(MAX_SEQUENCE_NUMBER.toString()); + openShards = ShardSyncer.getOpenShards(shards); + Assert.assertEquals(0, openShards.size()); + } + + /** + * 
Test isCandidateForCleanup + * + * @throws KinesisClientLibIOException + */ + @Test + public final void testIsCandidateForCleanup() throws KinesisClientLibIOException { + String parentShardId = "shardId-0000"; + String adjacentParentShardId = "shardId-0001"; + String shardId = "shardId-0002"; + KinesisClientLease lease = newLease(shardId); + List parentShardIds = new ArrayList<>(); + parentShardIds.add(parentShardId); + parentShardIds.add(adjacentParentShardId); + lease.setParentShardIds(parentShardIds); + Set currentKinesisShardIds = new HashSet<>(); + + currentKinesisShardIds.add(shardId); + Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + + currentKinesisShardIds.clear(); + Assert.assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + + currentKinesisShardIds.add(parentShardId); + // Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + + currentKinesisShardIds.clear(); + Assert.assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + + currentKinesisShardIds.add(adjacentParentShardId); + // Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + currentKinesisShardIds.add(parentShardId); + // Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + currentKinesisShardIds.add(shardId); + Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + } + + /** + * Test isCandidateForCleanup + * + * @throws KinesisClientLibIOException + */ + @Test(expected = KinesisClientLibIOException.class) + public final void testIsCandidateForCleanupParentExists() throws KinesisClientLibIOException { + String parentShardId = "shardId-0000"; + String adjacentParentShardId = "shardId-0001"; + String shardId = "shardId-0002"; + KinesisClientLease lease = newLease(shardId); + List parentShardIds = new ArrayList<>(); + parentShardIds.add(parentShardId); + 
parentShardIds.add(adjacentParentShardId); + lease.setParentShardIds(parentShardIds); + Set currentKinesisShardIds = new HashSet<>(); + + currentKinesisShardIds.add(parentShardId); + Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + } + + /** + * Test isCandidateForCleanup + * + * @throws KinesisClientLibIOException + */ + @Test(expected = KinesisClientLibIOException.class) + public final void testIsCandidateForCleanupAdjacentParentExists() throws KinesisClientLibIOException { + String parentShardId = "shardId-0000"; + String adjacentParentShardId = "shardId-0001"; + String shardId = "shardId-0002"; + KinesisClientLease lease = newLease(shardId); + List parentShardIds = new ArrayList<>(); + parentShardIds.add(parentShardId); + parentShardIds.add(adjacentParentShardId); + lease.setParentShardIds(parentShardIds); + Set currentKinesisShardIds = new HashSet<>(); + + currentKinesisShardIds.add(adjacentParentShardId); + Assert.assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + } + + /** + * Test cleanup of lease for a shard that has been fully processed (and processing of child shards has begun). 
+ * + * @throws DependencyException + * @throws InvalidStateException + * @throws ProvisionedThroughputException + */ + @Test + public final void testCleanupLeaseForClosedShard() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + String closedShardId = "shardId-2"; + KinesisClientLease leaseForClosedShard = newLease(closedShardId); + leaseForClosedShard.setCheckpoint(new ExtendedSequenceNumber("1234")); + leaseManager.createLeaseIfNotExists(leaseForClosedShard); + + Set childShardIds = new HashSet<>(); + List trackedLeases = new ArrayList<>(); + Set parentShardIds = new HashSet<>(); + parentShardIds.add(closedShardId); + String childShardId1 = "shardId-5"; + KinesisClientLease childLease1 = newLease(childShardId1); + childLease1.setParentShardIds(parentShardIds); + childLease1.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON); + String childShardId2 = "shardId-7"; + KinesisClientLease childLease2 = newLease(childShardId2); + childLease2.setParentShardIds(parentShardIds); + childLease2.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON); + Map trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); + + // empty list of leases + ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager); + Assert.assertNotNull(leaseManager.getLease(closedShardId)); + + // closed shard has not been fully processed yet (checkpoint != SHARD_END) + trackedLeases.add(leaseForClosedShard); + trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); + ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager); + Assert.assertNotNull(leaseManager.getLease(closedShardId)); + + // closed shard has now been fully processed (checkpoint == SHARD_END) + leaseForClosedShard.setCheckpoint(ExtendedSequenceNumber.SHARD_END); + leaseManager.updateLease(leaseForClosedShard); + ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, 
trackedLeaseMap, leaseManager); + Assert.assertNull(leaseManager.getLease(closedShardId)); + + // lease for only one child exists + childShardIds.add(childShardId1); + childShardIds.add(childShardId2); + leaseManager.createLeaseIfNotExists(leaseForClosedShard); + leaseManager.createLeaseIfNotExists(childLease1); + trackedLeases.add(childLease1); + trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); + ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager); + Assert.assertNotNull(leaseManager.getLease(closedShardId)); + + // leases for both children exists, but they are both at TRIM_HORIZON + leaseManager.createLeaseIfNotExists(childLease2); + trackedLeases.add(childLease2); + trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); + ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager); + Assert.assertNotNull(leaseManager.getLease(closedShardId)); + + // leases for both children exists, one is at TRIM_HORIZON + childLease1.setCheckpoint(new ExtendedSequenceNumber("34890")); + leaseManager.updateLease(childLease1); + ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager); + Assert.assertNotNull(leaseManager.getLease(closedShardId)); + + // leases for both children exists, NONE of them are at TRIM_HORIZON + childLease2.setCheckpoint(new ExtendedSequenceNumber("43789")); + leaseManager.updateLease(childLease2); + ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, leaseManager); + Assert.assertNull(leaseManager.getLease(closedShardId)); + } + + /** + * Test we can handle trimmed Kinesis shards (absent from the shard list), and valid closed shards. 
+ * + * @throws KinesisClientLibIOException + */ + @Test + public final void testAssertShardCoveredOrAbsentTestAbsentAndValid() throws KinesisClientLibIOException { + List shards = new ArrayList<>(); + String expectedClosedShardId = "shardId-34098"; + SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); + HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); + Shard closedShard = + ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); + SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300"); + Shard child1 = + ShardObjectHelper.newShard("shardId-54879", expectedClosedShardId, null, childSequenceNumberRange); + Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); + Map> shardIdToChildShardIdsMap = + ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + Set closedShardIds = new HashSet<>(); + closedShardIds.add(expectedClosedShardId); + ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); + + // test for case where shard has been trimmed (absent from list) + ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); + + // Populate shards. 
+ shards.add(closedShard); + shards.add(child1); + shardIdToShardMap.put(expectedClosedShardId, closedShard); + shardIdToShardMap.put(child1.getShardId(), child1); + shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + + // test degenerate split/merge + child1.setHashKeyRange(hashKeyRange); + ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); + + // test merge + child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("10", "2985")); + ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); + child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("3", "25")); + ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); + + // test split + HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15"); + HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25"); + child1.setHashKeyRange(childHashKeyRange1); + Shard child2 = ShardObjectHelper.newShard("shardId-43789", + null, + expectedClosedShardId, + childSequenceNumberRange, + childHashKeyRange2); + shards.add(child2); + shardIdToShardMap.put(child2.getShardId(), child2); + shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); + } + + /** + * Test we throw an exception if the shard is open + * + * @throws KinesisClientLibIOException + */ + @Test(expected = KinesisClientLibIOException.class) + public final void testAssertShardCoveredOrAbsentTestOpen() throws KinesisClientLibIOException { + List shards = new ArrayList<>(); + String expectedClosedShardId = "shardId-34098"; + SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", null); + HashKeyRange hashKeyRange = 
ShardObjectHelper.newHashKeyRange("10", "25"); + Shard openShard = + ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); + shards.add(openShard); + Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); + Map> shardIdToChildShardIdsMap = + ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + Set closedShardIds = new HashSet<>(); + closedShardIds.add(expectedClosedShardId); + ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); + } + + /** + * Test we throw an exception if there are no children + * + * @throws KinesisClientLibIOException + */ + @Test(expected = KinesisClientLibIOException.class) + public final void testAssertShardCoveredOrAbsentTestNoChildren() throws KinesisClientLibIOException { + List shards = new ArrayList<>(); + String expectedClosedShardId = "shardId-34098"; + SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); + HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); + Shard closedShard = + ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); + shards.add(closedShard); + Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); + Map> shardIdToChildShardIdsMap = + ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + Set closedShardIds = new HashSet<>(); + closedShardIds.add(expectedClosedShardId); + ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); + } + + /** + * Test we throw an exception if children don't cover hash key range (min of children > min of parent) + * + * @throws KinesisClientLibIOException + */ + @Test(expected = KinesisClientLibIOException.class) + public final void testAssertShardCoveredOrAbsentTestIncompleteSplitMin() throws KinesisClientLibIOException { + HashKeyRange hashKeyRange = 
ShardObjectHelper.newHashKeyRange("10", "25"); + HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("12", "15"); + HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25"); + testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2); + } + + /** + * Test we throw an exception if children don't cover hash key range (max of children < max of parent) + * + * @throws KinesisClientLibIOException + */ + @Test(expected = KinesisClientLibIOException.class) + public final void testAssertShardCoveredOrAbsentTestIncompleteSplitMax() throws KinesisClientLibIOException { + HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); + HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15"); + HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "23"); + testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2); + } + + private void testAssertShardCoveredOrAbsentTestIncompleteSplit(HashKeyRange parentHashKeyRange, + HashKeyRange child1HashKeyRange, + HashKeyRange child2HashKeyRange) + throws KinesisClientLibIOException { + List shards = new ArrayList<>(); + String expectedClosedShardId = "shardId-34098"; + SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); + Shard closedShard = + ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, parentHashKeyRange); + shards.add(closedShard); + + SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300"); + Shard child1 = ShardObjectHelper.newShard("shardId-43789", + null, + expectedClosedShardId, + childSequenceNumberRange, + child1HashKeyRange); + shards.add(child1); + Shard child2 = ShardObjectHelper.newShard("shardId-43790", + null, + expectedClosedShardId, + childSequenceNumberRange, + child2HashKeyRange); + shards.add(child2); + + 
Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); + Map> shardIdToChildShardIdsMap = + ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + Set closedShardIds = new HashSet<>(); + closedShardIds.add(expectedClosedShardId); + ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); + } + + /** + * Helper method. + * + * @param shardId + * @return + */ + private KinesisClientLease newLease(String shardId) { + KinesisClientLease lease = new KinesisClientLease(); + lease.setLeaseKey(shardId); + + return lease; + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownFutureTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownFutureTest.java new file mode 100644 index 00000000..cccbc9a1 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownFutureTest.java @@ -0,0 +1,236 @@ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; +import org.mockito.stubbing.OngoingStubbing; + +@RunWith(MockitoJUnitRunner.class) +public class ShutdownFutureTest { + + @Mock + private CountDownLatch shutdownCompleteLatch; + @Mock + 
private CountDownLatch notificationCompleteLatch; + @Mock + private Worker worker; + @Mock + private ConcurrentMap shardInfoConsumerMap; + + @Test + public void testSimpleGetAlreadyCompleted() throws Exception { + ShutdownFuture future = new ShutdownFuture(shutdownCompleteLatch, notificationCompleteLatch, worker); + + mockNotificationComplete(true); + mockShutdownComplete(true); + + future.get(); + + verify(notificationCompleteLatch).await(anyLong(), any(TimeUnit.class)); + verify(worker).shutdown(); + verify(shutdownCompleteLatch).await(anyLong(), any(TimeUnit.class)); + } + + @Test + public void testNotificationNotCompleted() throws Exception { + ShutdownFuture future = new ShutdownFuture(shutdownCompleteLatch, notificationCompleteLatch, worker); + + mockNotificationComplete(false, true); + mockShutdownComplete(true); + + when(worker.getShardInfoShardConsumerMap()).thenReturn(shardInfoConsumerMap); + when(shardInfoConsumerMap.isEmpty()).thenReturn(false); + when(worker.isShutdownComplete()).thenReturn(false); + + when(notificationCompleteLatch.getCount()).thenReturn(1L); + when(shutdownCompleteLatch.getCount()).thenReturn(1L); + + expectedTimeoutException(future); + + verify(worker, never()).shutdown(); + + awaitFuture(future); + + verify(notificationCompleteLatch).getCount(); + verifyLatchAwait(notificationCompleteLatch, 2); + + verify(shutdownCompleteLatch).getCount(); + verifyLatchAwait(shutdownCompleteLatch); + + verify(worker).shutdown(); + + } + + @Test + public void testShutdownNotCompleted() throws Exception { + ShutdownFuture future = new ShutdownFuture(shutdownCompleteLatch, notificationCompleteLatch, worker); + mockNotificationComplete(true); + mockShutdownComplete(false, true); + + when(shutdownCompleteLatch.getCount()).thenReturn(1L); + when(worker.isShutdownComplete()).thenReturn(false); + + mockShardInfoConsumerMap(1); + + expectedTimeoutException(future); + verify(worker).shutdown(); + awaitFuture(future); + + 
verifyLatchAwait(notificationCompleteLatch, 2); + verifyLatchAwait(shutdownCompleteLatch, 2); + + verify(worker).isShutdownComplete(); + verify(worker).getShardInfoShardConsumerMap(); + + } + + @Test + public void testShutdownNotCompleteButWorkerShutdown() throws Exception { + ShutdownFuture future = create(); + + mockNotificationComplete(true); + mockShutdownComplete(false); + + when(shutdownCompleteLatch.getCount()).thenReturn(1L); + when(worker.isShutdownComplete()).thenReturn(true); + mockShardInfoConsumerMap(1); + + awaitFuture(future); + verify(worker).shutdown(); + verifyLatchAwait(notificationCompleteLatch); + verifyLatchAwait(shutdownCompleteLatch); + + verify(worker, times(2)).isShutdownComplete(); + verify(worker).getShardInfoShardConsumerMap(); + verify(shardInfoConsumerMap).size(); + } + + @Test + public void testShutdownNotCompleteButShardConsumerEmpty() throws Exception { + ShutdownFuture future = create(); + mockNotificationComplete(true); + mockShutdownComplete(false); + + mockOutstanding(shutdownCompleteLatch, 1L); + + when(worker.isShutdownComplete()).thenReturn(false); + mockShardInfoConsumerMap(0); + + awaitFuture(future); + verify(worker).shutdown(); + verifyLatchAwait(notificationCompleteLatch); + verifyLatchAwait(shutdownCompleteLatch); + + verify(worker, times(2)).isShutdownComplete(); + verify(worker, times(2)).getShardInfoShardConsumerMap(); + + verify(shardInfoConsumerMap).isEmpty(); + verify(shardInfoConsumerMap).size(); + } + + @Test + public void testNotificationNotCompleteButShardConsumerEmpty() throws Exception { + ShutdownFuture future = create(); + mockNotificationComplete(false); + mockShutdownComplete(false); + + mockOutstanding(notificationCompleteLatch, 1L); + mockOutstanding(shutdownCompleteLatch, 1L); + + when(worker.isShutdownComplete()).thenReturn(false); + mockShardInfoConsumerMap(0); + + awaitFuture(future); + verify(worker, never()).shutdown(); + verifyLatchAwait(notificationCompleteLatch); + 
verify(shutdownCompleteLatch, never()).await(); + + verify(worker, times(2)).isShutdownComplete(); + verify(worker, times(2)).getShardInfoShardConsumerMap(); + + verify(shardInfoConsumerMap).isEmpty(); + verify(shardInfoConsumerMap).size(); + } + + @Test(expected = TimeoutException.class) + public void testTimeExceededException() throws Exception { + ShutdownFuture future = create(); + mockNotificationComplete(false); + mockOutstanding(notificationCompleteLatch, 1L); + when(worker.isShutdownComplete()).thenReturn(false); + mockShardInfoConsumerMap(1); + + future.get(1, TimeUnit.NANOSECONDS); + } + + private ShutdownFuture create() { + return new ShutdownFuture(shutdownCompleteLatch, notificationCompleteLatch, worker); + } + + private void mockShardInfoConsumerMap(Integer initialItemCount, Integer ... additionalItemCounts) { + when(worker.getShardInfoShardConsumerMap()).thenReturn(shardInfoConsumerMap); + Boolean additionalEmptyStates[] = new Boolean[additionalItemCounts.length]; + for(int i = 0; i < additionalItemCounts.length; ++i) { + additionalEmptyStates[i] = additionalItemCounts[i] == 0; + } + when(shardInfoConsumerMap.size()).thenReturn(initialItemCount, additionalItemCounts); + when(shardInfoConsumerMap.isEmpty()).thenReturn(initialItemCount == 0, additionalEmptyStates); + } + + private void verifyLatchAwait(CountDownLatch latch) throws Exception { + verifyLatchAwait(latch, 1); + } + + private void verifyLatchAwait(CountDownLatch latch, int times) throws Exception { + verify(latch, times(times)).await(anyLong(), any(TimeUnit.class)); + } + + private void expectedTimeoutException(ShutdownFuture future) throws Exception { + boolean gotTimeout = false; + try { + awaitFuture(future); + } catch (TimeoutException te) { + gotTimeout = true; + } + assertThat("Expected a timeout exception to occur", gotTimeout); + } + + private void awaitFuture(ShutdownFuture future) throws Exception { + future.get(1, TimeUnit.SECONDS); + } + + private void 
mockNotificationComplete(Boolean initial, Boolean... states) throws Exception { + mockLatch(notificationCompleteLatch, initial, states); + + } + + private void mockShutdownComplete(Boolean initial, Boolean... states) throws Exception { + mockLatch(shutdownCompleteLatch, initial, states); + } + + private void mockLatch(CountDownLatch latch, Boolean initial, Boolean... states) throws Exception { + when(latch.await(anyLong(), any(TimeUnit.class))).thenReturn(initial, states); + } + + private void mockOutstanding(CountDownLatch latch, Long remaining, Long ... additionalRemaining) throws Exception { + when(latch.getCount()).thenReturn(remaining, additionalRemaining); + } + +} \ No newline at end of file diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownTaskTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownTaskTest.java new file mode 100644 index 00000000..9eaf7e8e --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ShutdownTaskTest.java @@ -0,0 +1,141 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.HashSet; +import java.util.Set; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.KinesisClientLibIOException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; + +/** + * + */ +public class ShutdownTaskTest { + private static final long TASK_BACKOFF_TIME_MILLIS = 1L; + private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + + Set defaultParentShardIds = new HashSet<>(); + String defaultConcurrencyToken = "testToken4398"; + String defaultShardId = "shardId-0000397840"; + ShardInfo defaultShardInfo = new ShardInfo(defaultShardId, + defaultConcurrencyToken, + defaultParentShardIds, + ExtendedSequenceNumber.LATEST); + IRecordProcessor defaultRecordProcessor = new TestStreamlet(); + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @After + public 
void tearDown() throws Exception { + } + + /** + * Test method for {@link ShutdownTask#call()}. + */ + @Test + public final void testCallWhenApplicationDoesNotCheckpoint() { + RecordProcessorCheckpointer checkpointer = mock(RecordProcessorCheckpointer.class); + when(checkpointer.getLastCheckpointValue()).thenReturn(new ExtendedSequenceNumber("3298")); + IKinesisProxy kinesisProxy = mock(IKinesisProxy.class); + ILeaseManager leaseManager = mock(KinesisClientLeaseManager.class); + boolean cleanupLeasesOfCompletedShards = false; + ShutdownTask task = new ShutdownTask(defaultShardInfo, + defaultRecordProcessor, + checkpointer, + ShutdownReason.TERMINATE, + kinesisProxy, + INITIAL_POSITION_TRIM_HORIZON, + cleanupLeasesOfCompletedShards, + leaseManager, + TASK_BACKOFF_TIME_MILLIS); + TaskResult result = task.call(); + Assert.assertNotNull(result.getException()); + Assert.assertTrue(result.getException() instanceof IllegalArgumentException); + } + + /** + * Test method for {@link ShutdownTask#call()}. + */ + @Test + public final void testCallWhenSyncingShardsThrows() { + RecordProcessorCheckpointer checkpointer = mock(RecordProcessorCheckpointer.class); + when(checkpointer.getLastCheckpointValue()).thenReturn(ExtendedSequenceNumber.SHARD_END); + IKinesisProxy kinesisProxy = mock(IKinesisProxy.class); + when(kinesisProxy.getShardList()).thenReturn(null); + ILeaseManager leaseManager = mock(KinesisClientLeaseManager.class); + boolean cleanupLeasesOfCompletedShards = false; + ShutdownTask task = new ShutdownTask(defaultShardInfo, + defaultRecordProcessor, + checkpointer, + ShutdownReason.TERMINATE, + kinesisProxy, + INITIAL_POSITION_TRIM_HORIZON, + cleanupLeasesOfCompletedShards, + leaseManager, + TASK_BACKOFF_TIME_MILLIS); + TaskResult result = task.call(); + Assert.assertNotNull(result.getException()); + Assert.assertTrue(result.getException() instanceof KinesisClientLibIOException); + } + + /** + * Test method for {@link ShutdownTask#getTaskType()}. 
+ */ + @Test + public final void testGetTaskType() { + ShutdownTask task = new ShutdownTask(null, null, null, null, null, null, false, null, 0); + Assert.assertEquals(TaskType.SHUTDOWN, task.getTaskType()); + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamlet.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamlet.java new file mode 100644 index 00000000..174410e7 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamlet.java @@ -0,0 +1,181 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Semaphore; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IShutdownNotificationAware; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.model.Record; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibNonRetryableException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput; + +/** + * Streamlet that tracks records it's seen - useful for testing. + */ +class TestStreamlet implements IRecordProcessor, IShutdownNotificationAware { + + private static final Log LOG = LogFactory.getLog(TestStreamlet.class); + + private List records = new ArrayList(); + + private Set processedSeqNums = new HashSet(); // used for deduping + + private Semaphore sem; // used to allow test cases to wait for all records to be processed + + private String shardId; + + // record the last shutdown reason we were called with. 
+ private ShutdownReason shutdownReason; + private ShardSequenceVerifier shardSequenceVerifier; + private long numProcessRecordsCallsWithEmptyRecordList; + private boolean shutdownNotificationCalled; + + private final CountDownLatch initializeLatch = new CountDownLatch(1); + private final CountDownLatch notifyShutdownLatch = new CountDownLatch(1); + private final CountDownLatch shutdownLatch = new CountDownLatch(1); + + public TestStreamlet() { + + } + + public TestStreamlet(Semaphore sem, ShardSequenceVerifier shardSequenceVerifier) { + this(); + this.sem = sem; + this.shardSequenceVerifier = shardSequenceVerifier; + } + + public List getProcessedRecords() { + return records; + } + + @Override + public void initialize(InitializationInput input) { + shardId = input.getShardId(); + if (shardSequenceVerifier != null) { + shardSequenceVerifier.registerInitialization(shardId); + } + initializeLatch.countDown(); + } + + @Override + public void processRecords(ProcessRecordsInput input) { + List dataRecords = input.getRecords(); + IRecordProcessorCheckpointer checkpointer = input.getCheckpointer(); + if ((dataRecords != null) && (!dataRecords.isEmpty())) { + for (Record record : dataRecords) { + LOG.debug("Processing record: " + record); + String seqNum = record.getSequenceNumber(); + if (!processedSeqNums.contains(seqNum)) { + records.add(record); + processedSeqNums.add(seqNum); + } + } + } + if (dataRecords.isEmpty()) { + numProcessRecordsCallsWithEmptyRecordList++; + } + try { + checkpointer.checkpoint(); + } catch (ThrottlingException | ShutdownException + | KinesisClientLibDependencyException | InvalidStateException e) { + // Continue processing records and checkpoint next time if we get a transient error. + // Don't checkpoint if the processor has been shutdown. 
+ LOG.debug("Caught exception while checkpointing: ", e); + } + + if (sem != null) { + sem.release(dataRecords.size()); + } + } + + @Override + public void shutdown(ShutdownInput input) { + ShutdownReason reason = input.getShutdownReason(); + IRecordProcessorCheckpointer checkpointer = input.getCheckpointer(); + if (shardSequenceVerifier != null) { + shardSequenceVerifier.registerShutdown(shardId, reason); + } + shutdownReason = reason; + if (reason.equals(ShutdownReason.TERMINATE)) { + try { + checkpointer.checkpoint(); + } catch (KinesisClientLibNonRetryableException e) { + LOG.error("Caught exception when checkpointing while shutdown.", e); + throw new RuntimeException(e); + } + } + + shutdownLatch.countDown(); + } + + /** + * @return the shardId + */ + String getShardId() { + return shardId; + } + + /** + * @return the shutdownReason + */ + ShutdownReason getShutdownReason() { + return shutdownReason; + } + + /** + * @return the numProcessRecordsCallsWithEmptyRecordList + */ + long getNumProcessRecordsCallsWithEmptyRecordList() { + return numProcessRecordsCallsWithEmptyRecordList; + } + + boolean isShutdownNotificationCalled() { + return shutdownNotificationCalled; + } + + @Override + public void shutdownRequested(IRecordProcessorCheckpointer checkpointer) { + shutdownNotificationCalled = true; + notifyShutdownLatch.countDown(); + } + + public CountDownLatch getInitializeLatch() { + return initializeLatch; + } + + public CountDownLatch getNotifyShutdownLatch() { + return notifyShutdownLatch; + } + + public CountDownLatch getShutdownLatch() { + return shutdownLatch; + } +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamletFactory.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamletFactory.java new file mode 100644 index 00000000..3446f52d --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/TestStreamletFactory.java @@ -0,0 +1,64 @@ +/* + * 
Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Semaphore; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory; + +/** + * Factory for TestStreamlet record processors. + */ +class TestStreamletFactory implements IRecordProcessorFactory { + + // Will be passed to the TestStreamlet. Can be used to check if all records have been processed. + private Semaphore semaphore; + private ShardSequenceVerifier shardSequenceVerifier; + List testStreamlets = new ArrayList<>(); + + /** + * Constructor. 
+ */ + TestStreamletFactory(Semaphore semaphore, ShardSequenceVerifier shardSequenceVerifier) { + this.semaphore = semaphore; + this.shardSequenceVerifier = shardSequenceVerifier; + } + + @Override + public synchronized IRecordProcessor createProcessor() { + TestStreamlet processor = new TestStreamlet(semaphore, shardSequenceVerifier); + testStreamlets.add(processor); + return processor; + } + + Semaphore getSemaphore() { + return semaphore; + } + + ShardSequenceVerifier getShardSequenceVerifier() { + return shardSequenceVerifier; + } + + /** + * @return the testStreamlets + */ + List getTestStreamlets() { + return testStreamlets; + } + +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/ThrottlingReporterTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ThrottlingReporterTest.java similarity index 55% rename from amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/ThrottlingReporterTest.java rename to src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ThrottlingReporterTest.java index d0feb8f4..d0645229 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/ThrottlingReporterTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/ThrottlingReporterTest.java @@ -1,30 +1,15 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package software.amazon.kinesis.retrieval; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; -import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.any; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import org.apache.commons.logging.Log; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; -import org.slf4j.Logger; -import software.amazon.kinesis.retrieval.ThrottlingReporter; @RunWith(MockitoJUnitRunner.class) public class ThrottlingReporterTest { @@ -32,14 +17,14 @@ public class ThrottlingReporterTest { private static final String SHARD_ID = "Shard-001"; @Mock - private Logger throttleLog; + private Log throttleLog; @Test public void testLessThanMaxThrottles() { ThrottlingReporter reporter = new LogTestingThrottingReporter(5, SHARD_ID); reporter.throttled(); - verify(throttleLog).warn(anyString()); - verify(throttleLog, never()).error(anyString()); + verify(throttleLog).warn(any(Object.class)); + verify(throttleLog, never()).error(any(Object.class)); } @@ -48,8 +33,8 @@ public class ThrottlingReporterTest { ThrottlingReporter reporter = new LogTestingThrottingReporter(1, SHARD_ID); reporter.throttled(); reporter.throttled(); - verify(throttleLog).warn(anyString()); - verify(throttleLog).error(anyString()); + verify(throttleLog).warn(any(Object.class)); + verify(throttleLog).error(any(Object.class)); } @Test @@ -61,8 +46,8 @@ public class ThrottlingReporterTest { reporter.throttled(); reporter.success(); reporter.throttled(); - verify(throttleLog, times(2)).warn(anyString()); - verify(throttleLog, times(3)).error(anyString()); + verify(throttleLog, times(2)).warn(any(Object.class)); + verify(throttleLog, times(3)).error(any(Object.class)); } @@ -73,7 +58,7 @@ public class ThrottlingReporterTest { } @Override - protected Logger getLog() { + protected Log getLog() { 
return throttleLog; } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/WorkerTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/WorkerTest.java similarity index 68% rename from amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/WorkerTest.java rename to src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/WorkerTest.java index 97392e7a..daf58165 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/WorkerTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/lib/worker/WorkerTest.java @@ -1,27 +1,115 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.coordinator; +package com.amazonaws.services.kinesis.clientlibrary.lib.worker; +import static org.hamcrest.CoreMatchers.both; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.isA; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.junit.Assert.fail; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.argThat; +import static org.mockito.Matchers.eq; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.*; + +import java.io.File; +import java.lang.Thread.State; +import java.lang.reflect.Field; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.Semaphore; +import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.hamcrest.Condition; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeDiagnosingMatcher; +import org.hamcrest.TypeSafeMatcher; +import org.junit.Assert; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Matchers; +import org.mockito.Mock; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.runners.MockitoJUnitRunner; +import 
org.mockito.stubbing.Answer; + +import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; +import com.amazonaws.services.dynamodbv2.local.embedded.DynamoDBEmbedded; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibNonRetryableException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker.WorkerCWMetricsFactory; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker.WorkerThreadPoolExecutor; +import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy; +import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisLocalFileProxy; +import com.amazonaws.services.kinesis.clientlibrary.proxies.util.KinesisLocalFileDataCreator; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseBuilder; +import com.amazonaws.services.kinesis.leases.impl.KinesisClientLeaseManager; +import com.amazonaws.services.kinesis.leases.impl.LeaseManager; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; +import com.amazonaws.services.kinesis.metrics.impl.CWMetricsFactory; +import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory; +import 
com.amazonaws.services.kinesis.model.HashKeyRange; +import com.amazonaws.services.kinesis.model.Record; +import com.amazonaws.services.kinesis.model.SequenceNumberRange; +import com.amazonaws.services.kinesis.model.Shard; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +import lombok.RequiredArgsConstructor; /** * Unit tests of Worker. */ -// TODO: remove this test class +@RunWith(MockitoJUnitRunner.class) public class WorkerTest { - /*// @Rule + + private static final Log LOG = LogFactory.getLog(WorkerTest.class); + + // @Rule // public Timeout timeout = new Timeout((int)TimeUnit.SECONDS.toMillis(30)); private final NullMetricsFactory nullMetricsFactory = new NullMetricsFactory(); @@ -42,15 +130,12 @@ public class WorkerTest { private static final String KINESIS_SHARD_ID_FORMAT = "kinesis-0-0-%d"; private static final String CONCURRENCY_TOKEN_FORMAT = "testToken-%d"; - private RecordsFetcherFactory recordsFetcherFactory; - private KinesisClientLibConfiguration config; - @Mock private KinesisClientLibLeaseCoordinator leaseCoordinator; @Mock - private ILeaseManager leaseRefresher; + private ILeaseManager leaseManager; @Mock - private software.amazon.kinesis.processor.IRecordProcessorFactory v1RecordProcessorFactory; + private com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory v1RecordProcessorFactory; @Mock private IKinesisProxy proxy; @Mock @@ -69,64 +154,55 @@ public class WorkerTest { private Future taskFuture; @Mock private TaskResult taskResult; - @Mock - private WorkerStateChangeListener workerStateChangeListener; - - @Before - public void setup() { - config = spy(new KinesisClientLibConfiguration("app", null, null, null)); - recordsFetcherFactory = spy(new SimpleRecordsFetcherFactory()); - when(config.getRecordsFetcherFactory()).thenReturn(recordsFetcherFactory); - } // CHECKSTYLE:IGNORE AnonInnerLengthCheck FOR NEXT 50 LINES - private static final 
software.amazon.kinesis.processor.IRecordProcessorFactory SAMPLE_RECORD_PROCESSOR_FACTORY = - new software.amazon.kinesis.processor.IRecordProcessorFactory() { + private static final com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory SAMPLE_RECORD_PROCESSOR_FACTORY = + new com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorFactory() { @Override - public software.amazon.kinesis.processor.IRecordProcessor createProcessor() { - return new IRecordProcessor() { + public com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor createProcessor() { + return new com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessor() { @Override - public void initialize(final InitializationInput initializationInput) { - - } - - @Override - public void processRecords(final ProcessRecordsInput processRecordsInput) { - try { - processRecordsInput.checkpointer().checkpoint(); - } catch (KinesisClientLibNonRetryableException e) { - throw new RuntimeException(e); - } - } - - @Override - public void shutdown(final ShutdownInput shutdownInput) { - if (shutdownInput.shutdownReason() == ShutdownReason.TERMINATE) { + public void shutdown(IRecordProcessorCheckpointer checkpointer, ShutdownReason reason) { + if (reason == ShutdownReason.TERMINATE) { try { - shutdownInput.checkpointer().checkpoint(); + checkpointer.checkpoint(); } catch (KinesisClientLibNonRetryableException e) { throw new RuntimeException(e); } } } + @Override + public void processRecords(List dataRecords, IRecordProcessorCheckpointer checkpointer) { + try { + checkpointer.checkpoint(); + } catch (KinesisClientLibNonRetryableException e) { + throw new RuntimeException(e); + } + } + + @Override + public void initialize(String shardId) { + } }; } }; - - private static final IRecordProcessorFactory SAMPLE_RECORD_PROCESSOR_FACTORY_V2 = SAMPLE_RECORD_PROCESSOR_FACTORY; + + private static final IRecordProcessorFactory SAMPLE_RECORD_PROCESSOR_FACTORY_V2 = + 
new V1ToV2RecordProcessorFactoryAdapter(SAMPLE_RECORD_PROCESSOR_FACTORY); - *//** - * Test method for {@link Worker#getApplicationName()}. - *//* + /** + * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#getApplicationName()}. + */ @Test public final void testGetStageName() { final String stageName = "testStageName"; - config = new KinesisClientLibConfiguration(stageName, null, null, null); - Worker worker = new Worker(v1RecordProcessorFactory, config); + final KinesisClientLibConfiguration clientConfig = + new KinesisClientLibConfiguration(stageName, null, null, null); + Worker worker = new Worker(v1RecordProcessorFactory, clientConfig); Assert.assertEquals(stageName, worker.getApplicationName()); } @@ -134,7 +210,6 @@ public class WorkerTest { public final void testCreateOrGetShardConsumer() { final String stageName = "testStageName"; IRecordProcessorFactory streamletFactory = SAMPLE_RECORD_PROCESSOR_FACTORY_V2; - config = new KinesisClientLibConfiguration(stageName, null, null, null); IKinesisProxy proxy = null; ICheckpoint checkpoint = null; int maxRecords = 1; @@ -149,13 +224,11 @@ public class WorkerTest { final String dummyKinesisShardId = "kinesis-0-0"; ExecutorService execService = null; - when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); + when(leaseCoordinator.getLeaseManager()).thenReturn(leaseManager); Worker worker = new Worker(stageName, - streamletFactory, - config, - streamConfig, INITIAL_POSITION_LATEST, + streamletFactory, streamConfig, INITIAL_POSITION_LATEST, parentShardPollIntervalMillis, shardSyncIntervalMillis, cleanupLeasesUponShardCompletion, @@ -193,7 +266,7 @@ public class WorkerTest { ExecutorService execService = null; - when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); + when(leaseCoordinator.getLeaseManager()).thenReturn(leaseManager); List initialState = createShardInfoList(ExtendedSequenceNumber.TRIM_HORIZON); List firstCheckpoint = createShardInfoList(new 
ExtendedSequenceNumber("1000")); @@ -202,33 +275,21 @@ public class WorkerTest { when(leaseCoordinator.getCurrentAssignments()).thenReturn(initialState).thenReturn(firstCheckpoint) .thenReturn(secondCheckpoint); - Worker worker = new Worker(stageName, - streamletFactory, - config, - streamConfig, - INITIAL_POSITION_LATEST, - parentShardPollIntervalMillis, - shardSyncIntervalMillis, - cleanupLeasesUponShardCompletion, - checkpoint, - leaseCoordinator, - execService, - nullMetricsFactory, - taskBackoffTimeMillis, - failoverTimeMillis, - KinesisClientLibConfiguration.DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST, - shardPrioritization); + Worker worker = new Worker(stageName, streamletFactory, streamConfig, INITIAL_POSITION_LATEST, + parentShardPollIntervalMillis, shardSyncIntervalMillis, cleanupLeasesUponShardCompletion, checkpoint, + leaseCoordinator, execService, nullMetricsFactory, taskBackoffTimeMillis, failoverTimeMillis, + KinesisClientLibConfiguration.DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST, shardPrioritization); Worker workerSpy = spy(worker); - doReturn(shardConsumer).when(workerSpy).buildConsumer(eq(initialState.get(0))); + doReturn(shardConsumer).when(workerSpy).buildConsumer(eq(initialState.get(0)), any(IRecordProcessorFactory.class)); workerSpy.runProcessLoop(); workerSpy.runProcessLoop(); workerSpy.runProcessLoop(); - verify(workerSpy).buildConsumer(same(initialState.get(0))); - verify(workerSpy, never()).buildConsumer(same(firstCheckpoint.get(0))); - verify(workerSpy, never()).buildConsumer(same(secondCheckpoint.get(0))); + verify(workerSpy).buildConsumer(same(initialState.get(0)), any(IRecordProcessorFactory.class)); + verify(workerSpy, never()).buildConsumer(same(firstCheckpoint.get(0)), any(IRecordProcessorFactory.class)); + verify(workerSpy, never()).buildConsumer(same(secondCheckpoint.get(0)), any(IRecordProcessorFactory.class)); } @@ -253,7 +314,6 @@ public class WorkerTest { public final void testCleanupShardConsumers() { final 
String stageName = "testStageName"; IRecordProcessorFactory streamletFactory = SAMPLE_RECORD_PROCESSOR_FACTORY_V2; - config = new KinesisClientLibConfiguration(stageName, null, null, null); IKinesisProxy proxy = null; ICheckpoint checkpoint = null; int maxRecords = 1; @@ -268,13 +328,11 @@ public class WorkerTest { final String dummyKinesisShardId = "kinesis-0-0"; final String anotherDummyKinesisShardId = "kinesis-0-1"; ExecutorService execService = null; - when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); + when(leaseCoordinator.getLeaseManager()).thenReturn(leaseManager); Worker worker = new Worker(stageName, - streamletFactory, - config, - streamConfig, INITIAL_POSITION_LATEST, + streamletFactory, streamConfig, INITIAL_POSITION_LATEST, parentShardPollIntervalMillis, shardSyncIntervalMillis, cleanupLeasesUponShardCompletion, @@ -313,7 +371,6 @@ public class WorkerTest { public final void testInitializationFailureWithRetries() { String stageName = "testInitializationWorker"; IRecordProcessorFactory recordProcessorFactory = new TestStreamletFactory(null, null); - config = new KinesisClientLibConfiguration(stageName, null, null, null); int count = 0; when(proxy.getShardList()).thenThrow(new RuntimeException(Integer.toString(count++))); int maxRecords = 2; @@ -323,13 +380,12 @@ public class WorkerTest { maxRecords, idleTimeInMilliseconds, callProcessRecordsForEmptyRecordList, skipCheckpointValidationValue, INITIAL_POSITION_LATEST); - when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); + when(leaseCoordinator.getLeaseManager()).thenReturn(leaseManager); ExecutorService execService = Executors.newSingleThreadExecutor(); long shardPollInterval = 0L; Worker worker = new Worker(stageName, recordProcessorFactory, - config, streamConfig, INITIAL_POSITION_TRIM_HORIZON, shardPollInterval, shardSyncIntervalMillis, @@ -346,10 +402,10 @@ public class WorkerTest { Assert.assertTrue(count > 0); } - *//** + /** * Runs worker with threadPoolSize == 
numShards - * Test method for {@link Worker#run()}. - *//* + * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}. + */ @Test public final void testRunWithThreadPoolSizeEqualToNumShards() throws Exception { final int numShards = 1; @@ -357,10 +413,10 @@ public class WorkerTest { runAndTestWorker(numShards, threadPoolSize); } - *//** + /** * Runs worker with threadPoolSize < numShards - * Test method for {@link Worker#run()}. - *//* + * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}. + */ @Test public final void testRunWithThreadPoolSizeLessThanNumShards() throws Exception { final int numShards = 3; @@ -368,10 +424,10 @@ public class WorkerTest { runAndTestWorker(numShards, threadPoolSize); } - *//** + /** * Runs worker with threadPoolSize > numShards - * Test method for {@link Worker#run()}. - *//* + * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}. + */ @Test public final void testRunWithThreadPoolSizeMoreThanNumShards() throws Exception { final int numShards = 3; @@ -379,10 +435,10 @@ public class WorkerTest { runAndTestWorker(numShards, threadPoolSize); } - *//** + /** * Runs worker with threadPoolSize < numShards - * Test method for {@link Worker#run()}. - *//* + * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}. 
+ */ @Test public final void testOneSplitShard2Threads() throws Exception { final int threadPoolSize = 2; @@ -390,15 +446,15 @@ public class WorkerTest { List shardList = createShardListWithOneSplit(); List initialLeases = new ArrayList(); KinesisClientLease lease = ShardSyncer.newKCLLease(shardList.get(0)); - lease.checkpoint(new ExtendedSequenceNumber("2")); + lease.setCheckpoint(new ExtendedSequenceNumber("2")); initialLeases.add(lease); - runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard, config); + runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard); } - *//** + /** * Runs worker with threadPoolSize < numShards - * Test method for {@link Worker#run()}. - *//* + * Test method for {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker#run()}. + */ @Test public final void testOneSplitShard2ThreadsWithCallsForEmptyRecords() throws Exception { final int threadPoolSize = 2; @@ -406,13 +462,10 @@ public class WorkerTest { List shardList = createShardListWithOneSplit(); List initialLeases = new ArrayList(); KinesisClientLease lease = ShardSyncer.newKCLLease(shardList.get(0)); - lease.checkpoint(new ExtendedSequenceNumber("2")); + lease.setCheckpoint(new ExtendedSequenceNumber("2")); initialLeases.add(lease); boolean callProcessRecordsForEmptyRecordList = true; - RecordsFetcherFactory recordsFetcherFactory = new SimpleRecordsFetcherFactory(); - recordsFetcherFactory.idleMillisBetweenCalls(0L); - when(config.getRecordsFetcherFactory()).thenReturn(recordsFetcherFactory); - runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard, config); + runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard); } @Test @@ -437,8 +490,7 @@ public class WorkerTest { 10, kinesisProxy, 
v2RecordProcessorFactory, executorService, - cwMetricsFactory, - config); + cwMetricsFactory); // Give some time for thread to run. workerStarted.await(); @@ -446,7 +498,7 @@ public class WorkerTest { workerThread.getWorker().shutdown(); workerThread.join(); - Assert.assertTrue(workerThread.state() == State.TERMINATED); + Assert.assertTrue(workerThread.getState() == State.TERMINATED); verify(executorService, times(1)).shutdownNow(); verify(cwMetricsFactory, times(1)).shutdown(); } @@ -456,7 +508,7 @@ public class WorkerTest { final long failoverTimeMillis = 20L; final ExecutorService executorService = mock(ThreadPoolExecutor.class); - final CloudWatchMetricsFactory cwMetricsFactory = mock(CloudWatchMetricsFactory.class); + final CWMetricsFactory cwMetricsFactory = mock(CWMetricsFactory.class); // Make sure that worker thread is run before invoking shutdown. final CountDownLatch workerStarted = new CountDownLatch(1); doAnswer(new Answer() { @@ -474,8 +526,7 @@ public class WorkerTest { 10, kinesisProxy, v2RecordProcessorFactory, executorService, - cwMetricsFactory, - config); + cwMetricsFactory); // Give some time for thread to run. 
workerStarted.await(); @@ -483,7 +534,7 @@ public class WorkerTest { workerThread.getWorker().shutdown(); workerThread.join(); - Assert.assertTrue(workerThread.state() == State.TERMINATED); + Assert.assertTrue(workerThread.getState() == State.TERMINATED); verify(executorService, times(0)).shutdownNow(); verify(cwMetricsFactory, times(0)).shutdown(); } @@ -498,7 +549,7 @@ public class WorkerTest { final List initialLeases = new ArrayList(); for (Shard shard : shardList) { KinesisClientLease lease = ShardSyncer.newKCLLease(shard); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + lease.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON); initialLeases.add(lease); } @@ -522,14 +573,6 @@ public class WorkerTest { } }).when(v2RecordProcessor).processRecords(any(ProcessRecordsInput.class)); - RecordsFetcherFactory recordsFetcherFactory = mock(RecordsFetcherFactory.class); - RecordsPublisher getRecordsCache = mock(RecordsPublisher.class); - when(config.getRecordsFetcherFactory()).thenReturn(recordsFetcherFactory); - when(recordsFetcherFactory.createRecordsFetcher(any(GetRecordsRetrievalStrategy.class), anyString(), - any(IMetricsFactory.class), anyInt())) - .thenReturn(getRecordsCache); - when(getRecordsCache.getNextResult()).thenReturn(new ProcessRecordsInput().records(Collections.emptyList()).millisBehindLatest(0L)); - WorkerThread workerThread = runWorker(shardList, initialLeases, callProcessRecordsForEmptyRecordList, @@ -538,8 +581,7 @@ public class WorkerTest { fileBasedProxy, v2RecordProcessorFactory, executorService, - nullMetricsFactory, - config); + nullMetricsFactory); // Only sleep for time that is required. 
processRecordsLatch.await(); @@ -553,17 +595,17 @@ public class WorkerTest { workerThread.getWorker().shutdown(); workerThread.join(); - Assert.assertTrue(workerThread.state() == State.TERMINATED); + Assert.assertTrue(workerThread.getState() == State.TERMINATED); verify(v2RecordProcessor, times(1)).shutdown(any(ShutdownInput.class)); } - *//** + /** * This test is testing the {@link Worker}'s shutdown behavior and by extension the behavior of * {@link ThreadPoolExecutor#shutdownNow()}. It depends on the thread pool sending an interrupt to the pool threads. * This behavior makes the test a bit racy, since we need to ensure a specific order of events. - * + * * @throws Exception - *//* + */ @Test public final void testWorkerForcefulShutdown() throws Exception { final List shardList = createShardListWithOneShard(); @@ -574,7 +616,7 @@ public class WorkerTest { final List initialLeases = new ArrayList(); for (Shard shard : shardList) { KinesisClientLease lease = ShardSyncer.newKCLLease(shard); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + lease.setCheckpoint(ExtendedSequenceNumber.TRIM_HORIZON); initialLeases.add(lease); } @@ -605,18 +647,18 @@ public class WorkerTest { final long startTimeMillis = System.currentTimeMillis(); long elapsedTimeMillis = 0; - log.info("Entering sleep @ {} with elapsedMills: {}", startTimeMillis, elapsedTimeMillis); + LOG.info("Entering sleep @ " + startTimeMillis + " with elapsedMills: " + elapsedTimeMillis); shutdownBlocker.acquire(); try { actionBlocker.acquire(); } catch (InterruptedException e) { - log.info("Sleep interrupted @ {} elapsedMillis: {}", System.currentTimeMillis(), - (System.currentTimeMillis() - startTimeMillis)); + LOG.info("Sleep interrupted @ " + System.currentTimeMillis() + " elapsedMillis: " + + (System.currentTimeMillis() - startTimeMillis)); recordProcessorInterrupted.getAndSet(true); } shutdownBlocker.release(); elapsedTimeMillis = System.currentTimeMillis() - startTimeMillis; - log.info("Sleep 
completed @ {} elapsedMillis: {}", System.currentTimeMillis(), elapsedTimeMillis); + LOG.info("Sleep completed @ " + System.currentTimeMillis() + " elapsedMillis: " + elapsedTimeMillis); return null; } @@ -630,8 +672,7 @@ public class WorkerTest { fileBasedProxy, v2RecordProcessorFactory, executorService, - nullMetricsFactory, - config); + nullMetricsFactory); // Only sleep for time that is required. processRecordsLatch.await(); @@ -645,7 +686,7 @@ public class WorkerTest { workerThread.getWorker().shutdown(); workerThread.join(); - Assert.assertTrue(workerThread.state() == State.TERMINATED); + Assert.assertTrue(workerThread.getState() == State.TERMINATED); // Shutdown should not be called in this case because record processor is blocked. verify(v2RecordProcessor, times(0)).shutdown(any(ShutdownInput.class)); @@ -672,16 +713,16 @@ public class WorkerTest { IMetricsFactory metricsFactory = mock(IMetricsFactory.class); ExtendedSequenceNumber checkpoint = new ExtendedSequenceNumber("123", 0L); - KinesisClientLeaseBuilder builder = new KinesisClientLeaseBuilder().checkpoint(checkpoint) - .concurrencyToken(UUID.randomUUID()).lastCounterIncrementNanos(0L).leaseCounter(0L) - .ownerSwitchesSinceCheckpoint(0L).leaseOwner("Self"); + KinesisClientLeaseBuilder builder = new KinesisClientLeaseBuilder().withCheckpoint(checkpoint) + .withConcurrencyToken(UUID.randomUUID()).withLastCounterIncrementNanos(0L).withLeaseCounter(0L) + .withOwnerSwitchesSinceCheckpoint(0L).withLeaseOwner("Self"); final List leases = new ArrayList<>(); final List currentAssignments = new ArrayList<>(); - KinesisClientLease lease = builder.leaseKey(String.format("shardId-%03d", 1)).build(); + KinesisClientLease lease = builder.withLeaseKey(String.format("shardId-%03d", 1)).build(); leases.add(lease); - currentAssignments.add(new ShardInfo(lease.leaseKey(), lease.concurrencyToken().toString(), - lease.parentShardIds(), lease.checkpoint())); + currentAssignments.add(new ShardInfo(lease.getLeaseKey(), 
lease.getConcurrencyToken().toString(), + lease.getParentShardIds(), lease.getCheckpoint())); when(leaseCoordinator.getAssignments()).thenAnswer(new Answer>() { @@ -701,22 +742,10 @@ public class WorkerTest { when(recordProcessorFactory.createProcessor()).thenReturn(processor); - Worker worker = new Worker("testRequestShutdown", - recordProcessorFactory, - config, - streamConfig, - INITIAL_POSITION_TRIM_HORIZON, - parentShardPollIntervalMillis, - shardSyncIntervalMillis, - cleanupLeasesUponShardCompletion, - leaseCoordinator, - leaseCoordinator, - executorService, - metricsFactory, - taskBackoffTimeMillis, - failoverTimeMillis, - false, - shardPrioritization); + Worker worker = new Worker("testRequestShutdown", recordProcessorFactory, streamConfig, + INITIAL_POSITION_TRIM_HORIZON, parentShardPollIntervalMillis, shardSyncIntervalMillis, + cleanupLeasesUponShardCompletion, leaseCoordinator, leaseCoordinator, executorService, metricsFactory, + taskBackoffTimeMillis, failoverTimeMillis, false, shardPrioritization); when(executorService.submit(Matchers.> any())) .thenAnswer(new ShutdownHandlingAnswer(taskFuture)); @@ -733,7 +762,7 @@ public class WorkerTest { verify(executorService, atLeastOnce()).submit(argThat( both(isA(MetricsCollectingTaskDecorator.class)).and(TaskTypeMatcher.isOfType(TaskType.INITIALIZE)))); - worker.createWorkerShutdownCallable().call(); + worker.requestShutdown(); worker.runProcessLoop(); verify(executorService, atLeastOnce()).submit(argThat(both(isA(MetricsCollectingTaskDecorator.class)) @@ -752,158 +781,6 @@ public class WorkerTest { } - @Test(expected = IllegalStateException.class) - public void testShutdownCallableNotAllowedTwice() throws Exception { - - IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class); - StreamConfig streamConfig = mock(StreamConfig.class); - IMetricsFactory metricsFactory = mock(IMetricsFactory.class); - - ExtendedSequenceNumber checkpoint = new ExtendedSequenceNumber("123", 0L); - 
KinesisClientLeaseBuilder builder = new KinesisClientLeaseBuilder().checkpoint(checkpoint) - .concurrencyToken(UUID.randomUUID()).lastCounterIncrementNanos(0L).leaseCounter(0L) - .ownerSwitchesSinceCheckpoint(0L).leaseOwner("Self"); - - final List leases = new ArrayList<>(); - final List currentAssignments = new ArrayList<>(); - KinesisClientLease lease = builder.leaseKey(String.format("shardId-%03d", 1)).build(); - leases.add(lease); - currentAssignments.add(new ShardInfo(lease.leaseKey(), lease.concurrencyToken().toString(), - lease.parentShardIds(), lease.checkpoint())); - - when(leaseCoordinator.getAssignments()).thenAnswer(new Answer>() { - @Override - public List answer(InvocationOnMock invocation) throws Throwable { - return leases; - } - }); - when(leaseCoordinator.getCurrentAssignments()).thenAnswer(new Answer>() { - @Override - public List answer(InvocationOnMock invocation) throws Throwable { - return currentAssignments; - } - }); - - IRecordProcessor processor = mock(IRecordProcessor.class); - when(recordProcessorFactory.createProcessor()).thenReturn(processor); - - Worker worker = new InjectableWorker("testRequestShutdown", recordProcessorFactory, config, streamConfig, - INITIAL_POSITION_TRIM_HORIZON, parentShardPollIntervalMillis, shardSyncIntervalMillis, - cleanupLeasesUponShardCompletion, leaseCoordinator, leaseCoordinator, executorService, metricsFactory, - taskBackoffTimeMillis, failoverTimeMillis, false, shardPrioritization) { - @Override - void postConstruct() { - this.gracefuleShutdownStarted = true; - } - }; - - when(executorService.submit(Matchers.> any())) - .thenAnswer(new ShutdownHandlingAnswer(taskFuture)); - when(taskFuture.isDone()).thenReturn(true); - when(taskFuture.get()).thenReturn(taskResult); - - worker.runProcessLoop(); - - verify(executorService, atLeastOnce()).submit(argThat(both(isA(MetricsCollectingTaskDecorator.class)) - .and(TaskTypeMatcher.isOfType(TaskType.BLOCK_ON_PARENT_SHARDS)))); - - worker.runProcessLoop(); - - 
verify(executorService, atLeastOnce()).submit(argThat( - both(isA(MetricsCollectingTaskDecorator.class)).and(TaskTypeMatcher.isOfType(TaskType.INITIALIZE)))); - - assertThat(worker.hasGracefulShutdownStarted(), equalTo(true)); - worker.createWorkerShutdownCallable().call(); - - } - - @Test - public void testGracefulShutdownSingleFuture() throws Exception { - - IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class); - StreamConfig streamConfig = mock(StreamConfig.class); - IMetricsFactory metricsFactory = mock(IMetricsFactory.class); - - ExtendedSequenceNumber checkpoint = new ExtendedSequenceNumber("123", 0L); - KinesisClientLeaseBuilder builder = new KinesisClientLeaseBuilder().checkpoint(checkpoint) - .concurrencyToken(UUID.randomUUID()).lastCounterIncrementNanos(0L).leaseCounter(0L) - .ownerSwitchesSinceCheckpoint(0L).leaseOwner("Self"); - - final List leases = new ArrayList<>(); - final List currentAssignments = new ArrayList<>(); - KinesisClientLease lease = builder.leaseKey(String.format("shardId-%03d", 1)).build(); - leases.add(lease); - currentAssignments.add(new ShardInfo(lease.leaseKey(), lease.concurrencyToken().toString(), - lease.parentShardIds(), lease.checkpoint())); - - when(leaseCoordinator.getAssignments()).thenAnswer(new Answer>() { - @Override - public List answer(InvocationOnMock invocation) throws Throwable { - return leases; - } - }); - when(leaseCoordinator.getCurrentAssignments()).thenAnswer(new Answer>() { - @Override - public List answer(InvocationOnMock invocation) throws Throwable { - return currentAssignments; - } - }); - - IRecordProcessor processor = mock(IRecordProcessor.class); - when(recordProcessorFactory.createProcessor()).thenReturn(processor); - - GracefulShutdownCoordinator coordinator = mock(GracefulShutdownCoordinator.class); - when(coordinator.createGracefulShutdownCallable(any(Callable.class))).thenReturn(() -> true); - - Future gracefulShutdownFuture = mock(Future.class); - - 
when(coordinator.startGracefulShutdown(any(Callable.class))).thenReturn(gracefulShutdownFuture); - - Worker worker = new InjectableWorker("testRequestShutdown", - recordProcessorFactory, - config, - streamConfig, - INITIAL_POSITION_TRIM_HORIZON, - parentShardPollIntervalMillis, - shardSyncIntervalMillis, - cleanupLeasesUponShardCompletion, - leaseCoordinator, - leaseCoordinator, - executorService, - metricsFactory, - taskBackoffTimeMillis, - failoverTimeMillis, - false, - shardPrioritization) { - @Override - void postConstruct() { - this.gracefulShutdownCoordinator = coordinator; - } - }; - - when(executorService.submit(Matchers.> any())) - .thenAnswer(new ShutdownHandlingAnswer(taskFuture)); - when(taskFuture.isDone()).thenReturn(true); - when(taskFuture.get()).thenReturn(taskResult); - - worker.runProcessLoop(); - - verify(executorService, atLeastOnce()).submit(argThat(both(isA(MetricsCollectingTaskDecorator.class)) - .and(TaskTypeMatcher.isOfType(TaskType.BLOCK_ON_PARENT_SHARDS)))); - - worker.runProcessLoop(); - - verify(executorService, atLeastOnce()).submit(argThat( - both(isA(MetricsCollectingTaskDecorator.class)).and(TaskTypeMatcher.isOfType(TaskType.INITIALIZE)))); - - Future firstFuture = worker.startGracefulShutdown(); - Future secondFuture = worker.startGracefulShutdown(); - - assertThat(firstFuture, equalTo(secondFuture)); - verify(coordinator).startGracefulShutdown(any(Callable.class)); - - } - @Test public void testRequestShutdownNoLeases() throws Exception { @@ -933,22 +810,10 @@ public class WorkerTest { when(recordProcessorFactory.createProcessor()).thenReturn(processor); - Worker worker = new Worker("testRequestShutdown", - recordProcessorFactory, - config, - streamConfig, - INITIAL_POSITION_TRIM_HORIZON, - parentShardPollIntervalMillis, - shardSyncIntervalMillis, - cleanupLeasesUponShardCompletion, - leaseCoordinator, - leaseCoordinator, - executorService, - metricsFactory, - taskBackoffTimeMillis, - failoverTimeMillis, - false, - 
shardPrioritization); + Worker worker = new Worker("testRequestShutdown", recordProcessorFactory, streamConfig, + INITIAL_POSITION_TRIM_HORIZON, parentShardPollIntervalMillis, shardSyncIntervalMillis, + cleanupLeasesUponShardCompletion, leaseCoordinator, leaseCoordinator, executorService, metricsFactory, + taskBackoffTimeMillis, failoverTimeMillis, false, shardPrioritization); when(executorService.submit(Matchers.> any())) .thenAnswer(new ShutdownHandlingAnswer(taskFuture)); @@ -965,7 +830,7 @@ public class WorkerTest { verify(executorService, never()).submit(argThat( both(isA(MetricsCollectingTaskDecorator.class)).and(TaskTypeMatcher.isOfType(TaskType.INITIALIZE)))); - worker.createWorkerShutdownCallable().call(); + worker.requestShutdown(); worker.runProcessLoop(); verify(executorService, never()).submit(argThat(both(isA(MetricsCollectingTaskDecorator.class)) @@ -1015,22 +880,10 @@ public class WorkerTest { IRecordProcessor processor = mock(IRecordProcessor.class); when(recordProcessorFactory.createProcessor()).thenReturn(processor); - Worker worker = new Worker("testRequestShutdown", - recordProcessorFactory, - config, - streamConfig, - INITIAL_POSITION_TRIM_HORIZON, - parentShardPollIntervalMillis, - shardSyncIntervalMillis, - cleanupLeasesUponShardCompletion, - leaseCoordinator, - leaseCoordinator, - executorService, - metricsFactory, - taskBackoffTimeMillis, - failoverTimeMillis, - false, - shardPrioritization); + Worker worker = new Worker("testRequestShutdown", recordProcessorFactory, streamConfig, + INITIAL_POSITION_TRIM_HORIZON, parentShardPollIntervalMillis, shardSyncIntervalMillis, + cleanupLeasesUponShardCompletion, leaseCoordinator, leaseCoordinator, executorService, metricsFactory, + taskBackoffTimeMillis, failoverTimeMillis, false, shardPrioritization); when(executorService.submit(Matchers.> any())) .thenAnswer(new ShutdownHandlingAnswer(taskFuture)); @@ -1056,7 +909,7 @@ public class WorkerTest { .withField(InitializeTask.class, "shardInfo", 
equalTo(shardInfo2))))); worker.getShardInfoShardConsumerMap().remove(shardInfo2); - worker.createWorkerShutdownCallable().call(); + worker.requestShutdown(); leases.remove(1); currentAssignments.remove(1); worker.runProcessLoop(); @@ -1128,22 +981,10 @@ public class WorkerTest { IRecordProcessor processor = mock(IRecordProcessor.class); when(recordProcessorFactory.createProcessor()).thenReturn(processor); - Worker worker = new Worker("testRequestShutdown", - recordProcessorFactory, - config, - streamConfig, - INITIAL_POSITION_TRIM_HORIZON, - parentShardPollIntervalMillis, - shardSyncIntervalMillis, - cleanupLeasesUponShardCompletion, - leaseCoordinator, - leaseCoordinator, - executorService, - metricsFactory, - taskBackoffTimeMillis, - failoverTimeMillis, - false, - shardPrioritization); + Worker worker = new Worker("testRequestShutdown", recordProcessorFactory, streamConfig, + INITIAL_POSITION_TRIM_HORIZON, parentShardPollIntervalMillis, shardSyncIntervalMillis, + cleanupLeasesUponShardCompletion, leaseCoordinator, leaseCoordinator, executorService, metricsFactory, + taskBackoffTimeMillis, failoverTimeMillis, false, shardPrioritization); when(executorService.submit(Matchers.> any())) .thenAnswer(new ShutdownHandlingAnswer(taskFuture)); @@ -1218,16 +1059,16 @@ public class WorkerTest { IMetricsFactory metricsFactory = mock(IMetricsFactory.class); ExtendedSequenceNumber checkpoint = new ExtendedSequenceNumber("123", 0L); - KinesisClientLeaseBuilder builder = new KinesisClientLeaseBuilder().checkpoint(checkpoint) - .concurrencyToken(UUID.randomUUID()).lastCounterIncrementNanos(0L).leaseCounter(0L) - .ownerSwitchesSinceCheckpoint(0L).leaseOwner("Self"); + KinesisClientLeaseBuilder builder = new KinesisClientLeaseBuilder().withCheckpoint(checkpoint) + .withConcurrencyToken(UUID.randomUUID()).withLastCounterIncrementNanos(0L).withLeaseCounter(0L) + .withOwnerSwitchesSinceCheckpoint(0L).withLeaseOwner("Self"); final List leases = new ArrayList<>(); final List 
currentAssignments = new ArrayList<>(); - KinesisClientLease lease = builder.leaseKey(String.format("shardId-%03d", 1)).build(); + KinesisClientLease lease = builder.withLeaseKey(String.format("shardId-%03d", 1)).build(); leases.add(lease); - currentAssignments.add(new ShardInfo(lease.leaseKey(), lease.concurrencyToken().toString(), - lease.parentShardIds(), lease.checkpoint())); + currentAssignments.add(new ShardInfo(lease.getLeaseKey(), lease.getConcurrencyToken().toString(), + lease.getParentShardIds(), lease.getCheckpoint())); when(leaseCoordinator.getAssignments()).thenAnswer(new Answer>() { @Override @@ -1245,22 +1086,10 @@ public class WorkerTest { IRecordProcessor processor = mock(IRecordProcessor.class); when(recordProcessorFactory.createProcessor()).thenReturn(processor); - Worker worker = new Worker("testRequestShutdown", - recordProcessorFactory, - config, - streamConfig, - INITIAL_POSITION_TRIM_HORIZON, - parentShardPollIntervalMillis, - shardSyncIntervalMillis, - cleanupLeasesUponShardCompletion, - leaseCoordinator, - leaseCoordinator, - executorService, - metricsFactory, - taskBackoffTimeMillis, - failoverTimeMillis, - false, - shardPrioritization); + Worker worker = new Worker("testRequestShutdown", recordProcessorFactory, streamConfig, + INITIAL_POSITION_TRIM_HORIZON, parentShardPollIntervalMillis, shardSyncIntervalMillis, + cleanupLeasesUponShardCompletion, leaseCoordinator, leaseCoordinator, executorService, metricsFactory, + taskBackoffTimeMillis, failoverTimeMillis, false, shardPrioritization); when(executorService.submit(Matchers.> any())) .thenAnswer(new ShutdownHandlingAnswer(taskFuture)); @@ -1302,16 +1131,16 @@ public class WorkerTest { IMetricsFactory metricsFactory = mock(IMetricsFactory.class); ExtendedSequenceNumber checkpoint = new ExtendedSequenceNumber("123", 0L); - KinesisClientLeaseBuilder builder = new KinesisClientLeaseBuilder().checkpoint(checkpoint) - 
.concurrencyToken(UUID.randomUUID()).lastCounterIncrementNanos(0L).leaseCounter(0L) - .ownerSwitchesSinceCheckpoint(0L).leaseOwner("Self"); + KinesisClientLeaseBuilder builder = new KinesisClientLeaseBuilder().withCheckpoint(checkpoint) + .withConcurrencyToken(UUID.randomUUID()).withLastCounterIncrementNanos(0L).withLeaseCounter(0L) + .withOwnerSwitchesSinceCheckpoint(0L).withLeaseOwner("Self"); final List leases = new ArrayList<>(); final List currentAssignments = new ArrayList<>(); - KinesisClientLease lease = builder.leaseKey(String.format("shardId-%03d", 1)).build(); + KinesisClientLease lease = builder.withLeaseKey(String.format("shardId-%03d", 1)).build(); leases.add(lease); - currentAssignments.add(new ShardInfo(lease.leaseKey(), lease.concurrencyToken().toString(), - lease.parentShardIds(), lease.checkpoint())); + currentAssignments.add(new ShardInfo(lease.getLeaseKey(), lease.getConcurrencyToken().toString(), + lease.getParentShardIds(), lease.getCheckpoint())); when(leaseCoordinator.getAssignments()).thenAnswer(new Answer>() { @Override @@ -1329,22 +1158,10 @@ public class WorkerTest { IRecordProcessor processor = mock(IRecordProcessor.class); when(recordProcessorFactory.createProcessor()).thenReturn(processor); - Worker worker = new Worker("testRequestShutdown", - recordProcessorFactory, - config, - streamConfig, - INITIAL_POSITION_TRIM_HORIZON, - parentShardPollIntervalMillis, - shardSyncIntervalMillis, - cleanupLeasesUponShardCompletion, - leaseCoordinator, - leaseCoordinator, - executorService, - metricsFactory, - taskBackoffTimeMillis, - failoverTimeMillis, - false, - shardPrioritization); + Worker worker = new Worker("testRequestShutdown", recordProcessorFactory, streamConfig, + INITIAL_POSITION_TRIM_HORIZON, parentShardPollIntervalMillis, shardSyncIntervalMillis, + cleanupLeasesUponShardCompletion, leaseCoordinator, leaseCoordinator, executorService, metricsFactory, + taskBackoffTimeMillis, failoverTimeMillis, false, shardPrioritization); 
when(executorService.submit(Matchers.> any())) .thenAnswer(new ShutdownHandlingAnswer(taskFuture)); @@ -1377,198 +1194,15 @@ public class WorkerTest { } - @Test - public void testBuilderWithDefaultKinesisProxy() { - IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class); - Worker worker = new Worker.Builder() - .recordProcessorFactory(recordProcessorFactory) - .config(config) - .build(); - Assert.assertNotNull(worker.getStreamConfig().getStreamProxy()); - Assert.assertTrue(worker.getStreamConfig().getStreamProxy() instanceof KinesisProxy); - } - - @Test - public void testBuilderWhenKinesisProxyIsSet() { - IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class); - // Create an instance of KinesisLocalFileProxy for injection and validation - IKinesisProxy kinesisProxy = mock(KinesisLocalFileProxy.class); - Worker worker = new Worker.Builder() - .recordProcessorFactory(recordProcessorFactory) - .config(config) - .kinesisProxy(kinesisProxy) - .build(); - Assert.assertNotNull(worker.getStreamConfig().getStreamProxy()); - Assert.assertTrue(worker.getStreamConfig().getStreamProxy() instanceof KinesisLocalFileProxy); - } - - @Test - public void testBuilderForWorkerStateListener() { - IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class); - Worker worker = new Worker.Builder() - .recordProcessorFactory(recordProcessorFactory) - .config(config) - .build(); - Assert.assertTrue(worker.getWorkerStateChangeListener() instanceof NoOpWorkerStateChangeListener); - } - - @Test - public void testBuilderWhenWorkerStateListenerIsSet() { - IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class); - Worker worker = new Worker.Builder() - .recordProcessorFactory(recordProcessorFactory) - .workerStateChangeListener(workerStateChangeListener) - .config(config) - .build(); - Assert.assertSame(workerStateChangeListener, worker.getWorkerStateChangeListener()); - } - - @Test - 
public void testWorkerStateListenerStatePassesThroughCreatedState() { - IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class); - new Worker.Builder() - .recordProcessorFactory(recordProcessorFactory) - .workerStateChangeListener(workerStateChangeListener) - .config(config) - .build(); - - verify(workerStateChangeListener, times(1)).onWorkerStateChange(eq(WorkerState.CREATED)); - } - - @Test - @Ignore - public void testWorkerStateChangeListenerGoesThroughStates() throws Exception { - - final CountDownLatch workerInitialized = new CountDownLatch(1); - final CountDownLatch workerStarted = new CountDownLatch(1); - final IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class); - final IRecordProcessor processor = mock(IRecordProcessor.class); - - ExtendedSequenceNumber checkpoint = new ExtendedSequenceNumber("123", 0L); - KinesisClientLeaseBuilder builder = new KinesisClientLeaseBuilder().checkpoint(checkpoint) - .concurrencyToken(UUID.randomUUID()).lastCounterIncrementNanos(0L).leaseCounter(0L) - .ownerSwitchesSinceCheckpoint(0L).leaseOwner("Self"); - final List leases = new ArrayList<>(); - KinesisClientLease lease = builder.leaseKey(String.format("shardId-%03d", 1)).build(); - leases.add(lease); - - doAnswer(new Answer() { - @Override - public Boolean answer(InvocationOnMock invocation) throws Throwable { - workerInitialized.countDown(); - return true; - } - }).when(leaseRefresher).waitUntilLeaseTableExists(anyLong(), anyLong()); - doAnswer(new Answer() { - @Override - public IRecordProcessor answer(InvocationOnMock invocation) throws Throwable { - workerStarted.countDown(); - return processor; - } - }).when(recordProcessorFactory).createProcessor(); - - when(config.workerIdentifier()).thenReturn("Self"); - when(leaseRefresher.listLeases()).thenReturn(leases); - when(leaseRefresher.renewLease(leases.get(0))).thenReturn(true); - when(executorService.submit(Matchers.> any())) - .thenAnswer(new 
ShutdownHandlingAnswer(taskFuture)); - when(taskFuture.isDone()).thenReturn(true); - when(taskFuture.get()).thenReturn(taskResult); - when(taskResult.isShardEndReached()).thenReturn(true); - - Worker worker = new Worker.Builder() - .recordProcessorFactory(recordProcessorFactory) - .config(config) - .leaseRefresher(leaseRefresher) - .kinesisProxy(kinesisProxy) - .execService(executorService) - .workerStateChangeListener(workerStateChangeListener) - .build(); - - verify(workerStateChangeListener, times(1)).onWorkerStateChange(eq(WorkerState.CREATED)); - - WorkerThread workerThread = new WorkerThread(worker); - workerThread.start(); - - workerInitialized.await(); - verify(workerStateChangeListener, times(1)).onWorkerStateChange(eq(WorkerState.INITIALIZING)); - - workerStarted.await(); - verify(workerStateChangeListener, times(1)).onWorkerStateChange(eq(WorkerState.STARTED)); - - boolean workerShutdown = worker.createGracefulShutdownCallable() - .call(); - - verify(workerStateChangeListener, times(1)).onWorkerStateChange(eq(WorkerState.SHUT_DOWN)); - } - - @Test - public void testBuilderWithDefaultLeaseManager() { - IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class); - - Worker worker = new Worker.Builder() - .recordProcessorFactory(recordProcessorFactory) - .config(config) - .build(); - - Assert.assertNotNull(worker.getLeaseCoordinator().leaseRefresher()); - } - - @SuppressWarnings("unchecked") - @Test - public void testBuilderWhenLeaseManagerIsSet() { - IRecordProcessorFactory recordProcessorFactory = mock(IRecordProcessorFactory.class); - // Create an instance of ILeaseManager for injection and validation - ILeaseManager leaseRefresher = (ILeaseManager) mock(ILeaseManager.class); - Worker worker = new Worker.Builder() - .recordProcessorFactory(recordProcessorFactory) - .config(config) - .leaseRefresher(leaseRefresher) - .build(); - - Assert.assertSame(leaseRefresher, worker.getLeaseCoordinator().leaseRefresher()); - } - - private 
abstract class InjectableWorker extends Worker { - InjectableWorker(String applicationName, IRecordProcessorFactory recordProcessorFactory, - KinesisClientLibConfiguration config, StreamConfig streamConfig, - InitialPositionInStreamExtended initialPositionInStream, - long parentShardPollIntervalMillis, long shardSyncIdleTimeMillis, - boolean cleanupLeasesUponShardCompletion, ICheckpoint checkpoint, - KinesisClientLibLeaseCoordinator leaseCoordinator, ExecutorService execService, - IMetricsFactory metricsFactory, long taskBackoffTimeMillis, long failoverTimeMillis, - boolean skipShardSyncAtWorkerInitializationIfLeasesExist, ShardPrioritization shardPrioritization) { - super(applicationName, - recordProcessorFactory, - config, - streamConfig, - initialPositionInStream, - parentShardPollIntervalMillis, - shardSyncIdleTimeMillis, - cleanupLeasesUponShardCompletion, - checkpoint, - leaseCoordinator, - execService, - metricsFactory, - taskBackoffTimeMillis, - failoverTimeMillis, - skipShardSyncAtWorkerInitializationIfLeasesExist, - shardPrioritization); - postConstruct(); - } - - abstract void postConstruct(); - } - private KinesisClientLease makeLease(ExtendedSequenceNumber checkpoint, int shardId) { - return new KinesisClientLeaseBuilder().checkpoint(checkpoint).concurrencyToken(UUID.randomUUID()) - .lastCounterIncrementNanos(0L).leaseCounter(0L).ownerSwitchesSinceCheckpoint(0L) - .leaseOwner("Self").leaseKey(String.format("shardId-%03d", shardId)).build(); + return new KinesisClientLeaseBuilder().withCheckpoint(checkpoint).withConcurrencyToken(UUID.randomUUID()) + .withLastCounterIncrementNanos(0L).withLeaseCounter(0L).withOwnerSwitchesSinceCheckpoint(0L) + .withLeaseOwner("Self").withLeaseKey(String.format("shardId-%03d", shardId)).build(); } private ShardInfo makeShardInfo(KinesisClientLease lease) { - return new ShardInfo(lease.leaseKey(), lease.concurrencyToken().toString(), lease.parentShardIds(), - lease.checkpoint()); + return new ShardInfo(lease.getLeaseKey(), 
lease.getConcurrencyToken().toString(), lease.getParentShardIds(), + lease.getCheckpoint()); } private static class ShutdownReasonMatcher extends TypeSafeDiagnosingMatcher { @@ -1620,7 +1254,7 @@ public class WorkerTest { @Override public Future answer(InvocationOnMock invocation) throws Throwable { - ConsumerTask rootTask = (ConsumerTask) invocation.getArguments()[0]; + ITask rootTask = (ITask) invocation.getArguments()[0]; if (rootTask instanceof MetricsCollectingTaskDecorator && ((MetricsCollectingTaskDecorator) rootTask).getOther() instanceof ShutdownNotificationTask) { ShutdownNotificationTask task = (ShutdownNotificationTask) ((MetricsCollectingTaskDecorator) rootTask).getOther(); @@ -1644,7 +1278,7 @@ public class WorkerTest { @Override protected boolean matchesSafely(MetricsCollectingTaskDecorator item) { - return expectedTaskType.matches(item.taskType()); + return expectedTaskType.matches(item.getTaskType()); } @Override @@ -1662,7 +1296,7 @@ public class WorkerTest { } } - private static class InnerTaskMatcher extends TypeSafeMatcher { + private static class InnerTaskMatcher extends TypeSafeMatcher { final Matcher matcher; @@ -1680,13 +1314,13 @@ public class WorkerTest { matcher.describeTo(description); } - static InnerTaskMatcher taskWith(Class clazz, Matcher matcher) { + static InnerTaskMatcher taskWith(Class clazz, Matcher matcher) { return new InnerTaskMatcher<>(matcher); } } @RequiredArgsConstructor - private static class ReflectionFieldMatcher + private static class ReflectionFieldMatcher extends TypeSafeDiagnosingMatcher { private final Class itemClass; @@ -1699,7 +1333,7 @@ public class WorkerTest { mismatchDescription.appendText("inner task is null"); return false; } - ConsumerTask inner = item.getOther(); + ITask inner = item.getOther(); if (!itemClass.equals(inner.getClass())) { mismatchDescription.appendText("inner task isn't an instance of ").appendText(itemClass.getName()); return false; @@ -1729,19 +1363,19 @@ public class WorkerTest { 
.appendText(fieldName).appendText("' matching ").appendDescriptionOf(fieldMatcher); } - static ReflectionFieldMatcher withField(Class itemClass, String fieldName, + static ReflectionFieldMatcher withField(Class itemClass, String fieldName, Matcher fieldMatcher) { return new ReflectionFieldMatcher<>(itemClass, fieldName, fieldMatcher); } } - *//** + /** * Returns executor service that will be owned by the worker. This is useful to test the scenario * where worker shuts down the executor service also during shutdown flow. * * @return Executor service that will be owned by the worker. - *//* + */ private WorkerThreadPoolExecutor getWorkerThreadPoolExecutor() { - ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("ShardRecordProcessor-%04d").build(); + ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("RecordProcessor-%04d").build(); return new WorkerThreadPoolExecutor(threadFactory); } @@ -1756,9 +1390,9 @@ public class WorkerTest { return shards; } - *//** + /** * @return - *//* + */ private List createShardListWithOneSplit() { List shards = new ArrayList(); SequenceNumberRange range0 = ShardObjectHelper.newSequenceNumberRange("39428", "987324"); @@ -1783,18 +1417,17 @@ public class WorkerTest { List initialLeases = new ArrayList(); for (Shard shard : shardList) { KinesisClientLease lease = ShardSyncer.newKCLLease(shard); - lease.checkpoint(ExtendedSequenceNumber.AT_TIMESTAMP); + lease.setCheckpoint(ExtendedSequenceNumber.AT_TIMESTAMP); initialLeases.add(lease); } - runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard, config); + runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard); } private void runAndTestWorker(List shardList, - int threadPoolSize, - List initialLeases, - boolean callProcessRecordsForEmptyRecordList, - int numberOfRecordsPerShard, - KinesisClientLibConfiguration 
clientConfig) throws Exception { + int threadPoolSize, + List initialLeases, + boolean callProcessRecordsForEmptyRecordList, + int numberOfRecordsPerShard) throws Exception { File file = KinesisLocalFileDataCreator.generateTempDataFile(shardList, numberOfRecordsPerShard, "unitTestWT001"); IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath()); @@ -1806,7 +1439,7 @@ public class WorkerTest { WorkerThread workerThread = runWorker( shardList, initialLeases, callProcessRecordsForEmptyRecordList, failoverTimeMillis, - numberOfRecordsPerShard, fileBasedProxy, recordProcessorFactory, executorService, nullMetricsFactory, clientConfig); + numberOfRecordsPerShard, fileBasedProxy, recordProcessorFactory, executorService, nullMetricsFactory); // TestStreamlet will release the semaphore once for every record it processes recordCounter.acquire(numberOfRecordsPerShard * shardList.size()); @@ -1823,15 +1456,14 @@ public class WorkerTest { } private WorkerThread runWorker(List shardList, - List initialLeases, - boolean callProcessRecordsForEmptyRecordList, - long failoverTimeMillis, - int numberOfRecordsPerShard, - IKinesisProxy kinesisProxy, - IRecordProcessorFactory recordProcessorFactory, - ExecutorService executorService, - IMetricsFactory metricsFactory, - KinesisClientLibConfiguration clientConfig) throws Exception { + List initialLeases, + boolean callProcessRecordsForEmptyRecordList, + long failoverTimeMillis, + int numberOfRecordsPerShard, + IKinesisProxy kinesisProxy, + IRecordProcessorFactory recordProcessorFactory, + ExecutorService executorService, + IMetricsFactory metricsFactory) throws Exception { final String stageName = "testStageName"; final int maxRecords = 2; @@ -1840,14 +1472,14 @@ public class WorkerTest { final long idleTimeInMilliseconds = 2L; AmazonDynamoDB ddbClient = DynamoDBEmbedded.create().amazonDynamoDB(); - LeaseManager leaseRefresher = new KinesisClientLeaseManager("foo", ddbClient); - 
leaseRefresher.createLeaseTableIfNotExists(1L, 1L); + LeaseManager leaseManager = new KinesisClientLeaseManager("foo", ddbClient); + leaseManager.createLeaseTableIfNotExists(1L, 1L); for (KinesisClientLease initialLease : initialLeases) { - leaseRefresher.createLeaseIfNotExists(initialLease); + leaseManager.createLeaseIfNotExists(initialLease); } KinesisClientLibLeaseCoordinator leaseCoordinator = - new KinesisClientLibLeaseCoordinator(leaseRefresher, + new KinesisClientLibLeaseCoordinator(leaseManager, stageName, leaseDurationMillis, epsilonMillis, @@ -1863,7 +1495,6 @@ public class WorkerTest { Worker worker = new Worker(stageName, recordProcessorFactory, - clientConfig, streamConfig, INITIAL_POSITION_TRIM_HORIZON, parentShardPollIntervalMillis, shardSyncIntervalMillis, @@ -1876,7 +1507,7 @@ public class WorkerTest { failoverTimeMillis, KinesisClientLibConfiguration.DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST, shardPrioritization); - + WorkerThread workerThread = new WorkerThread(worker); workerThread.start(); return workerThread; @@ -1896,7 +1527,7 @@ public class WorkerTest { Map shardsLastProcessorShutdownReason = new HashMap(); Map shardsNumProcessRecordsCallsWithEmptyRecordList = new HashMap(); for (TestStreamlet processor : recordProcessorFactory.getTestStreamlets()) { - String shardId = processor.shardId(); + String shardId = processor.getShardId(); if (shardStreamletsRecords.get(shardId) == null) { shardStreamletsRecords.put(shardId, processor.getProcessedRecords()); } else { @@ -1914,7 +1545,7 @@ public class WorkerTest { shardsNumProcessRecordsCallsWithEmptyRecordList.put(shardId, totalShardsNumProcessRecordsCallsWithEmptyRecordList); } - shardsLastProcessorShutdownReason.put(processor.shardId(), processor.getShutdownReason()); + shardsLastProcessorShutdownReason.put(processor.getShardId(), processor.getShutdownReason()); } // verify that all records were processed at least once @@ -1946,8 +1577,8 @@ public class WorkerTest { for (TestStreamlet 
processor : recordProcessorFactory.getTestStreamlets()) { List processedRecords = processor.getProcessedRecords(); for (int i = 0; i < processedRecords.size() - 1; i++) { - BigInteger sequenceNumberOfcurrentRecord = new BigInteger(processedRecords.get(i).sequenceNumber()); - BigInteger sequenceNumberOfNextRecord = new BigInteger(processedRecords.get(i + 1).sequenceNumber()); + BigInteger sequenceNumberOfcurrentRecord = new BigInteger(processedRecords.get(i).getSequenceNumber()); + BigInteger sequenceNumberOfNextRecord = new BigInteger(processedRecords.get(i + 1).getSequenceNumber()); Assert.assertTrue(sequenceNumberOfcurrentRecord.subtract(sequenceNumberOfNextRecord).signum() == -1); } } @@ -1963,10 +1594,10 @@ public class WorkerTest { Map shardIdsAndStreamLetsOfShardsWithOnlyOneProcessor = findShardIdsAndStreamLetsOfShardsWithOnlyOneProcessor(recordProcessorFactory); for (Shard shard : shardList) { - String shardId = shard.shardId(); + String shardId = shard.getShardId(); String iterator = fileBasedProxy.getIterator(shardId, new Date(KinesisLocalFileDataCreator.STARTING_TIMESTAMP)); - List expectedRecords = fileBasedProxy.get(iterator, numRecs).records(); + List expectedRecords = fileBasedProxy.get(iterator, numRecs).getRecords(); if (shardIdsAndStreamLetsOfShardsWithOnlyOneProcessor.containsKey(shardId)) { verifyAllRecordsWereConsumedExactlyOnce(expectedRecords, shardIdsAndStreamLetsOfShardsWithOnlyOneProcessor.get(shardId).getProcessedRecords()); @@ -1980,10 +1611,10 @@ public class WorkerTest { int numRecs, Map> shardStreamletsRecords) { for (Shard shard : shardList) { - String shardId = shard.shardId(); + String shardId = shard.getShardId(); String iterator = fileBasedProxy.getIterator(shardId, new Date(KinesisLocalFileDataCreator.STARTING_TIMESTAMP)); - List expectedRecords = fileBasedProxy.get(iterator, numRecs).records(); + List expectedRecords = fileBasedProxy.get(iterator, numRecs).getRecords(); verifyAllRecordsWereConsumedAtLeastOnce(expectedRecords, 
shardStreamletsRecords.get(shardId)); } @@ -1993,10 +1624,10 @@ public class WorkerTest { private void verifyLastProcessorOfClosedShardsWasShutdownWithTerminate(List shardList, Map shardsLastProcessorShutdownReason) { for (Shard shard : shardList) { - String shardId = shard.shardId(); + String shardId = shard.getShardId(); String endingSequenceNumber = shard.getSequenceNumberRange().getEndingSequenceNumber(); if (endingSequenceNumber != null) { - log.info("Closed shard {} has an endingSequenceNumber {}", shardId, endingSequenceNumber); + LOG.info("Closed shard " + shardId + " has an endingSequenceNumber " + endingSequenceNumber); Assert.assertEquals(ShutdownReason.TERMINATE, shardsLastProcessorShutdownReason.get(shardId)); } } @@ -2008,7 +1639,7 @@ public class WorkerTest { Map shardsNumProcessRecordsCallsWithEmptyRecordList, boolean callProcessRecordsForEmptyRecordList) { for (Shard shard : shardList) { - String shardId = shard.shardId(); + String shardId = shard.getShardId(); String endingSequenceNumber = shard.getSequenceNumberRange().getEndingSequenceNumber(); // check only for open shards if (endingSequenceNumber == null) { @@ -2027,7 +1658,7 @@ public class WorkerTest { new HashMap(); Set seenShardIds = new HashSet(); for (TestStreamlet processor : recordProcessorFactory.getTestStreamlets()) { - String shardId = processor.shardId(); + String shardId = processor.getShardId(); if (seenShardIds.add(shardId)) { shardIdsAndStreamLetsOfShardsWithOnlyOneProcessor.put(shardId, processor); } else { @@ -2071,5 +1702,5 @@ public class WorkerTest { public Worker getWorker() { return worker; } - }*/ + } } diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisLocalFileProxy.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisLocalFileProxy.java new file mode 100644 index 00000000..db70b5de --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisLocalFileProxy.java @@ -0,0 
+1,464 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.proxies; + +import java.io.BufferedReader; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.Charset; +import java.nio.charset.CharsetEncoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.services.kinesis.model.DescribeStreamResult; +import com.amazonaws.services.kinesis.model.ExpiredIteratorException; +import com.amazonaws.services.kinesis.model.GetRecordsResult; +import com.amazonaws.services.kinesis.model.InvalidArgumentException; +import com.amazonaws.services.kinesis.model.PutRecordResult; +import com.amazonaws.services.kinesis.model.Record; +import com.amazonaws.services.kinesis.model.ResourceNotFoundException; +import com.amazonaws.services.kinesis.model.SequenceNumberRange; +import com.amazonaws.services.kinesis.model.Shard; +import com.amazonaws.services.kinesis.model.ShardIteratorType; +import com.fasterxml.jackson.databind.ObjectMapper; + +/** + 
* This is a (temporary) test utility class, to mimic Kinesis without having to integrate with Alpha. + * In future, we should consider moving this to the Kinesis client/sampleApp package (if useful to + * other Kinesis clients). + */ +public class KinesisLocalFileProxy implements IKinesisProxy { + + /** + * Fields in the local file and their position in a line. + */ + public enum LocalFileFields { + /** Shard identifier. */ + SHARD_ID(0), + /** Sequence number (assumed unique across shards. */ + SEQUENCE_NUMBER(1), + /** Partition key associated with data record. */ + PARTITION_KEY(2), + /** Data. */ + DATA(3), + /** Approximate arrival timestamp. */ + APPROXIMATE_ARRIVAL_TIMESTAMP(4); + + private final int position; + + LocalFileFields(int position) { + this.position = position; + } + + /** + * @return Position of the field in the line. + */ + public int getPosition() { + return position; + } + }; + + private static final Log LOG = LogFactory.getLog(KinesisLocalFileProxy.class); + + private static final String ITERATOR_DELIMITER = ":"; + + private static final int NUM_FIELDS_IN_FILE = LocalFileFields.values().length; + + private final Map> shardedDataRecords = new HashMap>(); + + private List shardList; + + // Ids of shards that are closed - used to return a null iterator in getRecords after the last record + private Set closedShards = new HashSet(); + + private static final int EXPONENT = 128; + + /** + * Max value of the hashed partition key (2^128-1). Useful for constructing shards for a stream. + */ + public static final BigInteger MAX_HASHKEY_VALUE = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE); + + /** + * Max value of a sequence number (2^128 -1). Useful for defining sequence number range for a shard. + */ + public static final BigInteger MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE); + + /** + * @param fileName File with data records (one per line). 
+ * File format (shardId, sequenceNumber, partitionKey, dataRecord). + * @throws IOException IOException + */ + public KinesisLocalFileProxy(String fileName) throws IOException { + super(); + populateDataRecordsFromFile(fileName); + } + + private void populateDataRecordsFromFile(String file) throws IOException { + try (BufferedReader in = new BufferedReader( + new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8))) { + Charset charset = Charset.forName("UTF-8"); + CharsetEncoder encoder = charset.newEncoder(); + String str; + str = in.readLine(); + if (str != null) { + ObjectMapper objectMapper = new ObjectMapper(); + SerializedShardList shards = objectMapper.readValue(str, SerializedShardList.class); + shardList = shards.getShardList(); + } + if (shardList == null) { + shardList = new ArrayList(); + } + + // Populate shardIds of shards that have an ending sequence number (and which != maxSeqNum). + // GetRecords will return a null iterator for these after all data has been returned. + for (Shard shard : shardList) { + SequenceNumberRange range = shard.getSequenceNumberRange(); + if ((range != null) && (range.getEndingSequenceNumber() != null)) { + BigInteger endingSequenceNumber = new BigInteger(range.getEndingSequenceNumber()); + if (endingSequenceNumber.compareTo(MAX_SEQUENCE_NUMBER) != 0) { + closedShards.add(shard.getShardId()); + } + } + shardedDataRecords.put(shard.getShardId(), new ArrayList()); + } + + while ((str = in.readLine()) != null) { + String[] strArr = str.split(","); + if (strArr.length != NUM_FIELDS_IN_FILE) { + throw new InvalidArgumentException("Unexpected input in file." 
+ + "Expected format (shardId, sequenceNumber, partitionKey, dataRecord, timestamp)"); + } + String shardId = strArr[LocalFileFields.SHARD_ID.getPosition()]; + Record record = new Record(); + record.setSequenceNumber(strArr[LocalFileFields.SEQUENCE_NUMBER.getPosition()]); + record.setPartitionKey(strArr[LocalFileFields.PARTITION_KEY.getPosition()]); + ByteBuffer byteBuffer = encoder.encode(CharBuffer.wrap(strArr[LocalFileFields.DATA.getPosition()])); + record.setData(byteBuffer); + Date timestamp = + new Date(Long.parseLong(strArr[LocalFileFields.APPROXIMATE_ARRIVAL_TIMESTAMP.getPosition()])); + record.setApproximateArrivalTimestamp(timestamp); + List shardRecords = shardedDataRecords.get(shardId); + if (shardRecords == null) { + shardRecords = new ArrayList(); + } + shardRecords.add(record); + shardedDataRecords.put(shardId, shardRecords); + } + } + } + + /* + * (non-Javadoc) + * + * @see com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy#getStreamInfo() + */ + @Override + public DescribeStreamResult getStreamInfo(String startShardId) throws ResourceNotFoundException { + assert false : "getStreamInfo is not implemented."; + return null; + } + + @Override + public Set getAllShardIds() throws ResourceNotFoundException { + Set shardIds = new HashSet(); + if (shardedDataRecords != null) { + shardIds.addAll(shardedDataRecords.keySet()); + } + + return shardIds; + } + + /** + * Note, this method has package level access solely for testing purposes. + */ + static String serializeIterator(String shardId, String sequenceNumber) { + return String.format("%s%s%s", shardId, ITERATOR_DELIMITER, sequenceNumber); + } + + /** + * Container class for the return tuple of deserializeIterator. 
+ */ + // CHECKSTYLE:IGNORE VisibilityModifier FOR NEXT 10 LINES + static class IteratorInfo { + public String shardId; + + public String sequenceNumber; + + public IteratorInfo(String shardId, String sequenceNumber) { + this.shardId = shardId; + this.sequenceNumber = sequenceNumber; + } + } + + /** + * Deserialize our iterator - used by test cases to inspect returned iterators. + * + * @param iterator + * @return iteratorInfo + */ + static IteratorInfo deserializeIterator(String iterator) { + String[] splits = iterator.split(ITERATOR_DELIMITER); + return new IteratorInfo(splits[0], splits[1]); + } + + /** + * {@inheritDoc} + */ + @Override + public String getIterator(String shardId, String iteratorEnum, String sequenceNumber) + throws ResourceNotFoundException, InvalidArgumentException { + /* + * If we don't have records in this shard, any iterator will return the empty list. Using a + * sequence number of 1 on an empty shard will give this behavior. + */ + List shardRecords = shardedDataRecords.get(shardId); + if (shardRecords == null) { + throw new ResourceNotFoundException(shardId + " does not exist"); + } + if (shardRecords.isEmpty()) { + return serializeIterator(shardId, "1"); + } + + if (ShardIteratorType.LATEST.toString().equals(iteratorEnum)) { + /* + * If we do have records, LATEST should return an iterator that can be used to read the + * last record. Our iterators are inclusive for convenience. 
+ */ + Record last = shardRecords.get(shardRecords.size() - 1); + return serializeIterator(shardId, last.getSequenceNumber()); + } else if (ShardIteratorType.TRIM_HORIZON.toString().equals(iteratorEnum)) { + return serializeIterator(shardId, shardRecords.get(0).getSequenceNumber()); + } else if (ShardIteratorType.AT_SEQUENCE_NUMBER.toString().equals(iteratorEnum)) { + return serializeIterator(shardId, sequenceNumber); + } else if (ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString().equals(iteratorEnum)) { + BigInteger num = new BigInteger(sequenceNumber); + num = num.add(BigInteger.ONE); + return serializeIterator(shardId, num.toString()); + } else { + throw new IllegalArgumentException("IteratorEnum value was invalid: " + iteratorEnum); + } + } + + /** + * {@inheritDoc} + */ + @Override + public String getIterator(String shardId, String iteratorEnum) + throws ResourceNotFoundException, InvalidArgumentException { + /* + * If we don't have records in this shard, any iterator will return the empty list. Using a + * sequence number of 1 on an empty shard will give this behavior. + */ + List shardRecords = shardedDataRecords.get(shardId); + if (shardRecords == null) { + throw new ResourceNotFoundException(shardId + " does not exist"); + } + if (shardRecords.isEmpty()) { + return serializeIterator(shardId, "1"); + } + + final String serializedIterator; + if (ShardIteratorType.LATEST.toString().equals(iteratorEnum)) { + /* + * If we do have records, LATEST should return an iterator that can be used to read the + * last record. Our iterators are inclusive for convenience. 
+ */ + Record last = shardRecords.get(shardRecords.size() - 1); + serializedIterator = serializeIterator(shardId, last.getSequenceNumber()); + } else if (ShardIteratorType.TRIM_HORIZON.toString().equals(iteratorEnum)) { + serializedIterator = serializeIterator(shardId, shardRecords.get(0).getSequenceNumber()); + } else { + throw new IllegalArgumentException("IteratorEnum value was invalid: " + iteratorEnum); + } + return serializedIterator; + } + + /** + * {@inheritDoc} + */ + @Override + public String getIterator(String shardId, Date timestamp) + throws ResourceNotFoundException, InvalidArgumentException { + /* + * If we don't have records in this shard, any iterator will return the empty list. Using a + * sequence number of 1 on an empty shard will give this behavior. + */ + List shardRecords = shardedDataRecords.get(shardId); + if (shardRecords == null) { + throw new ResourceNotFoundException(shardId + " does not exist"); + } + if (shardRecords.isEmpty()) { + return serializeIterator(shardId, "1"); + } + + final String serializedIterator; + if (timestamp != null) { + String seqNumAtTimestamp = findSequenceNumberAtTimestamp(shardRecords, timestamp); + serializedIterator = serializeIterator(shardId, seqNumAtTimestamp); + } else { + throw new IllegalArgumentException("Timestamp must be specified for AT_TIMESTAMP iterator"); + } + return serializedIterator; + } + + private String findSequenceNumberAtTimestamp(final List shardRecords, final Date timestamp) { + for (Record rec : shardRecords) { + if (rec.getApproximateArrivalTimestamp().getTime() >= timestamp.getTime()) { + return rec.getSequenceNumber(); + } + } + return null; + } + + /* + * (non-Javadoc) + * + * @see com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy#get(java.nio.ByteBuffer, int) + */ + @Override + public GetRecordsResult get(String serializedKinesisIterator, int maxRecords) + throws ResourceNotFoundException, InvalidArgumentException, ExpiredIteratorException { + IteratorInfo 
iterator = deserializeIterator(serializedKinesisIterator); + + BigInteger startingPosition = new BigInteger(iterator.sequenceNumber); + BigInteger lastRecordsSeqNo = BigInteger.ONE; + List recordsToReturn = new ArrayList(); + List shardRecords = shardedDataRecords.get(iterator.shardId); + if (shardRecords == null) { + throw new ResourceNotFoundException(iterator.shardId + " does not exist"); + } + + boolean isHasMoreShards = false; + + for (int i = 0; i < shardRecords.size(); i++) { + Record record = shardRecords.get(i); + BigInteger recordSequenceNumber = new BigInteger(record.getSequenceNumber()); + // update lastRecordsSeqNo so if we return no records, it will be the seqNo of the last record. + lastRecordsSeqNo = recordSequenceNumber; + if (recordSequenceNumber.compareTo(startingPosition) >= 0) { + // Set endIndex (of sublist) to cap at either maxRecords or end of list. + int endIndex = Math.min(i + maxRecords, shardRecords.size()); + recordsToReturn.addAll(shardRecords.subList(i, endIndex)); + + lastRecordsSeqNo = new BigInteger(shardRecords.get(endIndex - 1).getSequenceNumber()); + if (endIndex < shardRecords.size()) { + isHasMoreShards = true; + } + + break; + } + } + + GetRecordsResult response = new GetRecordsResult(); + response.setRecords(recordsToReturn); + + // Set iterator only if the shard is not closed. + if (isHasMoreShards || (!closedShards.contains(iterator.shardId))) { + /* + * Use the sequence number of the last record returned + 1 to compute the next iterator. 
+ */ + response.setNextShardIterator(serializeIterator(iterator.shardId, lastRecordsSeqNo.add(BigInteger.ONE) + .toString())); + LOG.debug("Returning a non null iterator for shard " + iterator.shardId); + } else { + LOG.info("Returning null iterator for shard " + iterator.shardId); + } + + return response; + } + + /** + * {@inheritDoc} + */ + @Override + public PutRecordResult put(String exclusiveMinimumSequenceNumber, + String explicitHashKey, + String partitionKey, + ByteBuffer data) throws ResourceNotFoundException, InvalidArgumentException { + PutRecordResult output = new PutRecordResult(); + + BigInteger startingPosition = BigInteger.ONE; + + if (exclusiveMinimumSequenceNumber != null) { + startingPosition = new BigInteger(exclusiveMinimumSequenceNumber).add(BigInteger.ONE); + } + + output.setSequenceNumber(startingPosition.toString()); + return output; + } + + /** + * {@inheritDoc} + */ + @Override + public List getShardList() throws ResourceNotFoundException { + List shards = new LinkedList(); + shards.addAll(shardList); + return shards; + } + + /** + * Used for serializing/deserializing the shard list to the file. + */ + public static class SerializedShardList { + + private List shardList = new LinkedList(); + + /** + * Public to enable Jackson object mapper serialization. + */ + public SerializedShardList() { + } + + /** + * @param shardList List of shards for the stream. + */ + public SerializedShardList(List shardList) { + this.shardList.addAll(shardList); + } + + /** + * public to enable Jackson object mapper serialization. + * + * @return shardList + */ + public List getShardList() { + return shardList; + } + + /** + * public to enable Jackson object mapper deserialization. 
+ * + * @param shardList List of shards + */ + public void setShardList(List shardList) { + this.shardList = shardList; + } + } +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisLocalFileProxyFactory.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisLocalFileProxyFactory.java new file mode 100644 index 00000000..8a053ec4 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisLocalFileProxyFactory.java @@ -0,0 +1,64 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.clientlibrary.proxies; + +import java.io.File; +import java.io.IOException; +import java.math.BigInteger; + +import com.amazonaws.services.kinesis.clientlibrary.proxies.util.KinesisLocalFileDataCreator; + +/** Factory for KinesisProxy objects that use a local file for data. Useful for testing purposes. 
+ * + */ +public class KinesisLocalFileProxyFactory implements IKinesisProxyFactory { + + private static final int DEFAULT_NUM_SHARDS = 3; + private static final String DEFAULT_SHARD_ID_PREFIX = "ShardId-"; + private static final int DEFAULT_NUM_RECORDS_PER_SHARD = 10; + private static final BigInteger DEFAULT_STARTING_SEQUENCE_NUMBER = BigInteger.ZERO; + + private static final String DEFAULT_TEST_PROXY_FILE = "defaultKinesisProxyLocalFile"; + + private IKinesisProxy testKinesisProxy; + + + /** + * @param fileName File to be used for stream data. + * If the file exists then it is expected to contain information for creating a test proxy object. + * If the file does not exist then a temporary file containing default values for a test proxy object + * will be created and used. + + * @throws IOException This will be thrown if we can't read/create the data file. + */ + public KinesisLocalFileProxyFactory(String fileName) throws IOException { + File f = new File(fileName); + if (!f.exists()) { + f = KinesisLocalFileDataCreator.generateTempDataFile( + DEFAULT_NUM_SHARDS, DEFAULT_SHARD_ID_PREFIX, DEFAULT_NUM_RECORDS_PER_SHARD, + DEFAULT_STARTING_SEQUENCE_NUMBER, DEFAULT_TEST_PROXY_FILE); + } + testKinesisProxy = new KinesisLocalFileProxy(f.getAbsolutePath()); + } + + /* (non-Javadoc) + * @see com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxyFactory#getProxy(java.lang.String) + */ + @Override + public IKinesisProxy getProxy(String streamARN) { + return testKinesisProxy; + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxyTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxyTest.java new file mode 100644 index 00000000..2c1107b2 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/KinesisProxyTest.java @@ -0,0 +1,167 @@ +package com.amazonaws.services.kinesis.clientlibrary.proxies; + +import static org.hamcrest.Matchers.both; +import 
static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasProperty; +import static org.hamcrest.Matchers.isA; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.argThat; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import com.amazonaws.AmazonServiceException; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentMatcher; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.kinesis.AmazonKinesisClient; +import com.amazonaws.services.kinesis.model.DescribeStreamRequest; +import com.amazonaws.services.kinesis.model.DescribeStreamResult; +import com.amazonaws.services.kinesis.model.GetShardIteratorRequest; +import com.amazonaws.services.kinesis.model.GetShardIteratorResult; +import com.amazonaws.services.kinesis.model.LimitExceededException; +import com.amazonaws.services.kinesis.model.Shard; +import com.amazonaws.services.kinesis.model.ShardIteratorType; +import com.amazonaws.services.kinesis.model.StreamDescription; +import com.amazonaws.services.kinesis.model.StreamStatus; + +import junit.framework.Assert; + +@RunWith(MockitoJUnitRunner.class) +public class KinesisProxyTest { + private static final String TEST_STRING = "TestString"; + private static final long BACKOFF_TIME = 10L; + private static final int RETRY_TIMES = 50; + + @Mock + private AmazonKinesisClient mockClient; + @Mock + private AWSCredentialsProvider mockCredentialsProvider; + @Mock + private GetShardIteratorResult shardIteratorResult; + private KinesisProxy proxy; + + // Test shards for verifying. 
+ private Set shardIdSet; + private List shards; + + @Before + public void setUpTest() { + // Set up kinesis proxy + proxy = new KinesisProxy(TEST_STRING, mockCredentialsProvider, mockClient, BACKOFF_TIME, RETRY_TIMES); + when(mockCredentialsProvider.getCredentials()).thenReturn(null); + // Set up test shards + shardIdSet = new HashSet<>(); + shards = new ArrayList<>(); + String[] shardIds = new String[] { "shard-1", "shard-2", "shard-3", "shard-4" }; + for (String shardId : shardIds) { + Shard shard = new Shard(); + shard.setShardId(shardId); + shards.add(shard); + shardIdSet.add(shardId); + } + } + + @Test + public void testGetShardListWithMoreDataAvailable() { + // Set up mock : + // First call describeStream returning response with first two shards in the list; + // Second call describeStream returning response with rest shards. + DescribeStreamResult responseWithMoreData = createGetStreamInfoResponse(shards.subList(0, 2), true); + DescribeStreamResult responseFinal = createGetStreamInfoResponse(shards.subList(2, shards.size()), false); + doReturn(responseWithMoreData).when(mockClient).describeStream(argThat(new IsRequestWithStartShardId(null))); + doReturn(responseFinal).when(mockClient) + .describeStream(argThat(new IsRequestWithStartShardId(shards.get(1).getShardId()))); + + Set resultShardIdSets = proxy.getAllShardIds(); + Assert.assertTrue("Result set should equal to Test set", shardIdSet.equals(resultShardIdSets)); + } + + @Test + public void testGetShardListWithLimitExceededException() { + // Set up mock : + // First call describeStream throwing LimitExceededException; + // Second call describeStream returning shards list. 
+ DescribeStreamResult response = createGetStreamInfoResponse(shards, false); + doThrow(new LimitExceededException("Test Exception")).doReturn(response).when(mockClient) + .describeStream(argThat(new IsRequestWithStartShardId(null))); + + Set resultShardIdSet = proxy.getAllShardIds(); + Assert.assertTrue("Result set should equal to Test set", shardIdSet.equals(resultShardIdSet)); + } + + @Test + public void testValidShardIteratorType() { + when(mockClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(shardIteratorResult); + String expectedShardIteratorType = ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(); + proxy.getIterator("Shard-001", expectedShardIteratorType, "1234"); + + verify(mockClient).getShardIterator(argThat(both(isA(GetShardIteratorRequest.class)) + .and(hasProperty("shardIteratorType", equalTo(expectedShardIteratorType))))); + } + + @Test + public void testInvalidShardIteratorIsntChanged() { + when(mockClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(shardIteratorResult); + String expectedShardIteratorType = ShardIteratorType.AT_TIMESTAMP.toString(); + proxy.getIterator("Shard-001", expectedShardIteratorType, "1234"); + + verify(mockClient).getShardIterator(argThat(both(isA(GetShardIteratorRequest.class)) + .and(hasProperty("shardIteratorType", equalTo(expectedShardIteratorType))))); + } + + @Test(expected = AmazonServiceException.class) + public void testNullShardIteratorType() { + when(mockClient.getShardIterator(any(GetShardIteratorRequest.class))).thenThrow(new AmazonServiceException("expected null")); + String expectedShardIteratorType = null; + proxy.getIterator("Shard-001", expectedShardIteratorType, "1234"); + + verify(mockClient).getShardIterator(argThat(both(isA(GetShardIteratorRequest.class)) + .and(hasProperty("shardIteratorType", nullValue(String.class))))); + } + + private DescribeStreamResult createGetStreamInfoResponse(List shards1, boolean isHasMoreShards) { + // Create stream description 
+ StreamDescription description = new StreamDescription(); + description.setHasMoreShards(isHasMoreShards); + description.setShards(shards1); + description.setStreamStatus(StreamStatus.ACTIVE); + + // Create Describe Stream Result + DescribeStreamResult response = new DescribeStreamResult(); + response.setStreamDescription(description); + return response; + } + + // Matcher for testing describe stream request with specific start shard ID. + private static class IsRequestWithStartShardId extends ArgumentMatcher { + private final String shardId; + + public IsRequestWithStartShardId(String shardId) { + this.shardId = shardId; + } + + @Override + public boolean matches(Object request) { + String startShardId = ((DescribeStreamRequest) request).getExclusiveStartShardId(); + // If startShardId equals to null, shardId should also be null. + if (startShardId == null) { + return shardId == null; + } + return startShardId.equals(shardId); + } + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/util/KinesisLocalFileDataCreator.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/util/KinesisLocalFileDataCreator.java new file mode 100644 index 00000000..e5e4419a --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/proxies/util/KinesisLocalFileDataCreator.java @@ -0,0 +1,228 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.clientlibrary.proxies.util; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.math.BigInteger; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisLocalFileProxy; +import com.amazonaws.services.kinesis.model.HashKeyRange; +import com.amazonaws.services.kinesis.model.SequenceNumberRange; +import com.amazonaws.services.kinesis.model.Shard; +import com.fasterxml.jackson.databind.ObjectMapper; + +/** + * Temporary util class for generating data in a local file (used by KinesisLocalFileProxy). + */ +public class KinesisLocalFileDataCreator { + + /** + * Partition key prefix - also referenced in KinesisLocalFileProxyTest. + */ + public static final String PARTITION_KEY_PREFIX = "PK_"; + + private static final String FILE_NAME_SUFFIX = ".dat"; + + private static final long RAND_SEED_VALUE = 1092387456L; + // Used to cap the size of the random "hole" in sequence numbers. + private static final int NUM_BITS = 3; + private static Random randomGenerator = new Random(RAND_SEED_VALUE); + + private static final int PARTITION_KEY_LENGTH = 10; + private static final int DATA_LENGTH = 40; + + /** + * Starting timestamp - also referenced in KinesisLocalFileProxyTest. + */ + public static final long STARTING_TIMESTAMP = 1462345678910L; + + /** + * This is used to allow few records to have the same timestamps (to mimic real life scenarios). + * Records 5n-1 and 5n will have the same timestamp (n > 0). + */ + private static final int DIVISOR = 5; + + private KinesisLocalFileDataCreator() { + } + + /** Creates a temp file (in default temp file location) with fake Kinesis data records. + * This method does not support resharding use cases. 
+ * @param numShards Number of shards + * @param shardIdPrefix Prefix for shardIds (1, 2, ..., N will be added at the end to create shardIds) + * @param numRecordsPerShard Number of records to generate per shard + * @param startingSequenceNumber Sequence numbers in the generated data will be >= this number + * @param fileNamePrefix Prefix of the filename + * @return File created with the fake Kinesis records. + * @throws IOException Thrown if there are issues creating the file. + */ + public static File generateTempDataFile( + int numShards, + String shardIdPrefix, + int numRecordsPerShard, + BigInteger startingSequenceNumber, + String fileNamePrefix) + throws IOException { + List shardList = createShardList(numShards, shardIdPrefix, startingSequenceNumber); + return generateTempDataFile(shardList, numRecordsPerShard, fileNamePrefix); + } + + /** + * Creates a temp file (in default temp file location) with fake Kinesis data records. + * Records will be put in all shards. + * @param fileNamePrefix Prefix for the name of the temp file + * @param shardList List of shards (we use the shardId and sequenceNumberRange fields) + * @param numRecordsPerShard Num records per shard (the shard sequenceNumberRange should be large enough + * for us to allow these many records with some "holes") + * @return File with stream data filled in + * @throws IOException Thrown if there are issues creating/updating the file + */ + public static File generateTempDataFile(List shardList, int numRecordsPerShard, String fileNamePrefix) + throws IOException { + File file = File.createTempFile(fileNamePrefix, FILE_NAME_SUFFIX); + try (BufferedWriter fileWriter = new BufferedWriter( + new OutputStreamWriter(new FileOutputStream(file), StandardCharsets.UTF_8))) { + ObjectMapper objectMapper = new ObjectMapper(); + String serializedShardList = + objectMapper.writeValueAsString(new KinesisLocalFileProxy.SerializedShardList(shardList)); + fileWriter.write(serializedShardList); + fileWriter.newLine(); 
+ BigInteger sequenceNumberIncrement = new BigInteger("0"); + long timestamp = STARTING_TIMESTAMP; + for (int i = 0; i < numRecordsPerShard; i++) { + for (Shard shard : shardList) { + BigInteger sequenceNumber = + new BigInteger(shard.getSequenceNumberRange().getStartingSequenceNumber()).add( + sequenceNumberIncrement); + String endingSequenceNumber = shard.getSequenceNumberRange().getEndingSequenceNumber(); + BigInteger maxSequenceNumber = KinesisLocalFileProxy.MAX_SEQUENCE_NUMBER; + if (endingSequenceNumber != null) { + maxSequenceNumber = new BigInteger(endingSequenceNumber); + } + if (maxSequenceNumber.compareTo(sequenceNumber) != 1) { + throw new IllegalArgumentException("Not enough space in shard"); + } + String partitionKey = + PARTITION_KEY_PREFIX + shard.getShardId() + generateRandomString(PARTITION_KEY_LENGTH); + String data = generateRandomString(DATA_LENGTH); + + // Allow few records to have the same timestamps (to mimic real life scenarios). + timestamp = (i % DIVISOR == 0) ? timestamp : timestamp + 1; + String line = shard.getShardId() + "," + sequenceNumber + "," + partitionKey + "," + data + "," + + timestamp; + + fileWriter.write(line); + fileWriter.newLine(); + sequenceNumberIncrement = sequenceNumberIncrement.add(BigInteger.ONE); + sequenceNumberIncrement = sequenceNumberIncrement.add(new BigInteger(NUM_BITS, randomGenerator)); + } + } + } + return file; + } + + /** Helper method to create a list of shards (which can then be used to generate data files). + * @param numShards Number of shards + * @param shardIdPrefix Prefix for the shardIds + * @param startingSequenceNumber Starting sequence number for all the shards + * @return List of shards (with no reshard events). 
+ */ + public static List createShardList(int numShards, String shardIdPrefix, BigInteger startingSequenceNumber) { + List shards = new ArrayList(numShards); + + SequenceNumberRange sequenceNumberRange = new SequenceNumberRange(); + sequenceNumberRange.setStartingSequenceNumber(startingSequenceNumber.toString()); + sequenceNumberRange.setEndingSequenceNumber(null); + BigInteger perShardHashKeyRange = + KinesisLocalFileProxy.MAX_HASHKEY_VALUE.divide(new BigInteger(Integer.toString(numShards))); + BigInteger hashKeyRangeStart = new BigInteger("0"); + for (int i = 0; i < numShards; i++) { + Shard shard = new Shard(); + shard.setShardId(shardIdPrefix + i); + shard.setSequenceNumberRange(sequenceNumberRange); + BigInteger hashKeyRangeEnd = hashKeyRangeStart.add(perShardHashKeyRange); + HashKeyRange hashKeyRange = new HashKeyRange(); + hashKeyRange.setStartingHashKey(hashKeyRangeStart.toString()); + hashKeyRange.setEndingHashKey(hashKeyRangeEnd.toString()); + shards.add(shard); + } + + return shards; + } + + /** Generates a random string of specified length. + * @param length String of length will be generated + * @return Random generated string + */ + private static String generateRandomString(int length) { + StringBuffer str = new StringBuffer(); + final int startingCharAsciiValue = 97; + final int numChars = 26; + for (int i = 0; i < length; i++) { + str.append((char) (randomGenerator.nextInt(numChars - 1) + startingCharAsciiValue)); + } + return str.toString(); + } + + /** Creates a new temp file populated with fake Kinesis data records. 
+ * @param args Expects 5 args: numShards, shardPrefix, numRecordsPerShard, startingSequenceNumber, fileNamePrefix + */ + // CHECKSTYLE:OFF MagicNumber + // CHECKSTYLE:IGNORE UncommentedMain FOR NEXT 2 LINES + public static void main(String[] args) { + int numShards = 1; + String shardIdPrefix = "shardId"; + int numRecordsPerShard = 17; + BigInteger startingSequenceNumber = new BigInteger("99"); + String fileNamePrefix = "kinesisFakeRecords"; + + try { + if ((args.length != 0) && (args.length != 5)) { + // Temporary util code, so not providing detailed usage feedback. + System.out.println("Unexpected number of arguments."); + System.exit(0); + } + + if (args.length == 5) { + numShards = Integer.parseInt(args[0]); + shardIdPrefix = args[1]; + numRecordsPerShard = Integer.parseInt(args[2]); + startingSequenceNumber = new BigInteger(args[3]); + fileNamePrefix = args[4]; + } + + File file = KinesisLocalFileDataCreator.generateTempDataFile( + numShards, + shardIdPrefix, + numRecordsPerShard, + startingSequenceNumber, + fileNamePrefix); + System.out.println("Created fake kinesis records in file: " + file.getAbsolutePath()); + } catch (Exception e) { + // CHECKSTYLE:IGNORE IllegalCatch FOR NEXT -1 LINES + System.out.println("Caught Exception: " + e); + } + + } + // CHECKSTYLE:ON MagicNumber + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/clientlibrary/types/ShutdownReasonTest.java b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/types/ShutdownReasonTest.java new file mode 100644 index 00000000..011e0721 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/clientlibrary/types/ShutdownReasonTest.java @@ -0,0 +1,32 @@ +package com.amazonaws.services.kinesis.clientlibrary.types; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertThat; + +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason; +import org.junit.Test; + +/** + * Unit tests of ShutdownReason enum class. 
+ */ +public class ShutdownReasonTest { + + @Test + public void testTransitionZombie() { + assertThat(ShutdownReason.ZOMBIE.canTransitionTo(ShutdownReason.TERMINATE), equalTo(false)); + assertThat(ShutdownReason.ZOMBIE.canTransitionTo(ShutdownReason.REQUESTED), equalTo(false)); + } + + @Test + public void testTransitionTerminate() { + assertThat(ShutdownReason.TERMINATE.canTransitionTo(ShutdownReason.ZOMBIE), equalTo(true)); + assertThat(ShutdownReason.TERMINATE.canTransitionTo(ShutdownReason.REQUESTED), equalTo(false)); + } + + @Test + public void testTransitionRequested() { + assertThat(ShutdownReason.REQUESTED.canTransitionTo(ShutdownReason.ZOMBIE), equalTo(true)); + assertThat(ShutdownReason.REQUESTED.canTransitionTo(ShutdownReason.TERMINATE), equalTo(true)); + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLeaseBuilder.java b/src/test/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLeaseBuilder.java new file mode 100644 index 00000000..df39b9f2 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/leases/impl/KinesisClientLeaseBuilder.java @@ -0,0 +1,63 @@ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.HashSet; +import java.util.Set; +import java.util.UUID; + +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; + +public class KinesisClientLeaseBuilder { + private String leaseKey; + private String leaseOwner; + private Long leaseCounter = 0L; + private UUID concurrencyToken; + private Long lastCounterIncrementNanos; + private ExtendedSequenceNumber checkpoint; + private Long ownerSwitchesSinceCheckpoint = 0L; + private Set parentShardIds = new HashSet<>(); + + public KinesisClientLeaseBuilder withLeaseKey(String leaseKey) { + this.leaseKey = leaseKey; + return this; + } + + public KinesisClientLeaseBuilder withLeaseOwner(String leaseOwner) { + this.leaseOwner = leaseOwner; + return this; + } + + public KinesisClientLeaseBuilder 
withLeaseCounter(Long leaseCounter) { + this.leaseCounter = leaseCounter; + return this; + } + + public KinesisClientLeaseBuilder withConcurrencyToken(UUID concurrencyToken) { + this.concurrencyToken = concurrencyToken; + return this; + } + + public KinesisClientLeaseBuilder withLastCounterIncrementNanos(Long lastCounterIncrementNanos) { + this.lastCounterIncrementNanos = lastCounterIncrementNanos; + return this; + } + + public KinesisClientLeaseBuilder withCheckpoint(ExtendedSequenceNumber checkpoint) { + this.checkpoint = checkpoint; + return this; + } + + public KinesisClientLeaseBuilder withOwnerSwitchesSinceCheckpoint(Long ownerSwitchesSinceCheckpoint) { + this.ownerSwitchesSinceCheckpoint = ownerSwitchesSinceCheckpoint; + return this; + } + + public KinesisClientLeaseBuilder withParentShardIds(Set parentShardIds) { + this.parentShardIds = parentShardIds; + return this; + } + + public KinesisClientLease build() { + return new KinesisClientLease(leaseKey, leaseOwner, leaseCounter, concurrencyToken, lastCounterIncrementNanos, + checkpoint, ownerSwitchesSinceCheckpoint, parentShardIds); + } +} \ No newline at end of file diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCoordinatorExerciser.java b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseCoordinatorExerciser.java similarity index 50% rename from amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCoordinatorExerciser.java rename to src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseCoordinatorExerciser.java index f2829936..3c67a827 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCoordinatorExerciser.java +++ b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseCoordinatorExerciser.java @@ -1,5 +1,5 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Amazon Software License (the "License"). * You may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ -package software.amazon.kinesis.leases; +package com.amazonaws.services.kinesis.leases.impl; import java.awt.*; import java.awt.event.ActionEvent; @@ -27,79 +27,69 @@ import java.util.Map; import javax.swing.*; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; -import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; -import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseCoordinator; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher; -import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseSerializer; -import software.amazon.kinesis.leases.dynamodb.TableCreatorCallback; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.InvalidStateException; -import software.amazon.kinesis.leases.exceptions.LeasingException; -import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; -import software.amazon.kinesis.metrics.CloudWatchMetricsFactory; -import software.amazon.kinesis.metrics.MetricsConfig; -import software.amazon.kinesis.metrics.MetricsLevel; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; 
+import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.LeasingException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; +import com.amazonaws.services.kinesis.metrics.impl.CWMetricsFactory; -@Slf4j public class LeaseCoordinatorExerciser { - private static final int MAX_LEASES_FOR_WORKER = Integer.MAX_VALUE; - private static final int MAX_LEASES_TO_STEAL_AT_ONE_TIME = 1; - private static final int MAX_LEASE_RENEWER_THREAD_COUNT = 20; - private static final MetricsLevel METRICS_LEVEL = MetricsLevel.DETAILED; - private static final int FLUSH_SIZE = 200; - private static final long INITIAL_LEASE_TABLE_READ_CAPACITY = 10L; - private static final long INITIAL_LEASE_TABLE_WRITE_CAPACITY = 50L; - public static void main(String[] args) throws InterruptedException, DependencyException, InvalidStateException, - ProvisionedThroughputException, IOException { + private static final Log LOG = LogFactory.getLog(LeaseCoordinatorExerciser.class); + + public static void main(String[] args) + throws InterruptedException, DependencyException, InvalidStateException, ProvisionedThroughputException, + IOException { int numCoordinators = 9; int numLeases = 73; int leaseDurationMillis = 10000; int epsilonMillis = 100; - DynamoDbAsyncClient dynamoDBClient = DynamoDbAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); + AWSCredentialsProvider creds = + new DefaultAWSCredentialsProviderChain(); + AmazonDynamoDBClient ddb = new AmazonDynamoDBClient(creds); - LeaseRefresher leaseRefresher = new DynamoDBLeaseRefresher("nagl_ShardProgress", dynamoDBClient, - new DynamoDBLeaseSerializer(), true, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); + ILeaseManager leaseManager = new KinesisClientLeaseManager("nagl_ShardProgress", ddb); - if 
(leaseRefresher.createLeaseTableIfNotExists(INITIAL_LEASE_TABLE_READ_CAPACITY, - INITIAL_LEASE_TABLE_WRITE_CAPACITY)) { - log.info("Waiting for newly created lease table"); - if (!leaseRefresher.waitUntilLeaseTableExists(10, 300)) { - log.error("Table was not created in time"); + if (leaseManager.createLeaseTableIfNotExists(10L, 50L)) { + LOG.info("Waiting for newly created lease table"); + if (!leaseManager.waitUntilLeaseTableExists(10, 300)) { + LOG.error("Table was not created in time"); return; } } - CloudWatchAsyncClient client = CloudWatchAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); - CloudWatchMetricsFactory metricsFactory = new CloudWatchMetricsFactory(client, "testNamespace", 30 * 1000, 1000, - METRICS_LEVEL, MetricsConfig.METRICS_DIMENSIONS_ALL, FLUSH_SIZE); - final List coordinators = new ArrayList<>(); + CWMetricsFactory metricsFactory = new CWMetricsFactory(creds, "testNamespace", 30 * 1000, 1000); + final List> coordinators = + new ArrayList>(); for (int i = 0; i < numCoordinators; i++) { String workerIdentifier = "worker-" + Integer.toString(i); - LeaseCoordinator coord = new DynamoDBLeaseCoordinator(leaseRefresher, workerIdentifier, leaseDurationMillis, - epsilonMillis, MAX_LEASES_FOR_WORKER, MAX_LEASES_TO_STEAL_AT_ONE_TIME, - MAX_LEASE_RENEWER_THREAD_COUNT, INITIAL_LEASE_TABLE_READ_CAPACITY, - INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory); + LeaseCoordinator coord = new LeaseCoordinator(leaseManager, + workerIdentifier, + leaseDurationMillis, + epsilonMillis, + metricsFactory); coordinators.add(coord); } - leaseRefresher.deleteAll(); + leaseManager.deleteAll(); for (int i = 0; i < numLeases; i++) { - Lease lease = new Lease(); - lease.leaseKey(Integer.toString(i)); - lease.checkpoint(new ExtendedSequenceNumber("checkpoint")); - leaseRefresher.createLeaseIfNotExists(lease); + KinesisClientLease lease = new KinesisClientLease(); + lease.setLeaseKey(Integer.toString(i)); + lease.setCheckpoint(new 
ExtendedSequenceNumber("checkpoint")); + leaseManager.createLeaseIfNotExists(lease); } final JFrame frame = new JFrame("Test Visualizer"); @@ -110,10 +100,10 @@ public class LeaseCoordinatorExerciser { frame.getContentPane().add(panel); final Map labels = new HashMap(); - for (final LeaseCoordinator coord : coordinators) { + for (final LeaseCoordinator coord : coordinators) { JPanel coordPanel = new JPanel(); coordPanel.setLayout(new BoxLayout(coordPanel, BoxLayout.X_AXIS)); - final Button button = new Button("Stop " + coord.workerIdentifier()); + final Button button = new Button("Stop " + coord.getWorkerIdentifier()); button.setMaximumSize(new Dimension(200, 50)); button.addActionListener(new ActionListener() { @@ -121,14 +111,14 @@ public class LeaseCoordinatorExerciser { public void actionPerformed(ActionEvent arg0) { if (coord.isRunning()) { coord.stop(); - button.setLabel("Start " + coord.workerIdentifier()); + button.setLabel("Start " + coord.getWorkerIdentifier()); } else { try { coord.start(); } catch (LeasingException e) { - log.error("{}", e); + LOG.error(e); } - button.setLabel("Stop " + coord.workerIdentifier()); + button.setLabel("Stop " + coord.getWorkerIdentifier()); } } @@ -137,7 +127,7 @@ public class LeaseCoordinatorExerciser { JLabel label = new JLabel(); coordPanel.add(label); - labels.put(coord.workerIdentifier(), label); + labels.put(coord.getWorkerIdentifier(), label); panel.add(coordPanel); } @@ -154,17 +144,17 @@ public class LeaseCoordinatorExerciser { @Override public void run() { while (true) { - for (LeaseCoordinator coord : coordinators) { - String workerIdentifier = coord.workerIdentifier(); + for (LeaseCoordinator coord : coordinators) { + String workerIdentifier = coord.getWorkerIdentifier(); JLabel label = labels.get(workerIdentifier); - List asgn = new ArrayList<>(coord.getAssignments()); - Collections.sort(asgn, new Comparator() { + List asgn = new ArrayList(coord.getAssignments()); + Collections.sort(asgn, new Comparator() { 
@Override - public int compare(final Lease arg0, final Lease arg1) { - return arg0.leaseKey().compareTo(arg1.leaseKey()); + public int compare(KinesisClientLease arg0, KinesisClientLease arg1) { + return arg0.getLeaseKey().compareTo(arg1.getLeaseKey()); } }); @@ -173,22 +163,23 @@ public class LeaseCoordinatorExerciser { builder.append(""); builder.append(workerIdentifier).append(":").append(asgn.size()).append(" "); - for (Lease lease : asgn) { - String leaseKey = lease.leaseKey(); + for (KinesisClientLease lease : asgn) { + String leaseKey = lease.getLeaseKey(); String lastOwner = lastOwners.get(leaseKey); // Color things green when they switch owners, decay the green-ness over time. Integer greenNess = greenNesses.get(leaseKey); - if (greenNess == null || lastOwner == null || !lastOwner.equals(lease.leaseOwner())) { + if (greenNess == null || lastOwner == null || !lastOwner.equals(lease.getLeaseOwner())) { greenNess = 200; } else { greenNess = Math.max(0, greenNess - 20); } greenNesses.put(leaseKey, greenNess); - lastOwners.put(leaseKey, lease.leaseOwner()); + lastOwners.put(leaseKey, lease.getLeaseOwner()); builder.append(String.format("%03d", - String.format("#00%02x00", greenNess), Integer.parseInt(leaseKey))).append(" "); + String.format("#00%02x00", greenNess), + Integer.parseInt(leaseKey))).append(" "); } builder.append(""); @@ -215,7 +206,7 @@ public class LeaseCoordinatorExerciser { frame.pack(); frame.setVisible(true); - for (LeaseCoordinator coord : coordinators) { + for (LeaseCoordinator coord : coordinators) { coord.start(); } } diff --git a/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseIntegrationTest.java b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseIntegrationTest.java new file mode 100644 index 00000000..57a9c99b --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseIntegrationTest.java @@ -0,0 +1,74 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.logging.Logger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; + +import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; +import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient; +import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper; +import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory; + +@Ignore +public class LeaseIntegrationTest { + + protected static KinesisClientLeaseManager leaseManager; + protected static AmazonDynamoDBClient ddbClient = + new AmazonDynamoDBClient(new DefaultAWSCredentialsProviderChain()); + + private static final Log LOG = LogFactory.getLog(LeaseIntegrationTest.class); + + @Rule + public TestWatcher watcher = new TestWatcher() { + + @Override + protected void starting(Description description) { + if (leaseManager == null) { + // Do some static setup once per class. 
+ + leaseManager = new KinesisClientLeaseManager("nagl_ShardProgress", ddbClient, true); + + MetricsHelper.startScope(new NullMetricsFactory()); + } + + try { + if (!leaseManager.leaseTableExists()) { + LOG.info("Creating lease table"); + leaseManager.createLeaseTableIfNotExists(10L, 10L); + + leaseManager.waitUntilLeaseTableExists(10, 500); + } + + LOG.info("Beginning test case " + description.getMethodName()); + for (KinesisClientLease lease : leaseManager.listLeases()) { + leaseManager.deleteLease(lease); + } + } catch (Exception e) { + String message = + "Test case " + description.getMethodName() + " fails because of exception during init: " + e; + LOG.error(message); + throw new RuntimeException(message, e); + } + } + }; + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseManagerIntegrationTest.java b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseManagerIntegrationTest.java new file mode 100644 index 00000000..23cc9fc1 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseManagerIntegrationTest.java @@ -0,0 +1,268 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import junit.framework.Assert; + +import org.junit.Test; + +import com.amazonaws.services.kinesis.leases.exceptions.LeasingException; + +public class LeaseManagerIntegrationTest extends LeaseIntegrationTest { + + /** + * Test listLeases when no records are present. + */ + @Test + public void testListNoRecords() throws LeasingException { + List leases = leaseManager.listLeases(); + Assert.assertTrue(leases.isEmpty()); + } + + /** + * Tests listLeases when records are present. Exercise dynamo's paging functionality. + */ + @Test + public void testListWithRecords() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); + + int numRecordsToPut = 10; + + for (int i = 0; i < numRecordsToPut; i++) { + builder.withLease(Integer.toString(i)); + } + + Collection expected = builder.build().values(); + + // The / 3 here ensures that we will test Dynamo's paging mechanics. + List actual = leaseManager.list(numRecordsToPut / 3); + + for (KinesisClientLease lease : actual) { + Assert.assertNotNull(expected.remove(lease)); + } + + Assert.assertTrue(expected.isEmpty()); + } + + /** + * Tests getLease when a record is present. + */ + @Test + public void testGetLease() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); + Lease expected = builder.withLease("1").build().get("1"); + + Lease actual = leaseManager.getLease(expected.getLeaseKey()); + Assert.assertEquals(expected, actual); + } + + /** + * Tests leaseManager.get() when the looked-for record is absent. + */ + @Test + public void testGetNull() throws LeasingException { + Lease actual = leaseManager.getLease("bogusShardId"); + Assert.assertNull(actual); + } + + /** + * Tests leaseManager.holdLease's success scenario. 
+ */ + @Test + public void testRenewLease() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); + KinesisClientLease lease = builder.withLease("1").build().get("1"); + Long originalLeaseCounter = lease.getLeaseCounter(); + + leaseManager.renewLease(lease); + Assert.assertTrue(originalLeaseCounter + 1 == lease.getLeaseCounter()); + + Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey()); + + Assert.assertEquals(lease, fromDynamo); + } + + /** + * Tests leaseManager.holdLease when the lease has changed out from under us. + */ + @Test + public void testHoldUpdatedLease() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); + KinesisClientLease lease = builder.withLease("1").build().get("1"); + + KinesisClientLease leaseCopy = leaseManager.getLease(lease.getLeaseKey()); + + leaseManager.renewLease(lease); + + Assert.assertFalse(leaseManager.renewLease(leaseCopy)); + } + + /** + * Tests takeLease when the lease is not already owned. + */ + @Test + public void testTakeUnownedLease() throws LeasingException { + testTakeLease(false); + } + + /** + * Tests takeLease when the lease is already owned. + */ + @Test + public void testTakeOwnedLease() throws LeasingException { + testTakeLease(true); + } + + private void testTakeLease(boolean owned) throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); + KinesisClientLease lease = builder.withLease("1", owned ? "originalOwner" : null).build().get("1"); + Long originalLeaseCounter = lease.getLeaseCounter(); + + String newOwner = "newOwner"; + leaseManager.takeLease(lease, newOwner); + Assert.assertTrue(originalLeaseCounter + 1 == lease.getLeaseCounter()); + Assert.assertTrue((owned ? 
1 : 0) == lease.getOwnerSwitchesSinceCheckpoint()); + Assert.assertEquals(newOwner, lease.getLeaseOwner()); + + Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey()); + + Assert.assertEquals(lease, fromDynamo); + } + + /** + * Tests takeLease when the lease has changed out from under us. + */ + @Test + public void testTakeUpdatedLease() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); + KinesisClientLease lease = builder.withLease("1").build().get("1"); + + KinesisClientLease leaseCopy = leaseManager.getLease(lease.getLeaseKey()); + + String newOwner = "newOwner"; + leaseManager.takeLease(lease, newOwner); + + Assert.assertFalse(leaseManager.takeLease(leaseCopy, newOwner)); + } + + /** + * Tests evictLease when the lease is currently unowned. + */ + public void testEvictUnownedLease() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); + KinesisClientLease lease = builder.withLease("1", null).build().get("1"); + + Assert.assertFalse(leaseManager.evictLease(lease)); + } + + /** + * Tests evictLease when the lease is currently owned. + */ + @Test + public void testEvictOwnedLease() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); + KinesisClientLease lease = builder.withLease("1").build().get("1"); + Long originalLeaseCounter = lease.getLeaseCounter(); + + leaseManager.evictLease(lease); + Assert.assertNull(lease.getLeaseOwner()); + Assert.assertTrue(originalLeaseCounter + 1 == lease.getLeaseCounter()); + + Lease fromDynamo = leaseManager.getLease(lease.getLeaseKey()); + + Assert.assertEquals(lease, fromDynamo); + } + + /** + * Tests evictLease when the lease has changed out from under us. Note that evicting leases + * is conditional on the lease owner, unlike everything else which is conditional on the + * lease counter. 
+ */ + @Test + public void testEvictChangedLease() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); + KinesisClientLease lease = builder.withLease("1").build().get("1"); + + // Change the owner only - this should cause our optimistic lock to fail. + lease.setLeaseOwner("otherOwner"); + Assert.assertFalse(leaseManager.evictLease(lease)); + } + + /** + * Tests deleteLease when a lease exists. + */ + @Test + public void testDeleteLease() throws LeasingException { + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); + KinesisClientLease lease = builder.withLease("1").build().get("1"); + + leaseManager.deleteLease(lease); + + KinesisClientLease newLease = leaseManager.getLease(lease.getLeaseKey()); + Assert.assertNull(newLease); + } + + /** + * Tests deleteLease when a lease does not exist. + */ + @Test + public void testDeleteNonexistentLease() throws LeasingException { + KinesisClientLease lease = new KinesisClientLease(); + lease.setLeaseKey("1"); + // The lease has not been written to DDB - try to delete it and expect success. + + leaseManager.deleteLease(lease); + } + + @Test + public void testWaitUntilLeaseTableExists() throws LeasingException { + KinesisClientLeaseManager manager = new KinesisClientLeaseManager("nagl_ShardProgress", ddbClient, true) { + + @Override + long sleep(long timeToSleepMillis) { + Assert.fail("Should not sleep"); + return 0L; + } + + }; + + Assert.assertTrue(manager.waitUntilLeaseTableExists(1, 1)); + } + + @Test + public void testWaitUntilLeaseTableExistsTimeout() throws LeasingException { + /* + * Just using AtomicInteger for the indirection it provides. 
+ */ + final AtomicInteger sleepCounter = new AtomicInteger(0); + KinesisClientLeaseManager manager = new KinesisClientLeaseManager("nonexistentTable", ddbClient, true) { + + @Override + long sleep(long timeToSleepMillis) { + Assert.assertEquals(1000L, timeToSleepMillis); + sleepCounter.incrementAndGet(); + return 1000L; + } + + }; + + Assert.assertFalse(manager.waitUntilLeaseTableExists(2, 1)); + Assert.assertEquals(1, sleepCounter.get()); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerIntegrationTest.java b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseRenewerIntegrationTest.java similarity index 51% rename from amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerIntegrationTest.java rename to src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseRenewerIntegrationTest.java index 449c1420..9792d006 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerIntegrationTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseRenewerIntegrationTest.java @@ -1,55 +1,47 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.leases.dynamodb; - -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; +package com.amazonaws.services.kinesis.leases.impl; import java.util.Collections; import java.util.Map; import java.util.concurrent.Executors; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseIntegrationTest; -import software.amazon.kinesis.leases.LeaseRenewer; -import software.amazon.kinesis.leases.exceptions.LeasingException; -import software.amazon.kinesis.metrics.NullMetricsFactory; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.LeasingException; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseRenewer; -public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { - private final String TEST_METRIC = "TestOperation"; +public class LeaseRenewerIntegrationTest extends LeaseIntegrationTest { // This test case's leases last 2 seconds private static final long LEASE_DURATION_MILLIS = 2000L; - private LeaseRenewer renewer; + private ILeaseRenewer renewer; @Before - public void setup() { - renewer = new DynamoDBLeaseRenewer(leaseRefresher, "foo", LEASE_DURATION_MILLIS, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + 
public void setUp() { + renewer = new LeaseRenewer( + leaseManager, "foo", LEASE_DURATION_MILLIS, Executors.newCachedThreadPool()); } @Test public void testSimpleRenew() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "foo").build(); @@ -59,22 +51,20 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { @Test public void testLeaseLoss() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "foo").withLease("2", "foo").build(); builder.addLeasesToRenew(renewer, "1", "2"); - Lease renewedLease = builder.renewMutateAssert(renewer, "1", "2").get("2"); - - // lose lease 2 - leaseRefresher.takeLease(renewedLease, "bar"); + KinesisClientLease renewedLease = builder.renewMutateAssert(renewer, "1", "2").get("2"); + leaseManager.updateLease(renewedLease); builder.renewMutateAssert(renewer, "1"); } @Test public void testClear() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "foo").build(); builder.addLeasesToRenew(renewer, "1"); @@ -86,153 +76,150 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { @Test public void testGetCurrentlyHeldLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "foo").build(); builder.addLeasesToRenew(renewer, "1"); builder.renewMutateAssert(renewer, "1"); // this should be a copy that doesn't get updated - Lease lease = renewer.getCurrentlyHeldLease("1"); - assertThat(lease.leaseCounter(), equalTo(1L)); + KinesisClientLease lease = 
renewer.getCurrentlyHeldLease("1"); + Assert.assertEquals((Long) 1L, lease.getLeaseCounter()); // do one renewal and make sure the old copy doesn't get updated builder.renewMutateAssert(renewer, "1"); - assertThat(lease.leaseCounter(), equalTo(1L)); + Assert.assertEquals((Long) 1L, lease.getLeaseCounter()); } @Test public void testGetCurrentlyHeldLeases() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); - builder.withLease("1", "foo").withLease("2", "foo").build(); + KinesisClientLease lease2 = builder.withLease("1", "foo").withLease("2", "foo").build().get("2"); builder.addLeasesToRenew(renewer, "1", "2"); - Lease lease2 = builder.renewMutateAssert(renewer, "1", "2").get("2"); + builder.renewMutateAssert(renewer, "1", "2"); // This should be a copy that doesn't get updated - Map heldLeases = renewer.getCurrentlyHeldLeases(); - assertThat(heldLeases.size(), equalTo(2)); - assertThat(heldLeases.get("1").leaseCounter(), equalTo(1L)); - assertThat(heldLeases.get("2").leaseCounter(), equalTo(1L)); - - // lose lease 2 - leaseRefresher.takeLease(lease2, "bar"); + Map heldLeases = renewer.getCurrentlyHeldLeases(); + Assert.assertEquals(2, heldLeases.size()); + Assert.assertEquals((Long) 1L, heldLeases.get("1").getLeaseCounter()); + Assert.assertEquals((Long) 1L, heldLeases.get("2").getLeaseCounter()); + leaseManager.updateLease(lease2); // lose lease 2 // Do another renewal and make sure the copy doesn't change builder.renewMutateAssert(renewer, "1"); - assertThat(heldLeases.size(), equalTo(2)); - assertThat(heldLeases.get("1").leaseCounter(), equalTo(1L)); - assertThat(heldLeases.get("2").leaseCounter(), equalTo(1L)); + Assert.assertEquals(2, heldLeases.size()); + Assert.assertEquals((Long) 1L, heldLeases.get("1").getLeaseCounter()); + Assert.assertEquals((Long) 1L, heldLeases.get("2").getLeaseCounter()); } @Test public void testUpdateLease() throws 
LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "foo").build(); builder.addLeasesToRenew(renewer, "1"); builder.renewMutateAssert(renewer, "1"); - Lease expected = renewer.getCurrentlyHeldLease("1"); - expected.checkpoint(new ExtendedSequenceNumber("new checkpoint")); - assertThat(renewer.updateLease(expected, expected.concurrencyToken(), TEST_METRIC, expected.leaseKey()), - equalTo(true)); + KinesisClientLease expected = renewer.getCurrentlyHeldLease("1"); + expected.setCheckpoint(new ExtendedSequenceNumber("new checkpoint")); + Assert.assertTrue(renewer.updateLease(expected, expected.getConcurrencyToken())); // Assert that the counter and data have changed immediately after the update... - Lease actual = renewer.getCurrentlyHeldLease("1"); - expected.leaseCounter(expected.leaseCounter() + 1); - assertThat(actual, equalTo(expected)); + KinesisClientLease actual = renewer.getCurrentlyHeldLease("1"); + expected.setLeaseCounter(expected.getLeaseCounter() + 1); + Assert.assertEquals(expected, actual); // ...and after another round of renewal renewer.renewLeases(); actual = renewer.getCurrentlyHeldLease("1"); - expected.leaseCounter(expected.leaseCounter() + 1); - assertThat(actual, equalTo(expected)); + expected.setLeaseCounter(expected.getLeaseCounter() + 1); + Assert.assertEquals(expected, actual); } @Test public void testUpdateLostLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "foo").build(); builder.addLeasesToRenew(renewer, "1"); builder.renewMutateAssert(renewer, "1"); - Lease lease = renewer.getCurrentlyHeldLease("1"); + KinesisClientLease lease = renewer.getCurrentlyHeldLease("1"); // cause lease loss such that the renewer doesn't realize he's lost the lease when update is called - 
leaseRefresher.renewLease(lease); + leaseManager.renewLease(lease); // renewer still thinks he has the lease - assertThat(renewer.getCurrentlyHeldLease("1"), notNullValue()); - lease.checkpoint(new ExtendedSequenceNumber("new checkpoint")); + Assert.assertNotNull(renewer.getCurrentlyHeldLease("1")); + lease.setCheckpoint(new ExtendedSequenceNumber("new checkpoint")); // update fails - assertThat(renewer.updateLease(lease, lease.concurrencyToken(), TEST_METRIC, null), equalTo(false)); + Assert.assertFalse(renewer.updateLease(lease, lease.getConcurrencyToken())); // renewer no longer thinks he has the lease - assertThat(renewer.getCurrentlyHeldLease("1"), nullValue()); + Assert.assertNull(renewer.getCurrentlyHeldLease("1")); } @Test public void testUpdateOldLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "foo").build(); builder.addLeasesToRenew(renewer, "1"); builder.renewMutateAssert(renewer, "1"); - Lease lease = renewer.getCurrentlyHeldLease("1"); + KinesisClientLease lease = renewer.getCurrentlyHeldLease("1"); // cause lease loss such that the renewer knows the lease has been lost when update is called - leaseRefresher.takeLease(lease, "bar"); + leaseManager.renewLease(lease); builder.renewMutateAssert(renewer); - lease.checkpoint(new ExtendedSequenceNumber("new checkpoint")); - assertThat(renewer.updateLease(lease, lease.concurrencyToken(), TEST_METRIC, lease.leaseKey()), equalTo(false)); + lease.setCheckpoint(new ExtendedSequenceNumber("new checkpoint")); + Assert.assertFalse(renewer.updateLease(lease, lease.getConcurrencyToken())); } @Test public void testUpdateRegainedLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "foo").build(); builder.addLeasesToRenew(renewer, 
"1"); builder.renewMutateAssert(renewer, "1"); - Lease lease = renewer.getCurrentlyHeldLease("1"); + KinesisClientLease lease = renewer.getCurrentlyHeldLease("1"); // cause lease loss such that the renewer knows the lease has been lost when update is called - leaseRefresher.takeLease(lease, "bar"); + leaseManager.renewLease(lease); builder.renewMutateAssert(renewer); // regain the lease builder.addLeasesToRenew(renewer, "1"); - lease.checkpoint(new ExtendedSequenceNumber("new checkpoint")); - assertThat(renewer.updateLease(lease, lease.concurrencyToken(), TEST_METRIC, lease.leaseKey()), equalTo(false)); + lease.setCheckpoint(new ExtendedSequenceNumber("new checkpoint")); + Assert.assertFalse(renewer.updateLease(lease, lease.getConcurrencyToken())); } @Test public void testIgnoreNoRenewalTimestamp() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); - Lease lease = builder.withLease("1", "foo").build().get("1"); - lease.lastCounterIncrementNanos(null); + KinesisClientLease lease = builder.withLease("1", "foo").build().get("1"); + lease.setLastCounterIncrementNanos(null); renewer.addLeasesToRenew(Collections.singleton(lease)); - assertThat(renewer.getCurrentlyHeldLeases().size(), equalTo(0)); + Assert.assertEquals(0, renewer.getCurrentlyHeldLeases().size()); } @Test public void testLeaseTimeout() throws LeasingException, InterruptedException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "foo").build(); @@ -242,7 +229,7 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { // TODO: Worth eliminating this sleep using the same pattern we used on LeaseTaker? 
Thread.sleep(LEASE_DURATION_MILLIS); // Wait for the lease to timeout - assertThat(renewer.getCurrentlyHeldLeases().size(), equalTo(0)); + Assert.assertEquals(0, renewer.getCurrentlyHeldLeases().size()); } @Test @@ -250,14 +237,14 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { final String shardId = "shd-0-0"; final String owner = "foo:8000"; - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease(shardId, owner); - Map leases = builder.build(); - DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer(leaseRefresher, owner, 30000L, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + Map leases = builder.build(); + LeaseRenewer renewer =new LeaseRenewer( + leaseManager, owner, 30000L, Executors.newCachedThreadPool()); renewer.initialize(); - Map heldLeases = renewer.getCurrentlyHeldLeases(); - assertThat(heldLeases.size(), equalTo(leases.size())); - assertThat(heldLeases.keySet(), equalTo(leases.keySet())); + Map heldLeases = renewer.getCurrentlyHeldLeases(); + Assert.assertEquals(leases.size(), heldLeases.size()); + Assert.assertEquals(leases.keySet(), heldLeases.keySet()); } } diff --git a/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseRenewerTest.java b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseRenewerTest.java new file mode 100644 index 00000000..7c8b3fd1 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseRenewerTest.java @@ -0,0 +1,129 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager; + +public class LeaseRenewerTest { + + ILeaseManager leaseManager; + String workerIdentifier; + long leaseDurationMillis; + ExecutorService leaseRenewalExecService; + LeaseRenewer renewer; + List leasesToRenew; + + private static Lease newLease(String leaseKey, + String leaseOwner, + Long leaseCounter, + UUID concurrencyToken, + Long lastCounterIncrementNanos) { + Lease lease = new Lease(); + lease.setLeaseKey(leaseKey); + lease.setLeaseOwner(leaseOwner); + lease.setLeaseCounter(leaseCounter); + lease.setConcurrencyToken(concurrencyToken); + lease.setLastCounterIncrementNanos(lastCounterIncrementNanos); + return lease; + } + + private static Lease newLease(String leaseKey) { + return newLease(leaseKey, "leaseOwner", 0L, UUID.randomUUID(), System.nanoTime()); + } + + @SuppressWarnings("unchecked") + @Before + public void before() { + leaseManager = Mockito.mock(ILeaseManager.class); + workerIdentifier = "workerId"; + leaseDurationMillis = 10000; + leaseRenewalExecService = Executors.newSingleThreadExecutor(); + leasesToRenew = null; + renewer = new 
LeaseRenewer<>(leaseManager, + workerIdentifier, + leaseDurationMillis, + Executors.newCachedThreadPool()); + } + + @After + public void after() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + if (leasesToRenew == null) { + return; + } + for (Lease l : leasesToRenew) { + Mockito.verify(leaseManager, Mockito.times(1)).renewLease(l); + } + } + + @Test + public void testLeaseRenewerHoldsGoodLeases() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + /* + * Prepare leases to be renewed + * 2 Good + */ + Lease lease1 = newLease("1"); + Lease lease2 = newLease("2"); + leasesToRenew = + Arrays.asList(lease1,lease2); + renewer.addLeasesToRenew(leasesToRenew); + + Mockito.doReturn(true).when(leaseManager).renewLease(lease1); + Mockito.doReturn(true).when(leaseManager).renewLease(lease2); + + renewer.renewLeases(); + + Assert.assertEquals(2, renewer.getCurrentlyHeldLeases().size()); + } + + @Test + public void testLeaseRenewerDoesNotRenewExpiredLease() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + String leaseKey = "expiredLease"; + long initialCounterIncrementNanos = 5L; // "expired" time. + Lease lease1 = newLease(leaseKey); + lease1.setLastCounterIncrementNanos(initialCounterIncrementNanos); + + leasesToRenew = new ArrayList<>(); + leasesToRenew.add(lease1); + Mockito.doReturn(true).when(leaseManager).renewLease(lease1); + renewer.addLeasesToRenew(leasesToRenew); + + Assert.assertTrue(lease1.isExpired(1, System.nanoTime())); + Assert.assertNull(renewer.getCurrentlyHeldLease(leaseKey)); + renewer.renewLeases(); + // Don't renew lease(s) with same key if getCurrentlyHeldLease returned null previously + Assert.assertNull(renewer.getCurrentlyHeldLease(leaseKey)); + Assert.assertFalse(renewer.getCurrentlyHeldLeases().containsKey(leaseKey)); + + // Clear the list to avoid triggering expectation mismatch in after(). 
+ leasesToRenew.clear(); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerIntegrationTest.java b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseTakerIntegrationTest.java similarity index 65% rename from amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerIntegrationTest.java rename to src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseTakerIntegrationTest.java index 871918b5..6fb5caf6 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerIntegrationTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseTakerIntegrationTest.java @@ -1,49 +1,40 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.leases.dynamodb; +package com.amazonaws.services.kinesis.leases.impl; -import java.util.Collection; import java.util.Map; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import software.amazon.kinesis.leases.Lease; -import software.amazon.kinesis.leases.LeaseIntegrationTest; -import software.amazon.kinesis.leases.exceptions.LeasingException; -import software.amazon.kinesis.metrics.NullMetricsFactory; +import com.amazonaws.services.kinesis.leases.exceptions.LeasingException; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; - -public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { +public class LeaseTakerIntegrationTest extends LeaseIntegrationTest { private static final long LEASE_DURATION_MILLIS = 1000L; - private DynamoDBLeaseTaker taker; + private LeaseTaker taker; @Before - public void setup() { - taker = new DynamoDBLeaseTaker(leaseRefresher, "foo", LEASE_DURATION_MILLIS, new NullMetricsFactory()); + public void setUp() { + taker = new LeaseTaker(leaseManager, "foo", LEASE_DURATION_MILLIS); } @Test public void testSimpleLeaseTake() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", null).build(); @@ -52,7 +43,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { @Test public void testNotTakeUpdatedLease() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "bar").build(); @@ -65,7 +56,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { @Test public void testTakeOwnLease() throws LeasingException { - 
TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", taker.getWorkerIdentifier()).build(); @@ -76,7 +67,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { @Test public void testNotTakeNewOwnedLease() throws LeasingException, InterruptedException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "bar").build(); @@ -94,7 +85,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { */ @Test public void testNonGreedyTake() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); for (int i = 0; i < 3; i++) { builder.withLease(Integer.toString(i), null); @@ -105,32 +96,6 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { builder.takeMutateAssert(taker, 2); } - /** - * Verify that when getAllLeases() is called, DynamoDBLeaseTaker - * - does not call listLeases() - * - returns cached result was built during takeLeases() operation to return result - */ - @Test - public void testGetAllLeases() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - - Map addedLeases = builder.withLease("1", "bar") - .withLease("2", "bar") - .withLease("3", "baz") - .withLease("4", "baz") - .withLease("5", "foo") - .build(); - - // In the current DynamoDBLeaseTaker implementation getAllLeases() gets leases from an internal cache that is built during takeLeases() operation - assertThat(taker.allLeases().size(), equalTo(0)); - - taker.takeLeases(); - - Collection allLeases = taker.allLeases(); - assertThat(allLeases.size(), equalTo(addedLeases.size())); - assertThat(addedLeases.values().containsAll(allLeases), equalTo(true)); - } - /** * 
Verify that LeaseTaker does not steal when it's only short 1 lease and the other worker is at target. Set up a * scenario where there are 4 leases held by two servers, and a third server with one lease. The third server should @@ -138,7 +103,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { */ @Test public void testNoStealWhenOffByOne() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "bar") .withLease("2", "bar") @@ -159,7 +124,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { */ @Test public void testSteal() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", "bar"); for (int i = 2; i <= 6; i++) { @@ -170,7 +135,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { builder.build(); // Assert that one lease was stolen from baz. 
- Map takenLeases = builder.takeMutateAssert(taker, 1); + Map takenLeases = builder.takeMutateAssert(taker, 1); // Assert that it was one of baz's leases (shardId != 1) String shardIdStolen = takenLeases.keySet().iterator().next(); @@ -183,7 +148,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { */ @Test public void testNoStealWhenExpiredLeases() throws LeasingException { - TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); + TestHarnessBuilder builder = new TestHarnessBuilder(leaseManager); builder.withLease("1", null); for (int i = 2; i <= 4; i++) { diff --git a/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseTakerTest.java b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseTakerTest.java new file mode 100644 index 00000000..34dfedfa --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/leases/impl/LeaseTakerTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.ArrayList; +import java.util.List; + +import junit.framework.Assert; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * + */ +public class LeaseTakerTest { + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + } + + /** + * Test method for {@link com.amazonaws.services.kinesis.leases.impl.LeaseTaker#stringJoin(java.util.Collection, java.lang.String)}. + */ + @Test + public final void testStringJoin() { + List strings = new ArrayList<>(); + + strings.add("foo"); + Assert.assertEquals("foo", LeaseTaker.stringJoin(strings, ", ")); + + strings.add("bar"); + Assert.assertEquals("foo, bar", LeaseTaker.stringJoin(strings, ", ")); + } + +} diff --git a/src/test/java/com/amazonaws/services/kinesis/leases/impl/TestHarnessBuilder.java b/src/test/java/com/amazonaws/services/kinesis/leases/impl/TestHarnessBuilder.java new file mode 100644 index 00000000..6b6d673c --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/leases/impl/TestHarnessBuilder.java @@ -0,0 +1,168 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. 
This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.leases.impl; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; + +import org.junit.Assert; + +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.leases.exceptions.DependencyException; +import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.leases.exceptions.LeasingException; +import com.amazonaws.services.kinesis.leases.interfaces.ILeaseRenewer; + +public class TestHarnessBuilder { + + private long currentTimeNanos; + + private Map leases = new HashMap(); + private KinesisClientLeaseManager leaseManager; + + private Callable timeProvider = new Callable() { + + @Override + public Long call() throws Exception { + return currentTimeNanos; + } + + }; + + public TestHarnessBuilder(KinesisClientLeaseManager leaseManager) { + this.leaseManager = leaseManager; + } + + public TestHarnessBuilder withLease(String shardId) { + return withLease(shardId, "leaseOwner"); + } + + public TestHarnessBuilder withLease(String shardId, String owner) { + KinesisClientLease lease = new KinesisClientLease(); + lease.setCheckpoint(new ExtendedSequenceNumber("checkpoint")); + lease.setOwnerSwitchesSinceCheckpoint(0L); + lease.setLeaseCounter(0L); + lease.setLeaseOwner(owner); + lease.setParentShardIds(Collections.singleton("parentShardId")); + lease.setLeaseKey(shardId); + + leases.put(shardId, lease); + return this; + } + + public Map build() throws LeasingException { + for (KinesisClientLease lease : leases.values()) { + leaseManager.createLeaseIfNotExists(lease); + 
if (lease.getLeaseOwner() != null) { + lease.setLastCounterIncrementNanos(System.nanoTime()); + } + } + + currentTimeNanos = System.nanoTime(); + + return leases; + } + + public void passTime(long millis) { + currentTimeNanos += millis * 1000000; + } + + public Map takeMutateAssert(LeaseTaker taker, int numToTake) + throws LeasingException { + Map result = taker.takeLeases(timeProvider); + Assert.assertEquals(numToTake, result.size()); + + for (KinesisClientLease actual : result.values()) { + KinesisClientLease original = leases.get(actual.getLeaseKey()); + Assert.assertNotNull(original); + + mutateAssert(taker.getWorkerIdentifier(), original, actual); + } + + return result; + } + + public Map takeMutateAssert(LeaseTaker taker, String... takenShardIds) + throws LeasingException { + Map result = taker.takeLeases(timeProvider); + Assert.assertEquals(takenShardIds.length, result.size()); + + for (String shardId : takenShardIds) { + KinesisClientLease original = leases.get(shardId); + Assert.assertNotNull(original); + + KinesisClientLease actual = result.get(shardId); + Assert.assertNotNull(actual); + + mutateAssert(taker.getWorkerIdentifier(), original, actual); + } + + return result; + } + + private void mutateAssert(String newWorkerIdentifier, KinesisClientLease original, KinesisClientLease actual) { + original.setLeaseCounter(original.getLeaseCounter() + 1); + if (original.getLeaseOwner() != null && !newWorkerIdentifier.equals(original.getLeaseOwner())) { + original.setOwnerSwitchesSinceCheckpoint(original.getOwnerSwitchesSinceCheckpoint() + 1); + } + original.setLeaseOwner(newWorkerIdentifier); + + Assert.assertEquals(original, actual); // Assert the contents of the lease + } + + public void addLeasesToRenew(ILeaseRenewer renewer, String... 
shardIds) + throws DependencyException, InvalidStateException { + List leasesToRenew = new ArrayList(); + + for (String shardId : shardIds) { + KinesisClientLease lease = leases.get(shardId); + Assert.assertNotNull(lease); + leasesToRenew.add(lease); + } + + renewer.addLeasesToRenew(leasesToRenew); + } + + public Map renewMutateAssert(ILeaseRenewer renewer, String... renewedShardIds) + throws DependencyException, InvalidStateException { + renewer.renewLeases(); + + Map heldLeases = renewer.getCurrentlyHeldLeases(); + Assert.assertEquals(renewedShardIds.length, heldLeases.size()); + + for (String shardId : renewedShardIds) { + KinesisClientLease original = leases.get(shardId); + Assert.assertNotNull(original); + + KinesisClientLease actual = heldLeases.get(shardId); + Assert.assertNotNull(actual); + + original.setLeaseCounter(original.getLeaseCounter() + 1); + Assert.assertEquals(original, actual); + } + + return heldLeases; + } + + public void renewAllLeases() throws LeasingException { + for (KinesisClientLease lease : leases.values()) { + leaseManager.renewLease(lease); + } + } +} diff --git a/src/test/java/com/amazonaws/services/kinesis/metrics/impl/AccumulatingMetricsScopeTest.java b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/AccumulatingMetricsScopeTest.java new file mode 100644 index 00000000..a7b179a0 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/AccumulatingMetricsScopeTest.java @@ -0,0 +1,67 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.metrics.impl; + +import org.junit.Assert; +import org.junit.Test; + +import com.amazonaws.services.cloudwatch.model.MetricDatum; +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.metrics.impl.AccumulateByNameMetricsScope; + +public class AccumulatingMetricsScopeTest { + + private static class TestScope extends AccumulateByNameMetricsScope { + + @Override + public void end() { + + } + + public void assertMetrics(MetricDatum... expectedData) { + for (MetricDatum expected : expectedData) { + MetricDatum actual = data.remove(expected.getMetricName()); + Assert.assertEquals(expected, actual); + } + + Assert.assertEquals("Data should be empty at the end of assertMetrics", 0, data.size()); + } + } + + @Test + public void testSingleAdd() { + TestScope scope = new TestScope(); + + scope.addData("name", 2.0, StandardUnit.Count); + scope.assertMetrics(TestHelper.constructDatum("name", StandardUnit.Count, 2.0, 2.0, 2.0, 1)); + } + + @Test + public void testAccumulate() { + TestScope scope = new TestScope(); + + scope.addData("name", 2.0, StandardUnit.Count); + scope.addData("name", 3.0, StandardUnit.Count); + scope.assertMetrics(TestHelper.constructDatum("name", StandardUnit.Count, 3.0, 2.0, 5.0, 2)); + } + + @Test(expected = IllegalArgumentException.class) + public void testAccumulateWrongUnit() { + TestScope scope = new TestScope(); + + scope.addData("name", 2.0, StandardUnit.Count); + scope.addData("name", 3.0, StandardUnit.Megabits); + } +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnableTest.java b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/CWPublisherRunnableTest.java similarity index 74% rename from 
amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnableTest.java rename to src/test/java/com/amazonaws/services/kinesis/metrics/impl/CWPublisherRunnableTest.java index 502fda7c..e0b30c66 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnableTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/CWPublisherRunnableTest.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.impl; import java.util.ArrayList; import java.util.List; @@ -21,11 +21,11 @@ import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.cloudwatch.model.MetricDatum; +import com.amazonaws.services.cloudwatch.model.StandardUnit; -public class CloudWatchPublisherRunnableTest { +public class CWPublisherRunnableTest { private static final int MAX_QUEUE_SIZE = 5; private static final long MAX_BUFFER_TIME_MILLIS = 1; @@ -36,15 +36,16 @@ public class CloudWatchPublisherRunnableTest { private static final int FLUSH_SIZE = 2; private static class TestHarness { - private List> data = new ArrayList>(); + private List> data = new ArrayList>(); private int counter = 0; - private CloudWatchMetricsPublisher publisher; - private CloudWatchPublisherRunnable runnable; + private ICWMetricsPublisher publisher; + private CWPublisherRunnable runnable; private long time = 0L; - TestHarness() { - publisher = Mockito.mock(CloudWatchMetricsPublisher.class); - runnable = new CloudWatchPublisherRunnable(publisher, + @SuppressWarnings("unchecked") + public TestHarness() { + publisher = Mockito.mock(ICWMetricsPublisher.class); + runnable = new CWPublisherRunnable(publisher, MAX_BUFFER_TIME_MILLIS, MAX_QUEUE_SIZE, FLUSH_SIZE) { @@ -57,7 +58,7 @@ public class CloudWatchPublisherRunnableTest { }; } - void enqueueRandom(int count) { + public void enqueueRandom(int count) { for (int i = 0; i < count; i++) { int value = counter++; data.add(constructDatum(value)); @@ -66,15 +67,15 @@ public class CloudWatchPublisherRunnableTest { runnable.enqueue(data.subList(data.size() - count, data.size())); } - private MetricDatumWithKey constructDatum(int value) { + private MetricDatumWithKey constructDatum(int 
value) { MetricDatum datum = TestHelper.constructDatum("datum-" + Integer.toString(value), - StandardUnit.COUNT, + StandardUnit.Count, value, value, value, 1); - return new MetricDatumWithKey(new CloudWatchMetricKey(datum), datum); + return new MetricDatumWithKey(new CWMetricKey(datum), datum); } /** @@ -84,7 +85,7 @@ public class CloudWatchPublisherRunnableTest { * @param startIndex * @param count */ - void runAndAssert(int startIndex, int count) { + public void runAndAssert(int startIndex, int count) { runnable.runOnce(); if (count > 0) { @@ -97,15 +98,15 @@ public class CloudWatchPublisherRunnableTest { /** * Run one iteration of the runnable and assert that it called CloudWatch with all data. */ - void runAndAssertAllData() { + public void runAndAssertAllData() { runAndAssert(0, data.size()); } - void passTime(long time) { + public void passTime(long time) { this.time += time; } - CloudWatchPublisherRunnable getRunnable() { + public CWPublisherRunnable getRunnable() { return runnable; } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisherTest.java b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/DefaultCWMetricsPublisherTest.java similarity index 50% rename from amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisherTest.java rename to src/test/java/com/amazonaws/services/kinesis/metrics/impl/DefaultCWMetricsPublisherTest.java index 80194f11..a547e038 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisherTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/DefaultCWMetricsPublisherTest.java @@ -1,18 +1,18 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). 
- * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.impl; import java.util.ArrayList; import java.util.HashMap; @@ -20,38 +20,28 @@ import java.util.List; import java.util.Map; import org.junit.Assert; -import org.junit.Before; import org.junit.Test; -import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; -import org.mockito.Mock; import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; -import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; -import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.cloudwatch.AmazonCloudWatch; +import com.amazonaws.services.cloudwatch.model.MetricDatum; +import com.amazonaws.services.cloudwatch.model.PutMetricDataRequest; +import com.amazonaws.services.cloudwatch.model.StandardUnit; -@RunWith(MockitoJUnitRunner.class) -public class 
CloudWatchMetricsPublisherTest { - private static final String NAMESPACE = "fakeNamespace"; - private CloudWatchMetricsPublisher publisher; +public class DefaultCWMetricsPublisherTest { - @Mock - private CloudWatchAsyncClient cloudWatchClient; - - @Before - public void setup() { - publisher = new CloudWatchMetricsPublisher(cloudWatchClient, NAMESPACE); - } + private final String NAMESPACE = "fakeNamespace"; + private final AmazonCloudWatch cloudWatchClient = Mockito.mock(AmazonCloudWatch.class); + private DefaultCWMetricsPublisher publisher = new DefaultCWMetricsPublisher(cloudWatchClient, NAMESPACE); /* * Test whether the data input into metrics publisher is the equal to the data which will be published to CW */ + @Test public void testMetricsPublisher() { - List> dataToPublish = constructMetricDatumWithKeyList(25); + List> dataToPublish = constructMetricDatumWithKeyList(25); List> expectedData = constructMetricDatumListMap(dataToPublish); publisher.publishMetrics(dataToPublish); @@ -67,46 +57,46 @@ public class CloudWatchMetricsPublisherTest { } - public static List> constructMetricDatumWithKeyList(int value) { - List> data = new ArrayList>(); + public static List> constructMetricDatumWithKeyList(int value) { + List> data = new ArrayList>(); for (int i = 1; i <= value; i++) { MetricDatum datum = - TestHelper.constructDatum("datum" + Integer.toString(i), StandardUnit.COUNT, i, i, i, 1); - data.add(new MetricDatumWithKey(new CloudWatchMetricKey(datum), datum)); + TestHelper.constructDatum("datum" + Integer.toString(i), StandardUnit.Count, i, i, i, 1); + data.add(new MetricDatumWithKey(new CWMetricKey(datum), datum)); } return data; } // batchSize is the number of metrics sent in a single request. - // In CloudWatchMetricsPublisher this number is set to 20. - public List> constructMetricDatumListMap(List> data) { + // In DefaultCWMetricsPublisher this number is set to 20. 
+ public List> constructMetricDatumListMap(List> data) { int batchSize = 20; List> dataList = new ArrayList>(); int expectedRequestcount = (int) Math.ceil(data.size() / 20.0); for (int i = 0; i < expectedRequestcount; i++) { - dataList.add(i, new HashMap<>()); + dataList.add(i, new HashMap()); } int batchIndex = 1; int listIndex = 0; - for (MetricDatumWithKey metricDatumWithKey : data) { + for (MetricDatumWithKey metricDatumWithKey : data) { if (batchIndex > batchSize) { batchIndex = 1; listIndex++; } batchIndex++; - dataList.get(listIndex).put(metricDatumWithKey.datum.metricName(), metricDatumWithKey.datum); + dataList.get(listIndex).put(metricDatumWithKey.datum.getMetricName(), metricDatumWithKey.datum); } return dataList; } public static void assertMetricData(Map expected, PutMetricDataRequest actual) { - List actualData = actual.metricData(); + List actualData = actual.getMetricData(); for (MetricDatum actualDatum : actualData) { - String metricName = actualDatum.metricName(); + String metricName = actualDatum.getMetricName(); Assert.assertNotNull(expected.get(metricName)); Assert.assertTrue(expected.get(metricName).equals(actualDatum)); expected.remove(metricName); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/EndingMetricsScopeTest.java b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/EndingMetricsScopeTest.java similarity index 52% rename from amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/EndingMetricsScopeTest.java rename to src/test/java/com/amazonaws/services/kinesis/metrics/impl/EndingMetricsScopeTest.java index 7236155a..7d44f43f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/EndingMetricsScopeTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/EndingMetricsScopeTest.java @@ -1,23 +1,23 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. 
All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. */ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.impl; import org.junit.Test; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; -import software.amazon.kinesis.metrics.EndingMetricsScope; +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.metrics.impl.EndingMetricsScope; public class EndingMetricsScopeTest { @@ -28,7 +28,7 @@ public class EndingMetricsScopeTest { @Test public void testAddDataNotEnded() { TestScope scope = new TestScope(); - scope.addData("foo", 1.0, StandardUnit.COUNT); + scope.addData("foo", 1.0, StandardUnit.Count); } @Test @@ -41,7 +41,7 @@ public class EndingMetricsScopeTest { public void testAddDataEnded() { TestScope scope = new TestScope(); scope.end(); - scope.addData("foo", 1.0, StandardUnit.COUNT); + scope.addData("foo", 1.0, StandardUnit.Count); } @Test(expected = IllegalArgumentException.class) diff --git 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/FilteringMetricsScopeTest.java b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/FilteringMetricsScopeTest.java similarity index 59% rename from amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/FilteringMetricsScopeTest.java rename to src/test/java/com/amazonaws/services/kinesis/metrics/impl/FilteringMetricsScopeTest.java index 408f54e4..deb03caf 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/FilteringMetricsScopeTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/FilteringMetricsScopeTest.java @@ -1,50 +1,52 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ -package software.amazon.kinesis.metrics; +package com.amazonaws.services.kinesis.metrics.impl; import java.util.Set; -import lombok.AccessLevel; -import lombok.NoArgsConstructor; import org.junit.Assert; import org.junit.Test; +import com.amazonaws.services.cloudwatch.model.Dimension; +import com.amazonaws.services.cloudwatch.model.MetricDatum; +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope; +import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel; import com.google.common.collect.ImmutableSet; -import software.amazon.awssdk.services.cloudwatch.model.Dimension; -import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; -import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - public class FilteringMetricsScopeTest { - @NoArgsConstructor(access = AccessLevel.PRIVATE) private static class TestScope extends FilteringMetricsScope { + + private TestScope() { + } + private TestScope(MetricsLevel metricsLevel, Set metricsEnabledDimensions) { super(metricsLevel, metricsEnabledDimensions); } - void assertMetrics(MetricDatum... expectedData) { + public void assertMetrics(MetricDatum... expectedData) { for (MetricDatum expected : expectedData) { - MetricDatum actual = data.remove(expected.metricName()); + MetricDatum actual = data.remove(expected.getMetricName()); Assert.assertEquals(expected, actual); } Assert.assertEquals("Data should be empty at the end of assertMetrics", 0, data.size()); } - void assertDimensions(Dimension... dimensions) { + public void assertDimensions(Dimension... 
dimensions) { for (Dimension dimension : dimensions) { Assert.assertTrue(getDimensions().remove(dimension)); } @@ -56,35 +58,35 @@ public class FilteringMetricsScopeTest { @Test public void testDefaultAddAll() { TestScope scope = new TestScope(); - scope.addData("detailedDataName", 2.0, StandardUnit.COUNT, MetricsLevel.DETAILED); - scope.addData("noLevelDataName", 3.0, StandardUnit.MILLISECONDS); + scope.addData("detailedDataName", 2.0, StandardUnit.Count, MetricsLevel.DETAILED); + scope.addData("noLevelDataName", 3.0, StandardUnit.Milliseconds); scope.addDimension("dimensionName", "dimensionValue"); // By default all metrics and dimensions should be allowed. scope.assertMetrics( - TestHelper.constructDatum("detailedDataName", StandardUnit.COUNT, 2.0, 2.0, 2.0, 1), - TestHelper.constructDatum("noLevelDataName", StandardUnit.MILLISECONDS, 3.0, 3.0, 3.0, 1.0)); + TestHelper.constructDatum("detailedDataName", StandardUnit.Count, 2.0, 2.0, 2.0, 1), + TestHelper.constructDatum("noLevelDataName", StandardUnit.Milliseconds, 3.0, 3.0, 3.0, 1.0)); scope.assertDimensions(TestHelper.constructDimension("dimensionName", "dimensionValue")); } @Test public void testMetricsLevel() { TestScope scope = new TestScope(MetricsLevel.SUMMARY, null); - scope.addData("summaryDataName", 2.0, StandardUnit.COUNT, MetricsLevel.SUMMARY); - scope.addData("summaryDataName", 10.0, StandardUnit.COUNT, MetricsLevel.SUMMARY); - scope.addData("detailedDataName", 4.0, StandardUnit.BYTES, MetricsLevel.DETAILED); - scope.addData("noLevelDataName", 3.0, StandardUnit.MILLISECONDS); + scope.addData("summaryDataName", 2.0, StandardUnit.Count, MetricsLevel.SUMMARY); + scope.addData("summaryDataName", 10.0, StandardUnit.Count, MetricsLevel.SUMMARY); + scope.addData("detailedDataName", 4.0, StandardUnit.Bytes, MetricsLevel.DETAILED); + scope.addData("noLevelDataName", 3.0, StandardUnit.Milliseconds); - scope.assertMetrics(TestHelper.constructDatum("summaryDataName", StandardUnit.COUNT, 10.0, 2.0, 12.0, 2.0)); + 
scope.assertMetrics(TestHelper.constructDatum("summaryDataName", StandardUnit.Count, 10.0, 2.0, 12.0, 2.0)); } @Test public void testMetricsLevelNone() { TestScope scope = new TestScope(MetricsLevel.NONE, null); - scope.addData("summaryDataName", 2.0, StandardUnit.COUNT, MetricsLevel.SUMMARY); - scope.addData("summaryDataName", 10.0, StandardUnit.COUNT, MetricsLevel.SUMMARY); - scope.addData("detailedDataName", 4.0, StandardUnit.BYTES, MetricsLevel.DETAILED); - scope.addData("noLevelDataName", 3.0, StandardUnit.MILLISECONDS); + scope.addData("summaryDataName", 2.0, StandardUnit.Count, MetricsLevel.SUMMARY); + scope.addData("summaryDataName", 10.0, StandardUnit.Count, MetricsLevel.SUMMARY); + scope.addData("detailedDataName", 4.0, StandardUnit.Bytes, MetricsLevel.DETAILED); + scope.addData("noLevelDataName", 3.0, StandardUnit.Milliseconds); // No metrics should be emitted. scope.assertMetrics(); @@ -107,7 +109,7 @@ public class FilteringMetricsScopeTest { @Test public void testMetricsDimensionsAll() { TestScope scope = new TestScope(MetricsLevel.DETAILED, ImmutableSet.of( - "ThisDoesNotMatter", MetricsScope.METRICS_DIMENSIONS_ALL, "ThisAlsoDoesNotMatter")); + "ThisDoesNotMatter", IMetricsScope.METRICS_DIMENSIONS_ALL, "ThisAlsoDoesNotMatter")); scope.addDimension("ShardId", "shard-0001"); scope.addDimension("Operation", "ProcessRecords"); scope.addDimension("ShardId", "shard-0001"); diff --git a/src/test/java/com/amazonaws/services/kinesis/metrics/impl/MetricAccumulatingQueueTest.java b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/MetricAccumulatingQueueTest.java new file mode 100644 index 00000000..1ca90076 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/MetricAccumulatingQueueTest.java @@ -0,0 +1,96 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.metrics.impl; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.amazonaws.services.cloudwatch.model.Dimension; +import com.amazonaws.services.cloudwatch.model.MetricDatum; +import com.amazonaws.services.cloudwatch.model.StandardUnit; + +public class MetricAccumulatingQueueTest { + + private static final int MAX_QUEUE_SIZE = 5; + private MetricAccumulatingQueue queue; + + @Before + public void setup() { + this.queue = new MetricAccumulatingQueue(MAX_QUEUE_SIZE); + } + + /* + * Test whether the MetricDatums offered into the queue will accumulate data based on the same metricName and + * output those datums with the correctly accumulated output. 
+ */ + @Test + public void testAccumulation() { + Collection dimensionsA = Collections.singleton(new Dimension().withName("name").withValue("a")); + Collection dimensionsB = Collections.singleton(new Dimension().withName("name").withValue("b")); + String keyA = "a"; + String keyB = "b"; + + MetricDatum datum1 = + TestHelper.constructDatum(keyA, StandardUnit.Count, 10, 5, 15, 2).withDimensions(dimensionsA); + queue.offer(new CWMetricKey(datum1), datum1); + MetricDatum datum2 = + TestHelper.constructDatum(keyA, StandardUnit.Count, 1, 1, 2, 2).withDimensions(dimensionsA); + queue.offer(new CWMetricKey(datum2), datum2); + + MetricDatum datum3 = + TestHelper.constructDatum(keyA, StandardUnit.Count, 1, 1, 2, 2).withDimensions(dimensionsB); + queue.offer(new CWMetricKey(datum3), datum3); + + MetricDatum datum4 = TestHelper.constructDatum(keyA, StandardUnit.Count, 1, 1, 2, 2); + queue.offer(new CWMetricKey(datum4), datum4); + queue.offer(new CWMetricKey(datum4), datum4); + + MetricDatum datum5 = + TestHelper.constructDatum(keyB, StandardUnit.Count, 100, 10, 110, 2).withDimensions(dimensionsA); + queue.offer(new CWMetricKey(datum5), datum5); + + Assert.assertEquals(4, queue.size()); + List> items = queue.drain(4); + + Assert.assertEquals(items.get(0).datum, TestHelper.constructDatum(keyA, StandardUnit.Count, 10, 1, 17, 4) + .withDimensions(dimensionsA)); + Assert.assertEquals(items.get(1).datum, datum3); + Assert.assertEquals(items.get(2).datum, TestHelper.constructDatum(keyA, StandardUnit.Count, 1, 1, 4, 4)); + Assert.assertEquals(items.get(3).datum, TestHelper.constructDatum(keyB, StandardUnit.Count, 100, 10, 110, 2) + .withDimensions(dimensionsA)); + } + + /* + * Test that the number of MetricDatum that can be added to our queue is capped at the MAX_QUEUE_SIZE. + * Therefore, any datums added to the queue that is greater than the capacity of our queue will be dropped. 
+ */ + @Test + public void testDrop() { + for (int i = 0; i < MAX_QUEUE_SIZE; i++) { + MetricDatum datum = TestHelper.constructDatum(Integer.toString(i), StandardUnit.Count, 1, 1, 2, 2); + CWMetricKey key = new CWMetricKey(datum); + Assert.assertTrue(queue.offer(key, datum)); + } + + MetricDatum datum = TestHelper.constructDatum("foo", StandardUnit.Count, 1, 1, 2, 2); + Assert.assertFalse(queue.offer(new CWMetricKey(datum), datum)); + Assert.assertEquals(MAX_QUEUE_SIZE, queue.size()); + } +} diff --git a/src/test/java/com/amazonaws/services/kinesis/metrics/impl/TestHelper.java b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/TestHelper.java new file mode 100644 index 00000000..195a7f94 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/metrics/impl/TestHelper.java @@ -0,0 +1,40 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.metrics.impl; + +import com.amazonaws.services.cloudwatch.model.Dimension; +import com.amazonaws.services.cloudwatch.model.MetricDatum; +import com.amazonaws.services.cloudwatch.model.StandardUnit; +import com.amazonaws.services.cloudwatch.model.StatisticSet; + +public class TestHelper { + public static MetricDatum constructDatum(String name, + StandardUnit unit, + double maximum, + double minimum, + double sum, + double count) { + return new MetricDatum().withMetricName(name) + .withUnit(unit) + .withStatisticValues(new StatisticSet().withMaximum(maximum) + .withMinimum(minimum) + .withSum(sum) + .withSampleCount(count)); + } + + public static Dimension constructDimension(String name, String value) { + return new Dimension().withName(name).withValue(value); + } +} diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/Matchers.java b/src/test/java/com/amazonaws/services/kinesis/multilang/Matchers.java similarity index 70% rename from amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/Matchers.java rename to src/test/java/com/amazonaws/services/kinesis/multilang/Matchers.java index 6ec8962a..b84d61a0 100644 --- a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/Matchers.java +++ b/src/test/java/com/amazonaws/services/kinesis/multilang/Matchers.java @@ -1,17 +1,3 @@ -/* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/asl/ - * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. 
See the License for the specific language governing - * permissions and limitations under the License. - */ package com.amazonaws.services.kinesis.multilang; import static org.hamcrest.CoreMatchers.equalTo; @@ -21,8 +7,8 @@ import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import software.amazon.kinesis.lifecycle.events.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber; +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; public class Matchers { @@ -36,19 +22,19 @@ public class Matchers { private final Matcher sequenceNumberMatcher; public InitializationInputMatcher(InitializationInput input) { - shardIdMatcher = equalTo(input.shardId()); - sequenceNumberMatcher = withSequence(input.extendedSequenceNumber()); + shardIdMatcher = equalTo(input.getShardId()); + sequenceNumberMatcher = withSequence(input.getExtendedSequenceNumber()); } @Override protected boolean matchesSafely(final InitializationInput item, Description mismatchDescription) { boolean matches = true; - if (!shardIdMatcher.matches(item.shardId())) { + if (!shardIdMatcher.matches(item.getShardId())) { matches = false; - shardIdMatcher.describeMismatch(item.shardId(), mismatchDescription); + shardIdMatcher.describeMismatch(item.getShardId(), mismatchDescription); } - if (!sequenceNumberMatcher.matches(item.extendedSequenceNumber())) { + if (!sequenceNumberMatcher.matches(item.getExtendedSequenceNumber())) { matches = false; sequenceNumberMatcher.describeMismatch(item, mismatchDescription); } @@ -76,19 +62,19 @@ public class Matchers { private final Matcher subSequenceNumberMatcher; public ExtendedSequenceNumberMatcher(ExtendedSequenceNumber extendedSequenceNumber) { - sequenceNumberMatcher = equalTo(extendedSequenceNumber.sequenceNumber()); - subSequenceNumberMatcher = 
equalTo(extendedSequenceNumber.subSequenceNumber()); + sequenceNumberMatcher = equalTo(extendedSequenceNumber.getSequenceNumber()); + subSequenceNumberMatcher = equalTo(extendedSequenceNumber.getSubSequenceNumber()); } @Override protected boolean matchesSafely(ExtendedSequenceNumber item, Description mismatchDescription) { boolean matches = true; - if (!sequenceNumberMatcher.matches(item.sequenceNumber())) { + if (!sequenceNumberMatcher.matches(item.getSequenceNumber())) { matches = false; mismatchDescription.appendDescriptionOf(sequenceNumberMatcher); } - if (!subSequenceNumberMatcher.matches(item.subSequenceNumber())) { + if (!subSequenceNumberMatcher.matches(item.getSubSequenceNumber())) { matches = false; mismatchDescription.appendDescriptionOf(subSequenceNumberMatcher); } diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MessageReaderTest.java b/src/test/java/com/amazonaws/services/kinesis/multilang/MessageReaderTest.java similarity index 99% rename from amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MessageReaderTest.java rename to src/test/java/com/amazonaws/services/kinesis/multilang/MessageReaderTest.java index 89ca0d17..a30f3516 100644 --- a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MessageReaderTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/multilang/MessageReaderTest.java @@ -18,11 +18,13 @@ import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; + import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.Future; import org.junit.Assert; + import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; @@ -37,6 +39,11 @@ public class MessageReaderTest { private static final String shardId = "shard-123"; + @Before + public void setup() { + + } + /* * This line is based on 
the definition of the protocol for communication between the KCL record processor and * the client's process. diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MessageWriterTest.java b/src/test/java/com/amazonaws/services/kinesis/multilang/MessageWriterTest.java similarity index 70% rename from amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MessageWriterTest.java rename to src/test/java/com/amazonaws/services/kinesis/multilang/MessageWriterTest.java index 22a448b1..08f04c92 100644 --- a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MessageWriterTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/multilang/MessageWriterTest.java @@ -1,42 +1,41 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ package com.amazonaws.services.kinesis.multilang; import java.io.IOException; import java.io.OutputStream; import java.nio.ByteBuffer; -import java.util.Arrays; +import java.util.ArrayList; import java.util.List; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason; +import com.amazonaws.services.kinesis.model.Record; import com.amazonaws.services.kinesis.multilang.messages.Message; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownReason; -import software.amazon.kinesis.retrieval.KinesisClientRecord; - public class MessageWriterTest { private static final String shardId = "shard-123"; @@ -75,7 +74,7 @@ public class MessageWriterTest { @Test public void writeInitializeMessageTest() throws IOException, InterruptedException, ExecutionException { - Future future = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build()); + Future future = this.messageWriter.writeInitializeMessage(new InitializationInput().withShardId(shardId)); future.get(); Mockito.verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); @@ -84,12 +83,19 @@ public class MessageWriterTest { @Test public void writeProcessRecordsMessageTest() throws IOException, InterruptedException, ExecutionException { - List records = Arrays.asList( - 
KinesisClientRecord.builder().data(ByteBuffer.wrap("kitten".getBytes())).partitionKey("some cats") - .sequenceNumber("357234807854789057805").build(), - KinesisClientRecord.builder().build() - ); - Future future = this.messageWriter.writeProcessRecordsMessage(ProcessRecordsInput.builder().records(records).build()); + List records = new ArrayList() { + { + this.add(new Record() { + { + this.setData(ByteBuffer.wrap("kitten".getBytes())); + this.setPartitionKey("some cats"); + this.setSequenceNumber("357234807854789057805"); + } + }); + this.add(new Record()); + } + }; + Future future = this.messageWriter.writeProcessRecordsMessage(new ProcessRecordsInput().withRecords(records)); future.get(); Mockito.verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), @@ -99,17 +105,7 @@ public class MessageWriterTest { @Test public void writeShutdownMessageTest() throws IOException, InterruptedException, ExecutionException { - Future future = this.messageWriter.writeShutdownMessage(ShutdownReason.SHARD_END); - future.get(); - - Mockito.verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); - Mockito.verify(this.stream, Mockito.atLeastOnce()).flush(); - } - - @Test - public void writeShutdownRequestedMessageTest() throws IOException, InterruptedException, ExecutionException { - Future future = this.messageWriter.writeShutdownRequestedMessage(); + Future future = this.messageWriter.writeShutdownMessage(ShutdownReason.TERMINATE); future.get(); Mockito.verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), @@ -120,7 +116,7 @@ public class MessageWriterTest { @Test public void streamIOExceptionTest() throws IOException, InterruptedException, ExecutionException { Mockito.doThrow(IOException.class).when(stream).flush(); - Future initializeTask = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build()); + Future 
initializeTask = this.messageWriter.writeInitializeMessage(new InitializationInput().withShardId(shardId)); Boolean result = initializeTask.get(); Assert.assertNotNull(result); Assert.assertFalse(result); @@ -133,7 +129,7 @@ public class MessageWriterTest { messageWriter = new MessageWriter().initialize(stream, shardId, mapper, Executors.newCachedThreadPool()); try { - messageWriter.writeShutdownMessage(ShutdownReason.LEASE_LOST); + messageWriter.writeShutdownMessage(ShutdownReason.ZOMBIE); Assert.fail("The mapper failed so no write method should be able to succeed."); } catch (Exception e) { // Note that this is different than the stream failing. The stream is expected to fail, so we handle it @@ -150,7 +146,7 @@ public class MessageWriterTest { Assert.assertFalse(this.messageWriter.isOpen()); try { // Any message should fail - this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(shardId).build()); + this.messageWriter.writeInitializeMessage(new InitializationInput().withShardId(shardId)); Assert.fail("MessageWriter should be closed and unable to write."); } catch (IllegalStateException e) { // This should happen. diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfigTest.java b/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfigTest.java similarity index 52% rename from amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfigTest.java rename to src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfigTest.java index a08f6673..6a687577 100644 --- a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfigTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonConfigTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Amazon Software License (the "License"). * You may not use this file except in compliance with the License. @@ -15,80 +15,71 @@ package com.amazonaws.services.kinesis.multilang; import static org.junit.Assert.assertNotNull; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.when; import java.io.ByteArrayInputStream; import java.io.IOException; import java.util.Properties; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; - -import com.amazonaws.services.kinesis.multilang.config.KinesisClientLibConfigurator; - import junit.framework.Assert; -import software.amazon.awssdk.auth.credentials.AwsCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; -@RunWith(MockitoJUnitRunner.class) +import org.junit.Test; +import org.mockito.Mockito; + +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfigurator; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration; + public class MultiLangDaemonConfigTest { + private static String FILENAME = "some.properties"; - @Mock - private AwsCredentialsProvider credentialsProvider; - @Mock - private AwsCredentials creds; - @Mock - private KinesisClientLibConfigurator configurator; - - @Before - public void setup() { - when(credentialsProvider.resolveCredentials()).thenReturn(creds); - when(creds.accessKeyId()).thenReturn("cool-user"); - when(configurator.getConfiguration(any(Properties.class))).thenReturn( - new KinesisClientLibConfiguration("cool-app", "cool-stream", credentialsProvider, 
"cool-worker")); + private KinesisClientLibConfigurator buildMockConfigurator() { + AWSCredentialsProvider credentialsProvider = Mockito.mock(AWSCredentialsProvider.class); + AWSCredentials creds = Mockito.mock(AWSCredentials.class); + Mockito.doReturn(creds).when(credentialsProvider).getCredentials(); + Mockito.doReturn("cool-user").when(creds).getAWSAccessKeyId(); + KinesisClientLibConfiguration kclConfig = + new KinesisClientLibConfiguration("cool-app", "cool-stream", credentialsProvider, "cool-worker"); + KinesisClientLibConfigurator configurator = Mockito.mock(KinesisClientLibConfigurator.class); + Mockito.doReturn(kclConfig).when(configurator).getConfiguration(Mockito.any(Properties.class)); + return configurator; } - // TODO: Fix test - @Ignore @Test public void constructorTest() throws IOException { - String PROPERTIES = "executableName = randomEXE \n" + "applicationName = testApp \n" - + "streamName = fakeStream \n" + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n" - + "processingLanguage = malbolge"; + String PROPERTIES = + "executableName = randomEXE \n" + "applicationName = testApp \n" + "streamName = fakeStream \n" + + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n" + + "processingLanguage = malbolge"; ClassLoader classLoader = Mockito.mock(ClassLoader.class); - Mockito.doReturn(new ByteArrayInputStream(PROPERTIES.getBytes())).when(classLoader) + Mockito.doReturn(new ByteArrayInputStream(PROPERTIES.getBytes())) + .when(classLoader) .getResourceAsStream(FILENAME); - MultiLangDaemonConfig deamonConfig = new MultiLangDaemonConfig(FILENAME, classLoader, configurator); + MultiLangDaemonConfig deamonConfig = new MultiLangDaemonConfig(FILENAME, classLoader, buildMockConfigurator()); assertNotNull(deamonConfig.getExecutorService()); assertNotNull(deamonConfig.getKinesisClientLibConfiguration()); assertNotNull(deamonConfig.getRecordProcessorFactory()); } - // TODO: Fix test - @Ignore @Test public void propertyValidation() { - 
String PROPERTIES_NO_EXECUTABLE_NAME = "applicationName = testApp \n" + "streamName = fakeStream \n" - + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n" + "processingLanguage = malbolge"; + String PROPERTIES_NO_EXECUTABLE_NAME = + "applicationName = testApp \n" + "streamName = fakeStream \n" + + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n" + + "processingLanguage = malbolge"; ClassLoader classLoader = Mockito.mock(ClassLoader.class); - Mockito.doReturn(new ByteArrayInputStream(PROPERTIES_NO_EXECUTABLE_NAME.getBytes())).when(classLoader) + Mockito.doReturn(new ByteArrayInputStream(PROPERTIES_NO_EXECUTABLE_NAME.getBytes())) + .when(classLoader) .getResourceAsStream(FILENAME); MultiLangDaemonConfig config; try { - config = new MultiLangDaemonConfig(FILENAME, classLoader, configurator); + config = new MultiLangDaemonConfig(FILENAME, classLoader, buildMockConfigurator()); Assert.fail("Construction of the config should have failed due to property validation failing."); } catch (IllegalArgumentException e) { // Good diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonTest.java b/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonTest.java similarity index 70% rename from amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonTest.java rename to src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonTest.java index 92271e2e..7ae6e5e7 100644 --- a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonTest.java +++ b/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangDaemonTest.java @@ -20,23 +20,26 @@ import java.util.concurrent.Executors; import org.junit.Test; import org.mockito.Mockito; -import software.amazon.awssdk.auth.credentials.AwsCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import 
software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration; public class MultiLangDaemonTest { @Test - public void buildWorkerTest() { + public void buildWorkerTest() { // Mocking Kinesis creds - AwsCredentialsProvider provider = Mockito.mock(AwsCredentialsProvider.class); - Mockito.doReturn(Mockito.mock(AwsCredentials.class)).when(provider).resolveCredentials(); - KinesisClientLibConfiguration configuration = new KinesisClientLibConfiguration("Derp", "Blurp", provider, + AWSCredentialsProvider provider = Mockito.mock(AWSCredentialsProvider.class); + Mockito.doReturn(Mockito.mock(AWSCredentials.class)).when(provider).getCredentials(); + KinesisClientLibConfiguration configuration = new KinesisClientLibConfiguration( "Derp", + "Blurp", + provider, "Worker"); - + MultiLangRecordProcessorFactory factory = Mockito.mock(MultiLangRecordProcessorFactory.class); Mockito.doReturn(new String[] { "someExecutableName" }).when(factory).getCommandArray(); - MultiLangDaemon daemon = new MultiLangDaemon(configuration, factory, Executors.newCachedThreadPool()); + MultiLangDaemon daemon = + new MultiLangDaemon(configuration, factory, Executors.newCachedThreadPool()); } @Test diff --git a/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocolTest.java b/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocolTest.java new file mode 100644 index 00000000..f00bb48f --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/multilang/MultiLangProtocolTest.java @@ -0,0 +1,176 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. 
+ * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.multilang; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.argThat; +import static org.mockito.Mockito.timeout; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; + +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer; + +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason; + +import com.amazonaws.services.kinesis.model.Record; +import 
com.amazonaws.services.kinesis.multilang.messages.CheckpointMessage; +import com.amazonaws.services.kinesis.multilang.messages.Message; +import com.amazonaws.services.kinesis.multilang.messages.ProcessRecordsMessage; +import com.amazonaws.services.kinesis.multilang.messages.StatusMessage; +import com.google.common.util.concurrent.SettableFuture; + +public class MultiLangProtocolTest { + + private static final List EMPTY_RECORD_LIST = Collections.emptyList(); + private MultiLangProtocol protocol; + private MessageWriter messageWriter; + private MessageReader messageReader; + private String shardId; + private IRecordProcessorCheckpointer checkpointer; + + + + @Before + public void setup() { + this.shardId = "shard-id-123"; + messageWriter = Mockito.mock(MessageWriter.class); + messageReader = Mockito.mock(MessageReader.class); + protocol = new MultiLangProtocol(messageReader, messageWriter, new InitializationInput().withShardId(shardId)); + checkpointer = Mockito.mock(IRecordProcessorCheckpointer.class); + } + + private Future buildFuture(T value) { + SettableFuture future = SettableFuture.create(); + future.set(value); + return future; + } + + private Future buildFuture(T value, Class clazz) { + SettableFuture future = SettableFuture.create(); + future.set(value); + return future; + } + + @Test + public void initializeTest() throws InterruptedException, ExecutionException { + when(messageWriter + .writeInitializeMessage(argThat(Matchers.withInit(new InitializationInput().withShardId(shardId))))) + .thenReturn(buildFuture(true)); + when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture(new StatusMessage("initialize"), Message.class)); + assertThat(protocol.initialize(), equalTo(true)); + } + + @Test + public void processRecordsTest() throws InterruptedException, ExecutionException { + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); + 
when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture(new StatusMessage("processRecords"), Message.class)); + + assertThat(protocol.processRecords(new ProcessRecordsInput().withRecords(EMPTY_RECORD_LIST)), equalTo(true)); + } + + @Test + public void shutdownTest() throws InterruptedException, ExecutionException { + when(messageWriter.writeShutdownMessage(any(ShutdownReason.class))).thenReturn(buildFuture(true)); + when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture(new StatusMessage("shutdown"), Message.class)); + + Mockito.doReturn(buildFuture(true)).when(messageWriter) + .writeShutdownMessage(any(ShutdownReason.class)); + Mockito.doReturn(buildFuture(new StatusMessage("shutdown"))).when(messageReader).getNextMessageFromSTDOUT(); + assertThat(protocol.shutdown(null, ShutdownReason.ZOMBIE), equalTo(true)); + } + + private Answer> buildMessageAnswers(List messages) { + return new Answer>() { + + Iterator messageIterator; + Message message; + + Answer> init(List messages) { + messageIterator = messages.iterator(); + return this; + } + + @Override + public Future answer(InvocationOnMock invocation) throws Throwable { + if (this.messageIterator.hasNext()) { + message = this.messageIterator.next(); + } + return buildFuture(message); + } + + }.init(messages); + } + + @Test + public void processRecordsWithCheckpointsTest() throws InterruptedException, ExecutionException, + KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); + when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(buildFuture(true)); + when(messageReader.getNextMessageFromSTDOUT()).thenAnswer(buildMessageAnswers(new ArrayList() { + { + this.add(new CheckpointMessage("123", 0L, null)); + this.add(new CheckpointMessage(null, 0L, null)); + /* + * This 
procesRecords message will be ignored by the read loop which only cares about status and + * checkpoint messages. All other lines and message types are ignored. By inserting it here, we check + * that this test succeeds even with unexpected messaging. + */ + this.add(new ProcessRecordsMessage()); + this.add(new StatusMessage("processRecords")); + } + })); + assertThat(protocol.processRecords(new ProcessRecordsInput().withRecords(EMPTY_RECORD_LIST).withCheckpointer(checkpointer)), equalTo(true)); + + verify(checkpointer, timeout(1)).checkpoint(); + verify(checkpointer, timeout(1)).checkpoint("123", 0L); + } + + @Test + public void processRecordsWithABadCheckpointTest() throws InterruptedException, ExecutionException { + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); + when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(buildFuture(false)); + when(messageReader.getNextMessageFromSTDOUT()).thenAnswer(buildMessageAnswers(new ArrayList() { + { + this.add(new CheckpointMessage("456", 0L, null)); + this.add(new StatusMessage("processRecords")); + } + })); + assertThat(protocol.processRecords(new ProcessRecordsInput().withRecords(EMPTY_RECORD_LIST).withCheckpointer(checkpointer)), equalTo(false)); + } +} diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/ReadSTDERRTaskTest.java b/src/test/java/com/amazonaws/services/kinesis/multilang/ReadSTDERRTaskTest.java similarity index 100% rename from amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/ReadSTDERRTaskTest.java rename to src/test/java/com/amazonaws/services/kinesis/multilang/ReadSTDERRTaskTest.java diff --git a/src/test/java/com/amazonaws/services/kinesis/multilang/StreamingRecordProcessorFactoryTest.java b/src/test/java/com/amazonaws/services/kinesis/multilang/StreamingRecordProcessorFactoryTest.java new file mode 
100644 index 00000000..a8f5885b --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/multilang/StreamingRecordProcessorFactoryTest.java @@ -0,0 +1,32 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ +package com.amazonaws.services.kinesis.multilang; + +import org.junit.Assert; +import org.junit.Test; + +import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor; + +public class StreamingRecordProcessorFactoryTest { + + @Test + public void createProcessorTest() { + MultiLangRecordProcessorFactory factory = new MultiLangRecordProcessorFactory("somecommand", null); + IRecordProcessor processor = factory.createProcessor(); + + Assert.assertEquals("Should have constructed a StreamingRecordProcessor", MultiLangRecordProcessor.class, + processor.getClass()); + } +} diff --git a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/StreamingShardRecordProcessorTest.java b/src/test/java/com/amazonaws/services/kinesis/multilang/StreamingRecordProcessorTest.java similarity index 64% rename from amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/StreamingShardRecordProcessorTest.java rename to src/test/java/com/amazonaws/services/kinesis/multilang/StreamingRecordProcessorTest.java index e51bc2a1..2c02b5e9 100644 --- a/amazon-kinesis-client-multilang/src/test/java/com/amazonaws/services/kinesis/multilang/StreamingShardRecordProcessorTest.java 
+++ b/src/test/java/com/amazonaws/services/kinesis/multilang/StreamingRecordProcessorTest.java @@ -1,60 +1,19 @@ /* - * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. * - * Licensed under the Amazon Software License (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at * - * http://aws.amazon.com/asl/ + * http://aws.amazon.com/asl/ * - * or in the "license" file accompanying this file. This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
*/ package com.amazonaws.services.kinesis.multilang; -import software.amazon.kinesis.exceptions.InvalidStateException; -import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.exceptions.ThrottlingException; -import software.amazon.awssdk.services.kinesis.model.Record; -import software.amazon.kinesis.processor.Checkpointer; -import software.amazon.kinesis.processor.PreparedCheckpointer; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.coordinator.KinesisClientLibConfiguration; -import software.amazon.kinesis.lifecycle.ShutdownReason; -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownInput; -import com.amazonaws.services.kinesis.multilang.messages.InitializeMessage; -import com.amazonaws.services.kinesis.multilang.messages.Message; -import com.amazonaws.services.kinesis.multilang.messages.ProcessRecordsMessage; -import com.amazonaws.services.kinesis.multilang.messages.ShutdownMessage; -import com.amazonaws.services.kinesis.multilang.messages.StatusMessage; -import com.fasterxml.jackson.databind.ObjectMapper; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.runners.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; -import software.amazon.kinesis.retrieval.KinesisClientRecord; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import 
java.util.concurrent.Executors; -import java.util.concurrent.Future; - import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.anyString; @@ -64,8 +23,45 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; + +import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException; +import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException; +import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer; +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput; +import com.amazonaws.services.kinesis.model.Record; +import com.amazonaws.services.kinesis.multilang.messages.InitializeMessage; +import com.amazonaws.services.kinesis.multilang.messages.Message; +import 
com.amazonaws.services.kinesis.multilang.messages.ProcessRecordsMessage; +import com.amazonaws.services.kinesis.multilang.messages.ShutdownMessage; +import com.amazonaws.services.kinesis.multilang.messages.StatusMessage; +import com.fasterxml.jackson.databind.ObjectMapper; + @RunWith(MockitoJUnitRunner.class) -public class StreamingShardRecordProcessorTest { +public class StreamingRecordProcessorTest { private static final String shardId = "shard-123"; @@ -74,7 +70,7 @@ public class StreamingShardRecordProcessorTest { @Mock private Future messageFuture; - private RecordProcessorCheckpointer unimplementedCheckpointer = new RecordProcessorCheckpointer() { + private IRecordProcessorCheckpointer unimplementedCheckpointer = new IRecordProcessorCheckpointer() { @Override public void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException, @@ -102,39 +98,6 @@ public class StreamingShardRecordProcessorTest { IllegalArgumentException { throw new UnsupportedOperationException(); } - - @Override - public PreparedCheckpointer prepareCheckpoint() - throws KinesisClientLibDependencyException, - InvalidStateException, ThrottlingException, ShutdownException { - throw new UnsupportedOperationException(); - } - - @Override - public PreparedCheckpointer prepareCheckpoint(Record record) - throws KinesisClientLibDependencyException, - InvalidStateException, ThrottlingException, ShutdownException { - throw new UnsupportedOperationException(); - } - - @Override - public PreparedCheckpointer prepareCheckpoint(String sequenceNumber) - throws KinesisClientLibDependencyException, - InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException { - throw new UnsupportedOperationException(); - } - - @Override - public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber) - throws KinesisClientLibDependencyException, - InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException { - throw 
new UnsupportedOperationException(); - } - - @Override - public Checkpointer checkpointer() { - throw new UnsupportedOperationException(); - } }; private MessageWriter messageWriter; @@ -143,10 +106,7 @@ public class StreamingShardRecordProcessorTest { private MessageReader messageReader; - private MultiLangShardRecordProcessor recordProcessor; - - @Mock - private KinesisClientLibConfiguration configuration; + private MultiLangRecordProcessor recordProcessor; @Before public void prepare() throws IOException, InterruptedException, ExecutionException { @@ -161,11 +121,10 @@ public class StreamingShardRecordProcessorTest { messageWriter = Mockito.mock(MessageWriter.class); messageReader = Mockito.mock(MessageReader.class); errorReader = Mockito.mock(DrainChildSTDERRTask.class); - when(configuration.getTimeoutInSeconds()).thenReturn(Optional.empty()); recordProcessor = - new MultiLangShardRecordProcessor(new ProcessBuilder(), executor, new ObjectMapper(), messageWriter, - messageReader, errorReader, configuration) { + new MultiLangRecordProcessor(new ProcessBuilder(), executor, new ObjectMapper(), messageWriter, + messageReader, errorReader) { // Just don't do anything when we exit. 
void exit() { @@ -208,15 +167,12 @@ public class StreamingShardRecordProcessorTest { when(messageFuture.get()).thenAnswer(answer); when(messageReader.getNextMessageFromSTDOUT()).thenReturn(messageFuture); - List testRecords = Collections.emptyList(); + List testRecords = new ArrayList(); - recordProcessor.initialize(InitializationInput.builder().shardId(shardId).build()); - recordProcessor.processRecords(ProcessRecordsInput.builder().records(testRecords) - .checkpointer(unimplementedCheckpointer).build()); - recordProcessor.processRecords(ProcessRecordsInput.builder().records(testRecords) - .checkpointer(unimplementedCheckpointer).build()); - recordProcessor.shutdown(ShutdownInput.builder().checkpointer(unimplementedCheckpointer) - .shutdownReason(ShutdownReason.LEASE_LOST).build()); + recordProcessor.initialize(new InitializationInput().withShardId(shardId)); + recordProcessor.processRecords(new ProcessRecordsInput().withRecords(testRecords).withCheckpointer(unimplementedCheckpointer)); + recordProcessor.processRecords(new ProcessRecordsInput().withRecords(testRecords).withCheckpointer(unimplementedCheckpointer)); + recordProcessor.shutdown(new ShutdownInput().withCheckpointer(unimplementedCheckpointer).withShutdownReason(ShutdownReason.ZOMBIE)); } @Test @@ -243,10 +199,9 @@ public class StreamingShardRecordProcessorTest { phases(answer); verify(messageWriter) - .writeInitializeMessage(argThat(Matchers.withInit( - InitializationInput.builder().shardId(shardId).build()))); + .writeInitializeMessage(argThat(Matchers.withInit(new InitializationInput().withShardId(shardId)))); verify(messageWriter, times(2)).writeProcessRecordsMessage(any(ProcessRecordsInput.class)); - verify(messageWriter).writeShutdownMessage(ShutdownReason.LEASE_LOST); + verify(messageWriter).writeShutdownMessage(ShutdownReason.ZOMBIE); } @Test @@ -275,10 +230,10 @@ public class StreamingShardRecordProcessorTest { phases(answer); - 
verify(messageWriter).writeInitializeMessage(argThat(Matchers.withInit(InitializationInput.builder() - .shardId(shardId).build()))); + verify(messageWriter).writeInitializeMessage(argThat(Matchers.withInit(new InitializationInput() + .withShardId(shardId)))); verify(messageWriter, times(2)).writeProcessRecordsMessage(any(ProcessRecordsInput.class)); - verify(messageWriter, never()).writeShutdownMessage(ShutdownReason.LEASE_LOST); + verify(messageWriter, never()).writeShutdownMessage(ShutdownReason.ZOMBIE); Assert.assertEquals(1, systemExitCount); } } diff --git a/src/test/java/com/amazonaws/services/kinesis/multilang/messages/MessageTest.java b/src/test/java/com/amazonaws/services/kinesis/multilang/messages/MessageTest.java new file mode 100644 index 00000000..2c76aa30 --- /dev/null +++ b/src/test/java/com/amazonaws/services/kinesis/multilang/messages/MessageTest.java @@ -0,0 +1,77 @@ +/* + * Copyright 2012-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Amazon Software License (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/asl/ + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ +package com.amazonaws.services.kinesis.multilang.messages; + +import java.nio.ByteBuffer; +import java.util.ArrayList; + +import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput; +import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput; +import org.junit.Assert; +import org.junit.Test; + +import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason; +import com.amazonaws.services.kinesis.model.Record; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; + +public class MessageTest { + + @Test + public void toStringTest() { + Message[] messages = + new Message[] { new CheckpointMessage("1234567890", 0L, null), new InitializeMessage(new InitializationInput().withShardId("shard-123")), + new ProcessRecordsMessage(new ProcessRecordsInput().withRecords(new ArrayList() { + { + this.add(new Record() { + { + this.withData(ByteBuffer.wrap("cat".getBytes())); + this.withPartitionKey("cat"); + this.withSequenceNumber("555"); + } + }); + } + })), new ShutdownMessage(ShutdownReason.ZOMBIE), new StatusMessage("processRecords"), + new InitializeMessage(), new ProcessRecordsMessage() }; + + for (int i = 0; i < messages.length; i++) { + Assert.assertTrue("Each message should contain the action field", messages[i].toString().contains("action")); + } + + // Hit this constructor + JsonFriendlyRecord defaultJsonFriendlyRecord = new JsonFriendlyRecord(); + Assert.assertNull(defaultJsonFriendlyRecord.getPartitionKey()); + Assert.assertNull(defaultJsonFriendlyRecord.getData()); + Assert.assertNull(defaultJsonFriendlyRecord.getSequenceNumber()); + Assert.assertNull(new ShutdownMessage(null).getReason()); + + // Hit the bad object mapping path + Message withBadMapper = new Message() { + }.withObjectMapper(new ObjectMapper() { + /** + * + */ + private static final long serialVersionUID = 1L; + + @Override + public String writeValueAsString(Object m) throws 
JsonProcessingException { + throw new JsonProcessingException(new Throwable()) { + }; + } + }); + String s = withBadMapper.toString(); + Assert.assertNotNull(s); + } +} diff --git a/src/test/java/log4j.properties b/src/test/java/log4j.properties new file mode 100644 index 00000000..73ba669c --- /dev/null +++ b/src/test/java/log4j.properties @@ -0,0 +1,8 @@ +log4j.rootLogger=INFO, A1 +log4j.appender.A1=org.apache.log4j.ConsoleAppender +log4j.appender.A1.layout=org.apache.log4j.PatternLayout + +# Print the date in ISO 8601 format +log4j.appender.A1.layout.ConversionPattern=%d [%t] %-5p %c - %m%n + +log4j.logger.org.apache.http=WARN