diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java index d8d9068d..179eb9e4 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/coordinator/KinesisClientLibConfiguration.java @@ -18,11 +18,9 @@ import java.util.Date; import java.util.Optional; import java.util.Set; -import org.apache.commons.lang3.Validate; - import com.google.common.collect.ImmutableSet; - import lombok.Getter; +import org.apache.commons.lang3.Validate; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; import software.amazon.kinesis.common.InitialPositionInStream; @@ -119,14 +117,16 @@ public class KinesisClientLibConfiguration { /** * Metrics dimensions that always will be enabled regardless of the config provided by user. */ - public static final Set METRICS_ALWAYS_ENABLED_DIMENSIONS = ImmutableSet - .of(MetricsUtil.OPERATION_DIMENSION_NAME); + public static final Set METRICS_ALWAYS_ENABLED_DIMENSIONS = + ImmutableSet.of(MetricsUtil.OPERATION_DIMENSION_NAME); /** * Allowed dimensions for CloudWatch metrics. By default, worker ID dimension will be disabled. */ - public static final Set DEFAULT_METRICS_ENABLED_DIMENSIONS = ImmutableSet. builder() - .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS).add(MetricsUtil.SHARD_ID_DIMENSION_NAME).build(); + public static final Set DEFAULT_METRICS_ENABLED_DIMENSIONS = ImmutableSet.builder() + .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS) + .add(MetricsUtil.SHARD_ID_DIMENSION_NAME) + .build(); /** * Metrics dimensions that signify all possible dimensions. 
@@ -285,8 +285,8 @@ public class KinesisClientLibConfiguration { * @param workerId * Used to distinguish different workers/processes of a Kinesis application */ - public KinesisClientLibConfiguration(String applicationName, String streamName, - AwsCredentialsProvider credentialsProvider, String workerId) { + public KinesisClientLibConfiguration( + String applicationName, String streamName, AwsCredentialsProvider credentialsProvider, String workerId) { this(applicationName, streamName, credentialsProvider, credentialsProvider, credentialsProvider, workerId); } @@ -308,16 +308,36 @@ public class KinesisClientLibConfiguration { * @param workerId * Used to distinguish different workers/processes of a Kinesis application */ - public KinesisClientLibConfiguration(String applicationName, String streamName, - AwsCredentialsProvider kinesisCredentialsProvider, AwsCredentialsProvider dynamoDBCredentialsProvider, - AwsCredentialsProvider cloudWatchCredentialsProvider, String workerId) { - this(applicationName, streamName, null, null, DEFAULT_INITIAL_POSITION_IN_STREAM, kinesisCredentialsProvider, - dynamoDBCredentialsProvider, cloudWatchCredentialsProvider, DEFAULT_FAILOVER_TIME_MILLIS, workerId, - DEFAULT_MAX_RECORDS, DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, - DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS, - DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, - DEFAULT_TASK_BACKOFF_TIME_MILLIS, DEFAULT_METRICS_BUFFER_TIME_MILLIS, DEFAULT_METRICS_MAX_QUEUE_SIZE, - DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, null, DEFAULT_SHUTDOWN_GRACE_MILLIS, + public KinesisClientLibConfiguration( + String applicationName, + String streamName, + AwsCredentialsProvider kinesisCredentialsProvider, + AwsCredentialsProvider dynamoDBCredentialsProvider, + AwsCredentialsProvider cloudWatchCredentialsProvider, + String workerId) { + this( + applicationName, + streamName, + null, + null, + 
DEFAULT_INITIAL_POSITION_IN_STREAM, + kinesisCredentialsProvider, + dynamoDBCredentialsProvider, + cloudWatchCredentialsProvider, + DEFAULT_FAILOVER_TIME_MILLIS, + workerId, + DEFAULT_MAX_RECORDS, + DEFAULT_IDLETIME_BETWEEN_READS_MILLIS, + DEFAULT_DONT_CALL_PROCESS_RECORDS_FOR_EMPTY_RECORD_LIST, + DEFAULT_PARENT_SHARD_POLL_INTERVAL_MILLIS, + DEFAULT_SHARD_SYNC_INTERVAL_MILLIS, + DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION, + DEFAULT_TASK_BACKOFF_TIME_MILLIS, + DEFAULT_METRICS_BUFFER_TIME_MILLIS, + DEFAULT_METRICS_MAX_QUEUE_SIZE, + DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING, + null, + DEFAULT_SHUTDOWN_GRACE_MILLIS, DEFAULT_SCHEDULER_INITIALIZATION_BACKOFF_TIME_MILLIS); } @@ -377,20 +397,53 @@ public class KinesisClientLibConfiguration { */ // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 26 LINES // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 26 LINES - public KinesisClientLibConfiguration(String applicationName, String streamName, String kinesisEndpoint, - InitialPositionInStream initialPositionInStream, AwsCredentialsProvider kinesisCredentialsProvider, - AwsCredentialsProvider dynamoDBCredentialsProvider, AwsCredentialsProvider cloudWatchCredentialsProvider, - long failoverTimeMillis, String workerId, int maxRecords, long idleTimeBetweenReadsInMillis, - boolean callProcessRecordsEvenForEmptyRecordList, long parentShardPollIntervalMillis, - long shardSyncIntervalMillis, boolean cleanupTerminatedShardsBeforeExpiry, long taskBackoffTimeMillis, - long metricsBufferTimeMillis, int metricsMaxQueueSize, boolean validateSequenceNumberBeforeCheckpointing, - String regionName, long shutdownGraceMillis, long schedulerInitializationBackoffTimeMillis) { - this(applicationName, streamName, kinesisEndpoint, null, initialPositionInStream, kinesisCredentialsProvider, - dynamoDBCredentialsProvider, cloudWatchCredentialsProvider, failoverTimeMillis, workerId, maxRecords, - idleTimeBetweenReadsInMillis, callProcessRecordsEvenForEmptyRecordList, 
parentShardPollIntervalMillis, - shardSyncIntervalMillis, cleanupTerminatedShardsBeforeExpiry, taskBackoffTimeMillis, - metricsBufferTimeMillis, metricsMaxQueueSize, validateSequenceNumberBeforeCheckpointing, regionName, - shutdownGraceMillis, schedulerInitializationBackoffTimeMillis); + public KinesisClientLibConfiguration( + String applicationName, + String streamName, + String kinesisEndpoint, + InitialPositionInStream initialPositionInStream, + AwsCredentialsProvider kinesisCredentialsProvider, + AwsCredentialsProvider dynamoDBCredentialsProvider, + AwsCredentialsProvider cloudWatchCredentialsProvider, + long failoverTimeMillis, + String workerId, + int maxRecords, + long idleTimeBetweenReadsInMillis, + boolean callProcessRecordsEvenForEmptyRecordList, + long parentShardPollIntervalMillis, + long shardSyncIntervalMillis, + boolean cleanupTerminatedShardsBeforeExpiry, + long taskBackoffTimeMillis, + long metricsBufferTimeMillis, + int metricsMaxQueueSize, + boolean validateSequenceNumberBeforeCheckpointing, + String regionName, + long shutdownGraceMillis, + long schedulerInitializationBackoffTimeMillis) { + this( + applicationName, + streamName, + kinesisEndpoint, + null, + initialPositionInStream, + kinesisCredentialsProvider, + dynamoDBCredentialsProvider, + cloudWatchCredentialsProvider, + failoverTimeMillis, + workerId, + maxRecords, + idleTimeBetweenReadsInMillis, + callProcessRecordsEvenForEmptyRecordList, + parentShardPollIntervalMillis, + shardSyncIntervalMillis, + cleanupTerminatedShardsBeforeExpiry, + taskBackoffTimeMillis, + metricsBufferTimeMillis, + metricsMaxQueueSize, + validateSequenceNumberBeforeCheckpointing, + regionName, + shutdownGraceMillis, + schedulerInitializationBackoffTimeMillis); } /** @@ -449,15 +502,30 @@ public class KinesisClientLibConfiguration { */ // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 26 LINES // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 26 LINES - public KinesisClientLibConfiguration(String applicationName, String 
streamName, String kinesisEndpoint, - String dynamoDBEndpoint, InitialPositionInStream initialPositionInStream, - AwsCredentialsProvider kinesisCredentialsProvider, AwsCredentialsProvider dynamoDBCredentialsProvider, - AwsCredentialsProvider cloudWatchCredentialsProvider, long failoverTimeMillis, String workerId, - int maxRecords, long idleTimeBetweenReadsInMillis, boolean callProcessRecordsEvenForEmptyRecordList, - long parentShardPollIntervalMillis, long shardSyncIntervalMillis, - boolean cleanupTerminatedShardsBeforeExpiry, long taskBackoffTimeMillis, long metricsBufferTimeMillis, - int metricsMaxQueueSize, boolean validateSequenceNumberBeforeCheckpointing, String regionName, - long shutdownGraceMillis, long schedulerInitializationBackoffTimeMillis) { + public KinesisClientLibConfiguration( + String applicationName, + String streamName, + String kinesisEndpoint, + String dynamoDBEndpoint, + InitialPositionInStream initialPositionInStream, + AwsCredentialsProvider kinesisCredentialsProvider, + AwsCredentialsProvider dynamoDBCredentialsProvider, + AwsCredentialsProvider cloudWatchCredentialsProvider, + long failoverTimeMillis, + String workerId, + int maxRecords, + long idleTimeBetweenReadsInMillis, + boolean callProcessRecordsEvenForEmptyRecordList, + long parentShardPollIntervalMillis, + long shardSyncIntervalMillis, + boolean cleanupTerminatedShardsBeforeExpiry, + long taskBackoffTimeMillis, + long metricsBufferTimeMillis, + int metricsMaxQueueSize, + boolean validateSequenceNumberBeforeCheckpointing, + String regionName, + long shutdownGraceMillis, + long schedulerInitializationBackoffTimeMillis) { // Check following values are greater than zero checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis); checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis); @@ -495,8 +563,8 @@ public class KinesisClientLibConfiguration { this.maxLeasesToStealAtOneTime = DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME; 
this.initialLeaseTableReadCapacity = DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY; this.initialLeaseTableWriteCapacity = DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY; - this.initialPositionInStreamExtended = InitialPositionInStreamExtended - .newInitialPosition(initialPositionInStream); + this.initialPositionInStreamExtended = + InitialPositionInStreamExtended.newInitialPosition(initialPositionInStream); this.skipShardSyncAtWorkerInitializationIfLeasesExist = DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST; this.shardPrioritization = DEFAULT_SHARD_PRIORITIZATION; this.recordsFetcherFactory = new SimpleRecordsFetcherFactory(); @@ -559,15 +627,30 @@ public class KinesisClientLibConfiguration { */ // CHECKSTYLE:IGNORE HiddenFieldCheck FOR NEXT 26 LINES // CHECKSTYLE:IGNORE ParameterNumber FOR NEXT 26 LINES - public KinesisClientLibConfiguration(String applicationName, String streamName, String kinesisEndpoint, - String dynamoDBEndpoint, InitialPositionInStream initialPositionInStream, - AwsCredentialsProvider kinesisCredentialsProvider, AwsCredentialsProvider dynamoDBCredentialsProvider, - AwsCredentialsProvider cloudWatchCredentialsProvider, long failoverTimeMillis, String workerId, - int maxRecords, long idleTimeBetweenReadsInMillis, boolean callProcessRecordsEvenForEmptyRecordList, - long parentShardPollIntervalMillis, long shardSyncIntervalMillis, - boolean cleanupTerminatedShardsBeforeExpiry, long taskBackoffTimeMillis, long metricsBufferTimeMillis, - int metricsMaxQueueSize, boolean validateSequenceNumberBeforeCheckpointing, String regionName, - RecordsFetcherFactory recordsFetcherFactory, long schedulerInitializationBackoffTimeMillis) { + public KinesisClientLibConfiguration( + String applicationName, + String streamName, + String kinesisEndpoint, + String dynamoDBEndpoint, + InitialPositionInStream initialPositionInStream, + AwsCredentialsProvider kinesisCredentialsProvider, + AwsCredentialsProvider dynamoDBCredentialsProvider, + AwsCredentialsProvider 
cloudWatchCredentialsProvider, + long failoverTimeMillis, + String workerId, + int maxRecords, + long idleTimeBetweenReadsInMillis, + boolean callProcessRecordsEvenForEmptyRecordList, + long parentShardPollIntervalMillis, + long shardSyncIntervalMillis, + boolean cleanupTerminatedShardsBeforeExpiry, + long taskBackoffTimeMillis, + long metricsBufferTimeMillis, + int metricsMaxQueueSize, + boolean validateSequenceNumberBeforeCheckpointing, + String regionName, + RecordsFetcherFactory recordsFetcherFactory, + long schedulerInitializationBackoffTimeMillis) { // Check following values are greater than zero checkIsValuePositive("FailoverTimeMillis", failoverTimeMillis); checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis); @@ -607,8 +690,8 @@ public class KinesisClientLibConfiguration { this.maxLeasesToStealAtOneTime = DEFAULT_MAX_LEASES_TO_STEAL_AT_ONE_TIME; this.initialLeaseTableReadCapacity = DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY; this.initialLeaseTableWriteCapacity = DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY; - this.initialPositionInStreamExtended = InitialPositionInStreamExtended - .newInitialPosition(initialPositionInStream); + this.initialPositionInStreamExtended = + InitialPositionInStreamExtended.newInitialPosition(initialPositionInStream); this.skipShardSyncAtWorkerInitializationIfLeasesExist = DEFAULT_SKIP_SHARD_SYNC_AT_STARTUP_IF_LEASES_EXIST; this.shardPrioritization = DEFAULT_SHARD_PRIORITIZATION; this.recordsFetcherFactory = recordsFetcherFactory; @@ -933,8 +1016,8 @@ public class KinesisClientLibConfiguration { */ public KinesisClientLibConfiguration withInitialPositionInStream(InitialPositionInStream initialPositionInStream) { this.initialPositionInStream = initialPositionInStream; - this.initialPositionInStreamExtended = InitialPositionInStreamExtended - .newInitialPosition(initialPositionInStream); + this.initialPositionInStreamExtended = + InitialPositionInStreamExtended.newInitialPosition(initialPositionInStream); 
return this; } @@ -984,8 +1067,8 @@ public class KinesisClientLibConfiguration { public KinesisClientLibConfiguration withMaxRecords(int maxRecords) { checkIsValuePositive("MaxRecords", (long) maxRecords); if (maxRecords > DEFAULT_MAX_RECORDS) { - throw new IllegalArgumentException( - "maxRecords must be less than or equal to " + DEFAULT_MAX_RECORDS + " but current value is " + maxRecords); + throw new IllegalArgumentException("maxRecords must be less than or equal to " + DEFAULT_MAX_RECORDS + + " but current value is " + maxRecords); } this.maxRecords = maxRecords; return this; @@ -1145,8 +1228,10 @@ public class KinesisClientLibConfiguration { } else if (metricsEnabledDimensions.contains(MetricsScope.METRICS_DIMENSIONS_ALL)) { this.metricsEnabledDimensions = METRICS_DIMENSIONS_ALL; } else { - this.metricsEnabledDimensions = ImmutableSet. builder().addAll(metricsEnabledDimensions) - .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS).build(); + this.metricsEnabledDimensions = ImmutableSet.builder() + .addAll(metricsEnabledDimensions) + .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS) + .build(); } return this; } @@ -1277,7 +1362,8 @@ public class KinesisClientLibConfiguration { * @return this configuration object */ public KinesisClientLibConfiguration withMaxLeaseRenewalThreads(int maxLeaseRenewalThreads) { - Validate.isTrue(maxLeaseRenewalThreads > 2, + Validate.isTrue( + maxLeaseRenewalThreads > 2, "The maximum number of lease renewal threads must be greater than or equal to 2."); this.maxLeaseRenewalThreads = maxLeaseRenewalThreads; @@ -1337,7 +1423,8 @@ public class KinesisClientLibConfiguration { * @return KinesisClientLibConfiguration */ public KinesisClientLibConfiguration withDataFetchingStrategy(String dataFetchingStrategy) { - this.recordsFetcherFactory.dataFetchingStrategy(DataFetchingStrategy.valueOf(dataFetchingStrategy.toUpperCase())); + this.recordsFetcherFactory.dataFetchingStrategy( + DataFetchingStrategy.valueOf(dataFetchingStrategy.toUpperCase())); return 
this; } @@ -1423,7 +1510,8 @@ public class KinesisClientLibConfiguration { * Interval in milliseconds between retrying the scheduler initialization. * @return */ - public KinesisClientLibConfiguration withSchedulerInitializationBackoffTimeMillis(long schedulerInitializationBackoffTimeMillis) { + public KinesisClientLibConfiguration withSchedulerInitializationBackoffTimeMillis( + long schedulerInitializationBackoffTimeMillis) { checkIsValuePositive("schedulerInitializationBackoffTimeMillis", schedulerInitializationBackoffTimeMillis); this.schedulerInitializationBackoffTimeMillis = schedulerInitializationBackoffTimeMillis; return this; diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDERRTask.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDERRTask.java index f175d905..1f67ece5 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDERRTask.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDERRTask.java @@ -23,8 +23,7 @@ import lombok.extern.slf4j.Slf4j; */ @Slf4j class DrainChildSTDERRTask extends LineReaderTask { - DrainChildSTDERRTask() { - } + DrainChildSTDERRTask() {} @Override protected HandleLineResult handleLine(String line) { diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDOUTTask.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDOUTTask.java index 19208304..573fe570 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDOUTTask.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/DrainChildSTDOUTTask.java @@ -22,23 +22,22 @@ import lombok.extern.slf4j.Slf4j; * This class is used to drain the STDOUT of the child process. 
After the child process has been given a shutdown * message and responded indicating that it is shutdown, we attempt to close the input and outputs of that process so * that the process can exit. - * + * * To understand why this is necessary, consider the following scenario: - * + * *
    *
  1. Child process responds that it is done with shutdown.
  2. *
  3. Child process prints debugging text to STDOUT that fills the pipe buffer so child becomes blocked.
  4. *
  5. Parent process doesn't drain child process's STDOUT.
  6. *
  7. Child process remains blocked.
  8. *
- * + * * To prevent the child process from becoming blocked in this way, it is the responsibility of the parent process to * drain the child process's STDOUT. We reprint each drained line to our log to permit debugging. */ @Slf4j class DrainChildSTDOUTTask extends LineReaderTask { - DrainChildSTDOUTTask() { - } + DrainChildSTDOUTTask() {} @Override protected HandleLineResult handleLine(String line) { diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/GetNextMessageTask.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/GetNextMessageTask.java index 34fe30d3..a9fca27c 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/GetNextMessageTask.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/GetNextMessageTask.java @@ -17,10 +17,9 @@ package software.amazon.kinesis.multilang; import java.io.BufferedReader; import java.io.IOException; -import software.amazon.kinesis.multilang.messages.Message; import com.fasterxml.jackson.databind.ObjectMapper; - import lombok.extern.slf4j.Slf4j; +import software.amazon.kinesis.multilang.messages.Message; /** * Gets the next message off the STDOUT of the child process. Throws an exception if a message is not found before the @@ -34,7 +33,7 @@ class GetNextMessageTask extends LineReaderTask { /** * Constructor. - * + * * @param objectMapper An object mapper for decoding json messages from the input stream. */ GetNextMessageTask(ObjectMapper objectMapper) { @@ -43,7 +42,7 @@ class GetNextMessageTask extends LineReaderTask { /** * Checks if a line is an empty line. - * + * * @param line A string * @return True if the line is an empty string, i.e. "", false otherwise. 
*/ @@ -71,8 +70,10 @@ class GetNextMessageTask extends LineReaderTask { @Override protected Message returnAfterException(Exception e) { - throw new RuntimeException("Encountered an error while reading a line from STDIN for shard " + getShardId() - + " so won't be able to return a message.", e); + throw new RuntimeException( + "Encountered an error while reading a line from STDIN for shard " + getShardId() + + " so won't be able to return a message.", + e); } @Override diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/LineReaderTask.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/LineReaderTask.java index 3c01c7b7..915fc088 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/LineReaderTask.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/LineReaderTask.java @@ -30,7 +30,7 @@ import lombok.extern.slf4j.Slf4j; *
  • {@link #returnAfterEndOfInput()}
  • *
  • {@link #returnAfterException(Exception)}
  • * - * + * * @param */ @Slf4j @@ -41,8 +41,7 @@ abstract class LineReaderTask implements Callable { private String shardId; - LineReaderTask() { - } + LineReaderTask() {} /** * Reads lines off the input stream until a return value is set, or an exception is encountered, or the end of the @@ -72,7 +71,7 @@ abstract class LineReaderTask implements Callable { * return from the {@link #call()} function by having a value, indicating that value should be returned immediately * without reading further, or not having a value, indicating that more lines of input need to be read before * returning. - * + * * @param line A line read from the input stream. * @return HandleLineResult which may or may not have a has return value, indicating to return or not return yet * respectively. @@ -83,7 +82,7 @@ abstract class LineReaderTask implements Callable { * This method will be called if there is an error while reading from the input stream. The return value of this * method will be returned as the result of this Callable unless an Exception is thrown. If an Exception is thrown * then that exception will be thrown by the Callable. - * + * * @param e An exception that occurred while reading from the input stream. * @return What to return. */ @@ -93,7 +92,7 @@ abstract class LineReaderTask implements Callable { * This method will be called once the end of the input stream is reached. The return value of this method will be * returned as the result of this Callable. Implementations of this method are welcome to throw a runtime exception * to indicate that the task was unsuccessful. - * + * * @return What to return. */ protected abstract T returnAfterEndOfInput(); @@ -101,7 +100,7 @@ abstract class LineReaderTask implements Callable { /** * Allows subclasses to provide more detailed logs. Specifically, this allows the drain tasks and GetNextMessageTask * to log which shard they're working on. 
- * + * * @return The shard id */ public String getShardId() { @@ -110,7 +109,7 @@ abstract class LineReaderTask implements Callable { /** * The description should be a string explaining what this particular LineReader class does. - * + * * @return The description. */ public String getDescription() { @@ -121,7 +120,7 @@ abstract class LineReaderTask implements Callable { * The result of a call to {@link LineReaderTask#handleLine(String)}. Allows implementations of that method to * indicate whether a particular invocation of that method produced a return for this task or not. If a return value * doesn't exist the {@link #call()} method will continue to the next line. - * + * * @param */ protected class HandleLineResult { @@ -158,7 +157,7 @@ abstract class LineReaderTask implements Callable { * {@link MultiLangShardRecordProcessor#initialize(String)} is called. So we follow a pattern where the attributes are * set inside this method instead of the constructor so that this object will be initialized when all its attributes * are known to the record processor. 
- * + * * @param stream * @param shardId * @param description @@ -180,5 +179,4 @@ abstract class LineReaderTask implements Callable { this.description = description; return this; } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageReader.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageReader.java index a649490c..a8d5f64f 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageReader.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageReader.java @@ -20,19 +20,19 @@ import java.io.InputStreamReader; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; -import software.amazon.kinesis.multilang.messages.Message; import com.fasterxml.jackson.databind.ObjectMapper; +import software.amazon.kinesis.multilang.messages.Message; /** * Provides methods for interacting with the child process's STDOUT. - * + * * {@link #getNextMessageFromSTDOUT()} reads lines from the child process's STDOUT and attempts to decode a * {@link Message} object from each line. A child process's STDOUT could have lines that don't contain data related to * the multi-language protocol, such as when the child process prints debugging information to its STDOUT (instead of * logging to a file), also when a child processes writes a Message it is expected to prepend and append a new line * character to their message to help ensure that it is isolated on a line all by itself which results in empty lines * being present in STDOUT. Lines which cannot be decoded to a Message object are ignored. - * + * * {@link #drainSTDOUT()} simply reads all data from the child process's STDOUT until the stream is closed. */ class MessageReader { @@ -48,19 +48,18 @@ class MessageReader { /** * Use the initialize methods after construction. 
*/ - MessageReader() { - } + MessageReader() {} /** * Returns a future which represents an attempt to read the next message in the child process's STDOUT. If the task * is successful, the result of the future will be the next message found in the child process's STDOUT, if the task * is unable to find a message before the child process's STDOUT is closed, or reading from STDOUT causes an * IOException, then an execution exception will be generated by this future. - * + * * The task employed by this method reads from the child process's STDOUT line by line. The task attempts to decode * each line into a {@link Message} object. Lines that fail to decode to a Message are ignored and the task * continues to the next line until it finds a Message. - * + * * @return */ Future getNextMessageFromSTDOUT() { @@ -73,7 +72,7 @@ class MessageReader { * Returns a future that represents a computation that drains the STDOUT of the child process. That future's result * is true if the end of the child's STDOUT is reached, its result is false if there was an error while reading from * the stream. This task will log all the lines it drains to permit debugging. - * + * * @return */ Future drainSTDOUT() { @@ -89,19 +88,16 @@ class MessageReader { * {@link MultiLangShardRecordProcessor#initialize(String)} is called. So we follow a pattern where the attributes are * set inside this method instead of the constructor so that this object will be initialized when all its attributes * are known to the record processor. - * + * * @param stream Used to read messages from the subprocess. * @param shardId The shard we're working on. * @param objectMapper The object mapper to decode messages. * @param executorService An executor service to run tasks in. 
*/ - MessageReader initialize(InputStream stream, - String shardId, - ObjectMapper objectMapper, - ExecutorService executorService) { - return this.initialize(new BufferedReader(new InputStreamReader(stream)), shardId, objectMapper, - executorService); - + MessageReader initialize( + InputStream stream, String shardId, ObjectMapper objectMapper, ExecutorService executorService) { + return this.initialize( + new BufferedReader(new InputStreamReader(stream)), shardId, objectMapper, executorService); } /** @@ -110,10 +106,8 @@ class MessageReader { * @param objectMapper The object mapper to decode messages. * @param executorService An executor service to run tasks in. */ - MessageReader initialize(BufferedReader reader, - String shardId, - ObjectMapper objectMapper, - ExecutorService executorService) { + MessageReader initialize( + BufferedReader reader, String shardId, ObjectMapper objectMapper, ExecutorService executorService) { this.reader = reader; this.shardId = shardId; this.objectMapper = objectMapper; diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageWriter.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageWriter.java index 371c044b..c50c2004 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageWriter.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MessageWriter.java @@ -23,7 +23,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import com.fasterxml.jackson.databind.ObjectMapper; - import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.lifecycle.events.InitializationInput; import software.amazon.kinesis.lifecycle.events.LeaseLostInput; @@ -55,13 +54,12 @@ class MessageWriter { /** * Use initialize method after construction. 
*/ - MessageWriter() { - } + MessageWriter() {} /** * Writes the message then writes the line separator provided by the system. Flushes each message to guarantee it * is delivered as soon as possible to the subprocess. - * + * * @param message A message to be written to the subprocess. * @return * @throws IOException @@ -76,7 +74,10 @@ class MessageWriter { */ synchronized (writer) { writer.write(message, 0, message.length()); - writer.write(System.lineSeparator(), 0, System.lineSeparator().length()); + writer.write( + System.lineSeparator(), + 0, + System.lineSeparator().length()); writer.flush(); } log.info("Message size == {} bytes for shard {}", message.getBytes().length, shardId); @@ -98,7 +99,7 @@ class MessageWriter { /** * Converts the message to a JSON string and writes it to the subprocess. - * + * * @param message A message to be written to the subprocess. * @return */ @@ -108,9 +109,9 @@ class MessageWriter { String jsonText = objectMapper.writeValueAsString(message); return writeMessageToOutput(jsonText); } catch (IOException e) { - String errorMessage = - String.format("Encountered I/O error while writing %s action to subprocess", message.getClass() - .getSimpleName()); + String errorMessage = String.format( + "Encountered I/O error while writing %s action to subprocess", + message.getClass().getSimpleName()); log.error(errorMessage, e); throw new RuntimeException(errorMessage, e); } @@ -118,7 +119,7 @@ class MessageWriter { /** * Writes an {@link InitializeMessage} to the subprocess. - * + * * @param initializationInput * contains information about the shard being initialized */ @@ -128,7 +129,7 @@ class MessageWriter { /** * Writes a {@link ProcessRecordsMessage} message to the subprocess. - * + * * @param processRecordsInput * the records, and associated metadata to be processed. */ @@ -138,7 +139,7 @@ class MessageWriter { /** * Writes the lease lost message to the sub process. - * + * * @param leaseLostInput * the lease lost input. 
This is currently unused as lease loss doesn't actually have anything in it * @return A future that is set when the message has been written. @@ -149,7 +150,7 @@ class MessageWriter { /** * Writes a message to the sub process indicating that the shard has ended - * + * * @param shardEndedInput * the shard end input. This is currently unused as the checkpoint is extracted, and used by the caller. * @return A future that is set when the message has been written. @@ -167,7 +168,7 @@ class MessageWriter { /** * Writes a {@link CheckpointMessage} to the subprocess. - * + * * @param sequenceNumber * The sequence number that was checkpointed. * @param subSequenceNumber @@ -175,14 +176,14 @@ class MessageWriter { * @param throwable * The exception that was thrown by a checkpoint attempt. Null if one didn't occur. */ - Future writeCheckpointMessageWithError(String sequenceNumber, Long subSequenceNumber, - Throwable throwable) { + Future writeCheckpointMessageWithError( + String sequenceNumber, Long subSequenceNumber, Throwable throwable) { return writeMessage(new CheckpointMessage(sequenceNumber, subSequenceNumber, throwable)); } /** * Closes the output stream and prevents further attempts to write. - * + * * @throws IOException Thrown when closing the writer fails */ void close() throws IOException { @@ -201,18 +202,16 @@ class MessageWriter { * {@link MultiLangShardRecordProcessor (String)} is called. So we follow a pattern where the attributes are * set inside this method instead of the constructor so that this object will be initialized when all its attributes * are known to the record processor. - * + * * @param stream Used to write messages to the subprocess. * @param shardId The shard we're working on. * @param objectMapper The object mapper to encode messages. * @param executorService An executor service to run tasks in. 
*/ - MessageWriter initialize(OutputStream stream, - String shardId, - ObjectMapper objectMapper, - ExecutorService executorService) { - return this.initialize(new BufferedWriter(new OutputStreamWriter(stream)), shardId, objectMapper, - executorService); + MessageWriter initialize( + OutputStream stream, String shardId, ObjectMapper objectMapper, ExecutorService executorService) { + return this.initialize( + new BufferedWriter(new OutputStreamWriter(stream)), shardId, objectMapper, executorService); } /** @@ -221,15 +220,12 @@ class MessageWriter { * @param objectMapper The object mapper to encode messages. * @param executorService An executor service to run tasks in. */ - MessageWriter initialize(BufferedWriter writer, - String shardId, - ObjectMapper objectMapper, - ExecutorService executorService) { + MessageWriter initialize( + BufferedWriter writer, String shardId, ObjectMapper objectMapper, ExecutorService executorService) { this.writer = writer; this.shardId = shardId; this.objectMapper = objectMapper; this.executorService = executorService; return this; } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemon.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemon.java index b056e21f..4588b246 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemon.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemon.java @@ -26,20 +26,18 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import ch.qos.logback.classic.LoggerContext; +import ch.qos.logback.classic.joran.JoranConfigurator; +import ch.qos.logback.core.joran.spi.JoranException; +import com.beust.jcommander.JCommander; +import com.beust.jcommander.Parameter; +import lombok.Data; +import lombok.experimental.Accessors; +import 
lombok.extern.slf4j.Slf4j; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.StringUtils; import org.slf4j.LoggerFactory; - -import com.beust.jcommander.JCommander; -import com.beust.jcommander.Parameter; - -import ch.qos.logback.classic.LoggerContext; -import ch.qos.logback.classic.joran.JoranConfigurator; -import ch.qos.logback.core.joran.spi.JoranException; -import lombok.Data; -import lombok.experimental.Accessors; -import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.coordinator.Scheduler; /** @@ -75,11 +73,14 @@ public class MultiLangDaemon { @Parameter List parameters = new ArrayList<>(); - @Parameter(names = { "-p", "--properties-file" }, description = "Properties file to be used with the KCL") + @Parameter( + names = {"-p", "--properties-file"}, + description = "Properties file to be used with the KCL") String propertiesFile; - @Parameter(names = { "-l", - "--log-configuration" }, description = "File location of logback.xml to be override the default") + @Parameter( + names = {"-l", "--log-configuration"}, + description = "File location of logback.xml to be override the default") String logConfiguration; } @@ -102,7 +103,8 @@ public class MultiLangDaemon { } JCommander buildJCommanderAndParseArgs(final MultiLangDaemonArguments arguments, final String[] args) { - JCommander jCommander = JCommander.newBuilder().programName("amazon-kinesis-client MultiLangDaemon") + JCommander jCommander = JCommander.newBuilder() + .programName("amazon-kinesis-client MultiLangDaemon") .addObject(arguments) .build(); jCommander.parse(args); @@ -128,8 +130,8 @@ public class MultiLangDaemon { } } - void configureLogging(final String logConfiguration, final LoggerContext loggerContext, - final JoranConfigurator configurator) { + void configureLogging( + final String logConfiguration, final LoggerContext loggerContext, final JoranConfigurator configurator) { loggerContext.reset(); try 
(InputStream inputStream = FileUtils.openInputStream(new File(logConfiguration))) { configurator.setContext(loggerContext); @@ -146,9 +148,8 @@ public class MultiLangDaemon { if (arguments.parameters.size() == 1) { propertiesFile = arguments.parameters.get(0); } else { - throw new RuntimeException( - "Expected a single argument, but found multiple arguments. Arguments: " - + String.join(", ", arguments.parameters)); + throw new RuntimeException("Expected a single argument, but found multiple arguments. Arguments: " + + String.join(", ", arguments.parameters)); } } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemonConfig.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemonConfig.java index c7f77c19..aaba66f0 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemonConfig.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangDaemonConfig.java @@ -26,10 +26,9 @@ import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator; import com.google.common.util.concurrent.ThreadFactoryBuilder; - import lombok.extern.slf4j.Slf4j; +import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator; import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; import software.amazon.kinesis.retrieval.RetrievalConfig; @@ -53,7 +52,7 @@ public class MultiLangDaemonConfig { /** * Constructor. - * + * * @param propertiesFile * The location of the properties file. * @throws IOException @@ -66,7 +65,7 @@ public class MultiLangDaemonConfig { } /** - * + * * @param propertiesFile * The location of the properties file. 
* @param classLoader @@ -82,7 +81,7 @@ public class MultiLangDaemonConfig { } /** - * + * * @param propertiesFile * The location of the properties file. * @param classLoader @@ -94,8 +93,9 @@ public class MultiLangDaemonConfig { * @throws IllegalArgumentException * Thrown when the contents of the properties file are not as expected. */ - public MultiLangDaemonConfig(String propertiesFile, ClassLoader classLoader, - KinesisClientLibConfigurator configurator) throws IOException, IllegalArgumentException { + public MultiLangDaemonConfig( + String propertiesFile, ClassLoader classLoader, KinesisClientLibConfigurator configurator) + throws IOException, IllegalArgumentException { Properties properties = loadProperties(classLoader, propertiesFile); if (!validateProperties(properties)) { throw new IllegalArgumentException( @@ -107,11 +107,14 @@ public class MultiLangDaemonConfig { multiLangDaemonConfiguration = configurator.getConfiguration(properties); executorService = buildExecutorService(properties); - recordProcessorFactory = new MultiLangRecordProcessorFactory(executableName, executorService, - multiLangDaemonConfiguration); + recordProcessorFactory = + new MultiLangRecordProcessorFactory(executableName, executorService, multiLangDaemonConfiguration); - log.info("Running {} to process stream {} with executable {}", multiLangDaemonConfiguration.getApplicationName(), - multiLangDaemonConfiguration.getStreamName(), executableName); + log.info( + "Running {} to process stream {} with executable {}", + multiLangDaemonConfiguration.getApplicationName(), + multiLangDaemonConfiguration.getStreamName(), + executableName); prepare(processingLanguage); } @@ -138,7 +141,7 @@ public class MultiLangDaemonConfig { } log.info("MultiLangDaemon is adding the following fields to the User Agent: {}", userAgent.toString()); -// multiLangDaemonConfiguration.withUserAgent(userAgent.toString()); + // multiLangDaemonConfiguration.withUserAgent(userAgent.toString()); } private static 
Properties loadProperties(ClassLoader classLoader, String propertiesFileName) throws IOException { @@ -181,17 +184,22 @@ public class MultiLangDaemonConfig { log.debug("Value for {} property is {}", PROP_MAX_ACTIVE_THREADS, maxActiveThreads); if (maxActiveThreads <= 0) { log.info("Using a cached thread pool."); - return new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<>(), - builder.build()); + return new ThreadPoolExecutor( + 0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<>(), builder.build()); } else { log.info("Using a fixed thread pool with {} max active threads.", maxActiveThreads); - return new ThreadPoolExecutor(maxActiveThreads, maxActiveThreads, 0L, TimeUnit.MILLISECONDS, - new LinkedBlockingQueue<>(), builder.build()); + return new ThreadPoolExecutor( + maxActiveThreads, + maxActiveThreads, + 0L, + TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>(), + builder.build()); } } /** - * + * * @return A KinesisClientLibConfiguration object based on the properties file provided. */ public MultiLangDaemonConfiguration getMultiLangDaemonConfiguration() { @@ -199,7 +207,7 @@ public class MultiLangDaemonConfig { } /** - * + * * @return An executor service based on the properties file provided. */ public ExecutorService getExecutorService() { @@ -207,7 +215,7 @@ public class MultiLangDaemonConfig { } /** - * + * * @return A MultiLangRecordProcessorFactory based on the properties file provided. 
*/ public MultiLangRecordProcessorFactory getRecordProcessorFactory() { diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangProtocol.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangProtocol.java index 66a6ae9a..46ede873 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangProtocol.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangProtocol.java @@ -61,8 +61,11 @@ class MultiLangProtocol { * @param initializationInput * information about the shard this processor is starting to process */ - MultiLangProtocol(MessageReader messageReader, MessageWriter messageWriter, - InitializationInput initializationInput, MultiLangDaemonConfiguration configuration) { + MultiLangProtocol( + MessageReader messageReader, + MessageWriter messageWriter, + InitializationInput initializationInput, + MultiLangDaemonConfiguration configuration) { this.messageReader = messageReader; this.messageWriter = messageWriter; this.initializationInput = initializationInput; @@ -82,7 +85,6 @@ class MultiLangProtocol { */ Future writeFuture = messageWriter.writeInitializeMessage(initializationInput); return waitForStatusMessage(InitializeMessage.ACTION, null, writeFuture); - } /** @@ -100,7 +102,7 @@ class MultiLangProtocol { /** * Notifies the client process that the lease has been lost, and it needs to shutdown. 
- * + * * @param leaseLostInput * the lease lost input that is passed to the {@link MessageWriter} * @return true if the message was successfully writtem @@ -115,7 +117,9 @@ class MultiLangProtocol { * @return */ boolean shardEnded(ShardEndedInput shardEndedInput) { - return waitForStatusMessage(ShardEndedMessage.ACTION, shardEndedInput.checkpointer(), + return waitForStatusMessage( + ShardEndedMessage.ACTION, + shardEndedInput.checkpointer(), messageWriter.writeShardEndedMessage(shardEndedInput)); } @@ -147,8 +151,8 @@ class MultiLangProtocol { * The writing task. * @return Whether or not this operation succeeded. */ - private boolean waitForStatusMessage(String action, RecordProcessorCheckpointer checkpointer, - Future writeFuture) { + private boolean waitForStatusMessage( + String action, RecordProcessorCheckpointer checkpointer, Future writeFuture) { boolean statusWasCorrect = waitForStatusMessage(action, checkpointer); // Examine whether or not we failed somewhere along the line. @@ -194,7 +198,7 @@ class MultiLangProtocol { return false; } - statusMessage = message.filter(m -> m instanceof StatusMessage).map(m -> (StatusMessage) m ); + statusMessage = message.filter(m -> m instanceof StatusMessage).map(m -> (StatusMessage) m); } return this.validateStatusMessage(statusMessage.get(), action); } @@ -207,13 +211,17 @@ class MultiLangProtocol { try { return Optional.of(fm.get()); } catch (InterruptedException e) { - log.error("Interrupted while waiting for {} message for shard {}", action, - initializationInput.shardId(), e); + log.error( + "Interrupted while waiting for {} message for shard {}", action, initializationInput.shardId(), e); } catch (ExecutionException e) { - log.error("Failed to get status message for {} action for shard {}", action, - initializationInput.shardId(), e); + log.error( + "Failed to get status message for {} action for shard {}", + action, + initializationInput.shardId(), + e); } catch (TimeoutException e) { - log.error("Timedout to 
get status message for {} action for shard {}. Terminating...", + log.error( + "Timedout to get status message for {} action for shard {}. Terminating...", action, initializationInput.shardId(), e); @@ -240,11 +248,14 @@ class MultiLangProtocol { * @return Whether or not this operation succeeded. */ private boolean validateStatusMessage(StatusMessage statusMessage, String action) { - log.info("Received response {} from subprocess while waiting for {}" - + " while processing shard {}", statusMessage, action, initializationInput.shardId()); - return !(statusMessage == null || statusMessage.getResponseFor() == null || !statusMessage.getResponseFor() - .equals(action)); - + log.info( + "Received response {} from subprocess while waiting for {}" + " while processing shard {}", + statusMessage, + action, + initializationInput.shardId()); + return !(statusMessage == null + || statusMessage.getResponseFor() == null + || !statusMessage.getResponseFor().equals(action)); } /** @@ -274,13 +285,12 @@ class MultiLangProtocol { } return this.messageWriter.writeCheckpointMessageWithError(sequenceNumber, subSequenceNumber, null); } else { - String message = - String.format("Was asked to checkpoint at %s but no checkpointer was provided for shard %s", - sequenceNumber, initializationInput.shardId()); + String message = String.format( + "Was asked to checkpoint at %s but no checkpointer was provided for shard %s", + sequenceNumber, initializationInput.shardId()); log.error(message); - return this.messageWriter.writeCheckpointMessageWithError(sequenceNumber, subSequenceNumber, - new InvalidStateException( - message)); + return this.messageWriter.writeCheckpointMessageWithError( + sequenceNumber, subSequenceNumber, new InvalidStateException(message)); } } catch (Throwable t) { return this.messageWriter.writeCheckpointMessageWithError(sequenceNumber, subSequenceNumber, t); @@ -288,8 +298,8 @@ class MultiLangProtocol { } private String logCheckpointMessage(String sequenceNumber, Long 
subSequenceNumber) { - return String.format("Attempting to checkpoint shard %s @ sequence number %s, and sub sequence number %s", + return String.format( + "Attempting to checkpoint shard %s @ sequence number %s, and sub sequence number %s", initializationInput.shardId(), sequenceNumber, subSequenceNumber); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangRecordProcessorFactory.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangRecordProcessorFactory.java index c4aab958..56c555d7 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangRecordProcessorFactory.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangRecordProcessorFactory.java @@ -17,11 +17,10 @@ package software.amazon.kinesis.multilang; import java.util.concurrent.ExecutorService; import com.fasterxml.jackson.databind.ObjectMapper; - import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; import software.amazon.kinesis.processor.ShardRecordProcessor; +import software.amazon.kinesis.processor.ShardRecordProcessorFactory; /** * Creates {@link MultiLangShardRecordProcessor}'s. @@ -43,8 +42,8 @@ public class MultiLangRecordProcessorFactory implements ShardRecordProcessorFact * @param command The command that will do processing for this factory's record processors. * @param executorService An executor service to use while processing inputs and outputs of the child process. 
*/ - public MultiLangRecordProcessorFactory(String command, ExecutorService executorService, - MultiLangDaemonConfiguration configuration) { + public MultiLangRecordProcessorFactory( + String command, ExecutorService executorService, MultiLangDaemonConfiguration configuration) { this(command, executorService, new ObjectMapper(), configuration); } @@ -53,8 +52,11 @@ public class MultiLangRecordProcessorFactory implements ShardRecordProcessorFact * @param executorService An executor service to use while processing inputs and outputs of the child process. * @param objectMapper An object mapper used to convert messages to json to be written to the child process */ - public MultiLangRecordProcessorFactory(String command, ExecutorService executorService, ObjectMapper objectMapper, - MultiLangDaemonConfiguration configuration) { + public MultiLangRecordProcessorFactory( + String command, + ExecutorService executorService, + ObjectMapper objectMapper, + MultiLangDaemonConfiguration configuration) { this.command = command; this.commandArray = command.split(COMMAND_DELIMETER_REGEX); this.executorService = executorService; @@ -68,8 +70,8 @@ public class MultiLangRecordProcessorFactory implements ShardRecordProcessorFact /* * Giving ProcessBuilder the command as an array of Strings allows users to specify command line arguments. 
*/ - return new MultiLangShardRecordProcessor(new ProcessBuilder(commandArray), executorService, this.objectMapper, - this.configuration); + return new MultiLangShardRecordProcessor( + new ProcessBuilder(commandArray), executorService, this.objectMapper, this.configuration); } String[] getCommandArray() { diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangShardRecordProcessor.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangShardRecordProcessor.java index 241ea8ee..c6569cb9 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangShardRecordProcessor.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/MultiLangShardRecordProcessor.java @@ -22,7 +22,6 @@ import java.util.concurrent.Future; import java.util.function.Function; import com.fasterxml.jackson.databind.ObjectMapper; - import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.lifecycle.events.InitializationInput; import software.amazon.kinesis.lifecycle.events.LeaseLostInput; @@ -156,8 +155,10 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { if (ProcessState.ACTIVE.equals(this.state)) { stopProcessing("Encountered an error while trying to shutdown child process", t); } else { - stopProcessing("Encountered an error during shutdown," - + " but it appears the processor has already been shutdown", t); + stopProcessing( + "Encountered an error during shutdown," + + " but it appears the processor has already been shutdown", + t); } } } @@ -166,12 +167,13 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { * Used to tell whether the processor has been shutdown already. */ private enum ProcessState { - ACTIVE, SHUTDOWN + ACTIVE, + SHUTDOWN } /** * Constructor. - * + * * @param processBuilder * Provides process builder functionality. 
* @param executorService @@ -179,15 +181,24 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { * @param objectMapper * An obejct mapper. */ - MultiLangShardRecordProcessor(ProcessBuilder processBuilder, ExecutorService executorService, - ObjectMapper objectMapper, MultiLangDaemonConfiguration configuration) { - this(processBuilder, executorService, objectMapper, new MessageWriter(), new MessageReader(), - new DrainChildSTDERRTask(), configuration); + MultiLangShardRecordProcessor( + ProcessBuilder processBuilder, + ExecutorService executorService, + ObjectMapper objectMapper, + MultiLangDaemonConfiguration configuration) { + this( + processBuilder, + executorService, + objectMapper, + new MessageWriter(), + new MessageReader(), + new DrainChildSTDERRTask(), + configuration); } /** * Note: This constructor has package level access solely for testing purposes. - * + * * @param processBuilder * Provides the child process for this record processor * @param executorService @@ -201,9 +212,14 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { * @param readSTDERRTask * Error reader to read from child process's stderr */ - MultiLangShardRecordProcessor(ProcessBuilder processBuilder, ExecutorService executorService, ObjectMapper objectMapper, - MessageWriter messageWriter, MessageReader messageReader, DrainChildSTDERRTask readSTDERRTask, - MultiLangDaemonConfiguration configuration) { + MultiLangShardRecordProcessor( + ProcessBuilder processBuilder, + ExecutorService executorService, + ObjectMapper objectMapper, + MessageWriter messageWriter, + MessageReader messageReader, + DrainChildSTDERRTask readSTDERRTask, + MultiLangDaemonConfiguration configuration) { this.executorService = executorService; this.processBuilder = processBuilder; this.objectMapper = objectMapper; @@ -268,7 +284,7 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { /** * Convenience method used by {@link 
#childProcessShutdownSequence()} to drain the STDIN and STDERR of the child * process. - * + * * @param future A future to wait on. * @param whatThisFutureIsDoing What that future is doing while we wait. */ @@ -283,7 +299,7 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { /** * Convenience method for logging and safely shutting down so that we don't throw an exception up to the KCL on * accident. - * + * * @param message The reason we are stopping processing. * @param reason An exception that caused us to want to stop processing. */ @@ -309,7 +325,7 @@ public class MultiLangShardRecordProcessor implements ShardRecordProcessor { /** * The {@link ProcessBuilder} class is final so not easily mocked. We wrap the only interaction we have with it in * this package level method to permit unit testing. - * + * * @return The process started by processBuilder * @throws IOException If the process can't be started. */ diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyKey.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyKey.java index ea3db8c3..19211822 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyKey.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyKey.java @@ -19,7 +19,6 @@ import java.util.Map; import com.amazonaws.regions.Regions; import com.google.common.base.CaseFormat; - import lombok.AccessLevel; import lombok.Getter; import lombok.extern.slf4j.Slf4j; @@ -91,7 +90,6 @@ public enum NestedPropertyKey { processor.acceptExternalId(externalId); } }, - ; /** @@ -141,5 +139,4 @@ public enum NestedPropertyKey { } } } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyProcessor.java 
b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyProcessor.java index d3dd7a6f..f7587297 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyProcessor.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/NestedPropertyProcessor.java @@ -50,5 +50,4 @@ public interface NestedPropertyProcessor { * @param externalId external id used in the service call used to retrieve session credentials */ void acceptExternalId(String externalId); - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProvider.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProvider.java index 3b196b94..b5b9f924 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProvider.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProvider.java @@ -24,7 +24,6 @@ import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration; import com.amazonaws.regions.Regions; import com.amazonaws.services.securitytoken.AWSSecurityTokenService; import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClient; - import software.amazon.kinesis.multilang.NestedPropertyKey; import software.amazon.kinesis.multilang.NestedPropertyProcessor; @@ -47,8 +46,8 @@ public class KclSTSAssumeRoleSessionCredentialsProvider this(params[0], params[1], Arrays.copyOfRange(params, 2, params.length)); } - public KclSTSAssumeRoleSessionCredentialsProvider(final String roleArn, final String roleSessionName, - final String... params) { + public KclSTSAssumeRoleSessionCredentialsProvider( + final String roleArn, final String roleSessionName, final String... 
params) { builder = new Builder(roleArn, roleSessionName); NestedPropertyKey.parse(this, params); provider = builder.build(); @@ -75,9 +74,8 @@ public class KclSTSAssumeRoleSessionCredentialsProvider @Override public void acceptEndpointRegion(final Regions region) { - final AWSSecurityTokenService stsClient = AWSSecurityTokenServiceClient.builder() - .withRegion(region) - .build(); + final AWSSecurityTokenService stsClient = + AWSSecurityTokenServiceClient.builder().withRegion(region).build(); builder.withStsClient(stsClient); } @@ -85,5 +83,4 @@ public class KclSTSAssumeRoleSessionCredentialsProvider public void acceptExternalId(final String externalId) { builder.withExternalId(externalId); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoder.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoder.java index f11ac0ec..8110d4f7 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoder.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoder.java @@ -37,8 +37,7 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecode /** * Constructor. */ - AWSCredentialsProviderPropertyValueDecoder() { - } + AWSCredentialsProviderPropertyValueDecoder() {} /** * Get AWSCredentialsProvider property. 
@@ -104,9 +103,8 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecode if (provider == null) { // attempt to invoke a public varargs/array constructor of FooClass(String[]) - provider = constructProvider(providerName, () -> - clazz.getConstructor(String[].class).newInstance((Object) varargs) - ); + provider = constructProvider(providerName, () -> clazz.getConstructor(String[].class) + .newInstance((Object) varargs)); } } @@ -138,24 +136,26 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecode private static List getPossibleFullClassNames(final String provider) { return Stream.of( - // Customer provides a short name of common providers in com.amazonaws.auth package - // (e.g., any classes implementing the AWSCredentialsProvider interface) - // @see http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html - "com.amazonaws.auth.", + // Customer provides a short name of common providers in com.amazonaws.auth package + // (e.g., any classes implementing the AWSCredentialsProvider interface) + // @see + // http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/auth/AWSCredentialsProvider.html + "com.amazonaws.auth.", - // Customer provides a short name of a provider offered by this multi-lang package - "software.amazon.kinesis.multilang.auth.", + // Customer provides a short name of a provider offered by this multi-lang package + "software.amazon.kinesis.multilang.auth.", - // Customer provides a fully-qualified provider name, or a custom credentials provider - // (e.g., com.amazonaws.auth.ClasspathFileCredentialsProvider, org.mycompany.FooProvider) - "" - ).map(prefix -> prefix + provider).collect(Collectors.toList()); + // Customer provides a fully-qualified provider name, or a custom credentials provider + // (e.g., com.amazonaws.auth.ClasspathFileCredentialsProvider, org.mycompany.FooProvider) + "") + .map(prefix -> prefix + provider) + 
.collect(Collectors.toList()); } @FunctionalInterface private interface CredentialsProviderConstructor { - T construct() throws IllegalAccessException, InstantiationException, - InvocationTargetException, NoSuchMethodException; + T construct() + throws IllegalAccessException, InstantiationException, InvocationTargetException, NoSuchMethodException; } /** @@ -179,5 +179,4 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecode } return null; } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/BuilderDynaBean.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/BuilderDynaBean.java index 5baa47f4..927655c1 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/BuilderDynaBean.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/BuilderDynaBean.java @@ -31,7 +31,7 @@ import org.apache.commons.lang3.StringUtils; public class BuilderDynaBean implements DynaBean { - private static final String[] CLASS_NAME_JOINERS = { ClassUtils.PACKAGE_SEPARATOR, ClassUtils.INNER_CLASS_SEPARATOR }; + private static final String[] CLASS_NAME_JOINERS = {ClassUtils.PACKAGE_SEPARATOR, ClassUtils.INNER_CLASS_SEPARATOR}; static final String NO_MAP_ACCESS_SUPPORT = "Map access isn't supported"; private Class destinedClass; @@ -51,13 +51,19 @@ public class BuilderDynaBean implements DynaBean { this(destinedClass, convertUtilsBean, null, Arrays.asList(classPrefixSearchList)); } - public BuilderDynaBean(Class destinedClass, ConvertUtilsBean convertUtilsBean, - Function emptyPropertyHandler, String... classPrefixSearchList) { + public BuilderDynaBean( + Class destinedClass, + ConvertUtilsBean convertUtilsBean, + Function emptyPropertyHandler, + String... 
classPrefixSearchList) { this(destinedClass, convertUtilsBean, emptyPropertyHandler, Arrays.asList(classPrefixSearchList)); } - public BuilderDynaBean(Class destinedClass, ConvertUtilsBean convertUtilsBean, - Function emptyPropertyHandler, List classPrefixSearchList) { + public BuilderDynaBean( + Class destinedClass, + ConvertUtilsBean convertUtilsBean, + Function emptyPropertyHandler, + List classPrefixSearchList) { this.convertUtilsBean = convertUtilsBean; this.classPrefixSearchList = classPrefixSearchList; this.emptyPropertyHandler = emptyPropertyHandler; @@ -102,7 +108,6 @@ public class BuilderDynaBean implements DynaBean { // Ignored // } - } } } @@ -214,8 +219,10 @@ public class BuilderDynaBean implements DynaBean { validateCanBuildOrCreate(); List types = dynaBeanBuilderSupport.getProperty(name); if (types.size() > 1) { - Optional arrayType = types.stream().filter(t -> t.type.isArray()).findFirst(); - return arrayType.map(t -> new DynaProperty(name, t.type, t.type.getComponentType())) + Optional arrayType = + types.stream().filter(t -> t.type.isArray()).findFirst(); + return arrayType + .map(t -> new DynaProperty(name, t.type, t.type.getComponentType())) .orElseGet(() -> new DynaProperty(name)); } else { TypeTag type = types.get(0); @@ -232,7 +239,8 @@ public class BuilderDynaBean implements DynaBean { @Override public DynaProperty[] getDynaProperties() { validateCanBuildOrCreate(); - return dynaBeanBuilderSupport.getPropertyNames().stream().map(this::getDynaProperty) + return dynaBeanBuilderSupport.getPropertyNames().stream() + .map(this::getDynaProperty) .toArray(DynaProperty[]::new); } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettable.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettable.java index 381137eb..eef1e1c2 100644 --- 
a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettable.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettable.java @@ -28,21 +28,21 @@ public @interface ConfigurationSettable { /** * Which builder this option applies to - * + * * @return the class of the builder to use */ Class configurationClass(); /** * The method name on the builder, defaults to the fieldName - * + * * @return the name of the method or null to use the default */ String methodName() default ""; /** * If the type is actually an optional value this will enable conversions - * + * * @return true if the value should be wrapped by an optional */ boolean convertToOptional() default false; diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtils.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtils.java index 390d8b15..c6d58807 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtils.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtils.java @@ -23,13 +23,11 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import com.google.common.base.Defaults; +import lombok.NonNull; import org.apache.commons.lang3.ClassUtils; import org.apache.commons.lang3.StringUtils; -import com.google.common.base.Defaults; - -import lombok.NonNull; - public class ConfigurationSettableUtils { public static T resolveFields(@NonNull Object source, @NonNull T configObject) { @@ -40,8 +38,8 @@ public class ConfigurationSettableUtils { return configObject; } - public static void resolveFields(Object source, Map, Object> configObjects, Set> restrictTo, - Set> skipIf) { + public static void resolveFields( + Object 
source, Map, Object> configObjects, Set> restrictTo, Set> skipIf) { for (Field field : source.getClass().getDeclaredFields()) { for (ConfigurationSettable b : field.getAnnotationsByType(ConfigurationSettable.class)) { if (restrictTo != null && !restrictTo.contains(b.configurationClass())) { @@ -70,9 +68,11 @@ public class ConfigurationSettableUtils { value = Optional.of(value); } if (ClassUtils.isPrimitiveOrWrapper(value.getClass())) { - Class primitiveType = field.getType().isPrimitive() ? field.getType() + Class primitiveType = field.getType().isPrimitive() + ? field.getType() : ClassUtils.wrapperToPrimitive(field.getType()); - Class wrapperType = !field.getType().isPrimitive() ? field.getType() + Class wrapperType = !field.getType().isPrimitive() + ? field.getType() : ClassUtils.primitiveToWrapper(field.getType()); try { diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoder.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoder.java index fa91aa70..c2b1528e 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoder.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoder.java @@ -26,8 +26,7 @@ public class DatePropertyValueDecoder implements IPropertyValueDecoder { /** * Constructor. 
*/ - DatePropertyValueDecoder() { - } + DatePropertyValueDecoder() {} /** * @param value property value as String @@ -49,5 +48,4 @@ public class DatePropertyValueDecoder implements IPropertyValueDecoder { public List> getSupportedTypes() { return Arrays.asList(Date.class); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanBuilderSupport.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanBuilderSupport.java index 0cc0073c..97f429f1 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanBuilderSupport.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanBuilderSupport.java @@ -29,11 +29,10 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; -import org.apache.commons.beanutils.ConvertUtilsBean; -import org.apache.commons.lang3.ClassUtils; - import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; +import org.apache.commons.beanutils.ConvertUtilsBean; +import org.apache.commons.lang3.ClassUtils; class DynaBeanBuilderSupport { @@ -48,8 +47,8 @@ class DynaBeanBuilderSupport { private final Multimap properties = HashMultimap.create(); private final Map values = new HashMap<>(); - DynaBeanBuilderSupport(Class destinedClass, ConvertUtilsBean convertUtilsBean, - List classPrefixSearchList) { + DynaBeanBuilderSupport( + Class destinedClass, ConvertUtilsBean convertUtilsBean, List classPrefixSearchList) { this.destinedClass = destinedClass; this.convertUtilsBean = convertUtilsBean; this.classPrefixSearchList = classPrefixSearchList; @@ -103,11 +102,12 @@ class DynaBeanBuilderSupport { private Object createForProperty(String name) { Optional type = properties.get(name).stream().findFirst(); return type.map(t -> { - if (DynaBeanBuilderUtils.isBuilderOrCreate(t.type) || 
!t.hasConverter) { - return new BuilderDynaBean(t.type, convertUtilsBean, null, classPrefixSearchList); - } - return null; - }).orElse(null); + if (DynaBeanBuilderUtils.isBuilderOrCreate(t.type) || !t.hasConverter) { + return new BuilderDynaBean(t.type, convertUtilsBean, null, classPrefixSearchList); + } + return null; + }) + .orElse(null); } boolean hasValue(String name) { @@ -157,8 +157,11 @@ class DynaBeanBuilderSupport { void set(String name, Object value) { if (value instanceof String && properties.get(name).stream().anyMatch(t -> t.type.isEnum())) { - TypeTag typeTag = properties.get(name).stream().filter(t -> t.type.isEnum()).findFirst().orElseThrow( - () -> new IllegalStateException("Expected enum type for " + name + ", but couldn't find it.")); + TypeTag typeTag = properties.get(name).stream() + .filter(t -> t.type.isEnum()) + .findFirst() + .orElseThrow(() -> + new IllegalStateException("Expected enum type for " + name + ", but couldn't find it.")); Class enumClass = (Class) typeTag.type; values.put(name, Enum.valueOf(enumClass, value.toString())); } else { @@ -174,9 +177,11 @@ class DynaBeanBuilderSupport { private Object getArgument(Map.Entry setValue) { Object argument = setValue.getValue(); if (argument instanceof Object[]) { - TypeTag arrayType = properties.get(setValue.getKey()).stream().filter(t -> t.type.isArray()).findFirst() - .orElseThrow(() -> new IllegalStateException(String - .format("Received Object[] for %s but can't find corresponding type", setValue.getKey()))); + TypeTag arrayType = properties.get(setValue.getKey()).stream() + .filter(t -> t.type.isArray()) + .findFirst() + .orElseThrow(() -> new IllegalStateException(String.format( + "Received Object[] for %s but can't find corresponding type", setValue.getKey()))); Object[] arrayValues = (Object[]) argument; Object[] destination = (Object[]) Array.newInstance(arrayType.type.getComponentType(), arrayValues.length); @@ -212,10 +217,12 @@ class DynaBeanBuilderSupport { for (Map.Entry 
setValue : values.entrySet()) { Object argument = getArgument(setValue); Method mutator = properties.get(setValue.getKey()).stream() - .filter(t -> ClassUtils.isAssignable(argument.getClass(), t.type)).findFirst() - .map(a -> a.builderMethod).orElseThrow( - () -> new IllegalStateException(String.format("Unable to find mutator for %s of type %s", - setValue.getKey(), argument.getClass().getName()))); + .filter(t -> ClassUtils.isAssignable(argument.getClass(), t.type)) + .findFirst() + .map(a -> a.builderMethod) + .orElseThrow(() -> new IllegalStateException(String.format( + "Unable to find mutator for %s of type %s", + setValue.getKey(), argument.getClass().getName()))); try { source = mutator.invoke(source, argument); } catch (IllegalAccessException | InvocationTargetException e) { @@ -236,7 +243,6 @@ class DynaBeanBuilderSupport { } catch (IllegalAccessException | NoSuchMethodException | InvocationTargetException e) { throw new RuntimeException(e); } - } Collection getPropertyNames() { @@ -249,5 +255,4 @@ class DynaBeanBuilderSupport { } return new ArrayList<>(properties.get(name)); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanCreateSupport.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanCreateSupport.java index 03c6e389..dda0b7ff 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanCreateSupport.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/DynaBeanCreateSupport.java @@ -30,8 +30,8 @@ class DynaBeanCreateSupport { private final List createTypes = new ArrayList<>(); private Object[] createValues = null; - DynaBeanCreateSupport(Class destinedClass, ConvertUtilsBean convertUtilsBean, - List classPrefixSearchList) { + DynaBeanCreateSupport( + Class destinedClass, ConvertUtilsBean convertUtilsBean, List classPrefixSearchList) { 
this.destinedClass = destinedClass; this.convertUtilsBean = convertUtilsBean; this.classPrefixSearchList = classPrefixSearchList; @@ -58,8 +58,8 @@ class DynaBeanCreateSupport { Object build() { - Method createMethod = DynaBeanBuilderUtils.getMethod(destinedClass, "create", - createTypes.stream().map(t -> t.type).toArray(i -> new Class[i])); + Method createMethod = DynaBeanBuilderUtils.getMethod( + destinedClass, "create", createTypes.stream().map(t -> t.type).toArray(i -> new Class[i])); Object arguments[] = new Object[createValues.length]; for (int i = 0; i < createValues.length; ++i) { if (createValues[i] instanceof BuilderDynaBean) { @@ -77,8 +77,8 @@ class DynaBeanCreateSupport { return createValues[index]; } else { if (createValues[index] == null) { - createValues[index] = new BuilderDynaBean(createTypes.get(index).type, convertUtilsBean, null, - classPrefixSearchList); + createValues[index] = new BuilderDynaBean( + createTypes.get(index).type, convertUtilsBean, null, classPrefixSearchList); } return createValues[index]; } @@ -89,13 +89,11 @@ class DynaBeanCreateSupport { public void set(String name, int index, Object value) { if (StringUtils.isEmpty(name)) { if (index >= createValues.length) { - throw new IllegalArgumentException( - String.format("%d exceeds the maximum number of arguments (%d) for %s", index, - createValues.length, destinedClass.getName())); + throw new IllegalArgumentException(String.format( + "%d exceeds the maximum number of arguments (%d) for %s", + index, createValues.length, destinedClass.getName())); } createValues[index] = value; } - } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/FanoutConfigBean.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/FanoutConfigBean.java index f9ab7044..a7478100 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/FanoutConfigBean.java +++ 
b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/FanoutConfigBean.java @@ -26,21 +26,28 @@ public class FanoutConfigBean implements RetrievalConfigBuilder { @ConfigurationSettable(configurationClass = FanOutConfig.class) private int maxDescribeStreamSummaryRetries; + @ConfigurationSettable(configurationClass = FanOutConfig.class) private String consumerArn; + @ConfigurationSettable(configurationClass = FanOutConfig.class) private String consumerName; + @ConfigurationSettable(configurationClass = FanOutConfig.class) private int maxDescribeStreamConsumerRetries; + @ConfigurationSettable(configurationClass = FanOutConfig.class) private int registerStreamConsumerRetries; + @ConfigurationSettable(configurationClass = FanOutConfig.class) private long retryBackoffMillis; @Override public FanOutConfig build(KinesisAsyncClient kinesisAsyncClient, MultiLangDaemonConfiguration parent) { - return ConfigurationSettableUtils.resolveFields(this, new FanOutConfig(kinesisAsyncClient).applicationName(parent.getApplicationName()) - .streamName(parent.getStreamName())); + return ConfigurationSettableUtils.resolveFields( + this, + new FanOutConfig(kinesisAsyncClient) + .applicationName(parent.getApplicationName()) + .streamName(parent.getStreamName())); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IPropertyValueDecoder.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IPropertyValueDecoder.java index 111d4c63..0498874d 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IPropertyValueDecoder.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IPropertyValueDecoder.java @@ -18,13 +18,13 @@ import java.util.List; /** * This class captures the concept of decoding a property value to a particular Java type. 
- * + * * @param */ interface IPropertyValueDecoder { /** * Get the value that was read from a configuration file and convert it to some type. - * + * * @param propertyValue property string value that needs to be decoded. * @return property value in type T */ @@ -32,7 +32,7 @@ interface IPropertyValueDecoder { /** * Get a list of supported types for this class. - * + * * @return list of supported classes. */ List> getSupportedTypes(); diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IntegerPropertyValueDecoder.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IntegerPropertyValueDecoder.java index 88775cee..eb9fd0b8 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IntegerPropertyValueDecoder.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/IntegerPropertyValueDecoder.java @@ -25,8 +25,7 @@ class IntegerPropertyValueDecoder implements IPropertyValueDecoder { /** * Constructor. 
*/ - IntegerPropertyValueDecoder() { - } + IntegerPropertyValueDecoder() {} /** * @param value property value as String diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfigurator.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfigurator.java index 49856aa6..42b617a0 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfigurator.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfigurator.java @@ -19,10 +19,9 @@ import java.io.InputStream; import java.lang.reflect.InvocationTargetException; import java.util.Properties; +import lombok.extern.slf4j.Slf4j; import org.apache.commons.beanutils.BeanUtilsBean; import org.apache.commons.beanutils.ConvertUtilsBean; - -import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.arns.Arn; import software.amazon.kinesis.common.StreamIdentifier; @@ -71,16 +70,21 @@ public class KinesisClientLibConfigurator { Validate.notBlank(configuration.getApplicationName(), "Application name is required"); - if (configuration.getStreamArn() != null && !configuration.getStreamArn().trim().isEmpty()) { + if (configuration.getStreamArn() != null + && !configuration.getStreamArn().trim().isEmpty()) { final Arn streamArnObj = Arn.fromString(configuration.getStreamArn()); StreamIdentifier.validateArn(streamArnObj); - //Parse out the stream Name from the Arn (and/or override existing value for Stream Name) + // Parse out the stream Name from the Arn (and/or override existing value for Stream Name) final String streamNameFromArn = streamArnObj.resource().resource(); configuration.setStreamName(streamNameFromArn); } - Validate.notBlank(configuration.getStreamName(), "Stream name or Stream Arn is required. 
Stream Arn takes precedence if both are passed in."); - Validate.isTrue(configuration.getKinesisCredentialsProvider().isDirty(), "A basic set of AWS credentials must be provided"); + Validate.notBlank( + configuration.getStreamName(), + "Stream name or Stream Arn is required. Stream Arn takes precedence if both are passed in."); + Validate.isTrue( + configuration.getKinesisCredentialsProvider().isDirty(), + "A basic set of AWS credentials must be provided"); return configuration; } @@ -106,5 +110,4 @@ public class KinesisClientLibConfigurator { } return getConfiguration(properties); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfiguration.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfiguration.java index 8b6bc5e6..08c11d26 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfiguration.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfiguration.java @@ -27,17 +27,16 @@ import java.util.Set; import java.util.UUID; import java.util.function.Function; -import org.apache.commons.beanutils.BeanUtilsBean; -import org.apache.commons.beanutils.ConvertUtilsBean; -import org.apache.commons.beanutils.Converter; -import org.apache.commons.beanutils.converters.ArrayConverter; -import org.apache.commons.beanutils.converters.StringConverter; - import lombok.Data; import lombok.Getter; import lombok.Setter; import lombok.experimental.Delegate; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.beanutils.BeanUtilsBean; +import org.apache.commons.beanutils.ConvertUtilsBean; +import org.apache.commons.beanutils.Converter; +import org.apache.commons.beanutils.converters.ArrayConverter; +import org.apache.commons.beanutils.converters.StringConverter; import 
software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; @@ -74,7 +73,6 @@ public class MultiLangDaemonConfiguration { private String streamName; private String streamArn; - @ConfigurationSettable(configurationClass = ConfigsBuilder.class) private String tableName; @@ -86,22 +84,31 @@ public class MultiLangDaemonConfiguration { @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private long failoverTimeMillis; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private Boolean enablePriorityLeaseAssignment; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private long shardSyncIntervalMillis; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private boolean cleanupLeasesUponShardCompletion; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private boolean ignoreUnexpectedChildShards; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int maxLeasesForWorker; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int maxLeasesToStealAtOneTime; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int initialLeaseTableReadCapacity; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int initialLeaseTableWriteCapacity; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class, methodName = "initialPositionInStream") @ConfigurationSettable(configurationClass = RetrievalConfig.class) private InitialPositionInStreamExtended initialPositionInStreamExtended; @@ -114,14 +121,16 @@ public class MultiLangDaemonConfiguration { } public void setInitialPositionInStream(InitialPositionInStream initialPositionInStream) { - this.initialPositionInStreamExtended = InitialPositionInStreamExtended - 
.newInitialPosition(initialPositionInStream); + this.initialPositionInStreamExtended = + InitialPositionInStreamExtended.newInitialPosition(initialPositionInStream); } @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int maxLeaseRenewalThreads; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private long listShardsBackoffTimeInMillis; + @ConfigurationSettable(configurationClass = LeaseManagementConfig.class) private int maxListShardsRetryAttempts; @@ -131,10 +140,13 @@ public class MultiLangDaemonConfiguration { @ConfigurationSettable(configurationClass = CoordinatorConfig.class) private long parentShardPollIntervalMillis; + @ConfigurationSettable(configurationClass = CoordinatorConfig.class) private ShardPrioritization shardPrioritization; + @ConfigurationSettable(configurationClass = CoordinatorConfig.class) private boolean skipShardSyncAtWorkerInitializationIfLeasesExist; + @ConfigurationSettable(configurationClass = CoordinatorConfig.class) private long schedulerInitializationBackoffTimeMillis; @@ -143,12 +155,16 @@ public class MultiLangDaemonConfiguration { @ConfigurationSettable(configurationClass = MetricsConfig.class) private long metricsBufferTimeMillis; + @ConfigurationSettable(configurationClass = MetricsConfig.class) private int metricsMaxQueueSize; + @ConfigurationSettable(configurationClass = MetricsConfig.class) private MetricsLevel metricsLevel; + @ConfigurationSettable(configurationClass = LifecycleConfig.class, convertToOptional = true) private Long logWarningForTaskAfterMillis; + @ConfigurationSettable(configurationClass = MetricsConfig.class) private Set metricsEnabledDimensions; @@ -163,6 +179,7 @@ public class MultiLangDaemonConfiguration { private RetrievalMode retrievalMode = RetrievalMode.DEFAULT; private final FanoutConfigBean fanoutConfig = new FanoutConfigBean(); + @Delegate(types = PollingConfigBean.PollingConfigBeanDelegate.class) private final PollingConfigBean pollingConfig 
= new PollingConfigBean(); @@ -200,61 +217,75 @@ public class MultiLangDaemonConfiguration { this.utilsBean = utilsBean; this.convertUtilsBean = convertUtilsBean; - convertUtilsBean.register(new Converter() { - @Override - public T convert(Class type, Object value) { - Date date = new Date(Long.parseLong(value.toString()) * 1000L); - return type.cast(InitialPositionInStreamExtended.newInitialPositionAtTimestamp(date)); - } - }, InitialPositionInStreamExtended.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(Class type, Object value) { + Date date = new Date(Long.parseLong(value.toString()) * 1000L); + return type.cast(InitialPositionInStreamExtended.newInitialPositionAtTimestamp(date)); + } + }, + InitialPositionInStreamExtended.class); - convertUtilsBean.register(new Converter() { - @Override - public T convert(Class type, Object value) { - return type.cast(MetricsLevel.valueOf(value.toString().toUpperCase())); - } - }, MetricsLevel.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(Class type, Object value) { + return type.cast(MetricsLevel.valueOf(value.toString().toUpperCase())); + } + }, + MetricsLevel.class); - convertUtilsBean.register(new Converter() { - @Override - public T convert(Class type, Object value) { - return type.cast(InitialPositionInStream.valueOf(value.toString().toUpperCase())); - } - }, InitialPositionInStream.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(Class type, Object value) { + return type.cast( + InitialPositionInStream.valueOf(value.toString().toUpperCase())); + } + }, + InitialPositionInStream.class); - convertUtilsBean.register(new Converter() { - @Override - public T convert(Class type, Object value) { - return type.cast(URI.create(value.toString())); - } - }, URI.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(Class type, Object value) { + return 
type.cast(URI.create(value.toString())); + } + }, + URI.class); - convertUtilsBean.register(new Converter() { - @Override - public T convert(Class type, Object value) { - return type.cast(RetrievalMode.from(value.toString())); - } - }, RetrievalMode.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(Class type, Object value) { + return type.cast(RetrievalMode.from(value.toString())); + } + }, + RetrievalMode.class); - convertUtilsBean.register(new Converter() { - @Override - public T convert(final Class type, final Object value) { - return type.cast(Region.of(value.toString())); - } - }, Region.class); + convertUtilsBean.register( + new Converter() { + @Override + public T convert(final Class type, final Object value) { + return type.cast(Region.of(value.toString())); + } + }, + Region.class); ArrayConverter arrayConverter = new ArrayConverter(String[].class, new StringConverter()); arrayConverter.setDelimiter(','); convertUtilsBean.register(arrayConverter, String[].class); - AWSCredentialsProviderPropertyValueDecoder oldCredentialsDecoder = new AWSCredentialsProviderPropertyValueDecoder(); + AWSCredentialsProviderPropertyValueDecoder oldCredentialsDecoder = + new AWSCredentialsProviderPropertyValueDecoder(); Function converter = s -> new V2CredentialWrapper(oldCredentialsDecoder.decodeValue(s)); - this.kinesisCredentialsProvider = new BuilderDynaBean(AwsCredentialsProvider.class, convertUtilsBean, - converter, CREDENTIALS_DEFAULT_SEARCH_PATH); - this.dynamoDBCredentialsProvider = new BuilderDynaBean(AwsCredentialsProvider.class, convertUtilsBean, - converter, CREDENTIALS_DEFAULT_SEARCH_PATH); - this.cloudWatchCredentialsProvider = new BuilderDynaBean(AwsCredentialsProvider.class, convertUtilsBean, - converter, CREDENTIALS_DEFAULT_SEARCH_PATH); + this.kinesisCredentialsProvider = new BuilderDynaBean( + AwsCredentialsProvider.class, convertUtilsBean, converter, CREDENTIALS_DEFAULT_SEARCH_PATH); + this.dynamoDBCredentialsProvider 
= new BuilderDynaBean( + AwsCredentialsProvider.class, convertUtilsBean, converter, CREDENTIALS_DEFAULT_SEARCH_PATH); + this.cloudWatchCredentialsProvider = new BuilderDynaBean( + AwsCredentialsProvider.class, convertUtilsBean, converter, CREDENTIALS_DEFAULT_SEARCH_PATH); this.kinesisClient = new BuilderDynaBean(KinesisAsyncClient.class, convertUtilsBean); this.dynamoDbClient = new BuilderDynaBean(DynamoDbAsyncClient.class, convertUtilsBean); @@ -300,8 +331,8 @@ public class MultiLangDaemonConfiguration { return credsBuilder.build(AwsCredentialsProvider.class); } - private void updateCredentials(BuilderDynaBean toUpdate, AwsCredentialsProvider primary, - AwsCredentialsProvider secondary) { + private void updateCredentials( + BuilderDynaBean toUpdate, AwsCredentialsProvider primary, AwsCredentialsProvider secondary) { if (toUpdate.hasValue("credentialsProvider")) { return; @@ -329,8 +360,8 @@ public class MultiLangDaemonConfiguration { } private void handleRetrievalConfig(RetrievalConfig retrievalConfig, ConfigsBuilder configsBuilder) { - retrievalConfig - .retrievalSpecificConfig(retrievalMode.builder(this).build(configsBuilder.kinesisClient(), this)); + retrievalConfig.retrievalSpecificConfig( + retrievalMode.builder(this).build(configsBuilder.kinesisClient(), this)); } private Object adjustKinesisHttpConfiguration(Object builderObj) { @@ -353,8 +384,14 @@ public class MultiLangDaemonConfiguration { final RetrievalConfig retrievalConfig; public Scheduler build() { - return new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + return new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); } } @@ -367,19 +404,25 @@ public class MultiLangDaemonConfiguration { updateCredentials(dynamoDbClient, dynamoDbCreds, kinesisCreds); updateCredentials(cloudWatchClient, cloudwatchCreds, 
kinesisCreds); - KinesisAsyncClient kinesisAsyncClient = kinesisClient.build(KinesisAsyncClient.class, - this::adjustKinesisHttpConfiguration); + KinesisAsyncClient kinesisAsyncClient = + kinesisClient.build(KinesisAsyncClient.class, this::adjustKinesisHttpConfiguration); DynamoDbAsyncClient dynamoDbAsyncClient = dynamoDbClient.build(DynamoDbAsyncClient.class); CloudWatchAsyncClient cloudWatchAsyncClient = cloudWatchClient.build(CloudWatchAsyncClient.class); - ConfigsBuilder configsBuilder = new ConfigsBuilder(streamName, applicationName, kinesisAsyncClient, - dynamoDbAsyncClient, cloudWatchAsyncClient, workerIdentifier, shardRecordProcessorFactory); + ConfigsBuilder configsBuilder = new ConfigsBuilder( + streamName, + applicationName, + kinesisAsyncClient, + dynamoDbAsyncClient, + cloudWatchAsyncClient, + workerIdentifier, + shardRecordProcessorFactory); Map, Object> configObjects = new HashMap<>(); addConfigObjects(configObjects, configsBuilder); - resolveFields(configObjects, Collections.singleton(ConfigsBuilder.class), - Collections.singleton(PollingConfig.class)); + resolveFields( + configObjects, Collections.singleton(ConfigsBuilder.class), Collections.singleton(PollingConfig.class)); CoordinatorConfig coordinatorConfig = configsBuilder.coordinatorConfig(); CheckpointConfig checkpointConfig = configsBuilder.checkpointConfig(); @@ -389,19 +432,31 @@ public class MultiLangDaemonConfiguration { ProcessorConfig processorConfig = configsBuilder.processorConfig(); RetrievalConfig retrievalConfig = configsBuilder.retrievalConfig(); - addConfigObjects(configObjects, coordinatorConfig, checkpointConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + addConfigObjects( + configObjects, + coordinatorConfig, + checkpointConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); handleRetrievalConfig(retrievalConfig, configsBuilder); resolveFields(configObjects, null, new 
HashSet<>(Arrays.asList(ConfigsBuilder.class, PollingConfig.class))); - return new ResolvedConfiguration(coordinatorConfig, checkpointConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + return new ResolvedConfiguration( + coordinatorConfig, + checkpointConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); } public Scheduler build(ShardRecordProcessorFactory shardRecordProcessorFactory) { return resolvedConfiguration(shardRecordProcessorFactory).build(); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/PollingConfigBean.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/PollingConfigBean.java index 176efbad..64f24b16 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/PollingConfigBean.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/PollingConfigBean.java @@ -30,34 +30,44 @@ public class PollingConfigBean implements RetrievalConfigBuilder { interface PollingConfigBeanDelegate { Integer getRetryGetRecordsInSeconds(); + void setRetryGetRecordsInSeconds(Integer value); Integer getMaxGetRecordsThreadPool(); + void setMaxGetRecordsThreadPool(Integer value); long getIdleTimeBetweenReadsInMillis(); + void setIdleTimeBetweenReadsInMillis(long value); int getMaxRecords(); + void setMaxRecords(int value); } @ConfigurationSettable(configurationClass = PollingConfig.class, convertToOptional = true) private Integer retryGetRecordsInSeconds; + @ConfigurationSettable(configurationClass = PollingConfig.class, convertToOptional = true) private Integer maxGetRecordsThreadPool; + @ConfigurationSettable(configurationClass = PollingConfig.class) private long idleTimeBetweenReadsInMillis; + @ConfigurationSettable(configurationClass = PollingConfig.class) 
private int maxRecords; public boolean anyPropertiesSet() { - return retryGetRecordsInSeconds != null || maxGetRecordsThreadPool != null || idleTimeBetweenReadsInMillis != 0 || maxRecords != 0; + return retryGetRecordsInSeconds != null + || maxGetRecordsThreadPool != null + || idleTimeBetweenReadsInMillis != 0 + || maxRecords != 0; } @Override public PollingConfig build(KinesisAsyncClient kinesisAsyncClient, MultiLangDaemonConfiguration parent) { - return ConfigurationSettableUtils.resolveFields(this, new PollingConfig(parent.getStreamName(), kinesisAsyncClient)); + return ConfigurationSettableUtils.resolveFields( + this, new PollingConfig(parent.getStreamName(), kinesisAsyncClient)); } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalConfigBuilder.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalConfigBuilder.java index 36794a99..7fa9ff9d 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalConfigBuilder.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalConfigBuilder.java @@ -21,7 +21,7 @@ import software.amazon.kinesis.retrieval.RetrievalSpecificConfig; public interface RetrievalConfigBuilder { /** * Creates a retrieval specific configuration using the supplied parameters, and internal class parameters - * + * * @param kinesisAsyncClient * the client that will be provided to the RetrievalSpecificConfig constructor * @param parent diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalMode.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalMode.java index bf65fffb..ebe6dcd1 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalMode.java +++ 
b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/RetrievalMode.java @@ -19,14 +19,14 @@ import java.util.Arrays; import java.util.function.Function; import java.util.stream.Collectors; -import org.apache.commons.lang3.Validate; - import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.Validate; @Slf4j public enum RetrievalMode { - FANOUT(MultiLangDaemonConfiguration::getFanoutConfig), POLLING( - MultiLangDaemonConfiguration::getPollingConfig), DEFAULT(RetrievalMode::decideForDefault); + FANOUT(MultiLangDaemonConfiguration::getFanoutConfig), + POLLING(MultiLangDaemonConfiguration::getPollingConfig), + DEFAULT(RetrievalMode::decideForDefault); private final Function builderFor; diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/TypeTag.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/TypeTag.java index ee630ecf..dc359b91 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/TypeTag.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/TypeTag.java @@ -15,14 +15,13 @@ package software.amazon.kinesis.multilang.config; -import lombok.Data; - import java.lang.reflect.Method; +import lombok.Data; + @Data class TypeTag { final Class type; final boolean hasConverter; final Method builderMethod; - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/credentials/V2CredentialWrapper.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/credentials/V2CredentialWrapper.java index 50880a83..e1b6072a 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/credentials/V2CredentialWrapper.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/config/credentials/V2CredentialWrapper.java 
@@ -32,7 +32,9 @@ public class V2CredentialWrapper implements AwsCredentialsProvider { public AwsCredentials resolveCredentials() { AWSCredentials current = oldCredentialsProvider.getCredentials(); if (current instanceof AWSSessionCredentials) { - return AwsSessionCredentials.create(current.getAWSAccessKeyId(), current.getAWSSecretKey(), + return AwsSessionCredentials.create( + current.getAWSAccessKeyId(), + current.getAWSSecretKey(), ((AWSSessionCredentials) current).getSessionToken()); } return new AwsCredentials() { diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/CheckpointMessage.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/CheckpointMessage.java index b738dcd7..6413d161 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/CheckpointMessage.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/CheckpointMessage.java @@ -36,6 +36,7 @@ public class CheckpointMessage extends Message { * The checkpoint this message is about. */ private String sequenceNumber; + private Long subSequenceNumber; /** @@ -45,7 +46,7 @@ public class CheckpointMessage extends Message { /** * Convenience constructor. - * + * * @param sequenceNumber * The sequence number that this message is about. 
* @param subSequenceNumber @@ -61,5 +62,4 @@ public class CheckpointMessage extends Message { this.setError(throwable.getClass().getSimpleName()); } } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/InitializeMessage.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/InitializeMessage.java index a04c639e..b6b12955 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/InitializeMessage.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/InitializeMessage.java @@ -33,29 +33,29 @@ public class InitializeMessage extends Message { * The shard id that this processor is getting initialized for. */ private String shardId; + private String sequenceNumber; private Long subSequenceNumber; /** * Default constructor. */ - public InitializeMessage() { - } + public InitializeMessage() {} /** * Convenience constructor. 
- * + * * @param initializationInput {@link InitializationInput} */ public InitializeMessage(InitializationInput initializationInput) { this.shardId = initializationInput.shardId(); if (initializationInput.extendedSequenceNumber() != null) { this.sequenceNumber = initializationInput.extendedSequenceNumber().sequenceNumber(); - this.subSequenceNumber = initializationInput.extendedSequenceNumber().subSequenceNumber(); + this.subSequenceNumber = + initializationInput.extendedSequenceNumber().subSequenceNumber(); } else { this.sequenceNumber = null; this.subSequenceNumber = null; } } - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecord.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecord.java index ca020825..a3a09f22 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecord.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecord.java @@ -15,7 +15,6 @@ package software.amazon.kinesis.multilang.messages; import com.fasterxml.jackson.annotation.JsonProperty; - import lombok.AllArgsConstructor; import lombok.EqualsAndHashCode; import lombok.Getter; @@ -54,10 +53,11 @@ public class JsonFriendlyRecord { data = new byte[record.data().limit()]; record.data().get(data); } - Long approximateArrival = record.approximateArrivalTimestamp() == null ? null + Long approximateArrival = record.approximateArrivalTimestamp() == null + ? 
null : record.approximateArrivalTimestamp().toEpochMilli(); - return new JsonFriendlyRecord(data, record.partitionKey(), record.sequenceNumber(), - approximateArrival, record.subSequenceNumber()); + return new JsonFriendlyRecord( + data, record.partitionKey(), record.sequenceNumber(), approximateArrival, record.subSequenceNumber()); } @JsonProperty diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/LeaseLostMessage.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/LeaseLostMessage.java index aea0677f..ccf269b5 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/LeaseLostMessage.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/LeaseLostMessage.java @@ -21,5 +21,4 @@ package software.amazon.kinesis.multilang.messages; public class LeaseLostMessage extends Message { public static final String ACTION = "leaseLost"; - } diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/Message.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/Message.java index bdb89181..75fc1c68 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/Message.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/Message.java @@ -23,15 +23,15 @@ import com.fasterxml.jackson.databind.ObjectMapper; * Abstract class for all messages that are sent to the client's process. 
*/ @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "action") -@JsonSubTypes({ - @Type(value = CheckpointMessage.class, name = CheckpointMessage.ACTION), - @Type(value = InitializeMessage.class, name = InitializeMessage.ACTION), - @Type(value = ProcessRecordsMessage.class, name = ProcessRecordsMessage.ACTION), - @Type(value = ShutdownMessage.class, name = ShutdownMessage.ACTION), - @Type(value = StatusMessage.class, name = StatusMessage.ACTION), - @Type(value = ShutdownRequestedMessage.class, name = ShutdownRequestedMessage.ACTION), - @Type(value = LeaseLostMessage.class, name = LeaseLostMessage.ACTION), - @Type(value = ShardEndedMessage.class, name = ShardEndedMessage.ACTION), +@JsonSubTypes({ + @Type(value = CheckpointMessage.class, name = CheckpointMessage.ACTION), + @Type(value = InitializeMessage.class, name = InitializeMessage.ACTION), + @Type(value = ProcessRecordsMessage.class, name = ProcessRecordsMessage.ACTION), + @Type(value = ShutdownMessage.class, name = ShutdownMessage.ACTION), + @Type(value = StatusMessage.class, name = StatusMessage.ACTION), + @Type(value = ShutdownRequestedMessage.class, name = ShutdownRequestedMessage.ACTION), + @Type(value = LeaseLostMessage.class, name = LeaseLostMessage.ACTION), + @Type(value = ShardEndedMessage.class, name = ShardEndedMessage.ACTION), }) public abstract class Message { @@ -40,11 +40,10 @@ public abstract class Message { /** * Default constructor. */ - public Message() { - } + public Message() {} /** - * + * * @param objectMapper An object mapper. * @return this */ @@ -54,7 +53,7 @@ public abstract class Message { } /** - * + * * @return A JSON representation of this object. 
*/ public String toString() { diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/ProcessRecordsMessage.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/ProcessRecordsMessage.java index 95601b2b..50eed164 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/ProcessRecordsMessage.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/messages/ProcessRecordsMessage.java @@ -37,17 +37,17 @@ public class ProcessRecordsMessage extends Message { * The records that the client's process needs to handle. */ private List records; + private Long millisBehindLatest; /** * Default constructor. */ - public ProcessRecordsMessage() { - } + public ProcessRecordsMessage() {} /** * Convenience constructor. - * + * * @param processRecordsInput * the process records input to be sent to the child */ diff --git a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java index af905f90..a1b4561a 100644 --- a/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java +++ b/amazon-kinesis-client-multilang/src/main/java/software/amazon/kinesis/multilang/package-info.java @@ -142,7 +142,6 @@ * Jackson doc for more details) MIME is the basis of most base64 encoding variants including RFC 3548 which is the standard used by Python's base64 module. 
- * + * */ package software.amazon.kinesis.multilang; - diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/Matchers.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/Matchers.java index b357c16b..2fd4fcb7 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/Matchers.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/Matchers.java @@ -14,15 +14,14 @@ */ package software.amazon.kinesis.multilang; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; - import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; - -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; import software.amazon.kinesis.lifecycle.events.InitializationInput; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; public class Matchers { @@ -58,8 +57,12 @@ public class Matchers { @Override public void describeTo(Description description) { - description.appendText("An InitializationInput matching: { shardId: ").appendDescriptionOf(shardIdMatcher) - .appendText(", sequenceNumber: ").appendDescriptionOf(sequenceNumberMatcher).appendText(" }"); + description + .appendText("An InitializationInput matching: { shardId: ") + .appendDescriptionOf(shardIdMatcher) + .appendText(", sequenceNumber: ") + .appendDescriptionOf(sequenceNumberMatcher) + .appendText(" }"); } } @@ -98,10 +101,11 @@ public class Matchers { @Override public void describeTo(Description description) { - description.appendText("An ExtendedSequenceNumber matching: { sequenceNumber: ") - .appendDescriptionOf(sequenceNumberMatcher).appendText(", subSequenceNumber: ") + description + .appendText("An ExtendedSequenceNumber matching: { sequenceNumber: ") + 
.appendDescriptionOf(sequenceNumberMatcher) + .appendText(", subSequenceNumber: ") .appendDescriptionOf(subSequenceNumberMatcher); } } - } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageReaderTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageReaderTest.java index f6fab4c1..3ffcac14 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageReaderTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageReaderTest.java @@ -22,15 +22,14 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; - import software.amazon.kinesis.multilang.messages.Message; import software.amazon.kinesis.multilang.messages.StatusMessage; -import com.fasterxml.jackson.databind.ObjectMapper; public class MessageReaderTest { @@ -75,8 +74,8 @@ public class MessageReaderTest { @Test public void runLoopGoodInputTest() { - String[] sequenceNumbers = new String[] { "123", "456", "789" }; - String[] responseFors = new String[] { "initialize", "processRecords", "processRecords", "shutdown" }; + String[] sequenceNumbers = new String[] {"123", "456", "789"}; + String[] responseFors = new String[] {"initialize", "processRecords", "processRecords", "shutdown"}; InputStream stream = buildInputStreamOfGoodInput(sequenceNumbers, responseFors); MessageReader reader = new MessageReader().initialize(stream, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool()); @@ -85,7 +84,9 @@ public class MessageReaderTest { try { Message message = reader.getNextMessageFromSTDOUT().get(); if (message instanceof StatusMessage) { - 
Assert.assertEquals("The status message's responseFor field should have been correct", responseFor, + Assert.assertEquals( + "The status message's responseFor field should have been correct", + responseFor, ((StatusMessage) message).getResponseFor()); } } catch (InterruptedException | ExecutionException e) { @@ -96,8 +97,8 @@ public class MessageReaderTest { @Test public void drainInputTest() throws InterruptedException, ExecutionException { - String[] sequenceNumbers = new String[] { "123", "456", "789" }; - String[] responseFors = new String[] { "initialize", "processRecords", "processRecords", "shutdown" }; + String[] sequenceNumbers = new String[] {"123", "456", "789"}; + String[] responseFors = new String[] {"initialize", "processRecords", "processRecords", "shutdown"}; InputStream stream = buildInputStreamOfGoodInput(sequenceNumbers, responseFors); MessageReader reader = @@ -116,25 +117,26 @@ public class MessageReaderTest { BufferedReader bufferReader = Mockito.mock(BufferedReader.class); try { Mockito.doAnswer(new Answer() { - private boolean returnedOnce = false; + private boolean returnedOnce = false; - @Override - public Object answer(InvocationOnMock invocation) throws Throwable { - if (returnedOnce) { - return "{\"action\":\"status\",\"responseFor\":\"processRecords\"}"; - } else { - returnedOnce = true; - return "{\"action\":\"shutdown\",\"reason\":\"ZOMBIE\"}"; - } - } - }).when(bufferReader).readLine(); + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + if (returnedOnce) { + return "{\"action\":\"status\",\"responseFor\":\"processRecords\"}"; + } else { + returnedOnce = true; + return "{\"action\":\"shutdown\",\"reason\":\"ZOMBIE\"}"; + } + } + }) + .when(bufferReader) + .readLine(); } catch (IOException e) { Assert.fail("There shouldn't be an exception while setting up this mock."); } - MessageReader reader = - new MessageReader().initialize(bufferReader, SHARD_ID, new ObjectMapper(), - 
Executors.newCachedThreadPool()); + MessageReader reader = new MessageReader() + .initialize(bufferReader, SHARD_ID, new ObjectMapper(), Executors.newCachedThreadPool()); try { reader.getNextMessageFromSTDOUT().get(); @@ -165,7 +167,8 @@ public class MessageReaderTest { readTask.get(); Assert.fail("The reading task should have failed due to an IOException."); } catch (InterruptedException e) { - Assert.fail("The reading task should not have been interrupted. It should have failed due to an IOException."); + Assert.fail( + "The reading task should not have been interrupted. It should have failed due to an IOException."); } catch (ExecutionException e) { // Yay!! } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageWriterTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageWriterTest.java index 588f6140..90481b6c 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageWriterTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MessageWriterTest.java @@ -23,21 +23,19 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.mockito.Mockito; - +import software.amazon.kinesis.lifecycle.events.InitializationInput; import software.amazon.kinesis.lifecycle.events.LeaseLostInput; +import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import software.amazon.kinesis.lifecycle.events.ShardEndedInput; import software.amazon.kinesis.multilang.messages.Message; -import com.fasterxml.jackson.core.JsonProcessingException; -import 
com.fasterxml.jackson.databind.ObjectMapper; - -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import software.amazon.kinesis.retrieval.KinesisClientRecord; import static org.mockito.Mockito.verify; @@ -65,8 +63,7 @@ public class MessageWriterTest { public void writeCheckpointMessageNoErrorTest() throws IOException, InterruptedException, ExecutionException { Future future = this.messageWriter.writeCheckpointMessageWithError("1234", 0L, null); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); verify(this.stream, Mockito.atLeastOnce()).flush(); } @@ -74,42 +71,43 @@ public class MessageWriterTest { public void writeCheckpointMessageWithErrorTest() throws IOException, InterruptedException, ExecutionException { Future future = this.messageWriter.writeCheckpointMessageWithError("1234", 0L, new Throwable()); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); verify(this.stream, Mockito.atLeastOnce()).flush(); } @Test public void writeInitializeMessageTest() throws IOException, InterruptedException, ExecutionException { - Future future = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(SHARD_ID).build()); + Future future = this.messageWriter.writeInitializeMessage( + InitializationInput.builder().shardId(SHARD_ID).build()); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); 
verify(this.stream, Mockito.atLeastOnce()).flush(); } @Test public void writeProcessRecordsMessageTest() throws IOException, InterruptedException, ExecutionException { List records = Arrays.asList( - KinesisClientRecord.builder().data(ByteBuffer.wrap("kitten".getBytes())).partitionKey("some cats") - .sequenceNumber("357234807854789057805").build(), - KinesisClientRecord.builder().build() - ); - Future future = this.messageWriter.writeProcessRecordsMessage(ProcessRecordsInput.builder().records(records).build()); + KinesisClientRecord.builder() + .data(ByteBuffer.wrap("kitten".getBytes())) + .partitionKey("some cats") + .sequenceNumber("357234807854789057805") + .build(), + KinesisClientRecord.builder().build()); + Future future = this.messageWriter.writeProcessRecordsMessage( + ProcessRecordsInput.builder().records(records).build()); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); verify(this.stream, Mockito.atLeastOnce()).flush(); } @Test public void writeShutdownMessageTest() throws IOException, InterruptedException, ExecutionException { - Future future = this.messageWriter.writeShardEndedMessage(ShardEndedInput.builder().build()); + Future future = this.messageWriter.writeShardEndedMessage( + ShardEndedInput.builder().build()); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); verify(this.stream, Mockito.atLeastOnce()).flush(); } @@ -118,15 +116,15 @@ public class MessageWriterTest { Future future = this.messageWriter.writeShutdownRequestedMessage(); future.get(); - verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), - 
Mockito.anyInt()); + verify(this.stream, Mockito.atLeastOnce()).write(Mockito.any(byte[].class), Mockito.anyInt(), Mockito.anyInt()); verify(this.stream, Mockito.atLeastOnce()).flush(); } @Test public void streamIOExceptionTest() throws IOException, InterruptedException, ExecutionException { Mockito.doThrow(IOException.class).when(stream).flush(); - Future initializeTask = this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(SHARD_ID).build()); + Future initializeTask = this.messageWriter.writeInitializeMessage( + InitializationInput.builder().shardId(SHARD_ID).build()); Boolean result = initializeTask.get(); Assert.assertNotNull(result); Assert.assertFalse(result); @@ -152,7 +150,8 @@ public class MessageWriterTest { Assert.assertFalse(this.messageWriter.isOpen()); try { // Any message should fail - this.messageWriter.writeInitializeMessage(InitializationInput.builder().shardId(SHARD_ID).build()); + this.messageWriter.writeInitializeMessage( + InitializationInput.builder().shardId(SHARD_ID).build()); Assert.fail("MessageWriter should be closed and unable to write."); } catch (IllegalStateException e) { // This should happen. 
diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonConfigTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonConfigTest.java index aa46f431..de5a1405 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonConfigTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonConfigTest.java @@ -14,26 +14,25 @@ */ package software.amazon.kinesis.multilang; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.mockito.Mockito.when; - import java.io.ByteArrayInputStream; import java.io.IOException; -import software.amazon.awssdk.regions.Region; +import junit.framework.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.runners.MockitoJUnitRunner; - -import junit.framework.Assert; import software.amazon.awssdk.auth.credentials.AwsCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.regions.Region; import software.amazon.kinesis.multilang.config.KinesisClientLibConfigurator; import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class MultiLangDaemonConfigTest { private static final String FILENAME = "multilang.properties"; @@ -49,6 +48,7 @@ public class MultiLangDaemonConfigTest { @Mock private AwsCredentialsProvider credentialsProvider; + @Mock private AwsCredentials creds; @@ -62,14 +62,13 @@ public class MultiLangDaemonConfigTest { * @throws IOException */ public void setup(String streamName, String streamArn) throws IOException { - String properties = 
String.format("executableName = %s\n" + String properties = String.format( + "executableName = %s\n" + "applicationName = %s\n" + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n" + "processingLanguage = malbolge\n" + "regionName = %s\n", - EXE, - APPLICATION_NAME, - "us-east-1"); + EXE, APPLICATION_NAME, "us-east-1"); if (streamName != null) { properties += String.format("streamName = %s\n", streamName); @@ -79,7 +78,8 @@ public class MultiLangDaemonConfigTest { } classLoader = Mockito.mock(ClassLoader.class); - Mockito.doReturn(new ByteArrayInputStream(properties.getBytes())).when(classLoader) + Mockito.doReturn(new ByteArrayInputStream(properties.getBytes())) + .when(classLoader) .getResourceAsStream(FILENAME); when(credentialsProvider.resolveCredentials()).thenReturn(creds); @@ -185,7 +185,8 @@ public class MultiLangDaemonConfigTest { + "AWSCredentialsProvider = DefaultAWSCredentialsProviderChain\n" + "processingLanguage = malbolge"; ClassLoader classLoader = Mockito.mock(ClassLoader.class); - Mockito.doReturn(new ByteArrayInputStream(propertiesNoExecutableName.getBytes())).when(classLoader) + Mockito.doReturn(new ByteArrayInputStream(propertiesNoExecutableName.getBytes())) + .when(classLoader) .getResourceAsStream(FILENAME); try { @@ -207,5 +208,4 @@ public class MultiLangDaemonConfigTest { public void testActualPropertiesFile() throws Exception { new MultiLangDaemonConfig(FILENAME); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonTest.java index 3229e2b8..3e689437 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangDaemonTest.java @@ -14,6 +14,28 @@ */ package 
software.amazon.kinesis.multilang; +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; + +import ch.qos.logback.classic.LoggerContext; +import ch.qos.logback.classic.joran.JoranConfigurator; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import org.slf4j.LoggerFactory; +import software.amazon.kinesis.coordinator.Scheduler; +import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; + import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -27,46 +49,29 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import java.io.File; -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; -import org.slf4j.LoggerFactory; - -import ch.qos.logback.classic.LoggerContext; -import ch.qos.logback.classic.joran.JoranConfigurator; -import software.amazon.kinesis.coordinator.Scheduler; -import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; - @RunWith(MockitoJUnitRunner.class) public class MultiLangDaemonTest { @Mock private Scheduler scheduler; + @Mock private 
MultiLangDaemonConfig config; + @Mock private ExecutorService executorService; + @Mock private Future futureInteger; + @Mock private MultiLangDaemonConfiguration multiLangDaemonConfiguration; + @Mock private Runtime runtime; @Rule public ExpectedException expectedException = ExpectedException.none(); + @Rule public final TemporaryFolder temporaryFolder = new TemporaryFolder(); @@ -86,7 +91,7 @@ public class MultiLangDaemonTest { public void testSuccessfulNoOptionsJCommanderBuild() { String testPropertiesFile = "/test/properties/file"; MultiLangDaemon.MultiLangDaemonArguments arguments = new MultiLangDaemon.MultiLangDaemonArguments(); - daemon.buildJCommanderAndParseArgs(arguments, new String[] { testPropertiesFile }); + daemon.buildJCommanderAndParseArgs(arguments, new String[] {testPropertiesFile}); assertThat(arguments.propertiesFile, nullValue()); assertThat(arguments.logConfiguration, nullValue()); @@ -98,7 +103,7 @@ public class MultiLangDaemonTest { public void testSuccessfulOptionsJCommanderBuild() { String propertiesOption = "/test/properties/file/option"; String propertiesFileArgs = "/test/properties/args"; - String[] args = new String[] { "-p", propertiesOption, propertiesFileArgs }; + String[] args = new String[] {"-p", propertiesOption, propertiesFileArgs}; MultiLangDaemon.MultiLangDaemonArguments arguments = new MultiLangDaemon.MultiLangDaemonArguments(); daemon.buildJCommanderAndParseArgs(arguments, args); @@ -124,7 +129,8 @@ public class MultiLangDaemonTest { LoggerContext loggerContext = spy((LoggerContext) LoggerFactory.getILoggerFactory()); JoranConfigurator configurator = spy(new JoranConfigurator()); - String logConfiguration = this.getClass().getClassLoader().getResource("logback.xml").getPath(); + String logConfiguration = + this.getClass().getClassLoader().getResource("logback.xml").getPath(); daemon.configureLogging(logConfiguration, loggerContext, configurator); verify(loggerContext).reset(); diff --git 
a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangProtocolTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangProtocolTest.java index 5320aec5..bed6b6f6 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangProtocolTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/MultiLangProtocolTest.java @@ -14,6 +14,42 @@ */ package software.amazon.kinesis.multilang; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import com.google.common.util.concurrent.SettableFuture; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; +import software.amazon.kinesis.exceptions.InvalidStateException; +import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; +import software.amazon.kinesis.exceptions.ShutdownException; +import software.amazon.kinesis.exceptions.ThrottlingException; +import software.amazon.kinesis.lifecycle.events.InitializationInput; +import software.amazon.kinesis.lifecycle.events.LeaseLostInput; +import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; +import software.amazon.kinesis.lifecycle.events.ShardEndedInput; +import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; +import software.amazon.kinesis.multilang.messages.CheckpointMessage; +import software.amazon.kinesis.multilang.messages.LeaseLostMessage; +import software.amazon.kinesis.multilang.messages.Message; +import 
software.amazon.kinesis.multilang.messages.ProcessRecordsMessage; +import software.amazon.kinesis.multilang.messages.ShardEndedMessage; +import software.amazon.kinesis.multilang.messages.StatusMessage; +import software.amazon.kinesis.processor.RecordProcessorCheckpointer; +import software.amazon.kinesis.retrieval.KinesisClientRecord; + import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; @@ -27,65 +63,35 @@ import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.runners.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; - -import software.amazon.kinesis.exceptions.InvalidStateException; -import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; -import software.amazon.kinesis.exceptions.ShutdownException; -import software.amazon.kinesis.exceptions.ThrottlingException; -import software.amazon.kinesis.lifecycle.events.LeaseLostInput; -import software.amazon.kinesis.lifecycle.events.ShardEndedInput; -import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; -import software.amazon.kinesis.multilang.messages.CheckpointMessage; -import software.amazon.kinesis.multilang.messages.LeaseLostMessage; -import software.amazon.kinesis.multilang.messages.Message; -import software.amazon.kinesis.multilang.messages.ProcessRecordsMessage; -import 
software.amazon.kinesis.multilang.messages.ShardEndedMessage; -import software.amazon.kinesis.multilang.messages.StatusMessage; -import com.google.common.util.concurrent.SettableFuture; - -import software.amazon.kinesis.lifecycle.events.InitializationInput; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.retrieval.KinesisClientRecord; - @RunWith(MockitoJUnitRunner.class) public class MultiLangProtocolTest { private static final List EMPTY_RECORD_LIST = Collections.emptyList(); @Mock private MultiLangProtocol protocol; + @Mock private MessageWriter messageWriter; + @Mock private MessageReader messageReader; + private String shardId; + @Mock private RecordProcessorCheckpointer checkpointer; + @Mock private MultiLangDaemonConfiguration configuration; @Before public void setup() { this.shardId = "shard-id-123"; - protocol = new MultiLangProtocolForTesting(messageReader, messageWriter, - InitializationInput.builder().shardId(shardId).build(), configuration); + protocol = new MultiLangProtocolForTesting( + messageReader, + messageWriter, + InitializationInput.builder().shardId(shardId).build(), + configuration); when(configuration.getTimeoutInSeconds()).thenReturn(null); } @@ -104,28 +110,32 @@ public class MultiLangProtocolTest { @Test public void testInitialize() { - when(messageWriter - .writeInitializeMessage(argThat(Matchers.withInit(InitializationInput.builder() - .shardId(shardId).build())))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("initialize"), Message.class)); + when(messageWriter.writeInitializeMessage(argThat(Matchers.withInit( + InitializationInput.builder().shardId(shardId).build())))) + .thenReturn(buildFuture(true)); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage("initialize"), Message.class)); 
assertThat(protocol.initialize(), equalTo(true)); } @Test public void testProcessRecords() { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("processRecords"), Message.class)); + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(buildFuture(true)); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage("processRecords"), Message.class)); - assertThat(protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build()), + assertThat( + protocol.processRecords( + ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build()), equalTo(true)); } @Test public void leaseLostTest() { when(messageWriter.writeLeaseLossMessage(any(LeaseLostInput.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture(new StatusMessage(LeaseLostMessage.ACTION), Message.class)); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage(LeaseLostMessage.ACTION), Message.class)); assertThat(protocol.leaseLost(LeaseLostInput.builder().build()), equalTo(true)); } @@ -133,7 +143,8 @@ public class MultiLangProtocolTest { @Test public void shardEndedTest() { when(messageWriter.writeShardEndedMessage(any(ShardEndedInput.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture(new StatusMessage(ShardEndedMessage.ACTION))); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage(ShardEndedMessage.ACTION))); assertThat(protocol.shardEnded(ShardEndedInput.builder().build()), equalTo(true)); } @@ -141,12 +152,12 @@ public class MultiLangProtocolTest { @Test public void shutdownRequestedTest() { 
when(messageWriter.writeShutdownRequestedMessage()).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("shutdownRequested"), Message.class)); - Mockito.doReturn(buildFuture(true)).when(messageWriter) - .writeShutdownRequestedMessage(); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage("shutdownRequested"), Message.class)); + Mockito.doReturn(buildFuture(true)).when(messageWriter).writeShutdownRequestedMessage(); Mockito.doReturn(buildFuture(new StatusMessage("shutdownRequested"))) - .when(messageReader).getNextMessageFromSTDOUT(); + .when(messageReader) + .getNextMessageFromSTDOUT(); assertThat(protocol.shutdownRequested(null), equalTo(true)); } @@ -168,16 +179,17 @@ public class MultiLangProtocolTest { } return buildFuture(message); } - }.init(messages); } @Test - public void testProcessRecordsWithCheckpoints() throws - KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + public void testProcessRecordsWithCheckpoints() + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(buildFuture(true)); + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(buildFuture(true)); + when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))) + .thenReturn(buildFuture(true)); when(messageReader.getNextMessageFromSTDOUT()).thenAnswer(buildMessageAnswers(new ArrayList() { { this.add(new CheckpointMessage("123", 0L, null)); @@ -192,8 +204,10 @@ public class MultiLangProtocolTest { } })); - boolean result = 
protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST) - .checkpointer(checkpointer).build()); + boolean result = protocol.processRecords(ProcessRecordsInput.builder() + .records(EMPTY_RECORD_LIST) + .checkpointer(checkpointer) + .build()); assertThat(result, equalTo(true)); @@ -203,41 +217,52 @@ public class MultiLangProtocolTest { @Test public void testProcessRecordsWithABadCheckpoint() { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(buildFuture(false)); + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(buildFuture(true)); + when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))) + .thenReturn(buildFuture(false)); when(messageReader.getNextMessageFromSTDOUT()).thenAnswer(buildMessageAnswers(new ArrayList() { { this.add(new CheckpointMessage("456", 0L, null)); this.add(new StatusMessage("processRecords")); } })); - assertThat(protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST) - .checkpointer(checkpointer).build()), equalTo(false)); + assertThat( + protocol.processRecords(ProcessRecordsInput.builder() + .records(EMPTY_RECORD_LIST) + .checkpointer(checkpointer) + .build()), + equalTo(false)); } @Test(expected = NullPointerException.class) public void waitForStatusMessageTimeoutTest() throws InterruptedException, TimeoutException, ExecutionException { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(buildFuture(true)); Future future = Mockito.mock(Future.class); when(messageReader.getNextMessageFromSTDOUT()).thenReturn(future); when(configuration.getTimeoutInSeconds()).thenReturn(5); 
when(future.get(anyInt(), eq(TimeUnit.SECONDS))).thenThrow(TimeoutException.class); - protocol = new MultiLangProtocolForTesting(messageReader, + protocol = new MultiLangProtocolForTesting( + messageReader, messageWriter, InitializationInput.builder().shardId(shardId).build(), configuration); - protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build()); + protocol.processRecords( + ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build()); } @Test public void waitForStatusMessageSuccessTest() { - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(buildFuture(true)); - when(messageReader.getNextMessageFromSTDOUT()).thenReturn(buildFuture( - new StatusMessage("processRecords"), Message.class)); + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(buildFuture(true)); + when(messageReader.getNextMessageFromSTDOUT()) + .thenReturn(buildFuture(new StatusMessage("processRecords"), Message.class)); when(configuration.getTimeoutInSeconds()).thenReturn(5); - assertTrue(protocol.processRecords(ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build())); + assertTrue(protocol.processRecords( + ProcessRecordsInput.builder().records(EMPTY_RECORD_LIST).build())); } private class MultiLangProtocolForTesting extends MultiLangProtocol { @@ -249,10 +274,11 @@ public class MultiLangProtocolTest { * @param initializationInput * @param configuration */ - MultiLangProtocolForTesting(final MessageReader messageReader, - final MessageWriter messageWriter, - final InitializationInput initializationInput, - final MultiLangDaemonConfiguration configuration) { + MultiLangProtocolForTesting( + final MessageReader messageReader, + final MessageWriter messageWriter, + final InitializationInput initializationInput, + final MultiLangDaemonConfiguration configuration) { super(messageReader, messageWriter, initializationInput, configuration); } diff --git 
a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/NestedPropertyKeyTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/NestedPropertyKeyTest.java index 3f61db7a..fbffee81 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/NestedPropertyKeyTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/NestedPropertyKeyTest.java @@ -15,6 +15,10 @@ package software.amazon.kinesis.multilang; import com.amazonaws.regions.Regions; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.verify; @@ -24,11 +28,6 @@ import static software.amazon.kinesis.multilang.NestedPropertyKey.ENDPOINT_REGIO import static software.amazon.kinesis.multilang.NestedPropertyKey.EXTERNAL_ID; import static software.amazon.kinesis.multilang.NestedPropertyKey.parse; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - @RunWith(MockitoJUnitRunner.class) public class NestedPropertyKeyTest { @@ -95,11 +94,11 @@ public class NestedPropertyKeyTest { @Test public void testNonmatchingParameters() { final String[] params = new String[] { - null, - "", - "hello world", // no nested key - "foo=bar", // nested key, but is not a recognized key - createKey(EXTERNAL_ID, "eid") + "=extra", // valid key made invalid by second '=' + null, + "", + "hello world", // no nested key + "foo=bar", // nested key, but is not a recognized key + createKey(EXTERNAL_ID, "eid") + "=extra", // valid key made invalid by second '=' }; parse(mockProcessor, params); verifyZeroInteractions(mockProcessor); @@ -108,5 +107,4 @@ public class NestedPropertyKeyTest { private static String createKey(final NestedPropertyKey key, final String value) { return 
key.getNestedKey() + "=" + value; } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/ReadSTDERRTaskTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/ReadSTDERRTaskTest.java index 45ff3052..9876fd21 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/ReadSTDERRTaskTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/ReadSTDERRTaskTest.java @@ -72,7 +72,8 @@ public class ReadSTDERRTaskTest { try { finishedCleanly = result.get(); } catch (InterruptedException | ExecutionException e) { - Assert.fail("Should have been able to get a result. The error should be handled during the call and result in false."); + Assert.fail( + "Should have been able to get a result. The error should be handled during the call and result in false."); } Assert.assertFalse("Reading a line should have thrown an exception", finishedCleanly); } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorFactoryTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorFactoryTest.java index 1954cf91..d1f67ad8 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorFactoryTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorFactoryTest.java @@ -16,12 +16,11 @@ package software.amazon.kinesis.multilang; import org.junit.Assert; import org.junit.Test; - -import software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; -import software.amazon.kinesis.processor.ShardRecordProcessor; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; +import 
software.amazon.kinesis.multilang.config.MultiLangDaemonConfiguration; +import software.amazon.kinesis.processor.ShardRecordProcessor; @RunWith(MockitoJUnitRunner.class) public class StreamingShardRecordProcessorFactoryTest { @@ -31,10 +30,13 @@ public class StreamingShardRecordProcessorFactoryTest { @Test public void createProcessorTest() { - MultiLangRecordProcessorFactory factory = new MultiLangRecordProcessorFactory("somecommand", null, configuration); + MultiLangRecordProcessorFactory factory = + new MultiLangRecordProcessorFactory("somecommand", null, configuration); ShardRecordProcessor processor = factory.shardRecordProcessor(); - Assert.assertEquals("Should have constructed a StreamingRecordProcessor", MultiLangShardRecordProcessor.class, + Assert.assertEquals( + "Should have constructed a StreamingRecordProcessor", + MultiLangShardRecordProcessor.class, processor.getClass()); } } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorTest.java index caa925b0..4eb66db1 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/StreamingShardRecordProcessorTest.java @@ -14,15 +14,6 @@ */ package software.amazon.kinesis.multilang; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.argThat; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.io.InputStream; import java.io.OutputStream; import java.util.Collections; @@ -32,6 +23,7 @@ import 
java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; +import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -41,9 +33,6 @@ import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.runners.MockitoJUnitRunner; import org.mockito.stubbing.Answer; - -import com.fasterxml.jackson.databind.ObjectMapper; - import software.amazon.awssdk.services.kinesis.model.Record; import software.amazon.kinesis.exceptions.KinesisClientLibDependencyException; import software.amazon.kinesis.exceptions.ThrottlingException; @@ -61,6 +50,15 @@ import software.amazon.kinesis.processor.PreparedCheckpointer; import software.amazon.kinesis.processor.RecordProcessorCheckpointer; import software.amazon.kinesis.retrieval.KinesisClientRecord; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.argThat; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class StreamingShardRecordProcessorTest { @@ -70,6 +68,7 @@ public class StreamingShardRecordProcessorTest { @Mock private Future messageFuture; + @Mock private Future trueFuture; @@ -81,14 +80,13 @@ public class StreamingShardRecordProcessorTest { } @Override - public void checkpoint(String sequenceNumber) throws KinesisClientLibDependencyException, - ThrottlingException, IllegalArgumentException { + public void checkpoint(String sequenceNumber) + throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException { throw new UnsupportedOperationException(); } @Override - public void checkpoint(Record record) - throws KinesisClientLibDependencyException, ThrottlingException { + public 
void checkpoint(Record record) throws KinesisClientLibDependencyException, ThrottlingException { throw new UnsupportedOperationException(); } @@ -141,7 +139,8 @@ public class StreamingShardRecordProcessorTest { } @Override - public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber, byte[] applicationState) + public PreparedCheckpointer prepareCheckpoint( + String sequenceNumber, long subSequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, ThrottlingException, IllegalArgumentException { throw new UnsupportedOperationException(); } @@ -178,8 +177,14 @@ public class StreamingShardRecordProcessorTest { when(configuration.getTimeoutInSeconds()).thenReturn(null); recordProcessor = - new MultiLangShardRecordProcessor(new ProcessBuilder(), executor, new ObjectMapper(), messageWriter, - messageReader, errorReader, configuration) { + new MultiLangShardRecordProcessor( + new ProcessBuilder(), + executor, + new ObjectMapper(), + messageWriter, + messageReader, + errorReader, + configuration) { // Just don't do anything when we exit. 
void exit() { @@ -203,9 +208,12 @@ public class StreamingShardRecordProcessorTest { Mockito.doReturn(Mockito.mock(Future.class)).when(messageReader).drainSTDOUT(); Mockito.doReturn(true).when(trueFuture).get(); - when(messageWriter.writeInitializeMessage(any(InitializationInput.class))).thenReturn(trueFuture); - when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))).thenReturn(trueFuture); - when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))).thenReturn(trueFuture); + when(messageWriter.writeInitializeMessage(any(InitializationInput.class))) + .thenReturn(trueFuture); + when(messageWriter.writeCheckpointMessageWithError(anyString(), anyLong(), any(Throwable.class))) + .thenReturn(trueFuture); + when(messageWriter.writeProcessRecordsMessage(any(ProcessRecordsInput.class))) + .thenReturn(trueFuture); when(messageWriter.writeLeaseLossMessage(any(LeaseLostInput.class))).thenReturn(trueFuture); } @@ -223,11 +231,16 @@ public class StreamingShardRecordProcessorTest { List testRecords = Collections.emptyList(); - recordProcessor.initialize(InitializationInput.builder().shardId(SHARD_ID).build()); - recordProcessor.processRecords(ProcessRecordsInput.builder().records(testRecords) - .checkpointer(unimplementedCheckpointer).build()); - recordProcessor.processRecords(ProcessRecordsInput.builder().records(testRecords) - .checkpointer(unimplementedCheckpointer).build()); + recordProcessor.initialize( + InitializationInput.builder().shardId(SHARD_ID).build()); + recordProcessor.processRecords(ProcessRecordsInput.builder() + .records(testRecords) + .checkpointer(unimplementedCheckpointer) + .build()); + recordProcessor.processRecords(ProcessRecordsInput.builder() + .records(testRecords) + .checkpointer(unimplementedCheckpointer) + .build()); recordProcessor.leaseLost(LeaseLostInput.builder().build()); } @@ -235,9 +248,12 @@ public class StreamingShardRecordProcessorTest { public void processorPhasesTest() 
throws InterruptedException, ExecutionException { Answer answer = new Answer() { - StatusMessage[] answers = new StatusMessage[] { new StatusMessage(InitializeMessage.ACTION), - new StatusMessage(ProcessRecordsMessage.ACTION), new StatusMessage(ProcessRecordsMessage.ACTION), - new StatusMessage(ShutdownMessage.ACTION) }; + StatusMessage[] answers = new StatusMessage[] { + new StatusMessage(InitializeMessage.ACTION), + new StatusMessage(ProcessRecordsMessage.ACTION), + new StatusMessage(ProcessRecordsMessage.ACTION), + new StatusMessage(ShutdownMessage.ACTION) + }; int callCount = 0; @@ -268,9 +284,12 @@ public class StreamingShardRecordProcessorTest { * This bad message will cause shutdown to not attempt to send a message. i.e. avoid encountering an * exception. */ - StatusMessage[] answers = new StatusMessage[] { new StatusMessage("Bad"), - new StatusMessage(ProcessRecordsMessage.ACTION), new StatusMessage(ProcessRecordsMessage.ACTION), - new StatusMessage(ShutdownMessage.ACTION) }; + StatusMessage[] answers = new StatusMessage[] { + new StatusMessage("Bad"), + new StatusMessage(ProcessRecordsMessage.ACTION), + new StatusMessage(ProcessRecordsMessage.ACTION), + new StatusMessage(ShutdownMessage.ACTION) + }; int callCount = 0; @@ -286,8 +305,9 @@ public class StreamingShardRecordProcessorTest { phases(answer); - verify(messageWriter).writeInitializeMessage(argThat(Matchers.withInit(InitializationInput.builder() - .shardId(SHARD_ID).build()))); + verify(messageWriter) + .writeInitializeMessage(argThat(Matchers.withInit( + InitializationInput.builder().shardId(SHARD_ID).build()))); verify(messageWriter, times(2)).writeProcessRecordsMessage(any(ProcessRecordsInput.class)); verify(messageWriter, never()).writeLeaseLossMessage(any(LeaseLostInput.class)); Assert.assertEquals(1, systemExitCount); diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProviderTest.java 
b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProviderTest.java index 1c9e6bca..c27a425d 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProviderTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/auth/KclSTSAssumeRoleSessionCredentialsProviderTest.java @@ -16,10 +16,10 @@ package software.amazon.kinesis.multilang.auth; import java.util.Arrays; -import static org.junit.Assert.assertEquals; - import org.junit.Test; +import static org.junit.Assert.assertEquals; + public class KclSTSAssumeRoleSessionCredentialsProviderTest { private static final String ARN = "arn"; @@ -31,7 +31,7 @@ public class KclSTSAssumeRoleSessionCredentialsProviderTest { */ @Test public void testConstructorWithoutOptionalParams() { - new KclSTSAssumeRoleSessionCredentialsProvider(new String[] { ARN, SESSION_NAME }); + new KclSTSAssumeRoleSessionCredentialsProvider(new String[] {ARN, SESSION_NAME}); } @Test @@ -46,9 +46,8 @@ public class KclSTSAssumeRoleSessionCredentialsProviderTest { @Test public void testVarArgs() { for (final String[] varargs : Arrays.asList( - new String[] { ARN, SESSION_NAME, "externalId=eid", "foo"}, - new String[] { ARN, SESSION_NAME, "foo", "externalId=eid"} - )) { + new String[] {ARN, SESSION_NAME, "externalId=eid", "foo"}, + new String[] {ARN, SESSION_NAME, "foo", "externalId=eid"})) { final VarArgsSpy provider = new VarArgsSpy(varargs); assertEquals("eid", provider.externalId); } @@ -68,4 +67,4 @@ public class KclSTSAssumeRoleSessionCredentialsProviderTest { super.acceptExternalId(externalId); } } -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoderTest.java 
b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoderTest.java index 80e67d26..ba5a0925 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoderTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/AWSCredentialsProviderPropertyValueDecoderTest.java @@ -14,14 +14,11 @@ */ package software.amazon.kinesis.multilang.config; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; - import java.util.Arrays; +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSCredentialsProviderChain; import com.amazonaws.auth.BasicAWSCredentials; import lombok.ToString; import org.hamcrest.Description; @@ -30,9 +27,11 @@ import org.hamcrest.TypeSafeDiagnosingMatcher; import org.junit.Test; import software.amazon.kinesis.multilang.auth.KclSTSAssumeRoleSessionCredentialsProvider; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.auth.AWSCredentialsProviderChain; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; public class AWSCredentialsProviderPropertyValueDecoderTest { @@ -79,10 +78,10 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { @Override public void describeTo(Description description) { - description.appendText("An AWSCredentialsProvider that provides an AWSCredential matching: ") + description + .appendText("An AWSCredentialsProvider that provides an 
AWSCredential matching: ") .appendList("(", ", ", ")", Arrays.asList(classMatcher, akidMatcher, secretMatcher)); } - } private static AWSCredentialsMatcher hasCredentials(String akid, String secret) { @@ -121,7 +120,7 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { for (final String className : Arrays.asList( KclSTSAssumeRoleSessionCredentialsProvider.class.getName(), // fully-qualified name KclSTSAssumeRoleSessionCredentialsProvider.class.getSimpleName() // name-only; needs prefix - )) { + )) { final AWSCredentialsProvider provider = decoder.decodeValue(className + "|arn|sessionName"); assertNotNull(className, provider); } @@ -132,7 +131,7 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { */ @Test public void testVarArgAuthProvider() { - final String[] args = new String[] { "arg1", "arg2", "arg3" }; + final String[] args = new String[] {"arg1", "arg2", "arg3"}; final String className = VarArgCredentialsProvider.class.getName(); final String encodedValue = className + "|" + String.join("|", args); @@ -151,9 +150,7 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { } @Override - public void refresh() { - - } + public void refresh() {} } /** @@ -180,9 +177,7 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { } @Override - public void refresh() { - - } + public void refresh() {} } private static class VarArgCredentialsProvider implements AWSCredentialsProvider { @@ -201,8 +196,6 @@ public class AWSCredentialsProviderPropertyValueDecoderTest { } @Override - public void refresh() { - - } + public void refresh() {} } } diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/BuilderDynaBeanTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/BuilderDynaBeanTest.java index 9038453a..ac687b82 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/BuilderDynaBeanTest.java +++ 
b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/BuilderDynaBeanTest.java @@ -15,16 +15,14 @@ package software.amazon.kinesis.multilang.config; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - import java.util.function.Consumer; import java.util.function.Supplier; +import lombok.Builder; +import lombok.EqualsAndHashCode; +import lombok.RequiredArgsConstructor; +import lombok.ToString; +import lombok.experimental.Accessors; import org.apache.commons.beanutils.BeanUtilsBean; import org.apache.commons.beanutils.ConvertUtilsBean; import org.junit.Before; @@ -32,11 +30,12 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; -import lombok.Builder; -import lombok.EqualsAndHashCode; -import lombok.RequiredArgsConstructor; -import lombok.ToString; -import lombok.experimental.Accessors; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; public class BuilderDynaBeanTest { @@ -109,8 +108,8 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateAllParameters() throws Exception { - TestComplexCreate expected = TestComplexCreate.create("real", - TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build()); + TestComplexCreate expected = TestComplexCreate.create( + "real", TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build()); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreate.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "[0]", expected.realName); @@ 
-136,8 +135,8 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateComplexParameterOnly() throws Exception { - TestComplexCreate expected = TestComplexCreate.create(null, - TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build()); + TestComplexCreate expected = TestComplexCreate.create( + null, TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build()); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreate.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "[1].stringL1", expected.test1.stringL1); @@ -161,7 +160,8 @@ public class BuilderDynaBeanTest { @Test public void testSimpleBuilderAllParameters() throws Exception { - TestSimpleBuilder expected = TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build(); + TestSimpleBuilder expected = + TestSimpleBuilder.builder().stringL1("l1").longVal(10L).build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "stringL1", expected.stringL1); @@ -213,12 +213,14 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateSimpleBuilderVariantAllParameters() throws Exception { - TestSimpleBuilder variant = TestSimpleBuilder.builder().longVal(10L).stringL1("variant").build(); + TestSimpleBuilder variant = + TestSimpleBuilder.builder().longVal(10L).stringL1("variant").build(); TestComplexCreateVariance expected = TestComplexCreateVariance.create("simple-builder", variant); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName); - utilsBean.setProperty(builderDynaBean, "[1].class", expected.variant.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, "[1].class", expected.variant.getClass().getName()); utilsBean.setProperty(builderDynaBean, "[1].longVal", variant.longVal); utilsBean.setProperty(builderDynaBean, "[1].stringL1", 
variant.stringL1); @@ -229,8 +231,11 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateVariantBuilderAllParameters() throws Exception { - TestVariantBuilder variant = TestVariantBuilder.builder().variantBuilderName("variant-build").intClass(20) - .testEnum(TestEnum.Blue).build(); + TestVariantBuilder variant = TestVariantBuilder.builder() + .variantBuilderName("variant-build") + .intClass(20) + .testEnum(TestEnum.Blue) + .build(); TestComplexCreateVariance expected = TestComplexCreateVariance.create("builder-variant", variant); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean); @@ -264,13 +269,16 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateVariantBuilderAllParametersPrefixWithJoiner() throws Exception { - TestVariantBuilder variant = TestVariantBuilder.builder().variantBuilderName("variant-build").intClass(20) - .testEnum(TestEnum.Blue).build(); + TestVariantBuilder variant = TestVariantBuilder.builder() + .variantBuilderName("variant-build") + .intClass(20) + .testEnum(TestEnum.Blue) + .build(); TestComplexCreateVariance expected = TestComplexCreateVariance.create("builder-variant-prefix", variant); String prefix = variant.getClass().getEnclosingClass().getName() + "$"; - BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean, - prefix); + BuilderDynaBean builderDynaBean = + new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean, prefix); utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName); utilsBean.setProperty(builderDynaBean, "[1].class", variant.getClass().getSimpleName()); utilsBean.setProperty(builderDynaBean, "[1].variantBuilderName", variant.variantBuilderName); @@ -284,13 +292,16 @@ public class BuilderDynaBeanTest { @Test public void testComplexCreateVariantBuilderAllParametersPrefixWithOutJoiner() throws Exception { - TestVariantBuilder variant = 
TestVariantBuilder.builder().variantBuilderName("variant-build").intClass(20) - .testEnum(TestEnum.Blue).build(); + TestVariantBuilder variant = TestVariantBuilder.builder() + .variantBuilderName("variant-build") + .intClass(20) + .testEnum(TestEnum.Blue) + .build(); TestComplexCreateVariance expected = TestComplexCreateVariance.create("builder-variant-prefix", variant); String prefix = variant.getClass().getEnclosingClass().getName(); - BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean, - prefix); + BuilderDynaBean builderDynaBean = + new BuilderDynaBean(TestComplexCreateVariance.class, convertUtilsBean, prefix); utilsBean.setProperty(builderDynaBean, "[0]", expected.varianceName); utilsBean.setProperty(builderDynaBean, "[1].class", variant.getClass().getSimpleName()); utilsBean.setProperty(builderDynaBean, "[1].variantBuilderName", variant.variantBuilderName); @@ -330,11 +341,21 @@ public class BuilderDynaBeanTest { @Test public void testComplexRootAllParameters() throws Exception { - TestSimpleBuilder simpleBuilder = TestSimpleBuilder.builder().stringL1("simple-l1").longVal(20L).build(); - TestRootClass expected = TestRootClass.builder().intVal(10).stringVal("root").testEnum(TestEnum.Red) - .testComplexCreate(TestComplexCreate.create("real", - TestSimpleBuilder.builder().stringL1("complex-l1").longVal(10L).build())) - .testSimpleBuilder(simpleBuilder).testSimpleCreate(TestSimpleCreate.create("first", "last")).build(); + TestSimpleBuilder simpleBuilder = + TestSimpleBuilder.builder().stringL1("simple-l1").longVal(20L).build(); + TestRootClass expected = TestRootClass.builder() + .intVal(10) + .stringVal("root") + .testEnum(TestEnum.Red) + .testComplexCreate(TestComplexCreate.create( + "real", + TestSimpleBuilder.builder() + .stringL1("complex-l1") + .longVal(10L) + .build())) + .testSimpleBuilder(simpleBuilder) + .testSimpleCreate(TestSimpleCreate.create("first", "last")) + .build(); BuilderDynaBean 
builderDynaBean = new BuilderDynaBean(TestRootClass.class, convertUtilsBean); @@ -342,10 +363,10 @@ public class BuilderDynaBeanTest { utilsBean.setProperty(builderDynaBean, "stringVal", expected.stringVal); utilsBean.setProperty(builderDynaBean, "testEnum", expected.testEnum); utilsBean.setProperty(builderDynaBean, "testComplexCreate.[0]", expected.testComplexCreate.realName); - utilsBean.setProperty(builderDynaBean, "testComplexCreate.[1].stringL1", - expected.testComplexCreate.test1.stringL1); - utilsBean.setProperty(builderDynaBean, "testComplexCreate.[1].longVal", - expected.testComplexCreate.test1.longVal); + utilsBean.setProperty( + builderDynaBean, "testComplexCreate.[1].stringL1", expected.testComplexCreate.test1.stringL1); + utilsBean.setProperty( + builderDynaBean, "testComplexCreate.[1].longVal", expected.testComplexCreate.test1.longVal); utilsBean.setProperty(builderDynaBean, "testSimpleBuilder.class", TestSimpleBuilder.class.getName()); utilsBean.setProperty(builderDynaBean, "testSimpleBuilder.stringL1", simpleBuilder.stringL1); utilsBean.setProperty(builderDynaBean, "testSimpleBuilder.longVal", simpleBuilder.longVal); @@ -370,7 +391,11 @@ public class BuilderDynaBeanTest { @Test public void testComplexRootTopLevelOnly() throws Exception { - TestRootClass expected = TestRootClass.builder().intVal(10).stringVal("root").testEnum(TestEnum.Red).build(); + TestRootClass expected = TestRootClass.builder() + .intVal(10) + .stringVal("root") + .testEnum(TestEnum.Red) + .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestRootClass.class, convertUtilsBean); @@ -385,12 +410,17 @@ public class BuilderDynaBeanTest { @Test public void testSupplierNotUsed() throws Exception { - TestVariantBuilder variant = TestVariantBuilder.builder().testEnum(TestEnum.Green).intClass(10) - .variantBuilderName("variant-supplier").build(); - TestSupplierClass expected = TestSupplierClass.builder().variantClass(variant).build(); + TestVariantBuilder variant = 
TestVariantBuilder.builder() + .testEnum(TestEnum.Green) + .intClass(10) + .variantBuilderName("variant-supplier") + .build(); + TestSupplierClass expected = + TestSupplierClass.builder().variantClass(variant).build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSupplierClass.class, convertUtilsBean); - utilsBean.setProperty(builderDynaBean, "variantClass.class", variant.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, "variantClass.class", variant.getClass().getName()); utilsBean.setProperty(builderDynaBean, "variantClass.testEnum", variant.testEnum); utilsBean.setProperty(builderDynaBean, "variantClass.intClass", variant.intClass); utilsBean.setProperty(builderDynaBean, "variantClass.variantBuilderName", variant.variantBuilderName); @@ -422,8 +452,11 @@ public class BuilderDynaBeanTest { @Test public void testVariantBuildsToSuperType() throws Exception { - TestVariantBuilder expected = TestVariantBuilder.builder().intClass(10).testEnum(TestEnum.Green) - .variantBuilderName("variant-super").build(); + TestVariantBuilder expected = TestVariantBuilder.builder() + .intClass(10) + .testEnum(TestEnum.Green) + .variantBuilderName("variant-super") + .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestInterface.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "class", expected.getClass().getName()); @@ -439,9 +472,11 @@ public class BuilderDynaBeanTest { @Test public void testEmptyPropertyHandler() throws Exception { String emptyPropertyValue = "test-property"; - TestVariantCreate expected = TestVariantCreate.create(emptyPropertyValue, (long) emptyPropertyValue.length(), - emptyPropertyValue + "-vary"); - BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestInterface.class, convertUtilsBean, + TestVariantCreate expected = TestVariantCreate.create( + emptyPropertyValue, (long) emptyPropertyValue.length(), emptyPropertyValue + "-vary"); + BuilderDynaBean builderDynaBean = new BuilderDynaBean( + 
TestInterface.class, + convertUtilsBean, s -> TestVariantCreate.create(s, (long) s.length(), s + "-vary")); utilsBean.setProperty(builderDynaBean, "", emptyPropertyValue); @@ -455,8 +490,8 @@ public class BuilderDynaBeanTest { thrown.expect(IllegalStateException.class); thrown.expectMessage(containsString("When a property handler is resolved further properties may not be set.")); - BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestInterface.class, convertUtilsBean, - s -> TestVariantCreate.create("test", 10, "test")); + BuilderDynaBean builderDynaBean = new BuilderDynaBean( + TestInterface.class, convertUtilsBean, s -> TestVariantCreate.create("test", 10, "test")); utilsBean.setProperty(builderDynaBean, "", "test"); utilsBean.setProperty(builderDynaBean, "[0]", "test"); } @@ -468,8 +503,8 @@ public class BuilderDynaBeanTest { thrown.expectMessage(containsString(TestInterface.class.getName())); thrown.expectMessage(containsString("cannot be assigned to")); - BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestInterface.class, convertUtilsBean, - s -> TestEnum.Green); + BuilderDynaBean builderDynaBean = + new BuilderDynaBean(TestInterface.class, convertUtilsBean, s -> TestEnum.Green); utilsBean.setProperty(builderDynaBean, "", "test"); @@ -478,8 +513,11 @@ public class BuilderDynaBeanTest { @Test public void testSimpleArrayValues() throws Exception { - SimpleArrayClassVariant expected = SimpleArrayClassVariant.builder().ints(new Integer[] { 1, 2, 3 }) - .variantName("simple-array").longs(new Long[] { 1L, 2L, 3L }).strings(new String[] { "a", "b", "c" }) + SimpleArrayClassVariant expected = SimpleArrayClassVariant.builder() + .ints(new Integer[] {1, 2, 3}) + .variantName("simple-array") + .longs(new Long[] {1L, 2L, 3L}) + .strings(new String[] {"a", "b", "c"}) .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(SimpleArrayClassVariant.class, convertUtilsBean); @@ -503,12 +541,20 @@ public class BuilderDynaBeanTest { @Test public void 
testComplexArrayValuesBuilder() throws Exception { - TestVariantBuilder variant1 = TestVariantBuilder.builder().variantBuilderName("variant-1") - .testEnum(TestEnum.Green).intClass(10).build(); - TestVariantBuilder variant2 = TestVariantBuilder.builder().variantBuilderName("variant-2") - .testEnum(TestEnum.Blue).intClass(20).build(); - ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder().variantName("complex-test") - .tests(new TestInterface[] { variant1, variant2 }).build(); + TestVariantBuilder variant1 = TestVariantBuilder.builder() + .variantBuilderName("variant-1") + .testEnum(TestEnum.Green) + .intClass(10) + .build(); + TestVariantBuilder variant2 = TestVariantBuilder.builder() + .variantBuilderName("variant-2") + .testEnum(TestEnum.Blue) + .intClass(20) + .build(); + ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder() + .variantName("complex-test") + .tests(new TestInterface[] {variant1, variant2}) + .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(ComplexArrayClassVariant.class, convertUtilsBean); @@ -533,18 +579,22 @@ public class BuilderDynaBeanTest { TestVariantCreate variant1 = TestVariantCreate.create("variant-1", 10L, "vary-1"); TestVariantCreate variant2 = TestVariantCreate.create("variant-2", 20L, "vary-2"); - ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder().variantName("create-test") - .tests(new TestInterface[] { variant1, variant2 }).build(); + ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder() + .variantName("create-test") + .tests(new TestInterface[] {variant1, variant2}) + .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(ComplexArrayClassVariant.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "variantName", expected.variantName); - utilsBean.setProperty(builderDynaBean, "tests[0].class", variant1.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, "tests[0].class", variant1.getClass().getName()); 
utilsBean.setProperty(builderDynaBean, "tests[0].[0]", variant1.variantCreateName); utilsBean.setProperty(builderDynaBean, "tests[0].[1]", variant1.longClass); utilsBean.setProperty(builderDynaBean, "tests[0].[2]", variant1.varyString); - utilsBean.setProperty(builderDynaBean, "tests[1].class", variant2.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, "tests[1].class", variant2.getClass().getName()); utilsBean.setProperty(builderDynaBean, "tests[1].[0]", variant2.variantCreateName); utilsBean.setProperty(builderDynaBean, "tests[1].[1]", variant2.longClass); utilsBean.setProperty(builderDynaBean, "tests[1].[2]", variant2.varyString); @@ -552,7 +602,6 @@ public class BuilderDynaBeanTest { ComplexArrayClassVariant actual = builderDynaBean.build(ComplexArrayClassVariant.class); assertThat(actual, equalTo(expected)); - } @Test @@ -562,13 +611,18 @@ public class BuilderDynaBeanTest { if (i % 2 == 0) { variants[i] = TestVariantCreate.create("create-variant-" + i, i + 5, "vary-" + i); } else { - variants[i] = TestVariantBuilder.builder().testEnum(TestEnum.values()[i % TestEnum.values().length]) - .intClass(i).variantBuilderName("builder-variant-" + i).build(); + variants[i] = TestVariantBuilder.builder() + .testEnum(TestEnum.values()[i % TestEnum.values().length]) + .intClass(i) + .variantBuilderName("builder-variant-" + i) + .build(); } } - ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder().variantName("large-complex") - .tests(variants).build(); + ComplexArrayClassVariant expected = ComplexArrayClassVariant.builder() + .variantName("large-complex") + .tests(variants) + .build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(ComplexArrayClassVariant.class, convertUtilsBean); @@ -578,13 +632,15 @@ public class BuilderDynaBeanTest { TestInterface variant = variants[i]; if (variant instanceof TestVariantCreate) { TestVariantCreate create = (TestVariantCreate) variant; - utilsBean.setProperty(builderDynaBean, prefix + "class", 
create.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, prefix + "class", create.getClass().getName()); utilsBean.setProperty(builderDynaBean, prefix + "[0]", create.variantCreateName); utilsBean.setProperty(builderDynaBean, prefix + "[1]", create.longClass); utilsBean.setProperty(builderDynaBean, prefix + "[2]", create.varyString); } else if (variant instanceof TestVariantBuilder) { TestVariantBuilder builder = (TestVariantBuilder) variant; - utilsBean.setProperty(builderDynaBean, prefix + "class", builder.getClass().getName()); + utilsBean.setProperty( + builderDynaBean, prefix + "class", builder.getClass().getName()); utilsBean.setProperty(builderDynaBean, prefix + "variantBuilderName", builder.variantBuilderName); utilsBean.setProperty(builderDynaBean, prefix + "intClass", builder.intClass); utilsBean.setProperty(builderDynaBean, prefix + "testEnum", builder.testEnum); @@ -667,25 +723,27 @@ public class BuilderDynaBeanTest { @Test public void testAdditionalMutators() throws Exception { - TestSimpleBuilder expected = TestSimpleBuilder.builder().stringL1("test").longVal(10L).build(); + TestSimpleBuilder expected = + TestSimpleBuilder.builder().stringL1("test").longVal(10L).build(); BuilderDynaBean builderDynaBean = new BuilderDynaBean(TestSimpleBuilder.class, convertUtilsBean); utilsBean.setProperty(builderDynaBean, "stringL1", expected.stringL1); - TestSimpleBuilder actual = builderDynaBean.build(TestSimpleBuilder.class, - b -> ((TestSimpleBuilder.TestSimpleBuilderBuilder) b).longVal(expected.longVal)); + TestSimpleBuilder actual = + builderDynaBean.build(TestSimpleBuilder.class, b -> ((TestSimpleBuilder.TestSimpleBuilderBuilder) b) + .longVal(expected.longVal)); assertThat(actual, equalTo(expected)); } public enum TestEnum { - Red, Green, Blue + Red, + Green, + Blue } - public interface TestInterface { - - } + public interface TestInterface {} @Accessors(fluent = true) @ToString @@ -838,7 +896,5 @@ public class BuilderDynaBeanTest { } public 
String name = "default"; - } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtilsTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtilsTest.java index 96de848e..5e0db340 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtilsTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/ConfigurationSettableUtilsTest.java @@ -15,18 +15,17 @@ package software.amazon.kinesis.multilang.config; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertThat; - import java.util.Optional; -import org.junit.Test; - import lombok.Builder; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.Setter; import lombok.experimental.Accessors; +import org.junit.Test; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertThat; public class ConfigurationSettableUtilsTest { @@ -44,7 +43,10 @@ public class ConfigurationSettableUtilsTest { public void testPrimitivesSet() { ConfigResult expected = ConfigResult.builder().rawInt(10).rawLong(15L).build(); - ConfigObject configObject = ConfigObject.builder().rawInt(expected.rawInt).rawLong(expected.rawLong).build(); + ConfigObject configObject = ConfigObject.builder() + .rawInt(expected.rawInt) + .rawLong(expected.rawLong) + .build(); ConfigResult actual = resolve(configObject); assertThat(actual, equalTo(expected)); @@ -52,10 +54,14 @@ public class ConfigurationSettableUtilsTest { @Test public void testHeapValuesSet() { - ConfigResult expected = ConfigResult.builder().name("test").boxedInt(10).boxedLong(15L).build(); + ConfigResult expected = + ConfigResult.builder().name("test").boxedInt(10).boxedLong(15L).build(); - ConfigObject configObject = 
ConfigObject.builder().name(expected.name).boxedInt(expected.boxedInt.intValue()) - .boxedLong(expected.boxedLong.longValue()).build(); + ConfigObject configObject = ConfigObject.builder() + .name(expected.name) + .boxedInt(expected.boxedInt.intValue()) + .boxedLong(expected.boxedLong.longValue()) + .build(); ConfigResult actual = resolve(configObject); assertThat(actual, equalTo(expected)); @@ -63,27 +69,39 @@ public class ConfigurationSettableUtilsTest { @Test public void testComplexValuesSet() { - ComplexValue complexValue = ComplexValue.builder().name("complex").value(10).build(); - ConfigResult expected = ConfigResult.builder().complexValue(complexValue).build(); + ComplexValue complexValue = + ComplexValue.builder().name("complex").value(10).build(); + ConfigResult expected = + ConfigResult.builder().complexValue(complexValue).build(); ConfigObject configObject = ConfigObject.builder() - .complexValue(ComplexValue.builder().name(complexValue.name).value(complexValue.value).build()).build(); + .complexValue(ComplexValue.builder() + .name(complexValue.name) + .value(complexValue.value) + .build()) + .build(); ConfigResult actual = resolve(configObject); assertThat(actual, equalTo(expected)); - } @Test public void testOptionalValuesSet() { - ComplexValue complexValue = ComplexValue.builder().name("optional-complex").value(20).build(); - ConfigResult expected = ConfigResult.builder().optionalString(Optional.of("test")) - .optionalInteger(Optional.of(10)).optionalLong(Optional.of(15L)) - .optionalComplexValue(Optional.of(complexValue)).build(); + ComplexValue complexValue = + ComplexValue.builder().name("optional-complex").value(20).build(); + ConfigResult expected = ConfigResult.builder() + .optionalString(Optional.of("test")) + .optionalInteger(Optional.of(10)) + .optionalLong(Optional.of(15L)) + .optionalComplexValue(Optional.of(complexValue)) + .build(); - ConfigObject configObject = ConfigObject.builder().optionalString(expected.optionalString.get()) - 
.optionalInteger(expected.optionalInteger.get()).optionalLong(expected.optionalLong.get()) - .optionalComplexValue(expected.optionalComplexValue.get()).build(); + ConfigObject configObject = ConfigObject.builder() + .optionalString(expected.optionalString.get()) + .optionalInteger(expected.optionalInteger.get()) + .optionalLong(expected.optionalLong.get()) + .optionalComplexValue(expected.optionalComplexValue.get()) + .build(); ConfigResult actual = resolve(configObject); assertThat(actual, equalTo(expected)); @@ -91,20 +109,29 @@ public class ConfigurationSettableUtilsTest { @Test public void testRenamedRawValues() { - ComplexValue complexValue = ComplexValue.builder().name("renamed-complex").value(20).build(); - ConfigResult expected = ConfigResult.builder().renamedString("renamed").renamedInt(10) - .renamedOptionalString(Optional.of("renamed-optional")).renamedComplexValue(complexValue).build(); + ComplexValue complexValue = + ComplexValue.builder().name("renamed-complex").value(20).build(); + ConfigResult expected = ConfigResult.builder() + .renamedString("renamed") + .renamedInt(10) + .renamedOptionalString(Optional.of("renamed-optional")) + .renamedComplexValue(complexValue) + .build(); - ConfigObject configObject = ConfigObject.builder().toRenameString(expected.renamedString) - .toRenameInt(expected.renamedInt).toRenameComplexValue(complexValue) - .optionalToRename(expected.renamedOptionalString.get()).build(); + ConfigObject configObject = ConfigObject.builder() + .toRenameString(expected.renamedString) + .toRenameInt(expected.renamedInt) + .toRenameComplexValue(complexValue) + .optionalToRename(expected.renamedOptionalString.get()) + .build(); ConfigResult actual = resolve(configObject); assertThat(actual, equalTo(expected)); } private ConfigResult resolve(ConfigObject configObject) { - return ConfigurationSettableUtils.resolveFields(configObject, ConfigResult.builder().build()); + return ConfigurationSettableUtils.resolveFields( + configObject, 
ConfigResult.builder().build()); } @Accessors(fluent = true) @@ -129,7 +156,6 @@ public class ConfigurationSettableUtilsTest { private int renamedInt; private Optional renamedOptionalString; private ComplexValue renamedComplexValue; - } @Accessors(fluent = true) @@ -145,35 +171,47 @@ public class ConfigurationSettableUtilsTest { @ConfigurationSettable(configurationClass = ConfigResult.class) private String name; + @ConfigurationSettable(configurationClass = ConfigResult.class) private int rawInt; + @ConfigurationSettable(configurationClass = ConfigResult.class) private Integer boxedInt; + @ConfigurationSettable(configurationClass = ConfigResult.class) private long rawLong; + @ConfigurationSettable(configurationClass = ConfigResult.class) private Long boxedLong; + @ConfigurationSettable(configurationClass = ConfigResult.class) private ComplexValue complexValue; @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true) private String optionalString; + @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true) private Integer optionalInteger; + @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true) private Long optionalLong; + @ConfigurationSettable(configurationClass = ConfigResult.class, convertToOptional = true) private ComplexValue optionalComplexValue; @ConfigurationSettable(configurationClass = ConfigResult.class, methodName = "renamedString") private String toRenameString; + @ConfigurationSettable(configurationClass = ConfigResult.class, methodName = "renamedInt") private int toRenameInt; - @ConfigurationSettable(configurationClass = ConfigResult.class, methodName = "renamedOptionalString", convertToOptional = true) + + @ConfigurationSettable( + configurationClass = ConfigResult.class, + methodName = "renamedOptionalString", + convertToOptional = true) private String optionalToRename; + @ConfigurationSettable(configurationClass = ConfigResult.class, methodName = 
"renamedComplexValue") private ComplexValue toRenameComplexValue; - } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoderTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoderTest.java index a3aace72..d274f9f2 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoderTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/DatePropertyValueDecoderTest.java @@ -14,12 +14,12 @@ */ package software.amazon.kinesis.multilang.config; -import static org.junit.Assert.assertEquals; - import java.util.Date; import org.junit.Test; +import static org.junit.Assert.assertEquals; + public class DatePropertyValueDecoderTest { private DatePropertyValueDecoder decoder = new DatePropertyValueDecoder(); diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/FanoutConfigBeanTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/FanoutConfigBeanTest.java index 11b12588..255a07ff 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/FanoutConfigBeanTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/FanoutConfigBeanTest.java @@ -21,7 +21,6 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.kinesis.retrieval.fanout.FanOutConfig; @@ -50,18 +49,22 @@ public class FanoutConfigBeanTest { MultiLangDaemonConfiguration configuration = new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean); configuration.setStreamName("test-stream"); 
configuration.setApplicationName("test-application"); - FanOutConfig fanOutConfig =fanoutConfigBean.build(kinesisAsyncClient, configuration); + FanOutConfig fanOutConfig = fanoutConfigBean.build(kinesisAsyncClient, configuration); assertThat(fanOutConfig.kinesisClient(), equalTo(kinesisAsyncClient)); assertThat(fanOutConfig.streamName(), equalTo(configuration.getStreamName())); assertThat(fanOutConfig.applicationName(), equalTo(configuration.getApplicationName())); assertThat(fanOutConfig.consumerArn(), equalTo(fanoutConfigBean.getConsumerArn())); assertThat(fanOutConfig.consumerName(), equalTo(fanoutConfigBean.getConsumerName())); - assertThat(fanOutConfig.maxDescribeStreamConsumerRetries(), equalTo(fanoutConfigBean.getMaxDescribeStreamConsumerRetries())); - assertThat(fanOutConfig.maxDescribeStreamSummaryRetries(), equalTo(fanoutConfigBean.getMaxDescribeStreamSummaryRetries())); - assertThat(fanOutConfig.registerStreamConsumerRetries(), equalTo(fanoutConfigBean.getRegisterStreamConsumerRetries())); + assertThat( + fanOutConfig.maxDescribeStreamConsumerRetries(), + equalTo(fanoutConfigBean.getMaxDescribeStreamConsumerRetries())); + assertThat( + fanOutConfig.maxDescribeStreamSummaryRetries(), + equalTo(fanoutConfigBean.getMaxDescribeStreamSummaryRetries())); + assertThat( + fanOutConfig.registerStreamConsumerRetries(), + equalTo(fanoutConfigBean.getRegisterStreamConsumerRetries())); assertThat(fanOutConfig.retryBackoffMillis(), equalTo(fanoutConfigBean.getRetryBackoffMillis())); - } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfiguratorTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfiguratorTest.java index 2b02ea43..b0e3b870 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfiguratorTest.java +++ 
b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/KinesisClientLibConfiguratorTest.java @@ -14,15 +14,6 @@ */ package software.amazon.kinesis.multilang.config; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - import java.io.ByteArrayInputStream; import java.io.InputStream; import java.net.URI; @@ -34,18 +25,25 @@ import java.util.Set; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicAWSCredentials; +import com.google.common.collect.ImmutableSet; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.exception.ExceptionUtils; import org.junit.Test; - -import com.google.common.collect.ImmutableSet; - import org.junit.runner.RunWith; import org.mockito.runners.MockitoJUnitRunner; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.kinesis.common.InitialPositionInStream; import software.amazon.kinesis.metrics.MetricsLevel; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + @RunWith(MockitoJUnitRunner.class) public class KinesisClientLibConfiguratorTest { @@ -58,8 +56,14 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithBasicSetup() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - 
"applicationName = b", "AWSCredentialsProvider = " + credentialName1, "workerId = 123" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123" + }, + '\n')); assertEquals(config.getApplicationName(), "b"); assertEquals(config.getStreamName(), "a"); assertEquals(config.getWorkerIdentifier(), "123"); @@ -69,9 +73,16 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithLongVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "applicationName = app", - "streamName = 123", "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, - "workerId = 123", "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "applicationName = app", + "streamName = 123", + "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, + "workerId = 123", + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, + '\n')); assertEquals(config.getApplicationName(), "app"); assertEquals(config.getStreamName(), "123"); @@ -83,9 +94,14 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithInitialPositionInStreamExtended() { long epochTimeInSeconds = 1617406032; - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "applicationName = app", - "streamName = 123", "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, - "initialPositionInStreamExtended = " + epochTimeInSeconds}, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "applicationName = app", + "streamName = 123", + "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, + "initialPositionInStreamExtended = " + epochTimeInSeconds 
+ }, + '\n')); assertEquals(config.getInitialPositionInStreamExtended().getTimestamp(), new Date(epochTimeInSeconds * 1000L)); assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.AT_TIMESTAMP); @@ -96,9 +112,14 @@ public class KinesisClientLibConfiguratorTest { // AT_TIMESTAMP cannot be used as initialPositionInStream. If a user wants to specify AT_TIMESTAMP, // they must specify the time with initialPositionInStreamExtended. try { - getConfiguration(StringUtils.join(new String[] { "applicationName = app", - "streamName = 123", "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, - "initialPositionInStream = AT_TIMESTAMP"}, '\n')); + getConfiguration(StringUtils.join( + new String[] { + "applicationName = app", + "streamName = 123", + "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, + "initialPositionInStream = AT_TIMESTAMP" + }, + '\n')); fail("Should have thrown when initialPositionInStream is set to AT_TIMESTAMP"); } catch (Exception e) { Throwable rootCause = ExceptionUtils.getRootCause(e); @@ -111,9 +132,14 @@ public class KinesisClientLibConfiguratorTest { // initialPositionInStreamExtended takes a long value indicating seconds since epoch. If a non-long // value is provided, the constructor should throw an IllegalArgumentException exception. 
try { - getConfiguration(StringUtils.join(new String[] { "applicationName = app", - "streamName = 123", "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, - "initialPositionInStreamExtended = null"}, '\n')); + getConfiguration(StringUtils.join( + new String[] { + "applicationName = app", + "streamName = 123", + "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, + "initialPositionInStreamExtended = null" + }, + '\n')); fail("Should have thrown when initialPositionInStreamExtended is set to null"); } catch (Exception e) { Throwable rootCause = ExceptionUtils.getRootCause(e); @@ -124,8 +150,13 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithUnsupportedClientConfigurationVariables() { MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( - new String[] { "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, "workerId = id", - "kinesisClientConfig = {}", "streamName = stream", "applicationName = b" }, + new String[] { + "AWSCredentialsProvider = " + credentialName1 + ", " + credentialName2, + "workerId = id", + "kinesisClientConfig = {}", + "streamName = stream", + "applicationName = b" + }, '\n')); assertEquals(config.getApplicationName(), "b"); @@ -136,10 +167,18 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithIntVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = kinesis", - "AWSCredentialsProvider = " + credentialName2 + ", " + credentialName1, "workerId = w123", - "maxRecords = 10", "metricsMaxQueueSize = 20", "applicationName = kinesis", - "retryGetRecordsInSeconds = 2", "maxGetRecordsThreadPool = 1" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = kinesis", + "AWSCredentialsProvider = " + credentialName2 + ", " + credentialName1, + "workerId = w123", + "maxRecords = 10", + "metricsMaxQueueSize = 
20", + "applicationName = kinesis", + "retryGetRecordsInSeconds = 2", + "maxGetRecordsThreadPool = 1" + }, + '\n')); assertEquals(config.getApplicationName(), "kinesis"); assertEquals(config.getStreamName(), "kinesis"); @@ -152,9 +191,15 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithBooleanVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD, " + credentialName1, "workerId = 0", - "cleanupLeasesUponShardCompletion = false", "validateSequenceNumberBeforeCheckpointing = true" }, + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD, " + credentialName1, + "workerId = 0", + "cleanupLeasesUponShardCompletion = false", + "validateSequenceNumberBeforeCheckpointing = true" + }, '\n')); assertEquals(config.getApplicationName(), "b"); @@ -166,9 +211,16 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithStringVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 1", - "kinesisEndpoint = https://kinesis", "metricsLevel = SUMMARY" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 1", + "kinesisEndpoint = https://kinesis", + "metricsLevel = SUMMARY" + }, + '\n')); assertEquals(config.getWorkerIdentifier(), "1"); assertEquals(config.getKinesisClient().get("endpointOverride"), URI.create("https://kinesis")); @@ -177,38 +229,66 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithSetVariables() { - MultiLangDaemonConfiguration config = 
getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 1", - "metricsEnabledDimensions = ShardId, WorkerIdentifier" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 1", + "metricsEnabledDimensions = ShardId, WorkerIdentifier" + }, + '\n')); - Set expectedMetricsEnabledDimensions = ImmutableSet. builder() - .add("ShardId", "WorkerIdentifier").build(); - assertThat(new HashSet<>(Arrays.asList(config.getMetricsEnabledDimensions())), equalTo(expectedMetricsEnabledDimensions)); + Set expectedMetricsEnabledDimensions = ImmutableSet.builder() + .add("ShardId", "WorkerIdentifier") + .build(); + assertThat( + new HashSet<>(Arrays.asList(config.getMetricsEnabledDimensions())), + equalTo(expectedMetricsEnabledDimensions)); } @Test public void testWithInitialPositionInStreamTrimHorizon() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = TriM_Horizon" + }, + '\n')); assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.TRIM_HORIZON); } @Test public void testWithInitialPositionInStreamLatest() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = LateSt" }, '\n')); + 
MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = LateSt" + }, + '\n')); assertEquals(config.getInitialPositionInStream(), InitialPositionInStream.LATEST); } @Test public void testSkippingNonKCLVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon", "abc = 1" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = TriM_Horizon", + "abc = 1" + }, + '\n')); assertEquals(config.getApplicationName(), "b"); assertEquals(config.getStreamName(), "a"); @@ -218,33 +298,61 @@ public class KinesisClientLibConfiguratorTest { @Test public void testEmptyOptionalVariables() { - MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join(new String[] { "streamName = a", - "applicationName = b", "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon", "maxGetRecordsThreadPool = 1" }, '\n')); + MultiLangDaemonConfiguration config = getConfiguration(StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = TriM_Horizon", + "maxGetRecordsThreadPool = 1" + }, + '\n')); assertThat(config.getMaxGetRecordsThreadPool(), equalTo(1)); assertThat(config.getRetryGetRecordsInSeconds(), nullValue()); } @Test public void testWithZeroValue() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", 
- "AWSCredentialsProvider = ABCD," + credentialName1, "workerId = 123", - "initialPositionInStream = TriM_Horizon", "maxGetRecordsThreadPool = 0", - "retryGetRecordsInSeconds = 0" }, '\n'); + String test = StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = ABCD," + credentialName1, + "workerId = 123", + "initialPositionInStream = TriM_Horizon", + "maxGetRecordsThreadPool = 0", + "retryGetRecordsInSeconds = 0" + }, + '\n'); getConfiguration(test); } @Test public void testWithInvalidIntValue() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = 100nf" }, '\n'); + String test = StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = 100nf" + }, + '\n'); getConfiguration(test); } @Test public void testWithNegativeIntValue() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialName1, "workerId = 123", "failoverTimeMillis = -12" }, '\n'); + String test = StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = -12" + }, + '\n'); // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement getConfiguration(test); @@ -252,8 +360,15 @@ public class KinesisClientLibConfiguratorTest { @Test(expected = IllegalArgumentException.class) public void testWithMissingCredentialsProvider() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", "workerId = 123", - "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, '\n'); + String test = StringUtils.join( + new String[] { + "streamName = a", + 
"applicationName = b", + "workerId = 123", + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, + '\n'); // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement getConfiguration(test); @@ -262,8 +377,13 @@ public class KinesisClientLibConfiguratorTest { @Test public void testWithMissingWorkerId() { String test = StringUtils.join( - new String[] { "streamName = a", "applicationName = b", "AWSCredentialsProvider = " + credentialName1, - "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, '\n'); MultiLangDaemonConfiguration config = getConfiguration(test); @@ -274,46 +394,63 @@ public class KinesisClientLibConfiguratorTest { @Test(expected = NullPointerException.class) public void testWithMissingStreamNameAndMissingStreamArn() { - String test = StringUtils.join(new String[] { - "applicationName = b", - "AWSCredentialsProvider = " + credentialName1, - "workerId = 123", - "failoverTimeMillis = 100" }, + String test = StringUtils.join( + new String[] { + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = 100" + }, '\n'); getConfiguration(test); } @Test(expected = IllegalArgumentException.class) public void testWithEmptyStreamNameAndMissingStreamArn() { - String test = StringUtils.join(new String[] { - "applicationName = b", - "AWSCredentialsProvider = " + credentialName1, - "workerId = 123", - "failoverTimeMillis = 100", - "streamName = ", - "streamArn = "}, + String test = StringUtils.join( + new String[] { + "applicationName = b", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = 100", + "streamName = ", + "streamArn = " + }, '\n'); getConfiguration(test); } @Test(expected = NullPointerException.class) 
public void testWithMissingApplicationName() { - String test = StringUtils.join(new String[] { "streamName = a", "AWSCredentialsProvider = " + credentialName1, - "workerId = 123", "failoverTimeMillis = 100" }, '\n'); + String test = StringUtils.join( + new String[] { + "streamName = a", + "AWSCredentialsProvider = " + credentialName1, + "workerId = 123", + "failoverTimeMillis = 100" + }, + '\n'); getConfiguration(test); } @Test public void testWithAWSCredentialsFailed() { String test = StringUtils.join( - new String[] { "streamName = a", "applicationName = b", "AWSCredentialsProvider = " + credentialName2, - "failoverTimeMillis = 100", "shardSyncIntervalMillis = 500" }, + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialName2, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, '\n'); MultiLangDaemonConfiguration config = getConfiguration(test); // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement try { - config.getKinesisCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); + config.getKinesisCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); fail("expect failure with wrong credentials provider"); } catch (Exception e) { // succeed @@ -323,39 +460,63 @@ public class KinesisClientLibConfiguratorTest { // TODO: fix this test @Test public void testWithDifferentAWSCredentialsForDynamoDBAndCloudWatch() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialNameKinesis, - "AWSCredentialsProviderDynamoDB = " + credentialNameDynamoDB, - "AWSCredentialsProviderCloudWatch = " + credentialNameCloudWatch, "failoverTimeMillis = 100", - "shardSyncIntervalMillis = 500" }, '\n'); + String test = StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + 
credentialNameKinesis, + "AWSCredentialsProviderDynamoDB = " + credentialNameDynamoDB, + "AWSCredentialsProviderCloudWatch = " + credentialNameCloudWatch, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, + '\n'); // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement final MultiLangDaemonConfiguration config = getConfiguration(test); - config.getKinesisCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); - config.getDynamoDBCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); - config.getCloudWatchCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); + config.getKinesisCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); + config.getDynamoDBCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); + config.getCloudWatchCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); } // TODO: fix this test @Test public void testWithDifferentAWSCredentialsForDynamoDBAndCloudWatchFailed() { - String test = StringUtils.join(new String[] { "streamName = a", "applicationName = b", - "AWSCredentialsProvider = " + credentialNameKinesis, - "AWSCredentialsProviderDynamoDB = " + credentialName2, - "AWSCredentialsProviderCloudWatch = " + credentialName2, "failoverTimeMillis = 100", - "shardSyncIntervalMillis = 500" }, '\n'); + String test = StringUtils.join( + new String[] { + "streamName = a", + "applicationName = b", + "AWSCredentialsProvider = " + credentialNameKinesis, + "AWSCredentialsProviderDynamoDB = " + credentialName2, + "AWSCredentialsProviderCloudWatch = " + credentialName2, + "failoverTimeMillis = 100", + "shardSyncIntervalMillis = 500" + }, + '\n'); // separate input stream with getConfiguration to explicitly catch exception from the getConfiguration statement final MultiLangDaemonConfiguration config = 
getConfiguration(test); - config.getKinesisCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); + config.getKinesisCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); try { - config.getDynamoDBCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); + config.getDynamoDBCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); fail("DynamoDB credential providers should fail."); } catch (Exception e) { // succeed } try { - config.getCloudWatchCredentialsProvider().build(AwsCredentialsProvider.class).resolveCredentials(); + config.getCloudWatchCredentialsProvider() + .build(AwsCredentialsProvider.class) + .resolveCredentials(); fail("CloudWatch credential providers should fail."); } catch (Exception e) { // succeed @@ -373,9 +534,7 @@ public class KinesisClientLibConfiguratorTest { } @Override - public void refresh() { - - } + public void refresh() {} } /** @@ -389,9 +548,7 @@ public class KinesisClientLibConfiguratorTest { } @Override - public void refresh() { - - } + public void refresh() {} } /** @@ -405,9 +562,7 @@ public class KinesisClientLibConfiguratorTest { } @Override - public void refresh() { - - } + public void refresh() {} } /** @@ -421,9 +576,7 @@ public class KinesisClientLibConfiguratorTest { } @Override - public void refresh() { - - } + public void refresh() {} } /** @@ -437,13 +590,11 @@ public class KinesisClientLibConfiguratorTest { } @Override - public void refresh() { - - } + public void refresh() {} } private MultiLangDaemonConfiguration getConfiguration(String configString) { InputStream input = new ByteArrayInputStream(configString.getBytes()); return configurator.getConfiguration(input); } -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfigurationTest.java 
b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfigurationTest.java index da18e659..b98db83a 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfigurationTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/MultiLangDaemonConfigurationTest.java @@ -15,12 +15,6 @@ package software.amazon.kinesis.multilang.config; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; - import org.apache.commons.beanutils.BeanUtilsBean; import org.apache.commons.beanutils.ConvertUtilsBean; import org.junit.After; @@ -31,12 +25,17 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.kinesis.processor.ShardRecordProcessorFactory; import software.amazon.kinesis.retrieval.fanout.FanOutConfig; import software.amazon.kinesis.retrieval.polling.PollingConfig; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; + @RunWith(MockitoJUnitRunner.class) public class MultiLangDaemonConfigurationTest { @@ -69,7 +68,6 @@ public class MultiLangDaemonConfigurationTest { } } - public MultiLangDaemonConfiguration baseConfiguration() { MultiLangDaemonConfiguration configuration = new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean); configuration.setApplicationName("Test"); @@ -84,8 +82,8 @@ public class MultiLangDaemonConfigurationTest { MultiLangDaemonConfiguration 
configuration = baseConfiguration(); configuration.setMaxLeasesForWorker(10); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); assertThat(resolvedConfiguration.leaseManagementConfig.maxLeasesForWorker(), equalTo(10)); } @@ -95,8 +93,8 @@ public class MultiLangDaemonConfigurationTest { MultiLangDaemonConfiguration configuration = baseConfiguration(); configuration.setEnablePriorityLeaseAssignment(false); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration.resolvedConfiguration( - shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); assertThat(resolvedConfiguration.leaseManagementConfig.enablePriorityLeaseAssignment(), equalTo(false)); } @@ -105,11 +103,11 @@ public class MultiLangDaemonConfigurationTest { public void testDefaultRetrievalConfig() { MultiLangDaemonConfiguration configuration = baseConfiguration(); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(FanOutConfig.class)); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class)); } @Test @@ -118,18 +116,20 @@ public class MultiLangDaemonConfigurationTest { configuration.setMaxRecords(10); configuration.setIdleTimeBetweenReadsInMillis(60000); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = 
configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(PollingConfig.class)); - assertEquals(10, - ((PollingConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig()).maxRecords()); - assertEquals(60000, - ((PollingConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig()) - .idleTimeBetweenReadsInMillis()); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(PollingConfig.class)); + assertEquals( + 10, + ((PollingConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig()).maxRecords()); + assertEquals( + 60000, + ((PollingConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig()) + .idleTimeBetweenReadsInMillis()); assertTrue(((PollingConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig()) - .usePollingConfigIdleTimeValue()); + .usePollingConfigIdleTimeValue()); } @Test @@ -137,11 +137,11 @@ public class MultiLangDaemonConfigurationTest { MultiLangDaemonConfiguration configuration = baseConfiguration(); configuration.setRetrievalMode(RetrievalMode.FANOUT); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(FanOutConfig.class)); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class)); } @Test @@ -149,37 +149,39 @@ public class MultiLangDaemonConfigurationTest { 
MultiLangDaemonConfiguration configuration = baseConfiguration(); configuration.setRetrievalMode(RetrievalMode.POLLING); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(PollingConfig.class)); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(PollingConfig.class)); } @Test public void testRetrievalModeSetForPollingString() throws Exception { MultiLangDaemonConfiguration configuration = baseConfiguration(); - utilsBean.setProperty(configuration, "retrievalMode", RetrievalMode.POLLING.name().toLowerCase()); + utilsBean.setProperty( + configuration, "retrievalMode", RetrievalMode.POLLING.name().toLowerCase()); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(PollingConfig.class)); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(PollingConfig.class)); } @Test public void testRetrievalModeSetForFanoutString() throws Exception { MultiLangDaemonConfiguration configuration = baseConfiguration(); - utilsBean.setProperty(configuration, "retrievalMode", RetrievalMode.FANOUT.name().toLowerCase()); + utilsBean.setProperty( + configuration, "retrievalMode", RetrievalMode.FANOUT.name().toLowerCase()); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - 
.resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(FanOutConfig.class)); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class)); } @Test @@ -196,7 +198,7 @@ public class MultiLangDaemonConfigurationTest { // TODO : Enable this test once https://github.com/awslabs/amazon-kinesis-client/issues/692 is resolved public void testmetricsEnabledDimensions() { MultiLangDaemonConfiguration configuration = baseConfiguration(); - configuration.setMetricsEnabledDimensions(new String[]{"Operation"}); + configuration.setMetricsEnabledDimensions(new String[] {"Operation"}); configuration.resolvedConfiguration(shardRecordProcessorFactory); } @@ -209,14 +211,14 @@ public class MultiLangDaemonConfigurationTest { configuration.setRetrievalMode(RetrievalMode.FANOUT); configuration.getFanoutConfig().setConsumerArn(consumerArn); - MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = configuration - .resolvedConfiguration(shardRecordProcessorFactory); + MultiLangDaemonConfiguration.ResolvedConfiguration resolvedConfiguration = + configuration.resolvedConfiguration(shardRecordProcessorFactory); - assertThat(resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), - instanceOf(FanOutConfig.class)); - FanOutConfig fanOutConfig = (FanOutConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(); + assertThat( + resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(), instanceOf(FanOutConfig.class)); + FanOutConfig fanOutConfig = + (FanOutConfig) resolvedConfiguration.getRetrievalConfig().retrievalSpecificConfig(); assertThat(fanOutConfig.consumerArn(), equalTo(consumerArn)); } - -} \ No newline at end of file +} diff --git 
a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/PollingConfigBeanTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/PollingConfigBeanTest.java index 2d032728..576e6101 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/PollingConfigBeanTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/config/PollingConfigBeanTest.java @@ -15,6 +15,8 @@ package software.amazon.kinesis.multilang.config; +import java.util.Optional; + import org.apache.commons.beanutils.BeanUtilsBean; import org.apache.commons.beanutils.ConvertUtilsBean; import org.junit.Test; @@ -24,8 +26,6 @@ import org.mockito.runners.MockitoJUnitRunner; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.kinesis.retrieval.polling.PollingConfig; -import java.util.Optional; - import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertThat; @@ -46,17 +46,23 @@ public class PollingConfigBeanTest { ConvertUtilsBean convertUtilsBean = new ConvertUtilsBean(); BeanUtilsBean utilsBean = new BeanUtilsBean(convertUtilsBean); - MultiLangDaemonConfiguration multiLangDaemonConfiguration = new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean); + MultiLangDaemonConfiguration multiLangDaemonConfiguration = + new MultiLangDaemonConfiguration(utilsBean, convertUtilsBean); multiLangDaemonConfiguration.setStreamName("test-stream"); PollingConfig pollingConfig = pollingConfigBean.build(kinesisAsyncClient, multiLangDaemonConfiguration); assertThat(pollingConfig.kinesisClient(), equalTo(kinesisAsyncClient)); assertThat(pollingConfig.streamName(), equalTo(multiLangDaemonConfiguration.getStreamName())); - assertThat(pollingConfig.idleTimeBetweenReadsInMillis(), equalTo(pollingConfigBean.getIdleTimeBetweenReadsInMillis())); - assertThat(pollingConfig.maxGetRecordsThreadPool(), 
equalTo(Optional.of(pollingConfigBean.getMaxGetRecordsThreadPool()))); + assertThat( + pollingConfig.idleTimeBetweenReadsInMillis(), + equalTo(pollingConfigBean.getIdleTimeBetweenReadsInMillis())); + assertThat( + pollingConfig.maxGetRecordsThreadPool(), + equalTo(Optional.of(pollingConfigBean.getMaxGetRecordsThreadPool()))); assertThat(pollingConfig.maxRecords(), equalTo(pollingConfigBean.getMaxRecords())); - assertThat(pollingConfig.retryGetRecordsInSeconds(), equalTo(Optional.of(pollingConfigBean.getRetryGetRecordsInSeconds()))); + assertThat( + pollingConfig.retryGetRecordsInSeconds(), + equalTo(Optional.of(pollingConfigBean.getRetryGetRecordsInSeconds()))); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecordTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecordTest.java index 4aabfb33..b817da05 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecordTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/JsonFriendlyRecordTest.java @@ -15,11 +15,6 @@ package software.amazon.kinesis.multilang.messages; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.CoreMatchers.sameInstance; -import static org.junit.Assert.assertThat; - import java.nio.ByteBuffer; import java.time.Instant; import java.util.Arrays; @@ -31,9 +26,13 @@ import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; import org.junit.Test; - import software.amazon.kinesis.retrieval.KinesisClientRecord; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.CoreMatchers.sameInstance; +import static 
org.junit.Assert.assertThat; + public class JsonFriendlyRecordTest { private KinesisClientRecord kinesisClientRecord; @@ -48,7 +47,7 @@ public class JsonFriendlyRecordTest { @Test public void testRecordHandlesNoByteArrayBuffer() { - byte[] expected = new byte[] { 1, 2, 3, 4 }; + byte[] expected = new byte[] {1, 2, 3, 4}; ByteBuffer expectedBuffer = ByteBuffer.allocateDirect(expected.length); @@ -64,7 +63,7 @@ public class JsonFriendlyRecordTest { @Test public void testRecordHandlesArrayByteBuffer() { - ByteBuffer expected = ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 }); + ByteBuffer expected = ByteBuffer.wrap(new byte[] {1, 2, 3, 4}); kinesisClientRecord = defaultRecord().data(expected).build(); JsonFriendlyRecord jsonFriendlyRecord = JsonFriendlyRecord.fromKinesisClientRecord(kinesisClientRecord); @@ -82,14 +81,15 @@ public class JsonFriendlyRecordTest { private RecordMatcher(KinesisClientRecord expected) { this.matchers = Arrays.asList( - new FieldMatcher<>("approximateArrivalTimestamp", + new FieldMatcher<>( + "approximateArrivalTimestamp", equalTo(expected.approximateArrivalTimestamp().toEpochMilli()), JsonFriendlyRecord::getApproximateArrivalTimestamp), new FieldMatcher<>("partitionKey", expected::partitionKey, JsonFriendlyRecord::getPartitionKey), - new FieldMatcher<>("sequenceNumber", expected::sequenceNumber, - JsonFriendlyRecord::getSequenceNumber), - new FieldMatcher<>("subSequenceNumber", expected::subSequenceNumber, - JsonFriendlyRecord::getSubSequenceNumber), + new FieldMatcher<>( + "sequenceNumber", expected::sequenceNumber, JsonFriendlyRecord::getSequenceNumber), + new FieldMatcher<>( + "subSequenceNumber", expected::subSequenceNumber, JsonFriendlyRecord::getSubSequenceNumber), new FieldMatcher<>("data", dataEquivalentTo(expected.data()), JsonFriendlyRecord::getData)); this.expected = expected; @@ -97,13 +97,16 @@ public class JsonFriendlyRecordTest { @Override protected boolean matchesSafely(JsonFriendlyRecord item, Description mismatchDescription) { - 
return matchers.stream().map(m -> { - if (!m.matches(item)) { - m.describeMismatch(item, mismatchDescription); - return false; - } - return true; - }).reduce((l, r) -> l && r).orElse(true); + return matchers.stream() + .map(m -> { + if (!m.matches(item)) { + m.describeMismatch(item, mismatchDescription); + return false; + } + return true; + }) + .reduce((l, r) -> l && r) + .orElse(true); } @Override @@ -160,8 +163,9 @@ public class JsonFriendlyRecordTest { } private KinesisClientRecord.KinesisClientRecordBuilder defaultRecord() { - return KinesisClientRecord.builder().partitionKey("test-partition").sequenceNumber("123") + return KinesisClientRecord.builder() + .partitionKey("test-partition") + .sequenceNumber("123") .approximateArrivalTimestamp(Instant.now()); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/MessageTest.java b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/MessageTest.java index 62e5a741..adbd17fa 100644 --- a/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/MessageTest.java +++ b/amazon-kinesis-client-multilang/src/test/java/software/amazon/kinesis/multilang/messages/MessageTest.java @@ -17,66 +17,65 @@ package software.amazon.kinesis.multilang.messages; import java.nio.ByteBuffer; import java.util.Collections; -import org.junit.Assert; -import org.junit.Test; - import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; - +import org.junit.Assert; +import org.junit.Test; +import software.amazon.kinesis.lifecycle.ShutdownReason; import software.amazon.kinesis.lifecycle.events.InitializationInput; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.ShutdownReason; import software.amazon.kinesis.retrieval.KinesisClientRecord; public class MessageTest { @Test public void 
toStringTest() { - Message[] messages = new Message[]{ - new CheckpointMessage("1234567890", 0L, null), - new InitializeMessage(InitializationInput.builder().shardId("shard-123").build()), - new ProcessRecordsMessage(ProcessRecordsInput.builder() - .records(Collections.singletonList( - KinesisClientRecord.builder() - .data(ByteBuffer.wrap("cat".getBytes())) - .partitionKey("cat") - .sequenceNumber("555") - .build())) - .build()), - new ShutdownMessage(ShutdownReason.LEASE_LOST), - new StatusMessage("processRecords"), - new InitializeMessage(), - new ProcessRecordsMessage(), - new ShutdownRequestedMessage(), - new LeaseLostMessage(), - new ShardEndedMessage(), + Message[] messages = new Message[] { + new CheckpointMessage("1234567890", 0L, null), + new InitializeMessage( + InitializationInput.builder().shardId("shard-123").build()), + new ProcessRecordsMessage(ProcessRecordsInput.builder() + .records(Collections.singletonList(KinesisClientRecord.builder() + .data(ByteBuffer.wrap("cat".getBytes())) + .partitionKey("cat") + .sequenceNumber("555") + .build())) + .build()), + new ShutdownMessage(ShutdownReason.LEASE_LOST), + new StatusMessage("processRecords"), + new InitializeMessage(), + new ProcessRecordsMessage(), + new ShutdownRequestedMessage(), + new LeaseLostMessage(), + new ShardEndedMessage(), }; -// TODO: fix this + // TODO: fix this for (int i = 0; i < messages.length; i++) { System.out.println(messages[i].toString()); - Assert.assertTrue("Each message should contain the action field", messages[i].toString().contains("action")); + Assert.assertTrue( + "Each message should contain the action field", + messages[i].toString().contains("action")); } // Hit this constructor - KinesisClientRecord defaultJsonFriendlyRecord = KinesisClientRecord.builder().build(); + KinesisClientRecord defaultJsonFriendlyRecord = + KinesisClientRecord.builder().build(); Assert.assertNull(defaultJsonFriendlyRecord.partitionKey()); Assert.assertNull(defaultJsonFriendlyRecord.data()); 
Assert.assertNull(defaultJsonFriendlyRecord.sequenceNumber()); Assert.assertNull(new ShutdownMessage(null).getReason()); // Hit the bad object mapping path - Message withBadMapper = new Message() { - }.withObjectMapper(new ObjectMapper() { + Message withBadMapper = new Message() {}.withObjectMapper(new ObjectMapper() { /** - * + * */ private static final long serialVersionUID = 1L; @Override public String writeValueAsString(Object m) throws JsonProcessingException { - throw new JsonProcessingException(new Throwable()) { - }; + throw new JsonProcessingException(new Throwable()) {}; } }); String s = withBadMapper.toString(); diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/annotations/KinesisClientInternalApi.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/annotations/KinesisClientInternalApi.java index be137383..e10583e8 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/annotations/KinesisClientInternalApi.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/annotations/KinesisClientInternalApi.java @@ -22,5 +22,4 @@ import java.lang.annotation.RetentionPolicy; * Any class/method/variable marked with this annotation is subject to breaking changes between minor releases. */ @Retention(RetentionPolicy.CLASS) -public @interface KinesisClientInternalApi { -} +public @interface KinesisClientInternalApi {} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/Checkpoint.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/Checkpoint.java index f5af81e3..7b4de295 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/Checkpoint.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/Checkpoint.java @@ -40,7 +40,10 @@ public class Checkpoint { * @param pendingCheckpoint the pending checkpoint sequence number - can be null. 
* @param pendingCheckpointState the pending checkpoint state - can be null. */ - public Checkpoint(final ExtendedSequenceNumber checkpoint, final ExtendedSequenceNumber pendingCheckpoint, byte[] pendingCheckpointState) { + public Checkpoint( + final ExtendedSequenceNumber checkpoint, + final ExtendedSequenceNumber pendingCheckpoint, + byte[] pendingCheckpointState) { if (checkpoint == null || checkpoint.sequenceNumber().isEmpty()) { throw new IllegalArgumentException("Checkpoint cannot be null or empty"); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointConfig.java index dbde3b5a..a76673ed 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/CheckpointConfig.java @@ -15,7 +15,6 @@ package software.amazon.kinesis.checkpoint; - import lombok.Data; import lombok.experimental.Accessors; import software.amazon.kinesis.checkpoint.dynamodb.DynamoDBCheckpointFactory; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/DoesNothingPreparedCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/DoesNothingPreparedCheckpointer.java index 5a1e8168..2b8e547d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/DoesNothingPreparedCheckpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/DoesNothingPreparedCheckpointer.java @@ -60,9 +60,7 @@ public class DoesNothingPreparedCheckpointer implements PreparedCheckpointer { @Override public void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { + IllegalArgumentException { // This method does nothing } - } - diff --git 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SequenceNumberValidator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SequenceNumberValidator.java index de5565bf..7b674ca4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SequenceNumberValidator.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/SequenceNumberValidator.java @@ -20,10 +20,9 @@ import java.util.Collections; import java.util.List; import java.util.Optional; -import org.apache.commons.lang3.StringUtils; - import lombok.Data; import lombok.experimental.Accessors; +import org.apache.commons.lang3.StringUtils; /** * This supports extracting the shardId from a sequence number. @@ -98,11 +97,15 @@ public class SequenceNumberValidator { } } - private static final List SEQUENCE_NUMBER_READERS = Collections - .singletonList(new V2SequenceNumberReader()); + private static final List SEQUENCE_NUMBER_READERS = + Collections.singletonList(new V2SequenceNumberReader()); private Optional retrieveComponentsFor(String sequenceNumber) { - return SEQUENCE_NUMBER_READERS.stream().map(r -> r.read(sequenceNumber)).filter(Optional::isPresent).map(Optional::get).findFirst(); + return SEQUENCE_NUMBER_READERS.stream() + .map(r -> r.read(sequenceNumber)) + .filter(Optional::isPresent) + .map(Optional::get) + .findFirst(); } /** @@ -118,7 +121,7 @@ public class SequenceNumberValidator { * * *

    - * + * * @param sequenceNumber * the sequence number to extract the version from * @return an Optional containing the version if a compatible sequence number reader can be found, an empty Optional @@ -184,5 +187,4 @@ public class SequenceNumberValidator { public Optional validateSequenceNumberForShard(String sequenceNumber, String shardId) { return shardIdFor(sequenceNumber).map(s -> StringUtils.equalsIgnoreCase(s, shardId)); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointer.java index 63e13eaa..ea6bcaa3 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointer.java @@ -38,8 +38,8 @@ public class ShardPreparedCheckpointer implements PreparedCheckpointer { * @param pendingCheckpointSequenceNumber sequence number to checkpoint at * @param checkpointer checkpointer to use */ - public ShardPreparedCheckpointer(ExtendedSequenceNumber pendingCheckpointSequenceNumber, - RecordProcessorCheckpointer checkpointer) { + public ShardPreparedCheckpointer( + ExtendedSequenceNumber pendingCheckpointSequenceNumber, RecordProcessorCheckpointer checkpointer) { this.pendingCheckpointSequenceNumber = pendingCheckpointSequenceNumber; this.checkpointer = checkpointer; } @@ -58,8 +58,8 @@ public class ShardPreparedCheckpointer implements PreparedCheckpointer { @Override public void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { - checkpointer.checkpoint(pendingCheckpointSequenceNumber.sequenceNumber(), - pendingCheckpointSequenceNumber.subSequenceNumber()); + IllegalArgumentException { + checkpointer.checkpoint( + 
pendingCheckpointSequenceNumber.sequenceNumber(), pendingCheckpointSequenceNumber.subSequenceNumber()); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardRecordProcessorCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardRecordProcessorCheckpointer.java index 5fbac1d7..4de90d94 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardRecordProcessorCheckpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/ShardRecordProcessorCheckpointer.java @@ -41,16 +41,22 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpointer { @NonNull private final ShardInfo shardInfo; + @NonNull - @Getter @Accessors(fluent = true) + @Getter + @Accessors(fluent = true) private final Checkpointer checkpointer; // Set to the last value set via checkpoint(). // Sample use: verify application shutdown() invoked checkpoint() at the end of a shard. 
- @Getter @Accessors(fluent = true) + @Getter + @Accessors(fluent = true) private ExtendedSequenceNumber lastCheckpointValue; - @Getter @Accessors(fluent = true) + + @Getter + @Accessors(fluent = true) private ExtendedSequenceNumber largestPermittedCheckpointValue; + private ExtendedSequenceNumber sequenceNumberAtShardEnd; /** @@ -60,8 +66,11 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi public synchronized void checkpoint() throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { if (log.isDebugEnabled()) { - log.debug("Checkpointing {}, token {} at largest permitted value {}", ShardInfo.getLeaseKey(shardInfo), - shardInfo.concurrencyToken(), this.largestPermittedCheckpointValue); + log.debug( + "Checkpointing {}, token {} at largest permitted value {}", + ShardInfo.getLeaseKey(shardInfo), + shardInfo.concurrencyToken(), + this.largestPermittedCheckpointValue); } advancePosition(this.largestPermittedCheckpointValue); } @@ -71,15 +80,15 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi */ @Override public synchronized void checkpoint(Record record) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { // TODO: UserRecord Deprecation if (record == null) { throw new IllegalArgumentException("Could not checkpoint a null record"); } /* else if (record instanceof UserRecord) { - checkpoint(record.sequenceNumber(), ((UserRecord) record).subSequenceNumber()); - } */ else { + checkpoint(record.sequenceNumber(), ((UserRecord) record).subSequenceNumber()); + } */ else { checkpoint(record.sequenceNumber(), 0); } } @@ -89,8 +98,8 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi */ @Override public 
synchronized void checkpoint(String sequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { checkpoint(sequenceNumber, 0); } @@ -99,12 +108,12 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi */ @Override public synchronized void checkpoint(String sequenceNumber, long subSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { if (subSequenceNumber < 0) { - throw new IllegalArgumentException("Could not checkpoint at invalid, negative subsequence number " - + subSequenceNumber); + throw new IllegalArgumentException( + "Could not checkpoint at invalid, negative subsequence number " + subSequenceNumber); } /* @@ -116,15 +125,18 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi && newCheckpoint.compareTo(largestPermittedCheckpointValue) <= 0) { if (log.isDebugEnabled()) { - log.debug("Checkpointing {}, token {} at specific extended sequence number {}", ShardInfo.getLeaseKey(shardInfo), - shardInfo.concurrencyToken(), newCheckpoint); + log.debug( + "Checkpointing {}, token {} at specific extended sequence number {}", + ShardInfo.getLeaseKey(shardInfo), + shardInfo.concurrencyToken(), + newCheckpoint); } this.advancePosition(newCheckpoint); } else { throw new IllegalArgumentException(String.format( "Could not checkpoint at extended sequence number %s as it did not fall into acceptable range " - + "between the last checkpoint %s and the greatest extended sequence number passed to this " - + "record processor %s", + + "between the last 
checkpoint %s and the greatest extended sequence number passed to this " + + "record processor %s", newCheckpoint, this.lastCheckpointValue, this.largestPermittedCheckpointValue)); } } @@ -161,8 +173,8 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi if (record == null) { throw new IllegalArgumentException("Could not prepare checkpoint a null record"); } /*else if (record instanceof UserRecord) { - return prepareCheckpoint(record.sequenceNumber(), ((UserRecord) record).subSequenceNumber()); - } */ else { + return prepareCheckpoint(record.sequenceNumber(), ((UserRecord) record).subSequenceNumber()); + } */ else { return prepareCheckpoint(record.sequenceNumber(), 0, applicationState); } } @@ -190,7 +202,8 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi */ @Override public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, byte[] applicationState) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { return prepareCheckpoint(sequenceNumber, 0, applicationState); } @@ -207,11 +220,13 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi * {@inheritDoc} */ @Override - public PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber, byte[] applicationState) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, IllegalArgumentException { + public PreparedCheckpointer prepareCheckpoint( + String sequenceNumber, long subSequenceNumber, byte[] applicationState) + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException { if (subSequenceNumber < 0) { - throw new IllegalArgumentException("Could 
not checkpoint at invalid, negative subsequence number " - + subSequenceNumber); + throw new IllegalArgumentException( + "Could not checkpoint at invalid, negative subsequence number " + subSequenceNumber); } /* @@ -223,8 +238,11 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi && pendingCheckpoint.compareTo(largestPermittedCheckpointValue) <= 0) { if (log.isDebugEnabled()) { - log.debug("Preparing checkpoint {}, token {} at specific extended sequence number {}", - ShardInfo.getLeaseKey(shardInfo), shardInfo.concurrencyToken(), pendingCheckpoint); + log.debug( + "Preparing checkpoint {}, token {} at specific extended sequence number {}", + ShardInfo.getLeaseKey(shardInfo), + shardInfo.concurrencyToken(), + pendingCheckpoint); } return doPrepareCheckpoint(pendingCheckpoint, applicationState); } else { @@ -258,7 +276,6 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi this.sequenceNumberAtShardEnd = extendedSequenceNumber; } - /** * Internal API - has package level access only for testing purposes. 
* @@ -270,29 +287,35 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi * @throws InvalidStateException */ void advancePosition(String sequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { advancePosition(new ExtendedSequenceNumber(sequenceNumber)); } void advancePosition(ExtendedSequenceNumber extendedSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { ExtendedSequenceNumber checkpointToRecord = extendedSequenceNumber; if (sequenceNumberAtShardEnd != null && sequenceNumberAtShardEnd.equals(extendedSequenceNumber)) { // If we are about to checkpoint the very last sequence number for this shard, we might as well // just checkpoint at SHARD_END checkpointToRecord = ExtendedSequenceNumber.SHARD_END; } - + // Don't checkpoint a value we already successfully checkpointed if (extendedSequenceNumber != null && !extendedSequenceNumber.equals(lastCheckpointValue)) { try { if (log.isDebugEnabled()) { - log.debug("Setting {}, token {} checkpoint to {}", ShardInfo.getLeaseKey(shardInfo), - shardInfo.concurrencyToken(), checkpointToRecord); + log.debug( + "Setting {}, token {} checkpoint to {}", + ShardInfo.getLeaseKey(shardInfo), + shardInfo.concurrencyToken(), + checkpointToRecord); } - checkpointer.setCheckpoint(ShardInfo.getLeaseKey(shardInfo), checkpointToRecord, shardInfo.concurrencyToken()); + checkpointer.setCheckpoint( + ShardInfo.getLeaseKey(shardInfo), checkpointToRecord, shardInfo.concurrencyToken()); lastCheckpointValue = checkpointToRecord; - } catch (ThrottlingException | ShutdownException | InvalidStateException + } catch (ThrottlingException + | ShutdownException + | 
InvalidStateException | KinesisClientLibDependencyException e) { throw e; } catch (KinesisClientLibException e) { @@ -325,7 +348,8 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi * @throws ThrottlingException * @throws ShutdownException */ - private PreparedCheckpointer doPrepareCheckpoint(ExtendedSequenceNumber extendedSequenceNumber, byte[] applicationState) + private PreparedCheckpointer doPrepareCheckpoint( + ExtendedSequenceNumber extendedSequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException { ExtendedSequenceNumber newPrepareCheckpoint = extendedSequenceNumber; @@ -343,8 +367,14 @@ public class ShardRecordProcessorCheckpointer implements RecordProcessorCheckpoi } try { - checkpointer.prepareCheckpoint(ShardInfo.getLeaseKey(shardInfo), newPrepareCheckpoint, shardInfo.concurrencyToken(), applicationState); - } catch (ThrottlingException | ShutdownException | InvalidStateException + checkpointer.prepareCheckpoint( + ShardInfo.getLeaseKey(shardInfo), + newPrepareCheckpoint, + shardInfo.concurrencyToken(), + applicationState); + } catch (ThrottlingException + | ShutdownException + | InvalidStateException | KinesisClientLibDependencyException e) { throw e; } catch (KinesisClientLibException e) { diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointFactory.java index d200de84..74caae9b 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointFactory.java @@ -29,9 +29,8 @@ import software.amazon.kinesis.processor.Checkpointer; @KinesisClientInternalApi public class DynamoDBCheckpointFactory implements 
CheckpointFactory { @Override - public Checkpointer createCheckpointer(final LeaseCoordinator leaseLeaseCoordinator, - final LeaseRefresher leaseRefresher) { + public Checkpointer createCheckpointer( + final LeaseCoordinator leaseLeaseCoordinator, final LeaseRefresher leaseRefresher) { return new DynamoDBCheckpointer(leaseLeaseCoordinator, leaseRefresher); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointer.java index 1aa258bb..0a6a9607 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/checkpoint/dynamodb/DynamoDBCheckpointer.java @@ -19,7 +19,6 @@ import java.util.Objects; import java.util.UUID; import com.google.common.annotations.VisibleForTesting; - import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; @@ -48,14 +47,16 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; public class DynamoDBCheckpointer implements Checkpointer { @NonNull private final LeaseCoordinator leaseCoordinator; + @NonNull private final LeaseRefresher leaseRefresher; private String operation; @Override - public void setCheckpoint(final String leaseKey, final ExtendedSequenceNumber checkpointValue, - final String concurrencyToken) throws KinesisClientLibException { + public void setCheckpoint( + final String leaseKey, final ExtendedSequenceNumber checkpointValue, final String concurrencyToken) + throws KinesisClientLibException { try { boolean wasSuccessful = setCheckpoint(leaseKey, checkpointValue, UUID.fromString(concurrencyToken)); if (!wasSuccessful) { @@ -97,17 +98,22 @@ public class DynamoDBCheckpointer implements Checkpointer { } @Override - public void prepareCheckpoint(final String leaseKey, final 
ExtendedSequenceNumber pendingCheckpoint, - final String concurrencyToken) throws KinesisClientLibException { + public void prepareCheckpoint( + final String leaseKey, final ExtendedSequenceNumber pendingCheckpoint, final String concurrencyToken) + throws KinesisClientLibException { prepareCheckpoint(leaseKey, pendingCheckpoint, concurrencyToken, null); } @Override - public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken, - byte[] pendingCheckpointState) throws KinesisClientLibException { + public void prepareCheckpoint( + String leaseKey, + ExtendedSequenceNumber pendingCheckpoint, + String concurrencyToken, + byte[] pendingCheckpointState) + throws KinesisClientLibException { try { - boolean wasSuccessful = - prepareCheckpoint(leaseKey, pendingCheckpoint, UUID.fromString(concurrencyToken), pendingCheckpointState); + boolean wasSuccessful = prepareCheckpoint( + leaseKey, pendingCheckpoint, UUID.fromString(concurrencyToken), pendingCheckpointState); if (!wasSuccessful) { throw new ShutdownException( "Can't prepare checkpoint - instance doesn't hold the lease for this shard"); @@ -128,8 +134,10 @@ public class DynamoDBCheckpointer implements Checkpointer { throws DependencyException, InvalidStateException, ProvisionedThroughputException { Lease lease = leaseCoordinator.getCurrentlyHeldLease(leaseKey); if (lease == null) { - log.info("Worker {} could not update checkpoint for shard {} because it does not hold the lease", - leaseCoordinator.workerIdentifier(), leaseKey); + log.info( + "Worker {} could not update checkpoint for shard {} because it does not hold the lease", + leaseCoordinator.workerIdentifier(), + leaseKey); return false; } @@ -141,12 +149,18 @@ public class DynamoDBCheckpointer implements Checkpointer { return leaseCoordinator.updateLease(lease, concurrencyToken, operation, leaseKey); } - boolean prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, UUID concurrencyToken, 
byte[] pendingCheckpointState) + boolean prepareCheckpoint( + String leaseKey, + ExtendedSequenceNumber pendingCheckpoint, + UUID concurrencyToken, + byte[] pendingCheckpointState) throws DependencyException, InvalidStateException, ProvisionedThroughputException { Lease lease = leaseCoordinator.getCurrentlyHeldLease(leaseKey); if (lease == null) { - log.info("Worker {} could not prepare checkpoint for shard {} because it does not hold the lease", - leaseCoordinator.workerIdentifier(), leaseKey); + log.info( + "Worker {} could not prepare checkpoint for shard {} because it does not hold the lease", + leaseCoordinator.workerIdentifier(), + leaseKey); return false; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/CommonCalculations.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/CommonCalculations.java index edb6de2e..d7f33c23 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/CommonCalculations.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/CommonCalculations.java @@ -15,10 +15,8 @@ package software.amazon.kinesis.common; - public class CommonCalculations { - /** * Convenience method for calculating renewer intervals in milliseconds. 
* diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ConfigsBuilder.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ConfigsBuilder.java index 02258950..2838d62d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ConfigsBuilder.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/ConfigsBuilder.java @@ -19,12 +19,11 @@ import java.util.function.Function; import lombok.EqualsAndHashCode; import lombok.Getter; +import lombok.NonNull; import lombok.Setter; import lombok.ToString; -import org.apache.commons.lang3.StringUtils; - -import lombok.NonNull; import lombok.experimental.Accessors; +import org.apache.commons.lang3.StringUtils; import software.amazon.awssdk.arns.Arn; import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; @@ -35,9 +34,9 @@ import software.amazon.kinesis.coordinator.CoordinatorConfig; import software.amazon.kinesis.leases.LeaseManagementConfig; import software.amazon.kinesis.lifecycle.LifecycleConfig; import software.amazon.kinesis.metrics.MetricsConfig; +import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.ProcessorConfig; import software.amazon.kinesis.processor.ShardRecordProcessorFactory; -import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.SingleStreamTracker; import software.amazon.kinesis.processor.StreamTracker; import software.amazon.kinesis.retrieval.RetrievalConfig; @@ -45,7 +44,10 @@ import software.amazon.kinesis.retrieval.RetrievalConfig; /** * This Builder is useful to create all configurations for the KCL with default values. 
*/ -@Getter @Setter @ToString @EqualsAndHashCode +@Getter +@Setter +@ToString +@EqualsAndHashCode @Accessors(fluent = true) public class ConfigsBuilder { /** @@ -139,11 +141,16 @@ public class ConfigsBuilder { * @param workerIdentifier * @param shardRecordProcessorFactory */ - public ConfigsBuilder(@NonNull String streamName, @NonNull String applicationName, - @NonNull KinesisAsyncClient kinesisClient, @NonNull DynamoDbAsyncClient dynamoDBClient, - @NonNull CloudWatchAsyncClient cloudWatchClient, @NonNull String workerIdentifier, + public ConfigsBuilder( + @NonNull String streamName, + @NonNull String applicationName, + @NonNull KinesisAsyncClient kinesisClient, + @NonNull DynamoDbAsyncClient dynamoDBClient, + @NonNull CloudWatchAsyncClient cloudWatchClient, + @NonNull String workerIdentifier, @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) { - this(new SingleStreamTracker(streamName), + this( + new SingleStreamTracker(streamName), applicationName, kinesisClient, dynamoDBClient, @@ -163,11 +170,16 @@ public class ConfigsBuilder { * @param workerIdentifier * @param shardRecordProcessorFactory */ - public ConfigsBuilder(@NonNull Arn streamArn, @NonNull String applicationName, - @NonNull KinesisAsyncClient kinesisClient, @NonNull DynamoDbAsyncClient dynamoDBClient, - @NonNull CloudWatchAsyncClient cloudWatchClient, @NonNull String workerIdentifier, - @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) { - this(new SingleStreamTracker(streamArn), + public ConfigsBuilder( + @NonNull Arn streamArn, + @NonNull String applicationName, + @NonNull KinesisAsyncClient kinesisClient, + @NonNull DynamoDbAsyncClient dynamoDBClient, + @NonNull CloudWatchAsyncClient cloudWatchClient, + @NonNull String workerIdentifier, + @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) { + this( + new SingleStreamTracker(streamArn), applicationName, kinesisClient, dynamoDBClient, @@ -187,9 +199,13 @@ public class ConfigsBuilder { * @param 
workerIdentifier * @param shardRecordProcessorFactory */ - public ConfigsBuilder(@NonNull StreamTracker streamTracker, @NonNull String applicationName, - @NonNull KinesisAsyncClient kinesisClient, @NonNull DynamoDbAsyncClient dynamoDBClient, - @NonNull CloudWatchAsyncClient cloudWatchClient, @NonNull String workerIdentifier, + public ConfigsBuilder( + @NonNull StreamTracker streamTracker, + @NonNull String applicationName, + @NonNull KinesisAsyncClient kinesisClient, + @NonNull DynamoDbAsyncClient dynamoDBClient, + @NonNull CloudWatchAsyncClient cloudWatchClient, + @NonNull String workerIdentifier, @NonNull ShardRecordProcessorFactory shardRecordProcessorFactory) { this.applicationName = applicationName; this.kinesisClient = kinesisClient; @@ -209,8 +225,11 @@ public class ConfigsBuilder { public void streamTracker(StreamTracker streamTracker) { this.streamTracker = streamTracker; - this.appStreamTracker = DeprecationUtils.convert(streamTracker, - singleStreamTracker -> singleStreamTracker.streamConfigList().get(0).streamIdentifier().streamName()); + this.appStreamTracker = DeprecationUtils.convert(streamTracker, singleStreamTracker -> singleStreamTracker + .streamConfigList() + .get(0) + .streamIdentifier() + .streamName()); } /** diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DeprecationUtils.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DeprecationUtils.java index 5d8782e0..73ff0bff 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DeprecationUtils.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DeprecationUtils.java @@ -39,8 +39,7 @@ public final class DeprecationUtils { */ @Deprecated public static Either convert( - StreamTracker streamTracker, - Function converter) { + StreamTracker streamTracker, Function converter) { if (streamTracker instanceof MultiStreamTracker) { return Either.left((MultiStreamTracker) streamTracker); } else if 
(streamTracker instanceof SingleStreamTracker) { @@ -49,5 +48,4 @@ public final class DeprecationUtils { throw new IllegalArgumentException("Unhandled StreamTracker: " + streamTracker); } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DiagnosticUtils.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DiagnosticUtils.java index 37eea1a3..8178e4b4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DiagnosticUtils.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/DiagnosticUtils.java @@ -15,12 +15,12 @@ package software.amazon.kinesis.common; -import org.slf4j.Logger; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; - import java.time.Duration; import java.time.Instant; +import org.slf4j.Logger; +import software.amazon.kinesis.annotations.KinesisClientInternalApi; + import static software.amazon.kinesis.lifecycle.ShardConsumer.MAX_TIME_BETWEEN_REQUEST_RESPONSE; @KinesisClientInternalApi @@ -32,18 +32,22 @@ public class DiagnosticUtils { * @param enqueueTimestamp of the event submitted to the executor service * @param log Slf4j Logger from RecordPublisher to log the events */ - public static void takeDelayedDeliveryActionIfRequired(String resourceIdentifier, Instant enqueueTimestamp, Logger log) { - final long durationBetweenEnqueueAndAckInMillis = Duration - .between(enqueueTimestamp, Instant.now()).toMillis(); + public static void takeDelayedDeliveryActionIfRequired( + String resourceIdentifier, Instant enqueueTimestamp, Logger log) { + final long durationBetweenEnqueueAndAckInMillis = + Duration.between(enqueueTimestamp, Instant.now()).toMillis(); if (durationBetweenEnqueueAndAckInMillis > MAX_TIME_BETWEEN_REQUEST_RESPONSE / 3) { // The above condition logs the warn msg if the delivery time exceeds 11 seconds. log.warn( "{}: Record delivery time to shard consumer is high at {} millis. 
Check the ExecutorStateEvent logs" + " to see the state of the executor service. Also check if the RecordProcessor's processing " + "time is high. ", - resourceIdentifier, durationBetweenEnqueueAndAckInMillis); + resourceIdentifier, + durationBetweenEnqueueAndAckInMillis); } else if (log.isDebugEnabled()) { - log.debug("{}: Record delivery time to shard consumer is {} millis", resourceIdentifier, + log.debug( + "{}: Record delivery time to shard consumer is {} millis", + resourceIdentifier, durationBetweenEnqueueAndAckInMillis); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/FutureUtils.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/FutureUtils.java index 9410d6fd..3c104d8d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/FutureUtils.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/FutureUtils.java @@ -31,5 +31,4 @@ public class FutureUtils { throw te; } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/HashKeyRangeForLease.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/HashKeyRangeForLease.java index 30f8963a..8d52ec90 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/HashKeyRangeForLease.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/HashKeyRangeForLease.java @@ -15,14 +15,14 @@ package software.amazon.kinesis.common; +import java.math.BigInteger; + import lombok.NonNull; import lombok.Value; import lombok.experimental.Accessors; import org.apache.commons.lang3.Validate; import software.amazon.awssdk.services.kinesis.model.HashKeyRange; -import java.math.BigInteger; - /** * Lease POJO to hold the starting hashkey range and ending hashkey range of kinesis shards. 
*/ @@ -34,8 +34,11 @@ public class HashKeyRangeForLease { private final BigInteger endingHashKey; public HashKeyRangeForLease(BigInteger startingHashKey, BigInteger endingHashKey) { - Validate.isTrue(startingHashKey.compareTo(endingHashKey) < 0, - "StartingHashKey %s must be less than EndingHashKey %s ", startingHashKey, endingHashKey); + Validate.isTrue( + startingHashKey.compareTo(endingHashKey) < 0, + "StartingHashKey %s must be less than EndingHashKey %s ", + startingHashKey, + endingHashKey); this.startingHashKey = startingHashKey; this.endingHashKey = endingHashKey; } @@ -65,11 +68,15 @@ public class HashKeyRangeForLease { * @param endingHashKeyStr * @return HashKeyRangeForLease */ - public static HashKeyRangeForLease deserialize(@NonNull String startingHashKeyStr, @NonNull String endingHashKeyStr) { + public static HashKeyRangeForLease deserialize( + @NonNull String startingHashKeyStr, @NonNull String endingHashKeyStr) { final BigInteger startingHashKey = new BigInteger(startingHashKeyStr); final BigInteger endingHashKey = new BigInteger(endingHashKeyStr); - Validate.isTrue(startingHashKey.compareTo(endingHashKey) < 0, - "StartingHashKey %s must be less than EndingHashKey %s ", startingHashKeyStr, endingHashKeyStr); + Validate.isTrue( + startingHashKey.compareTo(endingHashKey) < 0, + "StartingHashKey %s must be less than EndingHashKey %s ", + startingHashKeyStr, + endingHashKeyStr); return new HashKeyRangeForLease(startingHashKey, endingHashKey); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStreamExtended.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStreamExtended.java index b3bedd88..5c512933 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStreamExtended.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/InitialPositionInStreamExtended.java @@ -14,16 +14,17 @@ */ package 
software.amazon.kinesis.common; +import java.util.Date; + import lombok.EqualsAndHashCode; import lombok.ToString; -import java.util.Date; - /** * Class that houses the entities needed to specify the position in the stream from where a new application should * start. */ -@ToString @EqualsAndHashCode +@ToString +@EqualsAndHashCode public class InitialPositionInStreamExtended { private final InitialPositionInStream position; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisClientUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisClientUtil.java index 45bd88e2..c2f3ca7d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisClientUtil.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisClientUtil.java @@ -15,14 +15,14 @@ package software.amazon.kinesis.common; +import java.time.Duration; + import software.amazon.awssdk.http.Protocol; import software.amazon.awssdk.http.nio.netty.Http2Configuration; import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.KinesisAsyncClientBuilder; -import java.time.Duration; - /** * Utility to setup KinesisAsyncClient to be used with KCL. 
*/ @@ -42,9 +42,12 @@ public class KinesisClientUtil { } public static KinesisAsyncClientBuilder adjustKinesisClientBuilder(KinesisAsyncClientBuilder builder) { - return builder.httpClientBuilder(NettyNioAsyncHttpClient.builder().maxConcurrency(Integer.MAX_VALUE) - .http2Configuration(Http2Configuration.builder().initialWindowSize(INITIAL_WINDOW_SIZE_BYTES) - .healthCheckPingPeriod(Duration.ofMillis(HEALTH_CHECK_PING_PERIOD_MILLIS)).build()) + return builder.httpClientBuilder(NettyNioAsyncHttpClient.builder() + .maxConcurrency(Integer.MAX_VALUE) + .http2Configuration(Http2Configuration.builder() + .initialWindowSize(INITIAL_WINDOW_SIZE_BYTES) + .healthCheckPingPeriod(Duration.ofMillis(HEALTH_CHECK_PING_PERIOD_MILLIS)) + .build()) .protocol(Protocol.HTTP2)); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisRequestsBuilder.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisRequestsBuilder.java index 52e16f3e..9ef43b8e 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisRequestsBuilder.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/KinesisRequestsBuilder.java @@ -63,12 +63,11 @@ public class KinesisRequestsBuilder { @SuppressWarnings("unchecked") private static T appendUserAgent(final T builder) { - return (T) builder - .overrideConfiguration( - AwsRequestOverrideConfiguration.builder() - .addApiName(ApiName.builder().name(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT) - .version(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT_VERSION).build()) + return (T) builder.overrideConfiguration(AwsRequestOverrideConfiguration.builder() + .addApiName(ApiName.builder() + .name(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT) + .version(RetrievalConfig.KINESIS_CLIENT_LIB_USER_AGENT_VERSION) + .build()) .build()); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/LeaseCleanupConfig.java 
b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/LeaseCleanupConfig.java index b2582d45..6c29c76b 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/LeaseCleanupConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/LeaseCleanupConfig.java @@ -24,7 +24,7 @@ import lombok.experimental.Accessors; */ @Builder @Getter -@Accessors(fluent=true) +@Accessors(fluent = true) public class LeaseCleanupConfig { /** * Interval at which to run lease cleanup thread. diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/RequestDetails.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/RequestDetails.java index 9f511123..108a12d4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/RequestDetails.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/RequestDetails.java @@ -15,11 +15,11 @@ package software.amazon.kinesis.common; -import lombok.experimental.Accessors; - import java.util.Optional; -@Accessors(fluent=true) +import lombok.experimental.Accessors; + +@Accessors(fluent = true) public class RequestDetails { /** @@ -62,6 +62,4 @@ public class RequestDetails { public String toString() { return String.format("request id - %s, timestamp - %s", getRequestId(), getTimestamp()); } - } - diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamConfig.java index 95ab0560..95772008 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamConfig.java @@ -28,8 +28,7 @@ import lombok.experimental.Accessors; public class StreamConfig { @NonNull private final StreamIdentifier streamIdentifier; + private final InitialPositionInStreamExtended initialPositionInStreamExtended; private 
String consumerArn; } - - diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamIdentifier.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamIdentifier.java index 82cef04b..2070a535 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamIdentifier.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/common/StreamIdentifier.java @@ -15,6 +15,10 @@ package software.amazon.kinesis.common; +import java.util.Optional; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + import lombok.AccessLevel; import lombok.Builder; import lombok.EqualsAndHashCode; @@ -25,10 +29,6 @@ import software.amazon.awssdk.arns.Arn; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.utils.Validate; -import java.util.Optional; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - @Builder(access = AccessLevel.PRIVATE) @EqualsAndHashCode @Getter @@ -37,10 +37,13 @@ public class StreamIdentifier { @Builder.Default private final Optional accountIdOptional = Optional.empty(); + @NonNull private final String streamName; + @Builder.Default private final Optional streamCreationEpochOptional = Optional.empty(); + @Builder.Default @EqualsAndHashCode.Exclude private final Optional streamArnOptional = Optional.empty(); @@ -49,8 +52,8 @@ public class StreamIdentifier { * Pattern for a serialized {@link StreamIdentifier}. The valid format is * {@code ::}. */ - private static final Pattern STREAM_IDENTIFIER_PATTERN = Pattern.compile( - "(?[0-9]+):(?[^:]+):(?[0-9]+)"); + private static final Pattern STREAM_IDENTIFIER_PATTERN = + Pattern.compile("(?[0-9]+):(?[^:]+):(?[0-9]+)"); /** * Pattern for a stream ARN. 
The valid format is @@ -74,8 +77,10 @@ public class StreamIdentifier { final char delimiter = ':'; final StringBuilder sb = new StringBuilder() - .append(accountIdOptional.get()).append(delimiter) - .append(streamName).append(delimiter) + .append(accountIdOptional.get()) + .append(delimiter) + .append(streamName) + .append(delimiter) .append(streamCreationEpochOptional.get()); return sb.toString(); } @@ -146,9 +151,7 @@ public class StreamIdentifier { public static StreamIdentifier singleStreamInstance(String streamName) { Validate.notEmpty(streamName, "StreamName should not be empty"); - return StreamIdentifier.builder() - .streamName(streamName) - .build(); + return StreamIdentifier.builder().streamName(streamName).build(); } /** @@ -173,7 +176,8 @@ public class StreamIdentifier { * @param streamArn */ public static void validateArn(Arn streamArn) { - if (!STREAM_ARN_PATTERN.matcher(streamArn.toString()).matches() || !streamArn.region().isPresent()) { + if (!STREAM_ARN_PATTERN.matcher(streamArn.toString()).matches() + || !streamArn.region().isPresent()) { throw new IllegalArgumentException("Invalid streamArn " + streamArn); } } @@ -185,9 +189,7 @@ public class StreamIdentifier { */ private static void validateCreationEpoch(long creationEpoch) { if (creationEpoch <= 0) { - throw new IllegalArgumentException( - "Creation epoch must be > 0; received " + creationEpoch); + throw new IllegalArgumentException("Creation epoch must be > 0; received " + creationEpoch); } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorConfig.java index d5c4dc13..e1835228 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/CoordinatorConfig.java @@ -96,5 +96,4 @@ public class CoordinatorConfig { *

    Default value: 1000L

    */ private long schedulerInitializationBackoffTimeMillis = 1000L; - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeletedStreamListProvider.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeletedStreamListProvider.java index d0d332d9..d4057999 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeletedStreamListProvider.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeletedStreamListProvider.java @@ -5,7 +5,6 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import lombok.extern.slf4j.Slf4j; - import software.amazon.kinesis.common.StreamIdentifier; /** diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDecider.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDecider.java index b06dba39..4c7f25da 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDecider.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDecider.java @@ -26,6 +26,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BooleanSupplier; import java.util.stream.Collectors; + import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.utils.CollectionUtils; import software.amazon.kinesis.leases.Lease; @@ -45,8 +46,7 @@ import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; * This ensures redundancy for shard-sync during host failures. 
*/ @Slf4j -class DeterministicShuffleShardSyncLeaderDecider - implements LeaderDecider { +class DeterministicShuffleShardSyncLeaderDecider implements LeaderDecider { // Fixed seed so that the shuffle order is preserved across workers static final int DETERMINISTIC_SHUFFLE_SEED = 1947; @@ -67,13 +67,11 @@ class DeterministicShuffleShardSyncLeaderDecider * @param leaderElectionThreadPool Thread-pool to be used for leaderElection. * @param numPeriodicShardSyncWorkers Number of leaders that will be elected to perform periodic shard syncs. */ - DeterministicShuffleShardSyncLeaderDecider(LeaseRefresher leaseRefresher, - ScheduledExecutorService leaderElectionThreadPool, - int numPeriodicShardSyncWorkers) { - this(leaseRefresher, - leaderElectionThreadPool, - numPeriodicShardSyncWorkers, - new ReentrantReadWriteLock()); + DeterministicShuffleShardSyncLeaderDecider( + LeaseRefresher leaseRefresher, + ScheduledExecutorService leaderElectionThreadPool, + int numPeriodicShardSyncWorkers) { + this(leaseRefresher, leaderElectionThreadPool, numPeriodicShardSyncWorkers, new ReentrantReadWriteLock()); } /** @@ -82,10 +80,11 @@ class DeterministicShuffleShardSyncLeaderDecider * @param numPeriodicShardSyncWorkers Number of leaders that will be elected to perform periodic shard syncs. 
* @param readWriteLock Mechanism to lock for reading and writing of critical components */ - DeterministicShuffleShardSyncLeaderDecider(LeaseRefresher leaseRefresher, - ScheduledExecutorService leaderElectionThreadPool, - int numPeriodicShardSyncWorkers, - ReadWriteLock readWriteLock) { + DeterministicShuffleShardSyncLeaderDecider( + LeaseRefresher leaseRefresher, + ScheduledExecutorService leaderElectionThreadPool, + int numPeriodicShardSyncWorkers, + ReadWriteLock readWriteLock) { this.leaseRefresher = leaseRefresher; this.leaderElectionThreadPool = leaderElectionThreadPool; this.numPeriodicShardSyncWorkers = numPeriodicShardSyncWorkers; @@ -101,8 +100,12 @@ class DeterministicShuffleShardSyncLeaderDecider try { log.debug("Started leader election at: " + Instant.now()); List leases = leaseRefresher.listLeases(); - List uniqueHosts = leases.stream().map(Lease::leaseOwner) - .filter(owner -> owner != null).distinct().sorted().collect(Collectors.toList()); + List uniqueHosts = leases.stream() + .map(Lease::leaseOwner) + .filter(owner -> owner != null) + .distinct() + .sorted() + .collect(Collectors.toList()); Collections.shuffle(uniqueHosts, new Random(DETERMINISTIC_SHUFFLE_SEED)); int numShardSyncWorkers = Math.min(uniqueHosts.size(), numPeriodicShardSyncWorkers); @@ -137,8 +140,11 @@ class DeterministicShuffleShardSyncLeaderDecider // The first run will be after a minute. // We don't need jitter since it is scheduled with a fixed delay and time taken to scan leases // will be different at different times and on different hosts/workers. 
- leaderElectionThreadPool.scheduleWithFixedDelay(this::electLeaders, ELECTION_INITIAL_DELAY_MILLIS, - ELECTION_SCHEDULING_INTERVAL_MILLIS, TimeUnit.MILLISECONDS); + leaderElectionThreadPool.scheduleWithFixedDelay( + this::electLeaders, + ELECTION_INITIAL_DELAY_MILLIS, + ELECTION_SCHEDULING_INTERVAL_MILLIS, + TimeUnit.MILLISECONDS); } return executeConditionCheckWithReadLock(() -> isWorkerLeaderForShardSync(workerId)); @@ -152,7 +158,8 @@ class DeterministicShuffleShardSyncLeaderDecider log.info("Successfully stopped leader election on the worker"); } else { leaderElectionThreadPool.shutdownNow(); - log.info(String.format("Stopped leader election thread after awaiting termination for %d milliseconds", + log.info(String.format( + "Stopped leader election thread after awaiting termination for %d milliseconds", AWAIT_TERMINATION_MILLIS)); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DiagnosticEventFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DiagnosticEventFactory.java index 316313aa..8fe61a94 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DiagnosticEventFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/DiagnosticEventFactory.java @@ -15,11 +15,11 @@ package software.amazon.kinesis.coordinator; +import java.util.concurrent.ExecutorService; + import lombok.NoArgsConstructor; import software.amazon.kinesis.leases.LeaseCoordinator; -import java.util.concurrent.ExecutorService; - /** * Creates {@link DiagnosticEvent}s for logging and visibility */ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/ExecutorStateEvent.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/ExecutorStateEvent.java index 33c83a5c..b0c0b55d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/ExecutorStateEvent.java +++ 
b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/ExecutorStateEvent.java @@ -15,15 +15,15 @@ package software.amazon.kinesis.coordinator; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadPoolExecutor; + import lombok.Getter; import lombok.ToString; import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.leases.LeaseCoordinator; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadPoolExecutor; - @Getter @ToString(exclude = "isThreadPoolExecutor") @Slf4j diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownContext.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownContext.java index e0ad229b..d805d6cd 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownContext.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownContext.java @@ -14,12 +14,12 @@ */ package software.amazon.kinesis.coordinator; +import java.util.concurrent.CountDownLatch; + import lombok.Builder; import lombok.Data; import lombok.experimental.Accessors; -import java.util.concurrent.CountDownLatch; - @Data @Builder @Accessors(fluent = true) diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinator.java index a55ea70f..af3a2dca 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinator.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinator.java @@ -14,13 +14,12 @@ */ package software.amazon.kinesis.coordinator; - -import lombok.extern.slf4j.Slf4j; - import java.util.concurrent.Callable; import 
java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; +import lombok.extern.slf4j.Slf4j; + class GracefulShutdownCoordinator { /** @@ -31,8 +30,11 @@ class GracefulShutdownCoordinator { CompletableFuture startGracefulShutdown(Callable shutdownCallable) { CompletableFuture cf = new CompletableFuture<>(); CompletableFuture.runAsync(() -> { - try { cf.complete(shutdownCallable.call()); } - catch(Throwable ex) { cf.completeExceptionally(ex); } + try { + cf.complete(shutdownCallable.call()); + } catch (Throwable ex) { + cf.completeExceptionally(ex); + } }); return cf; } @@ -50,7 +52,8 @@ class GracefulShutdownCoordinator { } private boolean isWorkerShutdownComplete(GracefulShutdownContext context) { - return context.scheduler().shutdownComplete() || context.scheduler().shardInfoShardConsumerMap().isEmpty(); + return context.scheduler().shutdownComplete() + || context.scheduler().shardInfoShardConsumerMap().isEmpty(); } private String awaitingLogMessage(GracefulShutdownContext context) { @@ -92,12 +95,14 @@ class GracefulShutdownCoordinator { throw new InterruptedException(); } log.info(awaitingLogMessage(context)); - if (workerShutdownWithRemaining(context.shutdownCompleteLatch().getCount(), context)) { + if (workerShutdownWithRemaining( + context.shutdownCompleteLatch().getCount(), context)) { return false; } } } catch (InterruptedException ie) { - log.warn("Interrupted while waiting for notification complete, terminating shutdown. {}", + log.warn( + "Interrupted while waiting for notification complete, terminating shutdown. 
{}", awaitingLogMessage(context)); return false; } @@ -129,12 +134,14 @@ class GracefulShutdownCoordinator { throw new InterruptedException(); } log.info(awaitingFinalShutdownMessage(context)); - if (workerShutdownWithRemaining(context.shutdownCompleteLatch().getCount(), context)) { + if (workerShutdownWithRemaining( + context.shutdownCompleteLatch().getCount(), context)) { return false; } } } catch (InterruptedException ie) { - log.warn("Interrupted while waiting for shutdown completion, terminating shutdown. {}", + log.warn( + "Interrupted while waiting for shutdown completion, terminating shutdown. {}", awaitingFinalShutdownMessage(context)); return false; } @@ -152,9 +159,12 @@ class GracefulShutdownCoordinator { private boolean workerShutdownWithRemaining(long outstanding, GracefulShutdownContext context) { if (isWorkerShutdownComplete(context)) { if (outstanding != 0) { - log.info("Shutdown completed, but shutdownCompleteLatch still had outstanding {} with a current" - + " value of {}. shutdownComplete: {} -- Consumer Map: {}", outstanding, - context.shutdownCompleteLatch().getCount(), context.scheduler().shutdownComplete(), + log.info( + "Shutdown completed, but shutdownCompleteLatch still had outstanding {} with a current" + + " value of {}. 
shutdownComplete: {} -- Consumer Map: {}", + outstanding, + context.shutdownCompleteLatch().getCount(), + context.scheduler().shutdownComplete(), context.scheduler().shardInfoShardConsumerMap().size()); return true; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/NoOpWorkerStateChangeListener.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/NoOpWorkerStateChangeListener.java index ec21e4f6..8accdfcb 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/NoOpWorkerStateChangeListener.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/NoOpWorkerStateChangeListener.java @@ -19,12 +19,8 @@ public class NoOpWorkerStateChangeListener implements WorkerStateChangeListener /** * Empty constructor for NoOp Worker State Change Listener */ - public NoOpWorkerStateChangeListener() { - - } + public NoOpWorkerStateChangeListener() {} @Override - public void onWorkerStateChange(WorkerState newState) { - - } + public void onWorkerStateChange(WorkerState newState) {} } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManager.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManager.java index 30282de4..ba258bc2 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManager.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManager.java @@ -14,6 +14,24 @@ */ package software.amazon.kinesis.coordinator; +import java.io.Serializable; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.Executors; +import 
java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.stream.Collectors; + import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ComparisonChain; import lombok.AccessLevel; @@ -46,24 +64,6 @@ import software.amazon.kinesis.metrics.MetricsLevel; import software.amazon.kinesis.metrics.MetricsScope; import software.amazon.kinesis.metrics.MetricsUtil; -import java.io.Serializable; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; -import java.util.stream.Collectors; - import static software.amazon.kinesis.common.HashKeyRangeForLease.fromHashKeyRange; /** @@ -76,10 +76,13 @@ import static software.amazon.kinesis.common.HashKeyRangeForLease.fromHashKeyRan @KinesisClientInternalApi class PeriodicShardSyncManager { private static final long INITIAL_DELAY = 60 * 1000L; + @VisibleForTesting static final BigInteger MIN_HASH_KEY = BigInteger.ZERO; + @VisibleForTesting static final BigInteger MAX_HASH_KEY = new BigInteger("2").pow(128).subtract(BigInteger.ONE); + static final String PERIODIC_SHARD_SYNC_MANAGER = "PeriodicShardSyncManager"; private final Map hashRangeHoleTrackerMap = new HashMap<>(); @@ -94,30 +97,48 @@ class PeriodicShardSyncManager { private final MetricsFactory metricsFactory; private final long leasesRecoveryAuditorExecutionFrequencyMillis; private final int leasesRecoveryAuditorInconsistencyConfidenceThreshold; + @Getter(AccessLevel.NONE) private final 
AtomicBoolean leaderSynced; + private boolean isRunning; - PeriodicShardSyncManager(String workerId, LeaderDecider leaderDecider, LeaseRefresher leaseRefresher, + PeriodicShardSyncManager( + String workerId, + LeaderDecider leaderDecider, + LeaseRefresher leaseRefresher, Map currentStreamConfigMap, Function shardSyncTaskManagerProvider, Map streamToShardSyncTaskManagerMap, - boolean isMultiStreamingMode, MetricsFactory metricsFactory, + boolean isMultiStreamingMode, + MetricsFactory metricsFactory, long leasesRecoveryAuditorExecutionFrequencyMillis, int leasesRecoveryAuditorInconsistencyConfidenceThreshold, - AtomicBoolean leaderSynced){ - this(workerId, leaderDecider, leaseRefresher, currentStreamConfigMap, shardSyncTaskManagerProvider, + AtomicBoolean leaderSynced) { + this( + workerId, + leaderDecider, + leaseRefresher, + currentStreamConfigMap, + shardSyncTaskManagerProvider, streamToShardSyncTaskManagerMap, - Executors.newSingleThreadScheduledExecutor(), isMultiStreamingMode, metricsFactory, - leasesRecoveryAuditorExecutionFrequencyMillis, leasesRecoveryAuditorInconsistencyConfidenceThreshold, + Executors.newSingleThreadScheduledExecutor(), + isMultiStreamingMode, + metricsFactory, + leasesRecoveryAuditorExecutionFrequencyMillis, + leasesRecoveryAuditorInconsistencyConfidenceThreshold, leaderSynced); } - PeriodicShardSyncManager(String workerId, LeaderDecider leaderDecider, LeaseRefresher leaseRefresher, + PeriodicShardSyncManager( + String workerId, + LeaderDecider leaderDecider, + LeaseRefresher leaseRefresher, Map currentStreamConfigMap, Function shardSyncTaskManagerProvider, Map streamToShardSyncTaskManagerMap, - ScheduledExecutorService shardSyncThreadPool, boolean isMultiStreamingMode, + ScheduledExecutorService shardSyncThreadPool, + boolean isMultiStreamingMode, MetricsFactory metricsFactory, long leasesRecoveryAuditorExecutionFrequencyMillis, int leasesRecoveryAuditorInconsistencyConfidenceThreshold, @@ -134,7 +155,8 @@ class PeriodicShardSyncManager 
{ this.isMultiStreamingMode = isMultiStreamingMode; this.metricsFactory = metricsFactory; this.leasesRecoveryAuditorExecutionFrequencyMillis = leasesRecoveryAuditorExecutionFrequencyMillis; - this.leasesRecoveryAuditorInconsistencyConfidenceThreshold = leasesRecoveryAuditorInconsistencyConfidenceThreshold; + this.leasesRecoveryAuditorInconsistencyConfidenceThreshold = + leasesRecoveryAuditorInconsistencyConfidenceThreshold; this.leaderSynced = leaderSynced; } @@ -147,10 +169,12 @@ class PeriodicShardSyncManager { log.error("Error during runShardSync.", t); } }; - shardSyncThreadPool.scheduleWithFixedDelay(periodicShardSyncer, INITIAL_DELAY, leasesRecoveryAuditorExecutionFrequencyMillis, + shardSyncThreadPool.scheduleWithFixedDelay( + periodicShardSyncer, + INITIAL_DELAY, + leasesRecoveryAuditorExecutionFrequencyMillis, TimeUnit.MILLISECONDS); isRunning = true; - } return new TaskResult(null); } @@ -186,8 +210,8 @@ class PeriodicShardSyncManager { if (leaderDecider.isLeader(workerId) && leaderSynced.get()) { log.info(String.format("WorkerId %s is leader, running the periodic shard sync task", workerId)); - final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, - PERIODIC_SHARD_SYNC_MANAGER); + final MetricsScope scope = + MetricsUtil.createMetricsWithOperation(metricsFactory, PERIODIC_SHARD_SYNC_MANAGER); int numStreamsWithPartialLeases = 0; int numStreamsToSync = 0; int numSkippedShardSyncTask = 0; @@ -207,15 +231,17 @@ class PeriodicShardSyncManager { log.info("Skipping shard sync task for {} as stream is purged", streamIdentifier); continue; } - final ShardSyncResponse shardSyncResponse = checkForShardSync(streamIdentifier, - streamToLeasesMap.get(streamIdentifier)); + final ShardSyncResponse shardSyncResponse = + checkForShardSync(streamIdentifier, streamToLeasesMap.get(streamIdentifier)); numStreamsWithPartialLeases += shardSyncResponse.isHoleDetected() ? 1 : 0; numStreamsToSync += shardSyncResponse.shouldDoShardSync ? 
1 : 0; if (shardSyncResponse.shouldDoShardSync()) { - log.info("Periodic shard syncer initiating shard sync for {} due to the reason - {} ", - streamIdentifier, shardSyncResponse.reasonForDecision()); + log.info( + "Periodic shard syncer initiating shard sync for {} due to the reason - {} ", + streamIdentifier, + shardSyncResponse.reasonForDecision()); final StreamConfig streamConfig = currentStreamConfigMap.get(streamIdentifier); if (streamConfig == null) { log.info("Skipping shard sync task for {} as stream is purged", streamIdentifier); @@ -223,15 +249,15 @@ class PeriodicShardSyncManager { } final ShardSyncTaskManager shardSyncTaskManager; if (streamToShardSyncTaskManagerMap.containsKey(streamConfig)) { - log.info("shardSyncTaskManager for stream {} already exists", - streamIdentifier.streamName()); + log.info( + "shardSyncTaskManager for stream {} already exists", streamIdentifier.streamName()); shardSyncTaskManager = streamToShardSyncTaskManagerMap.get(streamConfig); - } - else { + } else { // If streamConfig of a stream has already been added to currentStreamConfigMap but // Scheduler failed to create shardSyncTaskManager for it, then Scheduler will not try // to create one later. So enable PeriodicShardSyncManager to do it for such cases - log.info("Failed to get shardSyncTaskManager so creating one for stream {}.", + log.info( + "Failed to get shardSyncTaskManager so creating one for stream {}.", streamIdentifier.streamName()); shardSyncTaskManager = streamToShardSyncTaskManagerMap.computeIfAbsent( streamConfig, s -> shardSyncTaskManagerProvider.apply(s)); @@ -239,15 +265,24 @@ class PeriodicShardSyncManager { if (!shardSyncTaskManager.submitShardSyncTask()) { log.warn( "Failed to submit shard sync task for stream {}. 
This could be due to the previous pending shard sync task.", - shardSyncTaskManager.shardDetector().streamIdentifier().streamName()); + shardSyncTaskManager + .shardDetector() + .streamIdentifier() + .streamName()); numSkippedShardSyncTask += 1; } else { - log.info("Submitted shard sync task for stream {} because of reason {}", - shardSyncTaskManager.shardDetector().streamIdentifier().streamName(), + log.info( + "Submitted shard sync task for stream {} because of reason {}", + shardSyncTaskManager + .shardDetector() + .streamIdentifier() + .streamName(), shardSyncResponse.reasonForDecision()); } } else { - log.info("Skipping shard sync for {} due to the reason - {}", streamIdentifier, + log.info( + "Skipping shard sync for {} due to the reason - {}", + streamIdentifier, shardSyncResponse.reasonForDecision()); } } @@ -255,9 +290,14 @@ class PeriodicShardSyncManager { } catch (Exception e) { log.error("Caught exception while running periodic shard syncer.", e); } finally { - scope.addData("NumStreamsWithPartialLeases", numStreamsWithPartialLeases, StandardUnit.COUNT, MetricsLevel.SUMMARY); + scope.addData( + "NumStreamsWithPartialLeases", + numStreamsWithPartialLeases, + StandardUnit.COUNT, + MetricsLevel.SUMMARY); scope.addData("NumStreamsToSync", numStreamsToSync, StandardUnit.COUNT, MetricsLevel.SUMMARY); - scope.addData("NumSkippedShardSyncTask", numSkippedShardSyncTask, StandardUnit.COUNT, MetricsLevel.SUMMARY); + scope.addData( + "NumSkippedShardSyncTask", numSkippedShardSyncTask, StandardUnit.COUNT, MetricsLevel.SUMMARY); MetricsUtil.addSuccessAndLatency(scope, isRunSuccess, runStartMillis, MetricsLevel.SUMMARY); scope.end(); } @@ -284,17 +324,18 @@ class PeriodicShardSyncManager { } else { final Map> streamToLeasesMap = new HashMap<>(); for (Lease lease : leases) { - StreamIdentifier streamIdentifier = StreamIdentifier - .multiStreamInstance(((MultiStreamLease) lease).streamIdentifier()); + StreamIdentifier streamIdentifier = + 
StreamIdentifier.multiStreamInstance(((MultiStreamLease) lease).streamIdentifier()); if (streamIdentifiersToFilter.contains(streamIdentifier)) { - streamToLeasesMap.computeIfAbsent(streamIdentifier, s -> new ArrayList<>()).add(lease); + streamToLeasesMap + .computeIfAbsent(streamIdentifier, s -> new ArrayList<>()) + .add(lease); } } return streamToLeasesMap; } } - /** * Given a list of leases for a stream, determine if a shard sync is necessary. * @param streamIdentifier @@ -315,11 +356,13 @@ class PeriodicShardSyncManager { // If hole is determined with high confidence return true; return false otherwise // We are using the high confidence factor to avoid shard sync on any holes during resharding and // lease cleanups or any intermittent issues. - final HashRangeHoleTracker hashRangeHoleTracker = hashRangeHoleTrackerMap - .computeIfAbsent(streamIdentifier, s -> new HashRangeHoleTracker()); - final boolean hasHoleWithHighConfidence = hashRangeHoleTracker - .hasHighConfidenceOfHoleWith(hashRangeHoleOpt.get()); - return new ShardSyncResponse(hasHoleWithHighConfidence, true, + final HashRangeHoleTracker hashRangeHoleTracker = + hashRangeHoleTrackerMap.computeIfAbsent(streamIdentifier, s -> new HashRangeHoleTracker()); + final boolean hasHoleWithHighConfidence = + hashRangeHoleTracker.hasHighConfidenceOfHoleWith(hashRangeHoleOpt.get()); + return new ShardSyncResponse( + hasHoleWithHighConfidence, + true, "Detected same hole for " + hashRangeHoleTracker.getNumConsecutiveHoles() + " times. Shard sync will be initiated when threshold reaches " + leasesRecoveryAuditorInconsistencyConfidenceThreshold); @@ -355,7 +398,9 @@ class PeriodicShardSyncManager { Optional hasHoleInLeases(StreamIdentifier streamIdentifier, List leases) { // Filter the leases with any checkpoint other than shard end. 
List activeLeases = leases.stream() - .filter(lease -> lease.checkpoint() != null && !lease.checkpoint().isShardEnd()).collect(Collectors.toList()); + .filter(lease -> + lease.checkpoint() != null && !lease.checkpoint().isShardEnd()) + .collect(Collectors.toList()); List activeLeasesWithHashRanges = fillWithHashRangesIfRequired(streamIdentifier, activeLeases); return checkForHoleInHashKeyRanges(streamIdentifier, activeLeasesWithHashRanges); } @@ -364,43 +409,49 @@ class PeriodicShardSyncManager { // by learning from kinesis shards. private List fillWithHashRangesIfRequired(StreamIdentifier streamIdentifier, List activeLeases) { List activeLeasesWithNoHashRanges = activeLeases.stream() - .filter(lease -> lease.hashKeyRangeForLease() == null).collect(Collectors.toList()); + .filter(lease -> lease.hashKeyRangeForLease() == null) + .collect(Collectors.toList()); Optional minLeaseOpt = activeLeasesWithNoHashRanges.stream().min(Comparator.comparing(Lease::leaseKey)); if (minLeaseOpt.isPresent()) { // TODO : use minLease for new ListShards with startingShardId final Lease minLease = minLeaseOpt.get(); final ShardDetector shardDetector = shardSyncTaskManagerProvider - .apply(currentStreamConfigMap.get(streamIdentifier)).shardDetector(); - final Map kinesisShards = shardDetector.listShards().stream() - .collect(Collectors.toMap(Shard::shardId, shard -> shard)); - return activeLeases.stream().map(lease -> { - if (lease.hashKeyRangeForLease() == null) { - final String shardId = lease instanceof MultiStreamLease ? 
- ((MultiStreamLease) lease).shardId() : - lease.leaseKey(); - final Shard shard = kinesisShards.get(shardId); - if (shard == null) { + .apply(currentStreamConfigMap.get(streamIdentifier)) + .shardDetector(); + final Map kinesisShards = + shardDetector.listShards().stream().collect(Collectors.toMap(Shard::shardId, shard -> shard)); + return activeLeases.stream() + .map(lease -> { + if (lease.hashKeyRangeForLease() == null) { + final String shardId = lease instanceof MultiStreamLease + ? ((MultiStreamLease) lease).shardId() + : lease.leaseKey(); + final Shard shard = kinesisShards.get(shardId); + if (shard == null) { + return lease; + } + lease.hashKeyRange(fromHashKeyRange(shard.hashKeyRange())); + try { + leaseRefresher.updateLeaseWithMetaInfo(lease, UpdateField.HASH_KEY_RANGE); + } catch (Exception e) { + log.warn( + "Unable to update hash range key information for lease {} of stream {}. This may result in explicit lease sync.", + lease.leaseKey(), + streamIdentifier); + } + } return lease; - } - lease.hashKeyRange(fromHashKeyRange(shard.hashKeyRange())); - try { - leaseRefresher.updateLeaseWithMetaInfo(lease, UpdateField.HASH_KEY_RANGE); - } catch (Exception e) { - log.warn( - "Unable to update hash range key information for lease {} of stream {}. This may result in explicit lease sync.", - lease.leaseKey(), streamIdentifier); - } - } - return lease; - }).filter(lease -> lease.hashKeyRangeForLease() != null).collect(Collectors.toList()); + }) + .filter(lease -> lease.hashKeyRangeForLease() != null) + .collect(Collectors.toList()); } else { return activeLeases; } } @VisibleForTesting - static Optional checkForHoleInHashKeyRanges(StreamIdentifier streamIdentifier, - List leasesWithHashKeyRanges) { + static Optional checkForHoleInHashKeyRanges( + StreamIdentifier streamIdentifier, List leasesWithHashKeyRanges) { // Sort the hash ranges by starting hash key. 
List sortedLeasesWithHashKeyRanges = sortLeasesByHashRange(leasesWithHashKeyRanges); if (sortedLeasesWithHashKeyRanges.isEmpty()) { @@ -408,34 +459,54 @@ class PeriodicShardSyncManager { return Optional.of(new HashRangeHole()); } // Validate for hashranges bounds. - if (!sortedLeasesWithHashKeyRanges.get(0).hashKeyRangeForLease().startingHashKey().equals(MIN_HASH_KEY) || !sortedLeasesWithHashKeyRanges - .get(sortedLeasesWithHashKeyRanges.size() - 1).hashKeyRangeForLease().endingHashKey().equals(MAX_HASH_KEY)) { - log.error("Incomplete hash range found for stream {} between {} and {}.", streamIdentifier, + if (!sortedLeasesWithHashKeyRanges + .get(0) + .hashKeyRangeForLease() + .startingHashKey() + .equals(MIN_HASH_KEY) + || !sortedLeasesWithHashKeyRanges + .get(sortedLeasesWithHashKeyRanges.size() - 1) + .hashKeyRangeForLease() + .endingHashKey() + .equals(MAX_HASH_KEY)) { + log.error( + "Incomplete hash range found for stream {} between {} and {}.", + streamIdentifier, sortedLeasesWithHashKeyRanges.get(0), sortedLeasesWithHashKeyRanges.get(sortedLeasesWithHashKeyRanges.size() - 1)); - return Optional.of(new HashRangeHole(sortedLeasesWithHashKeyRanges.get(0).hashKeyRangeForLease(), - sortedLeasesWithHashKeyRanges.get(sortedLeasesWithHashKeyRanges.size() - 1).hashKeyRangeForLease())); + return Optional.of(new HashRangeHole( + sortedLeasesWithHashKeyRanges.get(0).hashKeyRangeForLease(), + sortedLeasesWithHashKeyRanges + .get(sortedLeasesWithHashKeyRanges.size() - 1) + .hashKeyRangeForLease())); } // Check for any holes in the sorted hashrange intervals. 
if (sortedLeasesWithHashKeyRanges.size() > 1) { Lease leftMostLeaseToReportInCaseOfHole = sortedLeasesWithHashKeyRanges.get(0); HashKeyRangeForLease leftLeaseHashRange = leftMostLeaseToReportInCaseOfHole.hashKeyRangeForLease(); for (int i = 1; i < sortedLeasesWithHashKeyRanges.size(); i++) { - final HashKeyRangeForLease rightLeaseHashRange = sortedLeasesWithHashKeyRanges.get(i).hashKeyRangeForLease(); - final BigInteger rangeDiff = rightLeaseHashRange.startingHashKey().subtract(leftLeaseHashRange.endingHashKey()); + final HashKeyRangeForLease rightLeaseHashRange = + sortedLeasesWithHashKeyRanges.get(i).hashKeyRangeForLease(); + final BigInteger rangeDiff = + rightLeaseHashRange.startingHashKey().subtract(leftLeaseHashRange.endingHashKey()); // Case of overlapping leases when the rangediff is 0 or negative. // signum() will be -1 for negative and 0 if value is 0. // Merge the range for further tracking. if (rangeDiff.signum() <= 0) { - leftLeaseHashRange = new HashKeyRangeForLease(leftLeaseHashRange.startingHashKey(), + leftLeaseHashRange = new HashKeyRangeForLease( + leftLeaseHashRange.startingHashKey(), leftLeaseHashRange.endingHashKey().max(rightLeaseHashRange.endingHashKey())); } else { // Case of non overlapping leases when rangediff is positive. signum() will be 1 for positive. // If rangeDiff is 1, then it is a case of continuous hashrange. If not, it is a hole. 
if (!rangeDiff.equals(BigInteger.ONE)) { - log.error("Incomplete hash range found for {} between {} and {}.", streamIdentifier, - leftMostLeaseToReportInCaseOfHole, sortedLeasesWithHashKeyRanges.get(i)); - return Optional.of(new HashRangeHole(leftMostLeaseToReportInCaseOfHole.hashKeyRangeForLease(), + log.error( + "Incomplete hash range found for {} between {} and {}.", + streamIdentifier, + leftMostLeaseToReportInCaseOfHole, + sortedLeasesWithHashKeyRanges.get(i)); + return Optional.of(new HashRangeHole( + leftMostLeaseToReportInCaseOfHole.hashKeyRangeForLease(), sortedLeasesWithHashKeyRanges.get(i).hashKeyRangeForLease())); } leftMostLeaseToReportInCaseOfHole = sortedLeasesWithHashKeyRanges.get(i); @@ -461,7 +532,9 @@ class PeriodicShardSyncManager { hashRangeAtStartOfPossibleHole = hashRangeAtEndOfPossibleHole = null; } - HashRangeHole(HashKeyRangeForLease hashRangeAtStartOfPossibleHole, HashKeyRangeForLease hashRangeAtEndOfPossibleHole) { + HashRangeHole( + HashKeyRangeForLease hashRangeAtStartOfPossibleHole, + HashKeyRangeForLease hashRangeAtEndOfPossibleHole) { this.hashRangeAtStartOfPossibleHole = hashRangeAtStartOfPossibleHole; this.hashRangeAtEndOfPossibleHole = hashRangeAtEndOfPossibleHole; } @@ -472,6 +545,7 @@ class PeriodicShardSyncManager { private class HashRangeHoleTracker { private HashRangeHole hashRangeHole; + @Getter private Integer numConsecutiveHoles; @@ -500,8 +574,12 @@ class PeriodicShardSyncManager { Validate.notNull(lease.hashKeyRangeForLease()); Validate.notNull(otherLease.hashKeyRangeForLease()); return ComparisonChain.start() - .compare(lease.hashKeyRangeForLease().startingHashKey(), otherLease.hashKeyRangeForLease().startingHashKey()) - .compare(lease.hashKeyRangeForLease().endingHashKey(), otherLease.hashKeyRangeForLease().endingHashKey()) + .compare( + lease.hashKeyRangeForLease().startingHashKey(), + otherLease.hashKeyRangeForLease().startingHashKey()) + .compare( + lease.hashKeyRangeForLease().endingHashKey(), + 
otherLease.hashKeyRangeForLease().endingHashKey()) .result(); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/RejectedTaskEvent.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/RejectedTaskEvent.java index 7dc8dfaf..df8445e1 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/RejectedTaskEvent.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/RejectedTaskEvent.java @@ -25,8 +25,8 @@ import software.amazon.kinesis.annotations.KinesisClientInternalApi; @Slf4j @KinesisClientInternalApi class RejectedTaskEvent implements DiagnosticEvent { - private static final String MESSAGE = "Review your thread configuration to prevent task rejections. " + - "Task rejections will slow down your application and some shards may stop processing. "; + private static final String MESSAGE = "Review your thread configuration to prevent task rejections. " + + "Task rejections will slow down your application and some shards may stop processing. 
"; private ExecutorStateEvent executorStateEvent; private Throwable throwable; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java index e4e63078..a163dfdc 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/Scheduler.java @@ -15,10 +15,6 @@ package software.amazon.kinesis.coordinator; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Stopwatch; - -import io.reactivex.rxjava3.plugins.RxJavaPlugins; import java.time.Duration; import java.time.Instant; import java.util.ArrayList; @@ -45,6 +41,10 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Stopwatch; +import io.reactivex.rxjava3.plugins.RxJavaPlugins; import lombok.AccessLevel; import lombok.Getter; import lombok.NoArgsConstructor; @@ -188,6 +188,7 @@ public class Scheduler implements Runnable { private final Stopwatch streamSyncWatch = Stopwatch.createUnstarted(); private boolean leasesSyncedOnAppInit = false; + @Getter(AccessLevel.NONE) private final AtomicBoolean leaderSynced = new AtomicBoolean(false); @@ -206,15 +207,23 @@ public class Scheduler implements Runnable { @VisibleForTesting protected boolean gracefuleShutdownStarted = false; - public Scheduler(@NonNull final CheckpointConfig checkpointConfig, - @NonNull final CoordinatorConfig coordinatorConfig, - @NonNull final LeaseManagementConfig leaseManagementConfig, - @NonNull final LifecycleConfig lifecycleConfig, - @NonNull final MetricsConfig metricsConfig, - @NonNull final ProcessorConfig processorConfig, - @NonNull final RetrievalConfig retrievalConfig) { - 
this(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, metricsConfig, - processorConfig, retrievalConfig, new DiagnosticEventFactory()); + public Scheduler( + @NonNull final CheckpointConfig checkpointConfig, + @NonNull final CoordinatorConfig coordinatorConfig, + @NonNull final LeaseManagementConfig leaseManagementConfig, + @NonNull final LifecycleConfig lifecycleConfig, + @NonNull final MetricsConfig metricsConfig, + @NonNull final ProcessorConfig processorConfig, + @NonNull final RetrievalConfig retrievalConfig) { + this( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig, + new DiagnosticEventFactory()); } /** @@ -222,14 +231,15 @@ public class Scheduler implements Runnable { * is desired for testing. This constructor is only used for testing to provide a mock DiagnosticEventFactory. */ @VisibleForTesting - protected Scheduler(@NonNull final CheckpointConfig checkpointConfig, - @NonNull final CoordinatorConfig coordinatorConfig, - @NonNull final LeaseManagementConfig leaseManagementConfig, - @NonNull final LifecycleConfig lifecycleConfig, - @NonNull final MetricsConfig metricsConfig, - @NonNull final ProcessorConfig processorConfig, - @NonNull final RetrievalConfig retrievalConfig, - @NonNull final DiagnosticEventFactory diagnosticEventFactory) { + protected Scheduler( + @NonNull final CheckpointConfig checkpointConfig, + @NonNull final CoordinatorConfig coordinatorConfig, + @NonNull final LeaseManagementConfig leaseManagementConfig, + @NonNull final LifecycleConfig lifecycleConfig, + @NonNull final MetricsConfig metricsConfig, + @NonNull final ProcessorConfig processorConfig, + @NonNull final RetrievalConfig retrievalConfig, + @NonNull final DiagnosticEventFactory diagnosticEventFactory) { this.checkpointConfig = checkpointConfig; this.coordinatorConfig = coordinatorConfig; this.leaseManagementConfig = leaseManagementConfig; @@ -242,16 +252,14 @@ 
public class Scheduler implements Runnable { this.streamTracker = retrievalConfig.streamTracker(); this.isMultiStreamMode = streamTracker.isMultiStream(); this.formerStreamsLeasesDeletionStrategy = streamTracker.formerStreamsLeasesDeletionStrategy(); - streamTracker.streamConfigList().forEach( - sc -> currentStreamConfigMap.put(sc.streamIdentifier(), sc)); + streamTracker.streamConfigList().forEach(sc -> currentStreamConfigMap.put(sc.streamIdentifier(), sc)); log.info("Initial state: {}", currentStreamConfigMap.values()); this.maxInitializationAttempts = this.coordinatorConfig.maxInitializationAttempts(); this.metricsFactory = this.metricsConfig.metricsFactory(); // Determine leaseSerializer based on availability of MultiStreamTracker. - final LeaseSerializer leaseSerializer = isMultiStreamMode ? - new DynamoDBMultiStreamLeaseSerializer() : - new DynamoDBLeaseSerializer(); + final LeaseSerializer leaseSerializer = + isMultiStreamMode ? new DynamoDBMultiStreamLeaseSerializer() : new DynamoDBLeaseSerializer(); this.leaseCoordinator = this.leaseManagementConfig .leaseManagementFactory(leaseSerializer, isMultiStreamMode) .createLeaseCoordinator(this.metricsFactory); @@ -260,8 +268,9 @@ public class Scheduler implements Runnable { // // TODO: Figure out what to do with lease manage <=> checkpoint relationship // - this.checkpoint = this.checkpointConfig.checkpointFactory().createCheckpointer(this.leaseCoordinator, - this.leaseRefresher); + this.checkpoint = this.checkpointConfig + .checkpointFactory() + .createCheckpointer(this.leaseCoordinator, this.leaseRefresher); // // TODO: Move this configuration to lifecycle @@ -282,38 +291,46 @@ public class Scheduler implements Runnable { if (coordinatorConfig.gracefulShutdownCoordinator() != null) { this.gracefulShutdownCoordinator = coordinatorConfig.gracefulShutdownCoordinator(); } else { - this.gracefulShutdownCoordinator = this.coordinatorConfig.coordinatorFactory() - .createGracefulShutdownCoordinator(); + 
this.gracefulShutdownCoordinator = + this.coordinatorConfig.coordinatorFactory().createGracefulShutdownCoordinator(); } if (coordinatorConfig.workerStateChangeListener() != null) { this.workerStateChangeListener = coordinatorConfig.workerStateChangeListener(); } else { - this.workerStateChangeListener = this.coordinatorConfig.coordinatorFactory() - .createWorkerStateChangeListener(); + this.workerStateChangeListener = + this.coordinatorConfig.coordinatorFactory().createWorkerStateChangeListener(); } - this.leaderDecider = new DeterministicShuffleShardSyncLeaderDecider(leaseRefresher, - Executors.newSingleThreadScheduledExecutor(), - PERIODIC_SHARD_SYNC_MAX_WORKERS_DEFAULT); + this.leaderDecider = new DeterministicShuffleShardSyncLeaderDecider( + leaseRefresher, Executors.newSingleThreadScheduledExecutor(), PERIODIC_SHARD_SYNC_MAX_WORKERS_DEFAULT); this.failoverTimeMillis = this.leaseManagementConfig.failoverTimeMillis(); this.taskBackoffTimeMillis = this.lifecycleConfig.taskBackoffTimeMillis(); this.listShardsBackoffTimeMillis = this.retrievalConfig.listShardsBackoffTimeInMillis(); this.maxListShardsRetryAttempts = this.retrievalConfig.maxListShardsRetryAttempts(); - this.shardDetectorProvider = streamConfig -> createOrGetShardSyncTaskManager(streamConfig).shardDetector(); + this.shardDetectorProvider = + streamConfig -> createOrGetShardSyncTaskManager(streamConfig).shardDetector(); this.ignoreUnexpetedChildShards = this.leaseManagementConfig.ignoreUnexpectedChildShards(); this.aggregatorUtil = this.lifecycleConfig.aggregatorUtil(); - this.hierarchicalShardSyncerProvider = streamConfig -> createOrGetShardSyncTaskManager(streamConfig).hierarchicalShardSyncer(); - this.schedulerInitializationBackoffTimeMillis = this.coordinatorConfig.schedulerInitializationBackoffTimeMillis(); + this.hierarchicalShardSyncerProvider = + streamConfig -> createOrGetShardSyncTaskManager(streamConfig).hierarchicalShardSyncer(); + this.schedulerInitializationBackoffTimeMillis = + 
this.coordinatorConfig.schedulerInitializationBackoffTimeMillis(); this.leaderElectedPeriodicShardSyncManager = new PeriodicShardSyncManager( - leaseManagementConfig.workerIdentifier(), leaderDecider, leaseRefresher, currentStreamConfigMap, - shardSyncTaskManagerProvider, streamToShardSyncTaskManagerMap, isMultiStreamMode, metricsFactory, + leaseManagementConfig.workerIdentifier(), + leaderDecider, + leaseRefresher, + currentStreamConfigMap, + shardSyncTaskManagerProvider, + streamToShardSyncTaskManagerMap, + isMultiStreamMode, + metricsFactory, leaseManagementConfig.leasesRecoveryAuditorExecutionFrequencyMillis(), leaseManagementConfig.leasesRecoveryAuditorInconsistencyConfidenceThreshold(), leaderSynced); - this.leaseCleanupManager = this.leaseManagementConfig.leaseManagementFactory(leaseSerializer, isMultiStreamMode) + this.leaseCleanupManager = this.leaseManagementConfig + .leaseManagementFactory(leaseSerializer, isMultiStreamMode) .createLeaseCleanupManager(metricsFactory); - this.schemaRegistryDecoder = - this.retrievalConfig.glueSchemaRegistryDeserializer() == null ? - null + this.schemaRegistryDecoder = this.retrievalConfig.glueSchemaRegistryDeserializer() == null + ? 
null : new SchemaRegistryDecoder(this.retrievalConfig.glueSchemaRegistryDeserializer()); } @@ -357,9 +374,10 @@ public class Scheduler implements Runnable { if (!skipShardSyncAtWorkerInitializationIfLeasesExist || leaseRefresher.isLeaseTableEmpty()) { if (shouldInitiateLeaseSync()) { - log.info("Worker {} is initiating the lease sync.", leaseManagementConfig.workerIdentifier()); + log.info( + "Worker {} is initiating the lease sync.", + leaseManagementConfig.workerIdentifier()); leaderElectedPeriodicShardSyncManager.syncShardsOnce(); - } } else { log.info("Skipping shard sync per configuration setting (and lease table is not empty)"); @@ -402,13 +420,15 @@ public class Scheduler implements Runnable { } @VisibleForTesting - boolean shouldInitiateLeaseSync() throws InterruptedException, - DependencyException, ProvisionedThroughputException, InvalidStateException { - long waitTime = ThreadLocalRandom.current().nextLong(MIN_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS, MAX_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS); + boolean shouldInitiateLeaseSync() + throws InterruptedException, DependencyException, ProvisionedThroughputException, InvalidStateException { + long waitTime = ThreadLocalRandom.current() + .nextLong(MIN_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS, MAX_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS); long waitUntil = System.currentTimeMillis() + waitTime; boolean shouldInitiateLeaseSync = true; - while (System.currentTimeMillis() < waitUntil && (shouldInitiateLeaseSync = leaseRefresher.isLeaseTableEmpty())) { + while (System.currentTimeMillis() < waitUntil + && (shouldInitiateLeaseSync = leaseRefresher.isLeaseTableEmpty())) { // check every 3 seconds if lease table is still empty, // to minimize contention between all workers bootstrapping at the same time log.info("Lease table is still empty. 
Checking again in {} ms", LEASE_TABLE_CHECK_FREQUENCY_MILLIS); @@ -422,8 +442,8 @@ public class Scheduler implements Runnable { try { Set assignedShards = new HashSet<>(); for (ShardInfo shardInfo : getShardInfoForAssignments()) { - ShardConsumer shardConsumer = createOrGetShardConsumer(shardInfo, - processorConfig.shardRecordProcessorFactory(), leaseCleanupManager); + ShardConsumer shardConsumer = createOrGetShardConsumer( + shardInfo, processorConfig.shardRecordProcessorFactory(), leaseCleanupManager); shardConsumer.executeLifecycle(); assignedShards.add(shardInfo); @@ -444,8 +464,10 @@ public class Scheduler implements Runnable { slog.info("Sleeping ..."); Thread.sleep(shardConsumerDispatchPollIntervalMillis); } catch (Exception e) { - log.error("Worker.run caught exception, sleeping for {} milli seconds!", - shardConsumerDispatchPollIntervalMillis, e); + log.error( + "Worker.run caught exception, sleeping for {} milli seconds!", + shardConsumerDispatchPollIntervalMillis, + e); try { Thread.sleep(shardConsumerDispatchPollIntervalMillis); } catch (InterruptedException ex) { @@ -470,11 +492,12 @@ public class Scheduler implements Runnable { final Set streamsSynced = new HashSet<>(); if (shouldSyncStreamsNow()) { - final MetricsScope metricsScope = MetricsUtil.createMetricsWithOperation(metricsFactory, MULTI_STREAM_TRACKER); + final MetricsScope metricsScope = + MetricsUtil.createMetricsWithOperation(metricsFactory, MULTI_STREAM_TRACKER); try { - final Map newStreamConfigMap = streamTracker.streamConfigList() - .stream().collect(Collectors.toMap(StreamConfig::streamIdentifier, Function.identity())); + final Map newStreamConfigMap = streamTracker.streamConfigList().stream() + .collect(Collectors.toMap(StreamConfig::streamIdentifier, Function.identity())); // This is done to ensure that we clean up the stale streams lingering in the lease table. 
// Only sync from lease table again if the currentStreamConfigMap and newStreamConfigMap contain // different set of streams and Leader has not synced the leases yet @@ -487,12 +510,16 @@ public class Scheduler implements Runnable { final Set streamsFromLeaseTable = leaseTableLeases.stream() .map(lease -> StreamIdentifier.multiStreamInstance(lease.streamIdentifier())) .collect(Collectors.toSet()); - // Remove stream from currentStreamConfigMap if this stream in not in the lease table and newStreamConfigMap. + // Remove stream from currentStreamConfigMap if this stream in not in the lease table and + // newStreamConfigMap. // This means that the leases have already been deleted by the last leader. currentStreamConfigMap.keySet().stream() .filter(streamIdentifier -> !newStreamConfigMap.containsKey(streamIdentifier) - && !streamsFromLeaseTable.contains(streamIdentifier)).forEach(stream -> { - log.info("Removing stream {} from currentStreamConfigMap due to not being active", stream); + && !streamsFromLeaseTable.contains(streamIdentifier)) + .forEach(stream -> { + log.info( + "Removing stream {} from currentStreamConfigMap due to not being active", + stream); currentStreamConfigMap.remove(stream); staleStreamDeletionMap.remove(stream); streamsSynced.add(stream); @@ -519,18 +546,21 @@ public class Scheduler implements Runnable { staleStreamDeletionMap.putIfAbsent(streamIdentifier, Instant.now()); } }; - if (formerStreamsLeasesDeletionStrategy.leaseDeletionType() == FORMER_STREAMS_AUTO_DETECTION_DEFERRED_DELETION) { + if (formerStreamsLeasesDeletionStrategy.leaseDeletionType() + == FORMER_STREAMS_AUTO_DETECTION_DEFERRED_DELETION) { // Now, we are identifying the stale/old streams and enqueuing it for deferred deletion. // It is assumed that all the workers will always have the latest and consistent snapshot of streams // from the multiStreamTracker. 
// - // The following streams transition state among two workers are NOT considered safe, where Worker 2, on + // The following streams transition state among two workers are NOT considered safe, where Worker 2, + // on // initialization learn about D from lease table and delete the leases for D, as it is not available // in its latest MultiStreamTracker. // Worker 1 : A,B,C -> A,B,C,D (latest) // Worker 2 : BOOTS_UP -> A,B,C (stale) // - // The following streams transition state among two workers are NOT considered safe, where Worker 2 might + // The following streams transition state among two workers are NOT considered safe, where Worker 2 + // might // end up deleting the leases for A and D and lose progress made so far. // Worker 1 : A,B,C -> A,B,C,D (latest) // Worker 2 : A,B,C -> B,C (stale/partial) @@ -539,13 +569,16 @@ public class Scheduler implements Runnable { // before attempting to delete it, we will be deferring the leases deletion based on the // defer time period. currentStreamConfigMap.keySet().forEach(enqueueStreamLeaseDeletionOperation); - } else if (formerStreamsLeasesDeletionStrategy.leaseDeletionType() == StreamsLeasesDeletionType.PROVIDED_STREAMS_DEFERRED_DELETION) { - Optional.ofNullable(formerStreamsLeasesDeletionStrategy.streamIdentifiersForLeaseCleanup()).ifPresent( - streamIdentifiers -> streamIdentifiers.forEach(enqueueStreamLeaseDeletionOperation)); + } else if (formerStreamsLeasesDeletionStrategy.leaseDeletionType() + == StreamsLeasesDeletionType.PROVIDED_STREAMS_DEFERRED_DELETION) { + Optional.ofNullable(formerStreamsLeasesDeletionStrategy.streamIdentifiersForLeaseCleanup()) + .ifPresent(streamIdentifiers -> + streamIdentifiers.forEach(enqueueStreamLeaseDeletionOperation)); } else { // Remove the old/stale streams identified through the new and existing streams list, without // cleaning up their leases. Disabling deprecated shard sync + lease cleanup through a flag. 
- Iterator currentSetOfStreamsIter = currentStreamConfigMap.keySet().iterator(); + Iterator currentSetOfStreamsIter = + currentStreamConfigMap.keySet().iterator(); while (currentSetOfStreamsIter.hasNext()) { StreamIdentifier streamIdentifier = currentSetOfStreamsIter.next(); if (!newStreamConfigMap.containsKey(streamIdentifier)) { @@ -553,13 +586,14 @@ public class Scheduler implements Runnable { log.info( "Found old/deleted stream : {}. Triggering shard sync. Removing from tracked active streams.", streamIdentifier); - ShardSyncTaskManager shardSyncTaskManager = createOrGetShardSyncTaskManager( - currentStreamConfigMap.get(streamIdentifier)); + ShardSyncTaskManager shardSyncTaskManager = + createOrGetShardSyncTaskManager(currentStreamConfigMap.get(streamIdentifier)); shardSyncTaskManager.submitShardSyncTask(); } else { log.info( "Found old/deleted stream : {}. Removing from tracked active streams, but not cleaning up leases," - + " as part of this workflow", streamIdentifier); + + " as part of this workflow", + streamIdentifier); } currentSetOfStreamsIter.remove(); streamsSynced.add(streamIdentifier); @@ -573,22 +607,24 @@ public class Scheduler implements Runnable { // StreamIdentifiers are eligible for deletion only when the deferment period has elapsed and // the streamIdentifiersForLeaseCleanup are not present in the latest snapshot. 
final Map> staleStreamIdDeletionDecisionMap = - staleStreamDeletionMap.keySet().stream().collect( - Collectors.partitioningBy(newStreamConfigMap::containsKey, Collectors.toSet())); - final Set staleStreamIdsToBeDeleted = staleStreamIdDeletionDecisionMap.get(false) - .stream().filter(streamIdentifier -> - Duration.between(staleStreamDeletionMap.get(streamIdentifier), Instant.now()) - .toMillis() >= waitPeriodToDeleteOldStreams.toMillis()) - .collect(Collectors.toSet()); + staleStreamDeletionMap.keySet().stream() + .collect( + Collectors.partitioningBy(newStreamConfigMap::containsKey, Collectors.toSet())); + final Set staleStreamIdsToBeDeleted = + staleStreamIdDeletionDecisionMap.get(false).stream() + .filter(streamIdentifier -> + Duration.between(staleStreamDeletionMap.get(streamIdentifier), Instant.now()) + .toMillis() + >= waitPeriodToDeleteOldStreams.toMillis()) + .collect(Collectors.toSet()); // These are the streams which are deleted in Kinesis and we encounter resource not found during // shardSyncTask. This is applicable in MultiStreamMode only, in case of SingleStreamMode, store will // not have any data. // Filter streams based on newStreamConfigMap so that we don't override input to KCL in any case. 
- final Set deletedStreamSet = this.deletedStreamListProvider - .purgeAllDeletedStream() - .stream() - .filter(streamIdentifier -> !newStreamConfigMap.containsKey(streamIdentifier)) - .collect(Collectors.toSet()); + final Set deletedStreamSet = + this.deletedStreamListProvider.purgeAllDeletedStream().stream() + .filter(streamIdentifier -> !newStreamConfigMap.containsKey(streamIdentifier)) + .collect(Collectors.toSet()); if (deletedStreamSet.size() > 0) { log.info("Stale streams to delete: {}", deletedStreamSet); staleStreamIdsToBeDeleted.addAll(deletedStreamSet); @@ -603,18 +639,24 @@ public class Scheduler implements Runnable { if (!staleStreamDeletionMap.isEmpty()) { log.warn( "Streams enqueued for deletion for lease table cleanup along with their scheduled time for deletion: {} ", - staleStreamDeletionMap.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, - entry -> entry.getValue().plus(waitPeriodToDeleteOldStreams)))); + staleStreamDeletionMap.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue() + .plus(waitPeriodToDeleteOldStreams)))); } streamSyncWatch.reset().start(); - MetricsUtil.addCount(metricsScope, ACTIVE_STREAMS_COUNT, newStreamConfigMap.size(), MetricsLevel.SUMMARY); - MetricsUtil.addCount(metricsScope, PENDING_STREAMS_DELETION_COUNT, staleStreamDeletionMap.size(), + MetricsUtil.addCount( + metricsScope, ACTIVE_STREAMS_COUNT, newStreamConfigMap.size(), MetricsLevel.SUMMARY); + MetricsUtil.addCount( + metricsScope, + PENDING_STREAMS_DELETION_COUNT, + staleStreamDeletionMap.size(), MetricsLevel.SUMMARY); - MetricsUtil.addCount(metricsScope, NON_EXISTING_STREAM_DELETE_COUNT, deletedStreamSet.size(), - MetricsLevel.SUMMARY); - MetricsUtil.addCount(metricsScope, DELETED_STREAMS_COUNT, deletedStreamsLeases.size(), MetricsLevel.SUMMARY); + MetricsUtil.addCount( + metricsScope, NON_EXISTING_STREAM_DELETE_COUNT, deletedStreamSet.size(), MetricsLevel.SUMMARY); + MetricsUtil.addCount( + metricsScope, 
DELETED_STREAMS_COUNT, deletedStreamsLeases.size(), MetricsLevel.SUMMARY); } finally { MetricsUtil.endScope(metricsScope); } @@ -624,8 +666,7 @@ public class Scheduler implements Runnable { @VisibleForTesting boolean shouldSyncStreamsNow() { - return isMultiStreamMode && - (streamSyncWatch.elapsed(TimeUnit.MILLISECONDS) > NEW_STREAM_CHECK_INTERVAL_MILLIS); + return isMultiStreamMode && (streamSyncWatch.elapsed(TimeUnit.MILLISECONDS) > NEW_STREAM_CHECK_INTERVAL_MILLIS); } @VisibleForTesting @@ -642,7 +683,8 @@ public class Scheduler implements Runnable { private List fetchMultiStreamLeases() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - return (List) ((List) leaseCoordinator.leaseRefresher().listLeases()); + return (List) + ((List) leaseCoordinator.leaseRefresher().listLeases()); } private void removeStreamsFromStaleStreamsList(Set streamIdentifiers) { @@ -659,8 +701,9 @@ public class Scheduler implements Runnable { log.info("Deleting streams: {}", streamIdentifiers); final Set streamsSynced = new HashSet<>(); final List leases = fetchMultiStreamLeases(); - final Map> streamIdToShardsMap = leases.stream().collect( - Collectors.groupingBy(MultiStreamLease::streamIdentifier, Collectors.toCollection(ArrayList::new))); + final Map> streamIdToShardsMap = leases.stream() + .collect(Collectors.groupingBy( + MultiStreamLease::streamIdentifier, Collectors.toCollection(ArrayList::new))); for (StreamIdentifier streamIdentifier : streamIdentifiers) { log.warn("Found old/deleted stream: {}. Directly deleting leases of this stream.", streamIdentifier); // Removing streamIdentifier from this map so PSSM doesn't think there is a hole in the stream while @@ -690,7 +733,8 @@ public class Scheduler implements Runnable { } catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) { log.error( "Unable to delete stale stream lease {}. Skipping further deletions for this stream. 
Will retry later.", - lease.leaseKey(), e); + lease.leaseKey(), + e); return false; } } @@ -760,8 +804,8 @@ public class Scheduler implements Runnable { public CompletableFuture startGracefulShutdown() { synchronized (this) { if (gracefulShutdownFuture == null) { - gracefulShutdownFuture = gracefulShutdownCoordinator - .startGracefulShutdown(createGracefulShutdownCallable()); + gracefulShutdownFuture = + gracefulShutdownCoordinator.startGracefulShutdown(createGracefulShutdownCallable()); } } return gracefulShutdownFuture; @@ -809,13 +853,15 @@ public class Scheduler implements Runnable { // If there are no leases notification is already completed, but we still need to shutdown the worker. // this.shutdown(); - return GracefulShutdownContext.builder().finalShutdownLatch(finalShutdownLatch).build(); + return GracefulShutdownContext.builder() + .finalShutdownLatch(finalShutdownLatch) + .build(); } CountDownLatch shutdownCompleteLatch = new CountDownLatch(leases.size()); CountDownLatch notificationCompleteLatch = new CountDownLatch(leases.size()); for (Lease lease : leases) { - ShutdownNotification shutdownNotification = new ShardConsumerShutdownNotification(leaseCoordinator, - lease, notificationCompleteLatch, shutdownCompleteLatch); + ShutdownNotification shutdownNotification = new ShardConsumerShutdownNotification( + leaseCoordinator, lease, notificationCompleteLatch, shutdownCompleteLatch); ShardInfo shardInfo = DynamoDBLeaseCoordinator.convertLeaseToAssignment(lease); ShardConsumer consumer = shardInfoShardConsumerMap.get(shardInfo); if (consumer != null) { @@ -929,9 +975,10 @@ public class Scheduler implements Runnable { * Kinesis shard info * @return ShardConsumer for the shard */ - ShardConsumer createOrGetShardConsumer(@NonNull final ShardInfo shardInfo, - @NonNull final ShardRecordProcessorFactory shardRecordProcessorFactory, - @NonNull final LeaseCleanupManager leaseCleanupManager) { + ShardConsumer createOrGetShardConsumer( + @NonNull final ShardInfo 
shardInfo, + @NonNull final ShardRecordProcessorFactory shardRecordProcessorFactory, + @NonNull final LeaseCleanupManager leaseCleanupManager) { ShardConsumer consumer = shardInfoShardConsumerMap.get(shardInfo); // Instantiate a new consumer if we don't have one, or the one we // had was from an earlier @@ -948,20 +995,23 @@ public class Scheduler implements Runnable { } private ShardSyncTaskManager createOrGetShardSyncTaskManager(StreamConfig streamConfig) { - return streamToShardSyncTaskManagerMap.computeIfAbsent(streamConfig, s -> shardSyncTaskManagerProvider.apply(s)); + return streamToShardSyncTaskManagerMap.computeIfAbsent( + streamConfig, s -> shardSyncTaskManagerProvider.apply(s)); } - protected ShardConsumer buildConsumer(@NonNull final ShardInfo shardInfo, - @NonNull final ShardRecordProcessorFactory shardRecordProcessorFactory, - @NonNull final LeaseCleanupManager leaseCleanupManager) { - ShardRecordProcessorCheckpointer checkpointer = coordinatorConfig.coordinatorFactory().createRecordProcessorCheckpointer(shardInfo, - checkpoint); + protected ShardConsumer buildConsumer( + @NonNull final ShardInfo shardInfo, + @NonNull final ShardRecordProcessorFactory shardRecordProcessorFactory, + @NonNull final LeaseCleanupManager leaseCleanupManager) { + ShardRecordProcessorCheckpointer checkpointer = + coordinatorConfig.coordinatorFactory().createRecordProcessorCheckpointer(shardInfo, checkpoint); // The only case where streamName is not available will be when multistreamtracker not set. In this case, // get the default stream name for the single stream application. final StreamIdentifier streamIdentifier = getStreamIdentifier(shardInfo.streamIdentifierSerOpt()); // Irrespective of single stream app or multi stream app, streamConfig should always be available. 
- // If we have a shardInfo, that is not present in currentStreamConfigMap for whatever reason, then return default stream config + // If we have a shardInfo, that is not present in currentStreamConfigMap for whatever reason, then return + // default stream config // to gracefully complete the reading. StreamConfig streamConfig = currentStreamConfigMap.get(streamIdentifier); if (streamConfig == null) { @@ -969,8 +1019,10 @@ public class Scheduler implements Runnable { log.info("Created orphan {}", streamConfig); } Validate.notNull(streamConfig, "StreamConfig should not be null"); - RecordsPublisher cache = retrievalConfig.retrievalFactory().createGetRecordsCache(shardInfo, streamConfig, metricsFactory); - ShardConsumerArgument argument = new ShardConsumerArgument(shardInfo, + RecordsPublisher cache = + retrievalConfig.retrievalFactory().createGetRecordsCache(shardInfo, streamConfig, metricsFactory); + ShardConsumerArgument argument = new ShardConsumerArgument( + shardInfo, streamConfig.streamIdentifier(), leaseCoordinator, executorService, @@ -993,10 +1045,15 @@ public class Scheduler implements Runnable { hierarchicalShardSyncerProvider.apply(streamConfig), metricsFactory, leaseCleanupManager, - schemaRegistryDecoder - ); - return new ShardConsumer(cache, executorService, shardInfo, lifecycleConfig.logWarningForTaskAfterMillis(), - argument, lifecycleConfig.taskExecutionListener(), lifecycleConfig.readTimeoutsToIgnoreBeforeWarning()); + schemaRegistryDecoder); + return new ShardConsumer( + cache, + executorService, + shardInfo, + lifecycleConfig.logWarningForTaskAfterMillis(), + argument, + lifecycleConfig.taskExecutionListener(), + lifecycleConfig.readTimeoutsToIgnoreBeforeWarning()); } /** @@ -1030,16 +1087,16 @@ public class Scheduler implements Runnable { */ private void registerErrorHandlerForUndeliverableAsyncTaskExceptions() { RxJavaPlugins.setErrorHandler(t -> { - ExecutorStateEvent executorStateEvent = 
diagnosticEventFactory.executorStateEvent(executorService, - leaseCoordinator); + ExecutorStateEvent executorStateEvent = + diagnosticEventFactory.executorStateEvent(executorService, leaseCoordinator); RejectedTaskEvent rejectedTaskEvent = diagnosticEventFactory.rejectedTaskEvent(executorStateEvent, t); rejectedTaskEvent.accept(diagnosticEventHandler); }); } private void logExecutorState() { - ExecutorStateEvent executorStateEvent = diagnosticEventFactory.executorStateEvent(executorService, - leaseCoordinator); + ExecutorStateEvent executorStateEvent = + diagnosticEventFactory.executorStateEvent(executorService, leaseCoordinator); executorStateEvent.accept(diagnosticEventHandler); } @@ -1049,7 +1106,8 @@ public class Scheduler implements Runnable { streamIdentifier = StreamIdentifier.multiStreamInstance(streamIdentifierString.get()); } else { Validate.isTrue(!isMultiStreamMode, "Should not be in MultiStream Mode"); - streamIdentifier = this.currentStreamConfigMap.values().iterator().next().streamIdentifier(); + streamIdentifier = + this.currentStreamConfigMap.values().iterator().next().streamIdentifier(); } Validate.notNull(streamIdentifier, "Stream identifier should not be empty"); return streamIdentifier; @@ -1070,13 +1128,16 @@ public class Scheduler implements Runnable { */ private static StreamConfig withStreamArn( @NonNull final StreamConfig streamConfig, @NonNull final Region kinesisRegion) { - Validate.isTrue(streamConfig.streamIdentifier().accountIdOptional().isPresent(), - "accountId should not be empty"); - Validate.isTrue(streamConfig.streamIdentifier().streamCreationEpochOptional().isPresent(), + Validate.isTrue( + streamConfig.streamIdentifier().accountIdOptional().isPresent(), "accountId should not be empty"); + Validate.isTrue( + streamConfig.streamIdentifier().streamCreationEpochOptional().isPresent(), "streamCreationEpoch should not be empty"); - log.info("Constructing stream ARN for {} using the Kinesis client's configured region - {}.", - 
streamConfig.streamIdentifier(), kinesisRegion); + log.info( + "Constructing stream ARN for {} using the Kinesis client's configured region - {}.", + streamConfig.streamIdentifier(), + kinesisRegion); final StreamIdentifier streamIdentifierWithArn = StreamIdentifier.multiStreamInstance( constructStreamArn( @@ -1106,13 +1167,22 @@ public class Scheduler implements Runnable { @NonNull final StreamIdentifier streamIdentifier, @NonNull final StreamConfig streamConfig) { final Region kinesisRegion = getKinesisRegion(); - return super.put(streamIdentifier, streamConfig.streamIdentifier().streamArnOptional() - .map(streamArn -> { - Validate.isTrue(kinesisRegion.id().equals(streamArn.region().get()), - "The provided streamARN " + streamArn - + " does not match the Kinesis client's configured region - " + kinesisRegion); - return streamConfig; - }).orElse(isMultiStreamMode ? withStreamArn(streamConfig, kinesisRegion) : streamConfig)); + return super.put( + streamIdentifier, + streamConfig + .streamIdentifier() + .streamArnOptional() + .map(streamArn -> { + Validate.isTrue( + kinesisRegion + .id() + .equals(streamArn.region().get()), + "The provided streamARN " + streamArn + + " does not match the Kinesis client's configured region - " + + kinesisRegion); + return streamConfig; + }) + .orElse(isMultiStreamMode ? 
withStreamArn(streamConfig, kinesisRegion) : streamConfig)); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/SchedulerCoordinatorFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/SchedulerCoordinatorFactory.java index d138b84f..de17542f 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/SchedulerCoordinatorFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/SchedulerCoordinatorFactory.java @@ -22,7 +22,6 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import com.google.common.util.concurrent.ThreadFactoryBuilder; - import lombok.Data; import lombok.NonNull; import software.amazon.kinesis.annotations.KinesisClientInternalApi; @@ -41,15 +40,16 @@ public class SchedulerCoordinatorFactory implements CoordinatorFactory { */ @Override public ExecutorService createExecutorService() { - return new SchedulerThreadPoolExecutor( - new ThreadFactoryBuilder().setNameFormat("ShardRecordProcessor-%04d").build()); + return new SchedulerThreadPoolExecutor(new ThreadFactoryBuilder() + .setNameFormat("ShardRecordProcessor-%04d") + .build()); } static class SchedulerThreadPoolExecutor extends ThreadPoolExecutor { private static final long DEFAULT_KEEP_ALIVE = 60L; + SchedulerThreadPoolExecutor(ThreadFactory threadFactory) { - super(0, Integer.MAX_VALUE, DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, new SynchronousQueue<>(), - threadFactory); + super(0, Integer.MAX_VALUE, DEFAULT_KEEP_ALIVE, TimeUnit.SECONDS, new SynchronousQueue<>(), threadFactory); } } @@ -57,8 +57,8 @@ public class SchedulerCoordinatorFactory implements CoordinatorFactory { * {@inheritDoc} */ @Override - public ShardRecordProcessorCheckpointer createRecordProcessorCheckpointer(@NonNull final ShardInfo shardInfo, - @NonNull final Checkpointer checkpoint) { + public ShardRecordProcessorCheckpointer createRecordProcessorCheckpointer( + 
@NonNull final ShardInfo shardInfo, @NonNull final Checkpointer checkpoint) { return new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/WorkerStateChangeListener.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/WorkerStateChangeListener.java index dd7162b3..bc2a1b1c 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/WorkerStateChangeListener.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/coordinator/WorkerStateChangeListener.java @@ -29,6 +29,5 @@ public interface WorkerStateChangeListener { void onWorkerStateChange(WorkerState newState); - default void onAllInitializationAttemptsFailed(Throwable e) { - } + default void onAllInitializationAttemptsFailed(Throwable e) {} } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/InvalidStateException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/InvalidStateException.java index 97e9209d..6f2fa3a8 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/InvalidStateException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/InvalidStateException.java @@ -19,16 +19,16 @@ package software.amazon.kinesis.exceptions; * is not found). */ public class InvalidStateException extends KinesisClientLibNonRetryableException { - + private static final long serialVersionUID = 1L; - + /** * @param message provides more details about the cause and potential ways to debug/address. */ public InvalidStateException(String message) { super(message); } - + /** * @param message provides more details about the cause and potential ways to debug/address. 
* @param e Cause of the exception @@ -36,5 +36,4 @@ public class InvalidStateException extends KinesisClientLibNonRetryableException public InvalidStateException(String message, Exception e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibDependencyException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibDependencyException.java index 6c7c295a..1f2092cb 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibDependencyException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibDependencyException.java @@ -15,21 +15,21 @@ package software.amazon.kinesis.exceptions; /** - * This is thrown when the Amazon Kinesis Client Library encounters issues talking to its dependencies + * This is thrown when the Amazon Kinesis Client Library encounters issues talking to its dependencies * (e.g. fetching data from Kinesis, DynamoDB table reads/writes, emitting metrics to CloudWatch). - * + * */ public class KinesisClientLibDependencyException extends KinesisClientLibRetryableException { - + private static final long serialVersionUID = 1L; - + /** * @param message provides more details about the cause and potential ways to debug/address. */ public KinesisClientLibDependencyException(String message) { super(message); } - + /** * @param message provides more details about the cause and potential ways to debug/address. 
* @param e Cause of the exception @@ -37,5 +37,4 @@ public class KinesisClientLibDependencyException extends KinesisClientLibRetryab public KinesisClientLibDependencyException(String message, Exception e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibException.java index a7b2e173..0da75474 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibException.java @@ -25,7 +25,7 @@ public abstract class KinesisClientLibException extends Exception { /** * Constructor. - * + * * @param message Message of with details of the exception. */ public KinesisClientLibException(String message) { @@ -34,12 +34,11 @@ public abstract class KinesisClientLibException extends Exception { /** * Constructor. - * + * * @param message Message with details of the exception. * @param cause Cause. */ public KinesisClientLibException(String message, Throwable cause) { super(message, cause); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibNonRetryableException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibNonRetryableException.java index 49f4bf6b..b538f048 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibNonRetryableException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibNonRetryableException.java @@ -16,7 +16,7 @@ package software.amazon.kinesis.exceptions; /** * Non-retryable exceptions. Simply retrying the same request/operation is not expected to succeed. 
- * + * */ public abstract class KinesisClientLibNonRetryableException extends KinesisClientLibException { @@ -24,7 +24,7 @@ public abstract class KinesisClientLibNonRetryableException extends KinesisClien /** * Constructor. - * + * * @param message Message. */ public KinesisClientLibNonRetryableException(String message) { @@ -33,7 +33,7 @@ public abstract class KinesisClientLibNonRetryableException extends KinesisClien /** * Constructor. - * + * * @param message Message. * @param e Cause. */ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibRetryableException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibRetryableException.java index 72e9b189..35d0782e 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibRetryableException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/KinesisClientLibRetryableException.java @@ -22,7 +22,7 @@ public abstract class KinesisClientLibRetryableException extends RuntimeExceptio /** * Constructor. - * + * * @param message Message with details about the exception. */ public KinesisClientLibRetryableException(String message) { @@ -31,7 +31,7 @@ public abstract class KinesisClientLibRetryableException extends RuntimeExceptio /** * Constructor. - * + * * @param message Message with details about the exception. * @param e Cause. 
*/ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ShutdownException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ShutdownException.java index 6d7fafc0..0a530f57 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ShutdownException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ShutdownException.java @@ -35,5 +35,4 @@ public class ShutdownException extends KinesisClientLibNonRetryableException { public ShutdownException(String message, Exception e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ThrottlingException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ThrottlingException.java index addfa58b..ef951ef6 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ThrottlingException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/ThrottlingException.java @@ -35,5 +35,4 @@ public class ThrottlingException extends KinesisClientLibRetryableException { public ThrottlingException(String message, Exception e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/BlockedOnParentShardException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/BlockedOnParentShardException.java index 9b1db062..db979b3d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/BlockedOnParentShardException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/BlockedOnParentShardException.java @@ -27,7 +27,7 @@ public class BlockedOnParentShardException extends KinesisClientLibRetryableExce /** * Constructor. - * + * * @param message Error message. 
*/ public BlockedOnParentShardException(String message) { @@ -36,12 +36,11 @@ public class BlockedOnParentShardException extends KinesisClientLibRetryableExce /** * Constructor. - * + * * @param message Error message. * @param e Cause of the exception. */ public BlockedOnParentShardException(String message, Exception e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/KinesisClientLibIOException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/KinesisClientLibIOException.java index 95495013..02f9c1a4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/KinesisClientLibIOException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/exceptions/internal/KinesisClientLibIOException.java @@ -25,7 +25,7 @@ public class KinesisClientLibIOException extends KinesisClientLibRetryableExcept /** * Constructor. - * + * * @param message Error message. */ public KinesisClientLibIOException(String message) { @@ -34,7 +34,7 @@ public class KinesisClientLibIOException extends KinesisClientLibRetryableExcept /** * Constructor. - * + * * @param message Error message. * @param e Cause. 
*/ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/DynamoUtils.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/DynamoUtils.java index 29d6029b..34b13f64 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/DynamoUtils.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/DynamoUtils.java @@ -14,15 +14,15 @@ */ package software.amazon.kinesis.leases; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.services.dynamodb.model.AttributeValue; -import software.amazon.kinesis.annotations.KinesisClientInternalApi; - import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.services.dynamodb.model.AttributeValue; +import software.amazon.kinesis.annotations.KinesisClientInternalApi; + /** * Static utility functions used by our LeaseSerializers. 
*/ @@ -42,7 +42,9 @@ public class DynamoUtils { throw new IllegalArgumentException("Byte buffer attributeValues cannot be null or empty."); } - return AttributeValue.builder().b(SdkBytes.fromByteArray(byteBufferValue)).build(); + return AttributeValue.builder() + .b(SdkBytes.fromByteArray(byteBufferValue)) + .build(); } public static AttributeValue createAttributeValue(String stringValue) { @@ -97,5 +99,4 @@ public class DynamoUtils { return av.ss(); } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java index 6ddef3cf..053dc2a6 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/HierarchicalShardSyncer.java @@ -32,12 +32,11 @@ import com.google.common.annotations.VisibleForTesting; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; -import lombok.experimental.Accessors; -import org.apache.commons.lang3.StringUtils; - import lombok.NonNull; import lombok.RequiredArgsConstructor; +import lombok.experimental.Accessors; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import software.amazon.awssdk.services.kinesis.model.ChildShard; import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; import software.amazon.awssdk.services.kinesis.model.Shard; @@ -78,7 +77,8 @@ public class HierarchicalShardSyncer { private final DeletedStreamListProvider deletedStreamListProvider; private static final String MIN_HASH_KEY = BigInteger.ZERO.toString(); - private static final String MAX_HASH_KEY = new BigInteger("2").pow(128).subtract(BigInteger.ONE).toString(); + private static final String MAX_HASH_KEY = + new BigInteger("2").pow(128).subtract(BigInteger.ONE).toString(); private static final int 
RETRIES_FOR_COMPLETE_HASH_RANGE = 3; private static final long DELAY_BETWEEN_LIST_SHARDS_MILLIS = 1000; @@ -91,7 +91,9 @@ public class HierarchicalShardSyncer { this(isMultiStreamMode, streamIdentifier, null); } - public HierarchicalShardSyncer(final boolean isMultiStreamMode, final String streamIdentifier, + public HierarchicalShardSyncer( + final boolean isMultiStreamMode, + final String streamIdentifier, final DeletedStreamListProvider deletedStreamListProvider) { this.isMultiStreamMode = isMultiStreamMode; this.streamIdentifier = streamIdentifier; @@ -99,9 +101,7 @@ public class HierarchicalShardSyncer { } private static String getShardIdFromLease(Lease lease, MultiStreamArgs multiStreamArgs) { - return multiStreamArgs.isMultiStreamMode() - ? ((MultiStreamLease) lease).shardId() - : lease.leaseKey(); + return multiStreamArgs.isMultiStreamMode() ? ((MultiStreamLease) lease).shardId() : lease.leaseKey(); } /** @@ -119,23 +119,41 @@ public class HierarchicalShardSyncer { * @throws ProvisionedThroughputException * @throws KinesisClientLibIOException */ - public synchronized boolean checkAndCreateLeaseForNewShards(@NonNull final ShardDetector shardDetector, - final LeaseRefresher leaseRefresher, final InitialPositionInStreamExtended initialPosition, - final MetricsScope scope, final boolean ignoreUnexpectedChildShards, final boolean isLeaseTableEmpty) - throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException, InterruptedException { - final List latestShards = isLeaseTableEmpty ? 
- getShardListAtInitialPosition(shardDetector, initialPosition) : getShardList(shardDetector); - return checkAndCreateLeaseForNewShards(shardDetector, leaseRefresher, initialPosition, latestShards, ignoreUnexpectedChildShards, scope, + public synchronized boolean checkAndCreateLeaseForNewShards( + @NonNull final ShardDetector shardDetector, + final LeaseRefresher leaseRefresher, + final InitialPositionInStreamExtended initialPosition, + final MetricsScope scope, + final boolean ignoreUnexpectedChildShards, + final boolean isLeaseTableEmpty) + throws DependencyException, InvalidStateException, ProvisionedThroughputException, + KinesisClientLibIOException, InterruptedException { + final List latestShards = isLeaseTableEmpty + ? getShardListAtInitialPosition(shardDetector, initialPosition) + : getShardList(shardDetector); + return checkAndCreateLeaseForNewShards( + shardDetector, + leaseRefresher, + initialPosition, + latestShards, + ignoreUnexpectedChildShards, + scope, isLeaseTableEmpty); } /** * Provide a pre-collected list of shards to avoid calling ListShards API */ - public synchronized boolean checkAndCreateLeaseForNewShards(@NonNull final ShardDetector shardDetector, - final LeaseRefresher leaseRefresher, final InitialPositionInStreamExtended initialPosition, - List latestShards, final boolean ignoreUnexpectedChildShards, final MetricsScope scope, final boolean isLeaseTableEmpty) - throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException { + public synchronized boolean checkAndCreateLeaseForNewShards( + @NonNull final ShardDetector shardDetector, + final LeaseRefresher leaseRefresher, + final InitialPositionInStreamExtended initialPosition, + List latestShards, + final boolean ignoreUnexpectedChildShards, + final MetricsScope scope, + final boolean isLeaseTableEmpty) + throws DependencyException, InvalidStateException, ProvisionedThroughputException, + KinesisClientLibIOException { if 
(!CollectionUtils.isNullOrEmpty(latestShards)) { log.debug("{} - Num shards: {}", streamIdentifier, latestShards.size()); } else { @@ -144,19 +162,22 @@ public class HierarchicalShardSyncer { } final Map shardIdToShardMap = constructShardIdToShardMap(latestShards); - final Map> shardIdToChildShardIdsMap = constructShardIdToChildShardIdsMap( - shardIdToShardMap); + final Map> shardIdToChildShardIdsMap = + constructShardIdToChildShardIdsMap(shardIdToShardMap); final Set inconsistentShardIds = findInconsistentShardIds(shardIdToChildShardIdsMap, shardIdToShardMap); if (!ignoreUnexpectedChildShards) { assertAllParentShardsAreClosed(inconsistentShardIds); } - final List currentLeases = isMultiStreamMode ? - leaseRefresher.listLeasesForStream(shardDetector.streamIdentifier()) : leaseRefresher.listLeases(); - final MultiStreamArgs multiStreamArgs = new MultiStreamArgs(isMultiStreamMode, shardDetector.streamIdentifier()); - final LeaseSynchronizer leaseSynchronizer = isLeaseTableEmpty ? new EmptyLeaseTableSynchronizer() : - new NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap); - final List newLeasesToCreate = determineNewLeasesToCreate(leaseSynchronizer, latestShards, currentLeases, - initialPosition, inconsistentShardIds, multiStreamArgs); + final List currentLeases = isMultiStreamMode + ? leaseRefresher.listLeasesForStream(shardDetector.streamIdentifier()) + : leaseRefresher.listLeases(); + final MultiStreamArgs multiStreamArgs = + new MultiStreamArgs(isMultiStreamMode, shardDetector.streamIdentifier()); + final LeaseSynchronizer leaseSynchronizer = isLeaseTableEmpty + ? 
new EmptyLeaseTableSynchronizer() + : new NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap); + final List newLeasesToCreate = determineNewLeasesToCreate( + leaseSynchronizer, latestShards, currentLeases, initialPosition, inconsistentShardIds, multiStreamArgs); log.info("{} - Number of new leases to create: {}", streamIdentifier, newLeasesToCreate.size()); final Set createdLeases = new HashSet<>(); @@ -172,7 +193,9 @@ public class HierarchicalShardSyncer { } finally { MetricsUtil.addSuccessAndLatency(scope, "CreateLease", success, startTime, MetricsLevel.DETAILED); if (lease.checkpoint() != null) { - final String metricName = lease.checkpoint().isSentinelCheckpoint() ? lease.checkpoint().sequenceNumber() : "SEQUENCE_NUMBER"; + final String metricName = lease.checkpoint().isSentinelCheckpoint() + ? lease.checkpoint().sequenceNumber() + : "SEQUENCE_NUMBER"; MetricsUtil.addSuccess(scope, "CreateLease_" + metricName, true, MetricsLevel.DETAILED); } } @@ -187,7 +210,7 @@ public class HierarchicalShardSyncer { * @throws KinesisClientLibIOException */ private static void assertAllParentShardsAreClosed(final Set inconsistentShardIds) - throws KinesisClientLibIOException { + throws KinesisClientLibIOException { if (!CollectionUtils.isNullOrEmpty(inconsistentShardIds)) { final String ids = StringUtils.join(inconsistentShardIds, ' '); throw new KinesisClientLibIOException(String.format( @@ -205,12 +228,17 @@ public class HierarchicalShardSyncer { * @param shardIdToShardMap * @return Set of inconsistent open shard ids for shards having open parents. 
*/ - private static Set findInconsistentShardIds(final Map> shardIdToChildShardIdsMap, - final Map shardIdToShardMap) { + private static Set findInconsistentShardIds( + final Map> shardIdToChildShardIdsMap, final Map shardIdToShardMap) { return shardIdToChildShardIdsMap.entrySet().stream() .filter(entry -> entry.getKey() == null - || shardIdToShardMap.get(entry.getKey()).sequenceNumberRange().endingSequenceNumber() == null) - .flatMap(entry -> shardIdToChildShardIdsMap.get(entry.getKey()).stream()).collect(Collectors.toSet()); + || shardIdToShardMap + .get(entry.getKey()) + .sequenceNumberRange() + .endingSequenceNumber() + == null) + .flatMap(entry -> shardIdToChildShardIdsMap.get(entry.getKey()).stream()) + .collect(Collectors.toSet()); } /** @@ -227,15 +255,15 @@ public class HierarchicalShardSyncer { final Shard shard = entry.getValue(); final String parentShardId = shard.parentShardId(); if (parentShardId != null && shardIdToShardMap.containsKey(parentShardId)) { - final Set childShardIds = shardIdToChildShardIdsMap.computeIfAbsent(parentShardId, - key -> new HashSet<>()); + final Set childShardIds = + shardIdToChildShardIdsMap.computeIfAbsent(parentShardId, key -> new HashSet<>()); childShardIds.add(shardId); } final String adjacentParentShardId = shard.adjacentParentShardId(); if (adjacentParentShardId != null && shardIdToShardMap.containsKey(adjacentParentShardId)) { - final Set childShardIds = shardIdToChildShardIdsMap.computeIfAbsent(adjacentParentShardId, - key -> new HashSet<>()); + final Set childShardIds = + shardIdToChildShardIdsMap.computeIfAbsent(adjacentParentShardId, key -> new HashSet<>()); childShardIds.add(shardId); } } @@ -247,7 +275,8 @@ public class HierarchicalShardSyncer { * @param initialPositionInStreamExtended * @return ShardFilter shard filter for the corresponding position in the stream. 
*/ - private static ShardFilter getShardFilterFromInitialPosition(InitialPositionInStreamExtended initialPositionInStreamExtended) { + private static ShardFilter getShardFilterFromInitialPosition( + InitialPositionInStreamExtended initialPositionInStreamExtended) { ShardFilter.Builder builder = ShardFilter.builder(); switch (initialPositionInStreamExtended.getInitialPositionInStream()) { @@ -258,14 +287,17 @@ public class HierarchicalShardSyncer { builder = builder.type(ShardFilterType.AT_TRIM_HORIZON); break; case AT_TIMESTAMP: - builder = builder.type(ShardFilterType.AT_TIMESTAMP).timestamp(initialPositionInStreamExtended.getTimestamp().toInstant()); + builder = builder.type(ShardFilterType.AT_TIMESTAMP) + .timestamp( + initialPositionInStreamExtended.getTimestamp().toInstant()); break; } return builder.build(); } - private static List getShardListAtInitialPosition(@NonNull final ShardDetector shardDetector, - InitialPositionInStreamExtended initialPositionInStreamExtended) throws KinesisClientLibIOException, InterruptedException { + private static List getShardListAtInitialPosition( + @NonNull final ShardDetector shardDetector, InitialPositionInStreamExtended initialPositionInStreamExtended) + throws KinesisClientLibIOException, InterruptedException { final ShardFilter shardFilter = getShardFilterFromInitialPosition(initialPositionInStreamExtended); final String streamName = shardDetector.streamIdentifier().streamName(); @@ -276,8 +308,8 @@ public class HierarchicalShardSyncer { shards = shardDetector.listShardsWithFilter(shardFilter); if (shards == null) { - throw new KinesisClientLibIOException( - "Stream " + streamName + " is not in ACTIVE OR UPDATING state - will retry getting the shard list."); + throw new KinesisClientLibIOException("Stream " + streamName + + " is not in ACTIVE OR UPDATING state - will retry getting the shard list."); } if (isHashRangeOfShardsComplete(shards)) { @@ -287,13 +319,13 @@ public class HierarchicalShardSyncer { 
Thread.sleep(DELAY_BETWEEN_LIST_SHARDS_MILLIS); } - throw new KinesisClientLibIOException("Hash range of shards returned for " + streamName + " was incomplete after " - + RETRIES_FOR_COMPLETE_HASH_RANGE + " retries."); + throw new KinesisClientLibIOException("Hash range of shards returned for " + streamName + + " was incomplete after " + RETRIES_FOR_COMPLETE_HASH_RANGE + " retries."); } private List getShardList(@NonNull final ShardDetector shardDetector) throws KinesisClientLibIOException { // Fallback to existing behavior for backward compatibility - List shardList = Collections.emptyList(); + List shardList = Collections.emptyList(); try { shardList = shardDetector.listShardsWithoutConsumingResourceNotFoundException(); } catch (ResourceNotFoundException e) { @@ -303,8 +335,9 @@ public class HierarchicalShardSyncer { } final Optional> shards = Optional.of(shardList); - return shards.orElseThrow(() -> new KinesisClientLibIOException("Stream " + shardDetector.streamIdentifier().streamName() + - " is not in ACTIVE OR UPDATING state - will retry getting the shard list.")); + return shards.orElseThrow(() -> new KinesisClientLibIOException( + "Stream " + shardDetector.streamIdentifier().streamName() + + " is not in ACTIVE OR UPDATING state - will retry getting the shard list.")); } private static boolean isHashRangeOfShardsComplete(@NonNull List shards) { @@ -315,8 +348,8 @@ public class HierarchicalShardSyncer { final Comparator shardStartingHashKeyBasedComparator = new ShardStartingHashKeyBasedComparator(); shards.sort(shardStartingHashKeyBasedComparator); - if (!shards.get(0).hashKeyRange().startingHashKey().equals(MIN_HASH_KEY) || - !shards.get(shards.size() - 1).hashKeyRange().endingHashKey().equals(MAX_HASH_KEY)) { + if (!shards.get(0).hashKeyRange().startingHashKey().equals(MIN_HASH_KEY) + || !shards.get(shards.size() - 1).hashKeyRange().endingHashKey().equals(MAX_HASH_KEY)) { return false; } @@ -324,11 +357,16 @@ public class HierarchicalShardSyncer { for (int 
i = 1; i < shards.size(); i++) { final Shard shardAtStartOfPossibleHole = shards.get(i - 1); final Shard shardAtEndOfPossibleHole = shards.get(i); - final BigInteger startOfPossibleHole = new BigInteger(shardAtStartOfPossibleHole.hashKeyRange().endingHashKey()); - final BigInteger endOfPossibleHole = new BigInteger(shardAtEndOfPossibleHole.hashKeyRange().startingHashKey()); + final BigInteger startOfPossibleHole = + new BigInteger(shardAtStartOfPossibleHole.hashKeyRange().endingHashKey()); + final BigInteger endOfPossibleHole = + new BigInteger(shardAtEndOfPossibleHole.hashKeyRange().startingHashKey()); if (!endOfPossibleHole.subtract(startOfPossibleHole).equals(BigInteger.ONE)) { - log.error("Incomplete hash range found between {} and {}.", shardAtStartOfPossibleHole, shardAtEndOfPossibleHole); + log.error( + "Incomplete hash range found between {} and {}.", + shardAtStartOfPossibleHole, + shardAtEndOfPossibleHole); return false; } } @@ -350,10 +388,15 @@ public class HierarchicalShardSyncer { * @param multiStreamArgs determines if we are using multistream mode. 
* @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard */ - static List determineNewLeasesToCreate(final LeaseSynchronizer leaseSynchronizer, final List shards, - final List currentLeases, final InitialPositionInStreamExtended initialPosition, - final Set inconsistentShardIds, final MultiStreamArgs multiStreamArgs) { - return leaseSynchronizer.determineNewLeasesToCreate(shards, currentLeases, initialPosition, inconsistentShardIds, multiStreamArgs); + static List determineNewLeasesToCreate( + final LeaseSynchronizer leaseSynchronizer, + final List shards, + final List currentLeases, + final InitialPositionInStreamExtended initialPosition, + final Set inconsistentShardIds, + final MultiStreamArgs multiStreamArgs) { + return leaseSynchronizer.determineNewLeasesToCreate( + shards, currentLeases, initialPosition, inconsistentShardIds, multiStreamArgs); } /** @@ -368,10 +411,18 @@ public class HierarchicalShardSyncer { * @param inconsistentShardIds Set of child shard ids having open parents. 
* @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard */ - static List determineNewLeasesToCreate(final LeaseSynchronizer leaseSynchronizer, final List shards, - final List currentLeases, final InitialPositionInStreamExtended initialPosition, + static List determineNewLeasesToCreate( + final LeaseSynchronizer leaseSynchronizer, + final List shards, + final List currentLeases, + final InitialPositionInStreamExtended initialPosition, final Set inconsistentShardIds) { - return determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases, initialPosition, inconsistentShardIds, + return determineNewLeasesToCreate( + leaseSynchronizer, + shards, + currentLeases, + initialPosition, + inconsistentShardIds, new MultiStreamArgs(false, null)); } @@ -386,10 +437,14 @@ public class HierarchicalShardSyncer { * location in the shard (when an application starts up for the first time - and there are no checkpoints). * @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard */ - static List determineNewLeasesToCreate(final LeaseSynchronizer leaseSynchronizer, final List shards, - final List currentLeases, final InitialPositionInStreamExtended initialPosition) { + static List determineNewLeasesToCreate( + final LeaseSynchronizer leaseSynchronizer, + final List shards, + final List currentLeases, + final InitialPositionInStreamExtended initialPosition) { final Set inconsistentShardIds = new HashSet<>(); - return determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases, initialPosition, inconsistentShardIds); + return determineNewLeasesToCreate( + leaseSynchronizer, shards, currentLeases, initialPosition, inconsistentShardIds); } /** @@ -407,10 +462,13 @@ public class HierarchicalShardSyncer { * @param memoizationContext Memoization of shards that have been evaluated as part of the evaluation * @return true if the shard is a descendant of any current shard (lease already exists) */ 
- static boolean checkIfDescendantAndAddNewLeasesForAncestors(final String shardId, - final InitialPositionInStreamExtended initialPosition, final Set shardIdsOfCurrentLeases, + static boolean checkIfDescendantAndAddNewLeasesForAncestors( + final String shardId, + final InitialPositionInStreamExtended initialPosition, + final Set shardIdsOfCurrentLeases, final Map shardIdToShardMapOfAllKinesisShards, - final Map shardIdToLeaseMapOfNewShards, final MemoizationContext memoizationContext, + final Map shardIdToLeaseMapOfNewShards, + final MemoizationContext memoizationContext, final MultiStreamArgs multiStreamArgs) { final String streamIdentifier = getStreamIdentifier(multiStreamArgs); final Boolean previousValue = memoizationContext.isDescendant(shardId); @@ -427,7 +485,10 @@ public class HierarchicalShardSyncer { isDescendant = true; // We don't need to add leases of its ancestors, // because we'd have done it when creating a lease for this shard. - log.debug("{} - Shard {} is a descendant shard of an existing shard. Skipping lease creation", streamIdentifier, shardId); + log.debug( + "{} - Shard {} is a descendant shard of an existing shard. Skipping lease creation", + streamIdentifier, + shardId); } else { final Shard shard = shardIdToShardMapOfAllKinesisShards.get(shardId); @@ -436,9 +497,14 @@ public class HierarchicalShardSyncer { // Check if the parent is a descendant, and include its ancestors. Or, if the parent is NOT a // descendant but we should create a lease for it anyway (e.g. to include in processing from // TRIM_HORIZON or AT_TIMESTAMP). If either is true, then we mark the current shard as a descendant. 
- final boolean isParentDescendant = checkIfDescendantAndAddNewLeasesForAncestors(parentShardId, - initialPosition, shardIdsOfCurrentLeases, shardIdToShardMapOfAllKinesisShards, - shardIdToLeaseMapOfNewShards, memoizationContext, multiStreamArgs); + final boolean isParentDescendant = checkIfDescendantAndAddNewLeasesForAncestors( + parentShardId, + initialPosition, + shardIdsOfCurrentLeases, + shardIdToShardMapOfAllKinesisShards, + shardIdToLeaseMapOfNewShards, + memoizationContext, + multiStreamArgs); if (isParentDescendant || memoizationContext.shouldCreateLease(parentShardId)) { isDescendant = true; descendantParentShardIds.add(parentShardId); @@ -465,13 +531,17 @@ public class HierarchicalShardSyncer { * therefore covered in the lease table). So we should create a lease for the parent. */ if (lease == null) { - if (memoizationContext.shouldCreateLease(parentShardId) || - !descendantParentShardIds.contains(parentShardId)) { - log.debug("{} : Need to create a lease for shardId {}", streamIdentifier, parentShardId); - lease = multiStreamArgs.isMultiStreamMode() ? - newKCLMultiStreamLease(shardIdToShardMapOfAllKinesisShards.get(parentShardId), - multiStreamArgs.streamIdentifier()) : - newKCLLease(shardIdToShardMapOfAllKinesisShards.get(parentShardId)); + if (memoizationContext.shouldCreateLease(parentShardId) + || !descendantParentShardIds.contains(parentShardId)) { + log.debug( + "{} : Need to create a lease for shardId {}", + streamIdentifier, + parentShardId); + lease = multiStreamArgs.isMultiStreamMode() + ? 
newKCLMultiStreamLease( + shardIdToShardMapOfAllKinesisShards.get(parentShardId), + multiStreamArgs.streamIdentifier()) + : newKCLLease(shardIdToShardMapOfAllKinesisShards.get(parentShardId)); shardIdToLeaseMapOfNewShards.put(parentShardId, lease); } } @@ -502,15 +572,21 @@ public class HierarchicalShardSyncer { */ if (lease != null) { if (descendantParentShardIds.contains(parentShardId) - && !initialPosition.getInitialPositionInStream() - .equals(InitialPositionInStream.AT_TIMESTAMP)) { - log.info("Setting Lease '{}' checkpoint to 'TRIM_HORIZON'. Checkpoint was previously set to {}", - lease.leaseKey(), lease.checkpoint()); + && !initialPosition + .getInitialPositionInStream() + .equals(InitialPositionInStream.AT_TIMESTAMP)) { + log.info( + "Setting Lease '{}' checkpoint to 'TRIM_HORIZON'. Checkpoint was previously set to {}", + lease.leaseKey(), + lease.checkpoint()); lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); } else { final ExtendedSequenceNumber newCheckpoint = convertToCheckpoint(initialPosition); - log.info("Setting Lease '{}' checkpoint to '{}'. Checkpoint was previously set to {}", - lease.leaseKey(), newCheckpoint, lease.checkpoint()); + log.info( + "Setting Lease '{}' checkpoint to '{}'. Checkpoint was previously set to {}", + lease.leaseKey(), + newCheckpoint, + lease.checkpoint()); lease.checkpoint(newCheckpoint); } } @@ -522,8 +598,9 @@ public class HierarchicalShardSyncer { // lease just like we do for TRIM_HORIZON. However we will only return back records with server-side // timestamp at or after the specified initial position timestamp. 
if (initialPosition.getInitialPositionInStream().equals(InitialPositionInStream.TRIM_HORIZON) - || initialPosition.getInitialPositionInStream() - .equals(InitialPositionInStream.AT_TIMESTAMP)) { + || initialPosition + .getInitialPositionInStream() + .equals(InitialPositionInStream.AT_TIMESTAMP)) { memoizationContext.setShouldCreateLease(shardId, true); } } @@ -534,12 +611,20 @@ public class HierarchicalShardSyncer { return isDescendant; } - static boolean checkIfDescendantAndAddNewLeasesForAncestors(final String shardId, - final InitialPositionInStreamExtended initialPosition, final Set shardIdsOfCurrentLeases, + static boolean checkIfDescendantAndAddNewLeasesForAncestors( + final String shardId, + final InitialPositionInStreamExtended initialPosition, + final Set shardIdsOfCurrentLeases, final Map shardIdToShardMapOfAllKinesisShards, - final Map shardIdToLeaseMapOfNewShards, MemoizationContext memoizationContext) { - return checkIfDescendantAndAddNewLeasesForAncestors(shardId, initialPosition, shardIdsOfCurrentLeases, - shardIdToShardMapOfAllKinesisShards, shardIdToLeaseMapOfNewShards, memoizationContext, + final Map shardIdToLeaseMapOfNewShards, + MemoizationContext memoizationContext) { + return checkIfDescendantAndAddNewLeasesForAncestors( + shardId, + initialPosition, + shardIdsOfCurrentLeases, + shardIdToShardMapOfAllKinesisShards, + shardIdToLeaseMapOfNewShards, + memoizationContext, new MultiStreamArgs(false, null)); } @@ -552,8 +637,8 @@ public class HierarchicalShardSyncer { * @param shardIdToShardMapOfAllKinesisShards ShardId->Shard map containing all shards obtained via DescribeStream. 
* @return Set of parentShardIds */ - static Set getParentShardIds(final Shard shard, - final Map shardIdToShardMapOfAllKinesisShards) { + static Set getParentShardIds( + final Shard shard, final Map shardIdToShardMapOfAllKinesisShards) { final Set parentShardIds = new HashSet<>(2); final String parentShardId = shard.parentShardId(); if (parentShardId != null && shardIdToShardMapOfAllKinesisShards.containsKey(parentShardId)) { @@ -566,12 +651,13 @@ public class HierarchicalShardSyncer { return parentShardIds; } - public synchronized Lease createLeaseForChildShard(final ChildShard childShard, - final StreamIdentifier streamIdentifier) throws InvalidStateException { + public synchronized Lease createLeaseForChildShard( + final ChildShard childShard, final StreamIdentifier streamIdentifier) throws InvalidStateException { final MultiStreamArgs multiStreamArgs = new MultiStreamArgs(isMultiStreamMode, streamIdentifier); - return multiStreamArgs.isMultiStreamMode() ? newKCLMultiStreamLeaseForChildShard(childShard, streamIdentifier) - : newKCLLeaseForChildShard(childShard); + return multiStreamArgs.isMultiStreamMode() + ? 
newKCLMultiStreamLeaseForChildShard(childShard, streamIdentifier) + : newKCLLeaseForChildShard(childShard); } /** @@ -595,8 +681,8 @@ public class HierarchicalShardSyncer { return newLease; } - private static Lease newKCLMultiStreamLeaseForChildShard(final ChildShard childShard, - final StreamIdentifier streamIdentifier) throws InvalidStateException { + private static Lease newKCLMultiStreamLeaseForChildShard( + final ChildShard childShard, final StreamIdentifier streamIdentifier) throws InvalidStateException { MultiStreamLease newLease = new MultiStreamLease(); newLease.leaseKey(MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), childShard.shardId())); if (!CollectionUtils.isNullOrEmpty(childShard.parentShards())) { @@ -671,8 +757,10 @@ public class HierarchicalShardSyncer { * @return List of open shards (shards at the tip of the stream) - may include shards that are not yet active. */ static List getOpenShards(final List allShards, final String streamIdentifier) { - return allShards.stream().filter(shard -> shard.sequenceNumberRange().endingSequenceNumber() == null) - .peek(shard -> log.debug("{} : Found open shard: {}", streamIdentifier, shard.shardId())).collect(Collectors.toList()); + return allShards.stream() + .filter(shard -> shard.sequenceNumberRange().endingSequenceNumber() == null) + .peek(shard -> log.debug("{} : Found open shard: {}", streamIdentifier, shard.shardId())) + .collect(Collectors.toList()); } private static ExtendedSequenceNumber convertToCheckpoint(final InitialPositionInStreamExtended position) { @@ -691,7 +779,8 @@ public class HierarchicalShardSyncer { private static String getStreamIdentifier(MultiStreamArgs multiStreamArgs) { return Optional.ofNullable(multiStreamArgs.streamIdentifier()) - .map(streamId -> streamId.serialize()).orElse("single_stream_mode"); + .map(streamId -> streamId.serialize()) + .orElse("single_stream_mode"); } /** @@ -745,8 +834,10 @@ public class HierarchicalShardSyncer { // If we found shards for the 
two leases, use comparison of the starting sequence numbers if (shard1 != null && shard2 != null) { - BigInteger sequenceNumber1 = new BigInteger(shard1.sequenceNumberRange().startingSequenceNumber()); - BigInteger sequenceNumber2 = new BigInteger(shard2.sequenceNumberRange().startingSequenceNumber()); + BigInteger sequenceNumber1 = + new BigInteger(shard1.sequenceNumberRange().startingSequenceNumber()); + BigInteger sequenceNumber2 = + new BigInteger(shard2.sequenceNumberRange().startingSequenceNumber()); result = sequenceNumber1.compareTo(sequenceNumber2); } @@ -780,9 +871,12 @@ public class HierarchicalShardSyncer { * @param multiStreamArgs * @return */ - List determineNewLeasesToCreate(List shards, List currentLeases, - InitialPositionInStreamExtended initialPosition, Set inconsistentShardIds, - MultiStreamArgs multiStreamArgs); + List determineNewLeasesToCreate( + List shards, + List currentLeases, + InitialPositionInStreamExtended initialPosition, + Set inconsistentShardIds, + MultiStreamArgs multiStreamArgs); } /** @@ -805,21 +899,29 @@ public class HierarchicalShardSyncer { * @return */ @Override - public List determineNewLeasesToCreate(List shards, List currentLeases, - InitialPositionInStreamExtended initialPosition, Set inconsistentShardIds, MultiStreamArgs multiStreamArgs) { + public List determineNewLeasesToCreate( + List shards, + List currentLeases, + InitialPositionInStreamExtended initialPosition, + Set inconsistentShardIds, + MultiStreamArgs multiStreamArgs) { final String streamIdentifier = Optional.ofNullable(multiStreamArgs.streamIdentifier()) - .map(streamId -> streamId.serialize()).orElse(""); + .map(streamId -> streamId.serialize()) + .orElse(""); final Map shardIdToShardMapOfAllKinesisShards = constructShardIdToShardMap(shards); - currentLeases.stream().peek(lease -> log.debug("{} : Existing lease: {}", streamIdentifier, lease)) + currentLeases.stream() + .peek(lease -> log.debug("{} : Existing lease: {}", streamIdentifier, lease)) 
.map(lease -> getShardIdFromLease(lease, multiStreamArgs)) .collect(Collectors.toSet()); - final List newLeasesToCreate = getLeasesToCreateForOpenAndClosedShards(initialPosition, shards, multiStreamArgs, streamIdentifier); + final List newLeasesToCreate = + getLeasesToCreateForOpenAndClosedShards(initialPosition, shards, multiStreamArgs, streamIdentifier); - //TODO: Verify before LTR launch that ending sequence number is still returned from the service. + // TODO: Verify before LTR launch that ending sequence number is still returned from the service. final Comparator startingSequenceNumberComparator = - new StartingSequenceNumberAndShardIdBasedComparator(shardIdToShardMapOfAllKinesisShards, multiStreamArgs); + new StartingSequenceNumberAndShardIdBasedComparator( + shardIdToShardMapOfAllKinesisShards, multiStreamArgs); newLeasesToCreate.sort(startingSequenceNumberComparator); return newLeasesToCreate; } @@ -829,14 +931,18 @@ public class HierarchicalShardSyncer { * regardless of if they are open or closed. Closed shards will be unblocked via child shard information upon * reaching SHARD_END. */ - private List getLeasesToCreateForOpenAndClosedShards(InitialPositionInStreamExtended initialPosition, - List shards, MultiStreamArgs multiStreamArgs, String streamId) { + private List getLeasesToCreateForOpenAndClosedShards( + InitialPositionInStreamExtended initialPosition, + List shards, + MultiStreamArgs multiStreamArgs, + String streamId) { final Map shardIdToNewLeaseMap = new HashMap<>(); for (Shard shard : shards) { final String shardId = shard.shardId(); - final Lease lease = multiStreamArgs.isMultiStreamMode() ? - newKCLMultiStreamLease(shard, multiStreamArgs.streamIdentifier) : newKCLLease(shard); + final Lease lease = multiStreamArgs.isMultiStreamMode() + ? 
newKCLMultiStreamLease(shard, multiStreamArgs.streamIdentifier) + : newKCLLease(shard); lease.checkpoint(convertToCheckpoint(initialPosition)); log.debug("{} : Need to create a lease for shard with shardId {}", streamId, shardId); @@ -909,13 +1015,17 @@ public class HierarchicalShardSyncer { * @return List of new leases to create sorted by starting sequenceNumber of the corresponding shard */ @Override - public synchronized List determineNewLeasesToCreate(final List shards, final List currentLeases, - final InitialPositionInStreamExtended initialPosition, final Set inconsistentShardIds, + public synchronized List determineNewLeasesToCreate( + final List shards, + final List currentLeases, + final InitialPositionInStreamExtended initialPosition, + final Set inconsistentShardIds, final MultiStreamArgs multiStreamArgs) { final Map shardIdToNewLeaseMap = new HashMap<>(); final Map shardIdToShardMapOfAllKinesisShards = constructShardIdToShardMap(shards); final String streamIdentifier = Optional.ofNullable(multiStreamArgs.streamIdentifier()) - .map(streamId -> streamId.serialize()).orElse(""); + .map(streamId -> streamId.serialize()) + .orElse(""); final Set shardIdsOfCurrentLeases = currentLeases.stream() .peek(lease -> log.debug("{} : Existing lease: {}", streamIdentifier, lease)) .map(lease -> getShardIdFromLease(lease, multiStreamArgs)) @@ -929,9 +1039,15 @@ public class HierarchicalShardSyncer { final String shardId = shard.shardId(); log.debug("{} : Evaluating leases for open shard {} and its ancestors.", streamIdentifier, shardId); if (shardIdsOfCurrentLeases.contains(shardId)) { - log.debug("{} : Lease for shardId {} already exists. Not creating a lease", streamIdentifier, shardId); + log.debug( + "{} : Lease for shardId {} already exists. Not creating a lease", + streamIdentifier, + shardId); } else if (inconsistentShardIds.contains(shardId)) { - log.info("{} : shardId {} is an inconsistent child. 
Not creating a lease", streamIdentifier, shardId); + log.info( + "{} : shardId {} is an inconsistent child. Not creating a lease", + streamIdentifier, + shardId); } else { log.debug("{} : Beginning traversal of ancestry tree for shardId {}", streamIdentifier, shardId); @@ -939,9 +1055,14 @@ public class HierarchicalShardSyncer { // We will create leases for only one level in the ancestry tree. Once we find the first ancestor // that needs to be processed in order to complete the hash range, we will not create leases for // further descendants of that ancestor. - final boolean isDescendant = checkIfDescendantAndAddNewLeasesForAncestors(shardId, initialPosition, - shardIdsOfCurrentLeases, shardIdToShardMapOfAllKinesisShards, shardIdToNewLeaseMap, - memoizationContext, multiStreamArgs); + final boolean isDescendant = checkIfDescendantAndAddNewLeasesForAncestors( + shardId, + initialPosition, + shardIdsOfCurrentLeases, + shardIdToShardMapOfAllKinesisShards, + shardIdToNewLeaseMap, + memoizationContext, + multiStreamArgs); // If shard is a descendant, the leases for its ancestors were already created above. Open shards // that are NOT descendants will not have leases yet, so we create them here. We will not create @@ -949,22 +1070,30 @@ public class HierarchicalShardSyncer { // SHARD_END of their parents. if (!isDescendant) { log.debug("{} : shardId {} has no ancestors. Creating a lease.", streamIdentifier, shardId); - final Lease newLease = multiStreamArgs.isMultiStreamMode() ? - newKCLMultiStreamLease(shard, multiStreamArgs.streamIdentifier()) : - newKCLLease(shard); + final Lease newLease = multiStreamArgs.isMultiStreamMode() + ? 
newKCLMultiStreamLease(shard, multiStreamArgs.streamIdentifier()) + : newKCLLease(shard); newLease.checkpoint(convertToCheckpoint(initialPosition)); - log.debug("{} : Set checkpoint of {} to {}", streamIdentifier, newLease.leaseKey(), newLease.checkpoint()); + log.debug( + "{} : Set checkpoint of {} to {}", + streamIdentifier, + newLease.leaseKey(), + newLease.checkpoint()); shardIdToNewLeaseMap.put(shardId, newLease); } else { - log.debug("{} : shardId {} is a descendant whose ancestors should already have leases. " + - "Not creating a lease.", streamIdentifier, shardId); + log.debug( + "{} : shardId {} is a descendant whose ancestors should already have leases. " + + "Not creating a lease.", + streamIdentifier, + shardId); } } } final List newLeasesToCreate = new ArrayList<>(shardIdToNewLeaseMap.values()); - final Comparator startingSequenceNumberComparator = new StartingSequenceNumberAndShardIdBasedComparator( - shardIdToShardMapOfAllKinesisShards, multiStreamArgs); + final Comparator startingSequenceNumberComparator = + new StartingSequenceNumberAndShardIdBasedComparator( + shardIdToShardMapOfAllKinesisShards, multiStreamArgs); newLeasesToCreate.sort(startingSequenceNumberComparator); return newLeasesToCreate; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java index e2336562..d128fc95 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/KinesisShardDetector.java @@ -27,6 +27,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.stream.Collectors; + import lombok.AccessLevel; import lombok.Getter; import lombok.NonNull; @@ -82,8 +83,11 @@ public class KinesisShardDetector implements 
ShardDetector { @NonNull private final KinesisAsyncClient kinesisClient; - @NonNull @Getter + + @NonNull + @Getter private final StreamIdentifier streamIdentifier; + private final long listShardsBackoffTimeInMillis; private final int maxListShardsRetryAttempts; private final long listShardsCacheAllowedAgeInSeconds; @@ -93,23 +97,41 @@ public class KinesisShardDetector implements ShardDetector { private volatile Map cachedShardMap = null; private volatile Instant lastCacheUpdateTime; + @Getter(AccessLevel.PACKAGE) private final AtomicInteger cacheMisses = new AtomicInteger(0); private static final Boolean THROW_RESOURCE_NOT_FOUND_EXCEPTION = true; @Deprecated - public KinesisShardDetector(KinesisAsyncClient kinesisClient, String streamName, long listShardsBackoffTimeInMillis, - int maxListShardsRetryAttempts, long listShardsCacheAllowedAgeInSeconds, int maxCacheMissesBeforeReload, + public KinesisShardDetector( + KinesisAsyncClient kinesisClient, + String streamName, + long listShardsBackoffTimeInMillis, + int maxListShardsRetryAttempts, + long listShardsCacheAllowedAgeInSeconds, + int maxCacheMissesBeforeReload, int cacheMissWarningModulus) { - this(kinesisClient, StreamIdentifier.singleStreamInstance(streamName), listShardsBackoffTimeInMillis, maxListShardsRetryAttempts, - listShardsCacheAllowedAgeInSeconds, maxCacheMissesBeforeReload, cacheMissWarningModulus, + this( + kinesisClient, + StreamIdentifier.singleStreamInstance(streamName), + listShardsBackoffTimeInMillis, + maxListShardsRetryAttempts, + listShardsCacheAllowedAgeInSeconds, + maxCacheMissesBeforeReload, + cacheMissWarningModulus, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); } - public KinesisShardDetector(KinesisAsyncClient kinesisClient, StreamIdentifier streamIdentifier, long listShardsBackoffTimeInMillis, - int maxListShardsRetryAttempts, long listShardsCacheAllowedAgeInSeconds, int maxCacheMissesBeforeReload, - int cacheMissWarningModulus, Duration kinesisRequestTimeout) { + public 
KinesisShardDetector( + KinesisAsyncClient kinesisClient, + StreamIdentifier streamIdentifier, + long listShardsBackoffTimeInMillis, + int maxListShardsRetryAttempts, + long listShardsCacheAllowedAgeInSeconds, + int maxCacheMissesBeforeReload, + int cacheMissWarningModulus, + Duration kinesisRequestTimeout) { this.kinesisClient = kinesisClient; this.streamIdentifier = streamIdentifier; this.listShardsBackoffTimeInMillis = listShardsBackoffTimeInMillis; @@ -143,8 +165,10 @@ public class KinesisShardDetector implements ShardDetector { shard = cachedShardMap.get(shardId); if (shard == null) { - log.warn("Even after cache refresh shard '{}' wasn't found. This could indicate a bigger" - + " problem.", shardId); + log.warn( + "Even after cache refresh shard '{}' wasn't found. This could indicate a bigger" + + " problem.", + shardId); } cacheMisses.set(0); @@ -159,8 +183,8 @@ public class KinesisShardDetector implements ShardDetector { } if (shard == null) { - final String message = String.format("Cannot find the shard given the shardId %s. Cache misses: %s", - shardId, cacheMisses); + final String message = + String.format("Cannot find the shard given the shardId %s. 
Cache misses: %s", shardId, cacheMisses); if (cacheMisses.get() % cacheMissWarningModulus == 0) { log.warn(message); } else { @@ -189,8 +213,8 @@ public class KinesisShardDetector implements ShardDetector { return listShardsWithFilterInternal(shardFilter, !THROW_RESOURCE_NOT_FOUND_EXCEPTION); } - private List listShardsWithFilterInternal(ShardFilter shardFilter, - boolean shouldPropagateResourceNotFoundException) { + private List listShardsWithFilterInternal( + ShardFilter shardFilter, boolean shouldPropagateResourceNotFoundException) { final List shards = new ArrayList<>(); ListShardsResponse result; String nextToken = null; @@ -218,8 +242,8 @@ public class KinesisShardDetector implements ShardDetector { * @param shouldPropagateResourceNotFoundException : used to determine if ResourceNotFoundException should be * handled by method and return Empty list or propagate the exception. */ - private ListShardsResponse listShards(ShardFilter shardFilter, final String nextToken, - final boolean shouldPropagateResourceNotFoundException) { + private ListShardsResponse listShards( + ShardFilter shardFilter, final String nextToken, final boolean shouldPropagateResourceNotFoundException) { ListShardsRequest.Builder builder = KinesisRequestsBuilder.listShardsRequestBuilder(); if (StringUtils.isEmpty(nextToken)) { builder.streamName(streamIdentifier.streamName()).shardFilter(shardFilter); @@ -250,7 +274,9 @@ public class KinesisShardDetector implements ShardDetector { + " Active or Updating)"); return null; } catch (LimitExceededException e) { - log.info("Got LimitExceededException when listing shards {}. Backing off for {} millis.", streamIdentifier, + log.info( + "Got LimitExceededException when listing shards {}. 
Backing off for {} millis.", + streamIdentifier, listShardsBackoffTimeInMillis); try { Thread.sleep(listShardsBackoffTimeInMillis); @@ -259,15 +285,16 @@ public class KinesisShardDetector implements ShardDetector { } lastException = e; } catch (ResourceNotFoundException e) { - log.warn("Got ResourceNotFoundException when fetching shard list for {}. Stream no longer exists.", + log.warn( + "Got ResourceNotFoundException when fetching shard list for {}. Stream no longer exists.", streamIdentifier.streamName()); if (shouldPropagateResourceNotFoundException) { throw e; } return ListShardsResponse.builder() - .shards(Collections.emptyList()) - .nextToken(null) - .build(); + .shards(Collections.emptyList()) + .nextToken(null) + .build(); } catch (TimeoutException te) { throw new RuntimeException(te); @@ -301,8 +328,8 @@ public class KinesisShardDetector implements ShardDetector { } @Override - public ListShardsResponse getListShardsResponse(ListShardsRequest request) throws - ExecutionException, TimeoutException, InterruptedException { + public ListShardsResponse getListShardsResponse(ListShardsRequest request) + throws ExecutionException, TimeoutException, InterruptedException { return FutureUtils.resolveOrCancelFuture(kinesisClient.listShards(request), kinesisRequestTimeout); } @@ -317,16 +344,14 @@ public class KinesisShardDetector implements ShardDetector { streamIdentifier.streamArnOptional().ifPresent(arn -> getShardIteratorRequestBuilder.streamARN(arn.toString())); final GetShardIteratorResponse getShardIteratorResponse = FutureUtils.resolveOrCancelFuture( - kinesisClient.getShardIterator(getShardIteratorRequestBuilder.build()), - kinesisRequestTimeout); + kinesisClient.getShardIterator(getShardIteratorRequestBuilder.build()), kinesisRequestTimeout); final GetRecordsRequest.Builder getRecordsRequestBuilder = KinesisRequestsBuilder.getRecordsRequestBuilder() .shardIterator(getShardIteratorResponse.shardIterator()); streamIdentifier.streamArnOptional().ifPresent(arn 
-> getRecordsRequestBuilder.streamARN(arn.toString())); final GetRecordsResponse getRecordsResponse = FutureUtils.resolveOrCancelFuture( - kinesisClient.getRecords(getRecordsRequestBuilder.build()), - kinesisRequestTimeout); + kinesisClient.getRecords(getRecordsRequestBuilder.build()), kinesisRequestTimeout); return getRecordsResponse.childShards(); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/Lease.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/Lease.java index b1602eb8..01735f9c 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/Lease.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/Lease.java @@ -19,6 +19,7 @@ import java.util.HashSet; import java.util.Set; import java.util.UUID; import java.util.concurrent.TimeUnit; + import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NoArgsConstructor; @@ -39,12 +40,19 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; @NoArgsConstructor @Getter @Accessors(fluent = true) -@EqualsAndHashCode(exclude = {"concurrencyToken", "lastCounterIncrementNanos", "childShardIds", "pendingCheckpointState", "isMarkedForLeaseSteal"}) +@EqualsAndHashCode( + exclude = { + "concurrencyToken", + "lastCounterIncrementNanos", + "childShardIds", + "pendingCheckpointState", + "isMarkedForLeaseSteal" + }) @ToString public class Lease { /** * See javadoc for System.nanoTime - summary: - * + * * Sometimes System.nanoTime's return values will wrap due to overflow. When they do, the difference between two * values will be very large. We will consider leases to be expired if they are more than a year old. */ @@ -100,36 +108,71 @@ public class Lease { * Count of distinct lease holders between checkpoints. 
*/ private Long ownerSwitchesSinceCheckpoint = 0L; + private final Set parentShardIds = new HashSet<>(); private final Set childShardIds = new HashSet<>(); private HashKeyRangeForLease hashKeyRangeForLease; /** * Copy constructor, used by clone(). - * + * * @param lease lease to copy */ protected Lease(Lease lease) { - this(lease.leaseKey(), lease.leaseOwner(), lease.leaseCounter(), lease.concurrencyToken(), - lease.lastCounterIncrementNanos(), lease.checkpoint(), lease.pendingCheckpoint(), - lease.ownerSwitchesSinceCheckpoint(), lease.parentShardIds(), lease.childShardIds(), - lease.pendingCheckpointState(), lease.hashKeyRangeForLease()); + this( + lease.leaseKey(), + lease.leaseOwner(), + lease.leaseCounter(), + lease.concurrencyToken(), + lease.lastCounterIncrementNanos(), + lease.checkpoint(), + lease.pendingCheckpoint(), + lease.ownerSwitchesSinceCheckpoint(), + lease.parentShardIds(), + lease.childShardIds(), + lease.pendingCheckpointState(), + lease.hashKeyRangeForLease()); } @Deprecated - public Lease(final String leaseKey, final String leaseOwner, final Long leaseCounter, - final UUID concurrencyToken, final Long lastCounterIncrementNanos, - final ExtendedSequenceNumber checkpoint, final ExtendedSequenceNumber pendingCheckpoint, - final Long ownerSwitchesSinceCheckpoint, final Set parentShardIds) { - this(leaseKey, leaseOwner, leaseCounter, concurrencyToken, lastCounterIncrementNanos, checkpoint, pendingCheckpoint, - ownerSwitchesSinceCheckpoint, parentShardIds, new HashSet<>(), null, null); + public Lease( + final String leaseKey, + final String leaseOwner, + final Long leaseCounter, + final UUID concurrencyToken, + final Long lastCounterIncrementNanos, + final ExtendedSequenceNumber checkpoint, + final ExtendedSequenceNumber pendingCheckpoint, + final Long ownerSwitchesSinceCheckpoint, + final Set parentShardIds) { + this( + leaseKey, + leaseOwner, + leaseCounter, + concurrencyToken, + lastCounterIncrementNanos, + checkpoint, + pendingCheckpoint, + 
ownerSwitchesSinceCheckpoint, + parentShardIds, + new HashSet<>(), + null, + null); } - public Lease(final String leaseKey, final String leaseOwner, final Long leaseCounter, - final UUID concurrencyToken, final Long lastCounterIncrementNanos, - final ExtendedSequenceNumber checkpoint, final ExtendedSequenceNumber pendingCheckpoint, - final Long ownerSwitchesSinceCheckpoint, final Set parentShardIds, final Set childShardIds, - final byte[] pendingCheckpointState, final HashKeyRangeForLease hashKeyRangeForLease) { + public Lease( + final String leaseKey, + final String leaseOwner, + final Long leaseCounter, + final UUID concurrencyToken, + final Long lastCounterIncrementNanos, + final ExtendedSequenceNumber checkpoint, + final ExtendedSequenceNumber pendingCheckpoint, + final Long ownerSwitchesSinceCheckpoint, + final Set parentShardIds, + final Set childShardIds, + final byte[] pendingCheckpointState, + final HashKeyRangeForLease hashKeyRangeForLease) { this.leaseKey = leaseKey; this.leaseOwner = leaseOwner; this.leaseCounter = leaseCounter; @@ -159,7 +202,7 @@ public class Lease { /** * Updates this Lease's mutable, application-specific fields based on the passed-in lease object. Does not update * fields that are internal to the leasing library (leaseKey, leaseOwner, leaseCounter). - * + * * @param lease */ public void update(final Lease lease) { @@ -208,7 +251,7 @@ public class Lease { /** * Sets lastCounterIncrementNanos - * + * * @param lastCounterIncrementNanos last renewal in nanoseconds since the epoch */ public void lastCounterIncrementNanos(Long lastCounterIncrementNanos) { @@ -217,7 +260,7 @@ public class Lease { /** * Sets concurrencyToken. - * + * * @param concurrencyToken may not be null */ public void concurrencyToken(@NonNull final UUID concurrencyToken) { @@ -226,7 +269,7 @@ public class Lease { /** * Sets leaseKey. LeaseKey is immutable once set. - * + * * @param leaseKey may not be null. 
*/ public void leaseKey(@NonNull final String leaseKey) { @@ -238,7 +281,7 @@ public class Lease { /** * Sets leaseCounter. - * + * * @param leaseCounter may not be null */ public void leaseCounter(@NonNull final Long leaseCounter) { @@ -314,7 +357,7 @@ public class Lease { /** * Sets leaseOwner. - * + * * @param leaseOwner may be null. */ public void leaseOwner(String leaseOwner) { @@ -323,11 +366,10 @@ public class Lease { /** * Returns a deep copy of this object. Type-unsafe - there aren't good mechanisms for copy-constructing generics. - * + * * @return A deep copy of this object. */ public Lease copy() { return new Lease(this); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCleanupManager.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCleanupManager.java index 861626b6..739732d2 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCleanupManager.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCleanupManager.java @@ -15,6 +15,18 @@ package software.amazon.kinesis.leases; +import java.util.HashSet; +import java.util.Objects; +import java.util.Optional; +import java.util.Queue; +import java.util.Set; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; + import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Stopwatch; import lombok.EqualsAndHashCode; @@ -28,40 +40,31 @@ import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; import software.amazon.awssdk.utils.CollectionUtils; import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.leases.exceptions.DependencyException; -import 
software.amazon.kinesis.leases.exceptions.LeasePendingDeletion; import software.amazon.kinesis.leases.exceptions.InvalidStateException; +import software.amazon.kinesis.leases.exceptions.LeasePendingDeletion; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.retrieval.AWSExceptionManager; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import java.util.HashSet; -import java.util.Objects; -import java.util.Optional; -import java.util.Queue; -import java.util.Set; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.stream.Collectors; - /** * Helper class to cleanup of any expired/closed shard leases. It will cleanup leases periodically as defined by * {@link LeaseManagementConfig#leaseCleanupConfig()} asynchronously. 
*/ -@Accessors(fluent=true) +@Accessors(fluent = true) @Slf4j @RequiredArgsConstructor @EqualsAndHashCode public class LeaseCleanupManager { @NonNull private final LeaseCoordinator leaseCoordinator; + @NonNull private final MetricsFactory metricsFactory; + @NonNull private final ScheduledExecutorService deletionThreadPool; + private final boolean cleanupLeasesUponShardCompletion; private final long leaseCleanupIntervalMillis; private final long completedLeaseCleanupIntervalMillis; @@ -85,8 +88,8 @@ public class LeaseCleanupManager { log.info("Starting lease cleanup thread."); completedLeaseStopwatch.reset().start(); garbageLeaseStopwatch.reset().start(); - deletionThreadPool.scheduleAtFixedRate(new LeaseCleanupThread(), INITIAL_DELAY, leaseCleanupIntervalMillis, - TimeUnit.MILLISECONDS); + deletionThreadPool.scheduleAtFixedRate( + new LeaseCleanupThread(), INITIAL_DELAY, leaseCleanupIntervalMillis, TimeUnit.MILLISECONDS); isRunning = true; } else { log.info("Lease cleanup thread already running, no need to start."); @@ -117,8 +120,10 @@ public class LeaseCleanupManager { public void enqueueForDeletion(LeasePendingDeletion leasePendingDeletion) { final Lease lease = leasePendingDeletion.lease(); if (lease == null) { - log.warn("Cannot enqueue {} for {} as instance doesn't hold the lease for that shard.", - leasePendingDeletion.shardInfo(), leasePendingDeletion.streamIdentifier()); + log.warn( + "Cannot enqueue {} for {} as instance doesn't hold the lease for that shard.", + leasePendingDeletion.shardInfo(), + leasePendingDeletion.streamIdentifier()); } else { log.debug("Enqueuing lease {} for deferred deletion.", lease.leaseKey()); if (!deletionQueue.add(leasePendingDeletion)) { @@ -161,9 +166,12 @@ public class LeaseCleanupManager { return garbageLeaseStopwatch.elapsed(TimeUnit.MILLISECONDS) >= garbageLeaseCleanupIntervalMillis; } - public LeaseCleanupResult cleanupLease(LeasePendingDeletion leasePendingDeletion, - boolean timeToCheckForCompletedShard, boolean 
timeToCheckForGarbageShard) throws TimeoutException, - InterruptedException, DependencyException, ProvisionedThroughputException, InvalidStateException { + public LeaseCleanupResult cleanupLease( + LeasePendingDeletion leasePendingDeletion, + boolean timeToCheckForCompletedShard, + boolean timeToCheckForGarbageShard) + throws TimeoutException, InterruptedException, DependencyException, ProvisionedThroughputException, + InvalidStateException { final Lease lease = leasePendingDeletion.lease(); final ShardInfo shardInfo = leasePendingDeletion.shardInfo(); final StreamIdentifier streamIdentifier = leasePendingDeletion.streamIdentifier(); @@ -188,7 +196,8 @@ public class LeaseCleanupManager { if (CollectionUtils.isNullOrEmpty(childShardKeys)) { log.error( "No child shards returned from service for shard {} for {} while cleaning up lease.", - shardInfo.shardId(), streamIdentifier.streamName()); + shardInfo.shardId(), + streamIdentifier.streamName()); } else { wereChildShardsPresent = true; updateLeaseWithChildShards(leasePendingDeletion, childShardKeys); @@ -205,19 +214,25 @@ public class LeaseCleanupManager { cleanedUpCompletedLease = cleanupLeaseForCompletedShard(lease, shardInfo, childShardKeys); } catch (Exception e) { // Suppressing the exception here, so that we can attempt for garbage cleanup. 
- log.warn("Unable to cleanup lease for shard {} in {}", shardInfo.shardId(), streamIdentifier.streamName(), e); + log.warn( + "Unable to cleanup lease for shard {} in {}", + shardInfo.shardId(), + streamIdentifier.streamName(), + e); } } else { - log.info("Lease not present in lease table while cleaning the shard {} of {}", - shardInfo.shardId(), streamIdentifier.streamName()); + log.info( + "Lease not present in lease table while cleaning the shard {} of {}", + shardInfo.shardId(), + streamIdentifier.streamName()); cleanedUpCompletedLease = true; } } if (!alreadyCheckedForGarbageCollection && timeToCheckForGarbageShard) { try { - wereChildShardsPresent = !CollectionUtils - .isNullOrEmpty(leasePendingDeletion.getChildShardsFromService()); + wereChildShardsPresent = + !CollectionUtils.isNullOrEmpty(leasePendingDeletion.getChildShardsFromService()); } catch (ExecutionException e) { throw exceptionManager.apply(e.getCause()); } @@ -227,8 +242,8 @@ public class LeaseCleanupManager { cleanedUpGarbageLease = cleanupLeaseForGarbageShard(lease, e); } - return new LeaseCleanupResult(cleanedUpCompletedLease, cleanedUpGarbageLease, wereChildShardsPresent, - wasResourceNotFound); + return new LeaseCleanupResult( + cleanedUpCompletedLease, cleanedUpGarbageLease, wereChildShardsPresent, wasResourceNotFound); } // A lease that ended with SHARD_END from ResourceNotFoundException is safe to delete if it no longer exists in the @@ -254,11 +269,15 @@ public class LeaseCleanupManager { private boolean allParentShardLeasesDeleted(Lease lease, ShardInfo shardInfo) throws DependencyException, ProvisionedThroughputException, InvalidStateException { for (String parentShard : lease.parentShardIds()) { - final Lease parentLease = leaseCoordinator.leaseRefresher().getLease(ShardInfo.getLeaseKey(shardInfo, parentShard)); + final Lease parentLease = + leaseCoordinator.leaseRefresher().getLease(ShardInfo.getLeaseKey(shardInfo, parentShard)); if (parentLease != null) { - log.warn("Lease {} has a 
parent lease {} which is still present in the lease table, skipping deletion " + - "for this lease.", lease, parentLease); + log.warn( + "Lease {} has a parent lease {} which is still present in the lease table, skipping deletion " + + "for this lease.", + lease, + parentLease); return false; } } @@ -271,27 +290,29 @@ public class LeaseCleanupManager { private boolean cleanupLeaseForCompletedShard(Lease lease, ShardInfo shardInfo, Set childShardKeys) throws DependencyException, ProvisionedThroughputException, InvalidStateException, IllegalStateException { final Set processedChildShardLeaseKeys = new HashSet<>(); - final Set childShardLeaseKeys = childShardKeys.stream().map(ck -> ShardInfo.getLeaseKey(shardInfo, ck)) + final Set childShardLeaseKeys = childShardKeys.stream() + .map(ck -> ShardInfo.getLeaseKey(shardInfo, ck)) .collect(Collectors.toSet()); for (String childShardLeaseKey : childShardLeaseKeys) { final Lease childShardLease = Optional.ofNullable( - leaseCoordinator.leaseRefresher().getLease(childShardLeaseKey)) - .orElseThrow(() -> new IllegalStateException( - "Child lease " + childShardLeaseKey + " for completed shard not found in " - + "lease table - not cleaning up lease " + lease)); + leaseCoordinator.leaseRefresher().getLease(childShardLeaseKey)) + .orElseThrow(() -> new IllegalStateException("Child lease " + childShardLeaseKey + + " for completed shard not found in " + "lease table - not cleaning up lease " + lease)); - if (!childShardLease.checkpoint().equals(ExtendedSequenceNumber.TRIM_HORIZON) && !childShardLease - .checkpoint().equals(ExtendedSequenceNumber.AT_TIMESTAMP)) { + if (!childShardLease.checkpoint().equals(ExtendedSequenceNumber.TRIM_HORIZON) + && !childShardLease.checkpoint().equals(ExtendedSequenceNumber.AT_TIMESTAMP)) { processedChildShardLeaseKeys.add(childShardLease.leaseKey()); } } - if (!allParentShardLeasesDeleted(lease, shardInfo) || !Objects.equals(childShardLeaseKeys, processedChildShardLeaseKeys)) { + if 
(!allParentShardLeasesDeleted(lease, shardInfo) + || !Objects.equals(childShardLeaseKeys, processedChildShardLeaseKeys)) { return false; } - log.info("Deleting lease {} as it has been completely processed and processing of child shard(s) has begun.", + log.info( + "Deleting lease {} as it has been completely processed and processing of child shard(s) has begun.", lease); leaseCoordinator.leaseRefresher().deleteLease(lease); @@ -331,23 +352,36 @@ public class LeaseCleanupManager { final StreamIdentifier streamIdentifier = leasePendingDeletion.streamIdentifier(); boolean deletionSucceeded = false; try { - final LeaseCleanupResult leaseCleanupResult = cleanupLease(leasePendingDeletion, - timeToCheckForCompletedShard(), timeToCheckForGarbageShard()); + final LeaseCleanupResult leaseCleanupResult = cleanupLease( + leasePendingDeletion, timeToCheckForCompletedShard(), timeToCheckForGarbageShard()); completedLeaseCleanedUp |= leaseCleanupResult.cleanedUpCompletedLease(); garbageLeaseCleanedUp |= leaseCleanupResult.cleanedUpGarbageLease(); if (leaseCleanupResult.leaseCleanedUp()) { - log.info("Successfully cleaned up lease {} for {} due to {}", leaseKey, streamIdentifier, leaseCleanupResult); + log.info( + "Successfully cleaned up lease {} for {} due to {}", + leaseKey, + streamIdentifier, + leaseCleanupResult); deletionSucceeded = true; } else { - log.warn("Unable to clean up lease {} for {} due to {}", leaseKey, streamIdentifier, leaseCleanupResult); + log.warn( + "Unable to clean up lease {} for {} due to {}", + leaseKey, + streamIdentifier, + leaseCleanupResult); } } catch (Exception e) { - log.error("Failed to cleanup lease {} for {}. Will re-enqueue for deletion and retry on next " + - "scheduled execution.", leaseKey, streamIdentifier, e); + log.error( + "Failed to cleanup lease {} for {}. 
Will re-enqueue for deletion and retry on next " + + "scheduled execution.", + leaseKey, + streamIdentifier, + e); } if (!deletionSucceeded) { - log.debug("Did not cleanup lease {} for {}. Re-enqueueing for deletion.", leaseKey, streamIdentifier); + log.debug( + "Did not cleanup lease {} for {}. Re-enqueueing for deletion.", leaseKey, streamIdentifier); failedDeletions.add(leasePendingDeletion); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java index 6437f339..acc08dab 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseCoordinator.java @@ -136,7 +136,7 @@ public interface LeaseCoordinator { * @return all leases for the application that are in the lease table */ default List allLeases() { - return Collections.emptyList(); + return Collections.emptyList(); } /** diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java index aef4d87e..c8c49a19 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementConfig.java @@ -15,8 +15,6 @@ package software.amazon.kinesis.leases; -import com.google.common.util.concurrent.ThreadFactoryBuilder; - import java.time.Duration; import java.util.Collection; import java.util.concurrent.ExecutorService; @@ -25,11 +23,12 @@ import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Function; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; import lombok.Data; import 
lombok.NonNull; import lombok.experimental.Accessors; import org.apache.commons.lang3.Validate; - import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.model.BillingMode; @@ -53,14 +52,16 @@ public class LeaseManagementConfig { public static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofMinutes(1); - public static final long DEFAULT_LEASE_CLEANUP_INTERVAL_MILLIS = Duration.ofMinutes(1).toMillis(); - public static final long DEFAULT_COMPLETED_LEASE_CLEANUP_INTERVAL_MILLIS = Duration.ofMinutes(5).toMillis(); - public static final long DEFAULT_GARBAGE_LEASE_CLEANUP_INTERVAL_MILLIS = Duration.ofMinutes(30).toMillis(); + public static final long DEFAULT_LEASE_CLEANUP_INTERVAL_MILLIS = + Duration.ofMinutes(1).toMillis(); + public static final long DEFAULT_COMPLETED_LEASE_CLEANUP_INTERVAL_MILLIS = + Duration.ofMinutes(5).toMillis(); + public static final long DEFAULT_GARBAGE_LEASE_CLEANUP_INTERVAL_MILLIS = + Duration.ofMinutes(30).toMillis(); public static final long DEFAULT_PERIODIC_SHARD_SYNC_INTERVAL_MILLIS = 2 * 60 * 1000L; public static final boolean DEFAULT_ENABLE_PRIORITY_LEASE_ASSIGNMENT = true; public static final int DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY = 3; - public static final LeaseCleanupConfig DEFAULT_LEASE_CLEANUP_CONFIG = LeaseCleanupConfig.builder() .leaseCleanupIntervalMillis(DEFAULT_LEASE_CLEANUP_INTERVAL_MILLIS) .completedLeaseCleanupIntervalMillis(DEFAULT_COMPLETED_LEASE_CLEANUP_INTERVAL_MILLIS) @@ -232,7 +233,8 @@ public class LeaseManagementConfig { * is inconsistent. If the auditor finds same set of inconsistencies consecutively for a stream for this many times, * then it would trigger a shard sync. 
*/ - private int leasesRecoveryAuditorInconsistencyConfidenceThreshold = DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY; + private int leasesRecoveryAuditorInconsistencyConfidenceThreshold = + DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY; /** * The initial position for getting records from Kinesis streams. @@ -249,8 +251,12 @@ public class LeaseManagementConfig { private MetricsFactory metricsFactory = new NullMetricsFactory(); @Deprecated - public LeaseManagementConfig(String tableName, DynamoDbAsyncClient dynamoDBClient, KinesisAsyncClient kinesisClient, - String streamName, String workerIdentifier) { + public LeaseManagementConfig( + String tableName, + DynamoDbAsyncClient dynamoDBClient, + KinesisAsyncClient kinesisClient, + String streamName, + String workerIdentifier) { this.tableName = tableName; this.dynamoDBClient = dynamoDBClient; this.kinesisClient = kinesisClient; @@ -258,7 +264,10 @@ public class LeaseManagementConfig { this.workerIdentifier = workerIdentifier; } - public LeaseManagementConfig(String tableName, DynamoDbAsyncClient dynamoDBClient, KinesisAsyncClient kinesisClient, + public LeaseManagementConfig( + String tableName, + DynamoDbAsyncClient dynamoDBClient, + KinesisAsyncClient kinesisClient, String workerIdentifier) { this.tableName = tableName; this.dynamoDBClient = dynamoDBClient; @@ -302,14 +311,20 @@ public class LeaseManagementConfig { * *

    Default value: {@link LeaseManagementThreadPool}

    */ - private ExecutorService executorService = new LeaseManagementThreadPool( - new ThreadFactoryBuilder().setNameFormat("ShardSyncTaskManager-%04d").build()); + private ExecutorService executorService = new LeaseManagementThreadPool(new ThreadFactoryBuilder() + .setNameFormat("ShardSyncTaskManager-%04d") + .build()); static class LeaseManagementThreadPool extends ThreadPoolExecutor { private static final long DEFAULT_KEEP_ALIVE_TIME = 60L; LeaseManagementThreadPool(ThreadFactory threadFactory) { - super(0, Integer.MAX_VALUE, DEFAULT_KEEP_ALIVE_TIME, TimeUnit.SECONDS, new SynchronousQueue<>(), + super( + 0, + Integer.MAX_VALUE, + DEFAULT_KEEP_ALIVE_TIME, + TimeUnit.SECONDS, + new SynchronousQueue<>(), threadFactory); } } @@ -339,7 +354,8 @@ public class LeaseManagementConfig { public LeaseManagementFactory leaseManagementFactory() { if (leaseManagementFactory == null) { Validate.notEmpty(streamName(), "Stream name is empty"); - leaseManagementFactory = new DynamoDBLeaseManagementFactory(kinesisClient(), + leaseManagementFactory = new DynamoDBLeaseManagementFactory( + kinesisClient(), streamName(), dynamoDBClient(), tableName(), @@ -363,7 +379,10 @@ public class LeaseManagementConfig { initialLeaseTableReadCapacity(), initialLeaseTableWriteCapacity(), hierarchicalShardSyncer(), - tableCreatorCallback(), dynamoDbRequestTimeout(), billingMode(), tags()); + tableCreatorCallback(), + dynamoDbRequestTimeout(), + billingMode(), + tags()); } return leaseManagementFactory; } @@ -374,9 +393,11 @@ public class LeaseManagementConfig { * @param isMultiStreamingMode * @return LeaseManagementFactory */ - public LeaseManagementFactory leaseManagementFactory(final LeaseSerializer leaseSerializer, boolean isMultiStreamingMode) { + public LeaseManagementFactory leaseManagementFactory( + final LeaseSerializer leaseSerializer, boolean isMultiStreamingMode) { if (leaseManagementFactory == null) { - leaseManagementFactory = new DynamoDBLeaseManagementFactory(kinesisClient(), + 
leaseManagementFactory = new DynamoDBLeaseManagementFactory( + kinesisClient(), dynamoDBClient(), tableName(), workerIdentifier(), diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementFactory.java index 9f2e5f94..9ed77a53 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseManagementFactory.java @@ -32,7 +32,9 @@ public interface LeaseManagementFactory { throw new UnsupportedOperationException(); } - default ShardSyncTaskManager createShardSyncTaskManager(MetricsFactory metricsFactory, StreamConfig streamConfig, + default ShardSyncTaskManager createShardSyncTaskManager( + MetricsFactory metricsFactory, + StreamConfig streamConfig, DeletedStreamListProvider deletedStreamListProvider) { throw new UnsupportedOperationException("createShardSyncTaskManager method not implemented"); } @@ -46,5 +48,4 @@ public interface LeaseManagementFactory { } LeaseCleanupManager createLeaseCleanupManager(MetricsFactory metricsFactory); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java index 7ec5b5ec..c38d442a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRefresher.java @@ -47,31 +47,30 @@ public interface LeaseRefresher { /** * Creates the table that will store leases. Table is now created in PayPerRequest billing mode by default. * Succeeds if table already exists. 
- * + * * @return true if we created a new table (table didn't exist before) - * + * * @throws ProvisionedThroughputException if we cannot create the lease table due to per-AWS-account capacity * restrictions. * @throws DependencyException if DynamoDB createTable fails in an unexpected way */ - boolean createLeaseTableIfNotExists() - throws ProvisionedThroughputException, DependencyException; + boolean createLeaseTableIfNotExists() throws ProvisionedThroughputException, DependencyException; /** * @return true if the lease table already exists. - * + * * @throws DependencyException if DynamoDB describeTable fails in an unexpected way */ boolean leaseTableExists() throws DependencyException; /** * Blocks until the lease table exists by polling leaseTableExists. - * + * * @param secondsBetweenPolls time to wait between polls in seconds * @param timeoutSeconds total time to wait in seconds - * + * * @return true if table exists, false if timeout was reached - * + * * @throws DependencyException if DynamoDB describeTable fails in an unexpected way */ boolean waitUntilLeaseTableExists(long secondsBetweenPolls, long timeoutSeconds) throws DependencyException; @@ -85,41 +84,41 @@ public interface LeaseRefresher { * * @return list of leases */ - List listLeasesForStream(StreamIdentifier streamIdentifier) throws DependencyException, InvalidStateException, - ProvisionedThroughputException; + List listLeasesForStream(StreamIdentifier streamIdentifier) + throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * List all objects in table synchronously. - * + * * @throws DependencyException if DynamoDB scan fails in an unexpected way * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB scan fails due to lack of capacity - * + * * @return list of leases */ List listLeases() throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Create a new lease. 
Conditional on a lease not already existing with this shardId. - * + * * @param lease the lease to create - * + * * @return true if lease was created, false if lease already exists - * + * * @throws DependencyException if DynamoDB put fails in an unexpected way * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB put fails due to lack of capacity */ boolean createLeaseIfNotExists(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; + throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * @param leaseKey Get the lease for this leasekey - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB get fails due to lack of capacity * @throws DependencyException if DynamoDB get fails in an unexpected way - * + * * @return lease for the specified leaseKey, or null if one doesn't exist */ Lease getLease(String leaseKey) throws DependencyException, InvalidStateException, ProvisionedThroughputException; @@ -127,55 +126,53 @@ public interface LeaseRefresher { /** * Renew a lease by incrementing the lease counter. Conditional on the leaseCounter in DynamoDB matching the leaseCounter * of the input. Mutates the leaseCounter of the passed-in lease object after updating the record in DynamoDB. 
- * + * * @param lease the lease to renew - * + * * @return true if renewal succeeded, false otherwise - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ - boolean renewLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; + boolean renewLease(Lease lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Take a lease for the given owner by incrementing its leaseCounter and setting its owner field. Conditional on * the leaseCounter in DynamoDB matching the leaseCounter of the input. Mutates the leaseCounter and owner of the * passed-in lease object after updating DynamoDB. - * + * * @param lease the lease to take * @param owner the new owner - * + * * @return true if lease was successfully taken, false otherwise - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ boolean takeLease(Lease lease, String owner) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; + throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Evict the current owner of lease by setting owner to null. Conditional on the owner in DynamoDB matching the owner of * the input. Mutates the lease counter and owner of the passed-in lease object after updating the record in DynamoDB. 
- * + * * @param lease the lease to void - * + * * @return true if eviction succeeded, false otherwise - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ - boolean evictLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; + boolean evictLease(Lease lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Delete the given lease from DynamoDB. Does nothing when passed a lease that does not exist in DynamoDB. - * + * * @param lease the lease to delete - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB delete fails due to lack of capacity * @throws DependencyException if DynamoDB delete fails in an unexpected way @@ -184,7 +181,7 @@ public interface LeaseRefresher { /** * Delete all leases from DynamoDB. Useful for tools/utils and testing. - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB scan or delete fail due to lack of capacity * @throws DependencyException if DynamoDB scan or delete fail in an unexpected way @@ -196,15 +193,14 @@ public interface LeaseRefresher { * library such as leaseCounter, leaseOwner, or leaseKey. Conditional on the leaseCounter in DynamoDB matching the * leaseCounter of the input. Increments the lease counter in DynamoDB so that updates can be contingent on other * updates. Mutates the lease counter of the passed-in lease object. 
- * + * * @return true if update succeeded, false otherwise - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ - boolean updateLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException; + boolean updateLease(Lease lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Update application-specific fields of the given lease in DynamoDB. Does not update fields managed by the leasing @@ -221,9 +217,9 @@ public interface LeaseRefresher { /** * Check (synchronously) if there are any leases in the lease table. - * + * * @return true if there are no leases in the lease table - * + * * @throws DependencyException if DynamoDB scan fails in an unexpected way * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB scan fails due to lack of capacity diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRenewer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRenewer.java index 25ec5b45..61d9643c 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRenewer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseRenewer.java @@ -28,18 +28,18 @@ import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; * that worker. */ public interface LeaseRenewer { - + /** * Bootstrap initial set of leases from the {@link LeaseRefresher} (e.g. 
upon process restart, pick up leases we own) * @throws DependencyException on unexpected DynamoDB failures * @throws InvalidStateException if lease table doesn't exist * @throws ProvisionedThroughputException if DynamoDB reads fail due to insufficient capacity */ - void initialize() throws DependencyException, InvalidStateException, ProvisionedThroughputException; + void initialize() throws DependencyException, InvalidStateException, ProvisionedThroughputException; /** * Attempt to renew all currently held leases. - * + * * @throws DependencyException on unexpected DynamoDB failures * @throws InvalidStateException if lease table does not exist */ @@ -54,7 +54,7 @@ public interface LeaseRenewer { /** * @param leaseKey key of the lease to retrieve - * + * * @return a deep copy of a currently held lease, or null if we don't hold the lease */ Lease getCurrentlyHeldLease(String leaseKey); @@ -62,7 +62,7 @@ public interface LeaseRenewer { /** * Adds leases to this LeaseRenewer's set of currently held leases. Leases must have lastRenewalNanos set to the * last time the lease counter was incremented before being passed to this method. - * + * * @param newLeases new leases. */ void addLeasesToRenew(Collection newLeases); @@ -74,7 +74,7 @@ public interface LeaseRenewer { /** * Stops the lease renewer from continunig to maintain the given lease. - * + * * @param lease the lease to drop. */ void dropLease(Lease lease); @@ -83,20 +83,19 @@ public interface LeaseRenewer { * Update application-specific fields in a currently held lease. Cannot be used to update internal fields such as * leaseCounter, leaseOwner, etc. Fails if we do not hold the lease, or if the concurrency token does not match * the concurrency token on the internal authoritative copy of the lease (ie, if we lost and re-acquired the lease). 
- * + * * @param lease lease object containing updated data * @param concurrencyToken obtained by calling Lease.concurrencyToken for a currently held lease * @param operation that performs updateLease * @param singleStreamShardId shardId for metrics emission in single stream mode. MultiStream mode will get the * shardId from the lease object - * + * * @return true if update succeeds, false otherwise - * + * * @throws InvalidStateException if lease table does not exist * @throws ProvisionedThroughputException if DynamoDB update fails due to lack of capacity * @throws DependencyException if DynamoDB update fails in an unexpected way */ boolean updateLease(Lease lease, UUID concurrencyToken, String operation, String singleStreamShardId) throws DependencyException, InvalidStateException, ProvisionedThroughputException; - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseSerializer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseSerializer.java index f36f5a66..5d7bea63 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseSerializer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseSerializer.java @@ -16,6 +16,7 @@ package software.amazon.kinesis.leases; import java.util.Collection; import java.util.Map; + import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate; @@ -29,7 +30,7 @@ public interface LeaseSerializer { /** * Construct a DynamoDB record out of a Lease object - * + * * @param lease lease object to serialize * @return an attribute value map representing the lease object */ @@ -37,13 +38,12 @@ public interface LeaseSerializer { /** * Construct a Lease object out of a DynamoDB record. 
- * + * * @param dynamoRecord attribute value map from DynamoDB * @return a deserialized lease object representing the attribute value map */ Lease fromDynamoRecord(Map dynamoRecord); - default Lease fromDynamoRecord(Map dynamoRecord, Lease leaseToUpdate) { throw new UnsupportedOperationException(); } @@ -56,7 +56,7 @@ public interface LeaseSerializer { /** * Special getDynamoHashKey implementation used by {@link LeaseRefresher#getLease(String)}. - * + * * @param leaseKey * @return the attribute value map representing a Lease's hash key given a string. */ @@ -131,5 +131,4 @@ public interface LeaseSerializer { * @return attribute definitions for creating a DynamoDB table to store leases */ Collection getAttributeDefinitions(); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java index ead8c195..2d082edb 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/LeaseTaker.java @@ -29,14 +29,14 @@ public interface LeaseTaker { /** * Compute the set of leases available to be taken and attempt to take them. Lease taking rules are: - * + * * 1) If a lease's counter hasn't changed in long enough, try to take it. * 2) If we see a lease we've never seen before, take it only if owner == null. If it's owned, odds are the owner is * holding it. We can't tell until we see it more than once. * 3) For load balancing purposes, you may violate rules 1 and 2 for EXACTLY ONE lease per call of takeLeases(). - * + * * @return map of shardId to Lease object for leases we just successfully took. 
- * + * * @throws DependencyException on unexpected DynamoDB failures * @throws InvalidStateException if lease table does not exist */ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/MultiStreamLease.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/MultiStreamLease.java index c79cc458..36e108c5 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/MultiStreamLease.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/MultiStreamLease.java @@ -32,8 +32,11 @@ import static com.google.common.base.Verify.verifyNotNull; @EqualsAndHashCode(callSuper = true) public class MultiStreamLease extends Lease { - @NonNull private String streamIdentifier; - @NonNull private String shardId; + @NonNull + private String streamIdentifier; + + @NonNull + private String shardId; public MultiStreamLease(MultiStreamLease other) { super(other); @@ -74,5 +77,4 @@ public class MultiStreamLease extends Lease { Validate.isInstanceOf(MultiStreamLease.class, lease); return (MultiStreamLease) lease; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/NoOpShardPrioritization.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/NoOpShardPrioritization.java index 9b97086d..2944e2d1 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/NoOpShardPrioritization.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/NoOpShardPrioritization.java @@ -19,14 +19,12 @@ import java.util.List; /** * Shard Prioritization that returns the same original list of shards without any modifications. */ -public class NoOpShardPrioritization implements - ShardPrioritization { +public class NoOpShardPrioritization implements ShardPrioritization { /** * Empty constructor for NoOp Shard Prioritization. 
*/ - public NoOpShardPrioritization() { - } + public NoOpShardPrioritization() {} @Override public List prioritize(List original) { diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritization.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritization.java index b5796d96..02232ab2 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritization.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritization.java @@ -25,8 +25,7 @@ import java.util.Map; * It also limits number of shards that will be available for initialization based on their depth. * It doesn't make a lot of sense to work on a shard that has too many unfinished parents. */ -public class ParentsFirstShardPrioritization implements - ShardPrioritization { +public class ParentsFirstShardPrioritization implements ShardPrioritization { private static final SortingNode PROCESSING_NODE = new SortingNode(null, Integer.MIN_VALUE); private final int maxDepth; @@ -34,13 +33,13 @@ public class ParentsFirstShardPrioritization implements /** * Creates ParentFirst prioritization with filtering based on depth of the shard. * Shards that have depth > maxDepth will be ignored and will not be returned by this prioritization. - * + * * @param maxDepth any shard that is deeper than max depth, will be excluded from processing */ public ParentsFirstShardPrioritization(int maxDepth) { /* Depth 0 means that shard is completed or cannot be found, - * it is impossible to process such shards. - */ + * it is impossible to process such shards. + */ if (maxDepth <= 0) { throw new IllegalArgumentException("Max depth cannot be negative or zero. 
Provided value: " + maxDepth); } @@ -51,16 +50,13 @@ public class ParentsFirstShardPrioritization implements public List prioritize(List original) { Map shards = new HashMap<>(); for (ShardInfo shardInfo : original) { - shards.put(shardInfo.shardId(), - shardInfo); + shards.put(shardInfo.shardId(), shardInfo); } Map processedNodes = new HashMap<>(); for (ShardInfo shardInfo : original) { - populateDepth(shardInfo.shardId(), - shards, - processedNodes); + populateDepth(shardInfo.shardId(), shards, processedNodes); } List orderedInfos = new ArrayList<>(original.size()); @@ -77,14 +73,12 @@ public class ParentsFirstShardPrioritization implements return orderedInfos; } - private int populateDepth(String shardId, - Map shards, - Map processedNodes) { + private int populateDepth(String shardId, Map shards, Map processedNodes) { SortingNode processed = processedNodes.get(shardId); if (processed != null) { if (processed == PROCESSING_NODE) { - throw new IllegalArgumentException("Circular dependency detected. Shard Id " - + shardId + " is processed twice"); + throw new IllegalArgumentException( + "Circular dependency detected. Shard Id " + shardId + " is processed twice"); } return processed.getDepth(); } @@ -105,16 +99,11 @@ public class ParentsFirstShardPrioritization implements int maxParentDepth = 0; for (String parentId : shardInfo.parentShardIds()) { - maxParentDepth = Math.max(maxParentDepth, - populateDepth(parentId, - shards, - processedNodes)); + maxParentDepth = Math.max(maxParentDepth, populateDepth(parentId, shards, processedNodes)); } int currentNodeLevel = maxParentDepth + 1; - SortingNode previousValue = processedNodes.put(shardId, - new SortingNode(shardInfo, - currentNodeLevel)); + SortingNode previousValue = processedNodes.put(shardId, new SortingNode(shardInfo, currentNodeLevel)); if (previousValue != PROCESSING_NODE) { throw new IllegalStateException("Validation failed. 
Depth for shardId " + shardId + " was populated twice"); } @@ -125,13 +114,11 @@ public class ParentsFirstShardPrioritization implements /** * Class to store depth of shards during prioritization. */ - private static class SortingNode implements - Comparable { + private static class SortingNode implements Comparable { private final ShardInfo shardInfo; private final int depth; - public SortingNode(ShardInfo shardInfo, - int depth) { + public SortingNode(ShardInfo shardInfo, int depth) { this.shardInfo = shardInfo; this.depth = depth; } @@ -142,8 +129,7 @@ public class ParentsFirstShardPrioritization implements @Override public int compareTo(SortingNode o) { - return Integer.compare(depth, - o.depth); + return Integer.compare(depth, o.depth); } } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java index 81154c03..7eb065eb 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardDetector.java @@ -93,7 +93,8 @@ public interface ShardDetector { * @throws ExecutionException * @throws TimeoutException */ - default List getChildShards(String shardId) throws InterruptedException, ExecutionException, TimeoutException { + default List getChildShards(String shardId) + throws InterruptedException, ExecutionException, TimeoutException { throw new UnsupportedOperationException("getChildShards not available."); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardInfo.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardInfo.java index aff3f6f0..bb59cbb1 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardInfo.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardInfo.java @@ -20,13 +20,12 @@ import 
java.util.LinkedList; import java.util.List; import java.util.Optional; -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.apache.commons.lang3.builder.HashCodeBuilder; - import lombok.Getter; import lombok.NonNull; import lombok.ToString; import lombok.experimental.Accessors; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; /** @@ -46,7 +45,7 @@ public class ShardInfo { /** * Creates a new ShardInfo object. The checkpoint is not part of the equality, but is used for debugging output. - * + * * @param shardId * Kinesis shardId that this will be about * @param concurrencyToken @@ -56,7 +55,8 @@ public class ShardInfo { * @param checkpoint * the latest checkpoint from lease */ - public ShardInfo(@NonNull final String shardId, + public ShardInfo( + @NonNull final String shardId, final String concurrencyToken, final Collection parentShardIds, final ExtendedSequenceNumber checkpoint) { @@ -72,7 +72,8 @@ public class ShardInfo { * @param checkpoint * @param streamIdentifierSer */ - public ShardInfo(@NonNull final String shardId, + public ShardInfo( + @NonNull final String shardId, final String concurrencyToken, final Collection parentShardIds, final ExtendedSequenceNumber checkpoint, @@ -92,7 +93,7 @@ public class ShardInfo { /** * A list of shards that are parents of this shard. This may be empty if the shard has no parents. - * + * * @return a list of shardId's that are parents of this shard, or empty if the shard has no parents. 
*/ public List parentShardIds() { @@ -114,7 +115,11 @@ public class ShardInfo { @Override public int hashCode() { return new HashCodeBuilder() - .append(concurrencyToken).append(parentShardIds).append(shardId).append(streamIdentifierSerOpt.orElse("")).toHashCode(); + .append(concurrencyToken) + .append(parentShardIds) + .append(shardId) + .append(streamIdentifierSerOpt.orElse("")) + .toHashCode(); } /** @@ -137,10 +142,12 @@ public class ShardInfo { return false; } ShardInfo other = (ShardInfo) obj; - return new EqualsBuilder().append(concurrencyToken, other.concurrencyToken) - .append(parentShardIds, other.parentShardIds).append(shardId, other.shardId) - .append(streamIdentifierSerOpt.orElse(""), other.streamIdentifierSerOpt.orElse("")).isEquals(); - + return new EqualsBuilder() + .append(concurrencyToken, other.concurrencyToken) + .append(parentShardIds, other.parentShardIds) + .append(shardId, other.shardId) + .append(streamIdentifierSerOpt.orElse(""), other.streamIdentifierSerOpt.orElse("")) + .isEquals(); } /** @@ -159,9 +166,9 @@ public class ShardInfo { * @return lease key */ public static String getLeaseKey(ShardInfo shardInfo, String shardIdOverride) { - return shardInfo.streamIdentifierSerOpt().isPresent() ? - MultiStreamLease.getLeaseKey(shardInfo.streamIdentifierSerOpt().get(), shardIdOverride) : - shardIdOverride; + return shardInfo.streamIdentifierSerOpt().isPresent() + ? 
MultiStreamLease.getLeaseKey( + shardInfo.streamIdentifierSerOpt().get(), shardIdOverride) + : shardIdOverride; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardPrioritization.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardPrioritization.java index dc99d413..934b454a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardPrioritization.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardPrioritization.java @@ -24,7 +24,7 @@ public interface ShardPrioritization { /** * Returns new list of shards ordered based on their priority. * Resulted list may have fewer shards compared to original list - * + * * @param original * list of shards needed to be prioritized * @return new list that contains only shards that should be processed diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java index 1986fa49..be4fbf51 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTask.java @@ -41,16 +41,21 @@ public class ShardSyncTask implements ConsumerTask { @NonNull private final ShardDetector shardDetector; + @NonNull private final LeaseRefresher leaseRefresher; + @NonNull private final InitialPositionInStreamExtended initialPosition; + private final boolean cleanupLeasesUponShardCompletion; private final boolean garbageCollectLeases; private final boolean ignoreUnexpectedChildShards; private final long shardSyncTaskIdleTimeMillis; + @NonNull private final HierarchicalShardSyncer hierarchicalShardSyncer; + @NonNull private final MetricsFactory metricsFactory; @@ -67,8 +72,12 @@ public class ShardSyncTask implements ConsumerTask { boolean shardSyncSuccess = true; try { - boolean didPerformShardSync 
= hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, leaseRefresher, - initialPosition, scope, ignoreUnexpectedChildShards, + boolean didPerformShardSync = hierarchicalShardSyncer.checkAndCreateLeaseForNewShards( + shardDetector, + leaseRefresher, + initialPosition, + scope, + ignoreUnexpectedChildShards, leaseRefresher.isLeaseTableEmpty()); if (didPerformShardSync && shardSyncTaskIdleTimeMillis > 0) { @@ -80,7 +89,8 @@ public class ShardSyncTask implements ConsumerTask { shardSyncSuccess = false; } finally { // NOTE: This metric is reflecting if a shard sync task succeeds. Customer can use this metric to monitor if - // their application encounter any shard sync failures. This metric can help to detect potential shard stuck issues + // their application encounter any shard sync failures. This metric can help to detect potential shard stuck + // issues // that are due to shard sync failures. MetricsUtil.addSuccess(scope, "SyncShards", shardSyncSuccess, MetricsLevel.DETAILED); MetricsUtil.endScope(scope); @@ -97,5 +107,4 @@ public class ShardSyncTask implements ConsumerTask { public TaskType taskType() { return taskType; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTaskManager.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTaskManager.java index 9a015c28..add8cf4f 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTaskManager.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/ShardSyncTaskManager.java @@ -20,17 +20,16 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReentrantLock; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; - import lombok.Data; import lombok.NonNull; import lombok.experimental.Accessors; import lombok.extern.slf4j.Slf4j; +import 
software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.coordinator.ExecutorStateEvent; import software.amazon.kinesis.lifecycle.ConsumerTask; import software.amazon.kinesis.lifecycle.TaskResult; -import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.metrics.MetricsCollectingTaskDecorator; +import software.amazon.kinesis.metrics.MetricsFactory; /** * The ShardSyncTaskManager is used to track the task to sync shards with leases (create leases for new @@ -43,20 +42,27 @@ import software.amazon.kinesis.metrics.MetricsCollectingTaskDecorator; public class ShardSyncTaskManager { @NonNull private final ShardDetector shardDetector; + @NonNull private final LeaseRefresher leaseRefresher; + @NonNull private final InitialPositionInStreamExtended initialPositionInStream; + private final boolean cleanupLeasesUponShardCompletion; private final boolean garbageCollectLeases; private final boolean ignoreUnexpectedChildShards; private final long shardSyncIdleTimeMillis; + @NonNull private final ExecutorService executorService; + @NonNull private final HierarchicalShardSyncer hierarchicalShardSyncer; + @NonNull private final MetricsFactory metricsFactory; + private ConsumerTask currentTask; private CompletableFuture future; private AtomicBoolean shardSyncRequestPending; @@ -77,9 +83,14 @@ public class ShardSyncTaskManager { * @param metricsFactory */ @Deprecated - public ShardSyncTaskManager(ShardDetector shardDetector, LeaseRefresher leaseRefresher, - InitialPositionInStreamExtended initialPositionInStream, boolean cleanupLeasesUponShardCompletion, - boolean ignoreUnexpectedChildShards, long shardSyncIdleTimeMillis, ExecutorService executorService, + public ShardSyncTaskManager( + ShardDetector shardDetector, + LeaseRefresher leaseRefresher, + InitialPositionInStreamExtended initialPositionInStream, + boolean cleanupLeasesUponShardCompletion, + boolean ignoreUnexpectedChildShards, + long shardSyncIdleTimeMillis, 
+ ExecutorService executorService, MetricsFactory metricsFactory) { this.shardDetector = shardDetector; this.leaseRefresher = leaseRefresher; @@ -108,10 +119,16 @@ public class ShardSyncTaskManager { * @param hierarchicalShardSyncer * @param metricsFactory */ - public ShardSyncTaskManager(ShardDetector shardDetector, LeaseRefresher leaseRefresher, - InitialPositionInStreamExtended initialPositionInStream, boolean cleanupLeasesUponShardCompletion, - boolean ignoreUnexpectedChildShards, long shardSyncIdleTimeMillis, ExecutorService executorService, - HierarchicalShardSyncer hierarchicalShardSyncer, MetricsFactory metricsFactory) { + public ShardSyncTaskManager( + ShardDetector shardDetector, + LeaseRefresher leaseRefresher, + InitialPositionInStreamExtended initialPositionInStream, + boolean cleanupLeasesUponShardCompletion, + boolean ignoreUnexpectedChildShards, + long shardSyncIdleTimeMillis, + ExecutorService executorService, + HierarchicalShardSyncer hierarchicalShardSyncer, + MetricsFactory metricsFactory) { this.shardDetector = shardDetector; this.leaseRefresher = leaseRefresher; this.initialPositionInStream = initialPositionInStream; @@ -131,15 +148,16 @@ public class ShardSyncTaskManager { * @return the Task Result. 
*/ public TaskResult callShardSyncTask() { - final ShardSyncTask shardSyncTask = new ShardSyncTask(shardDetector, - leaseRefresher, - initialPositionInStream, - cleanupLeasesUponShardCompletion, - garbageCollectLeases, - ignoreUnexpectedChildShards, - shardSyncIdleTimeMillis, - hierarchicalShardSyncer, - metricsFactory); + final ShardSyncTask shardSyncTask = new ShardSyncTask( + shardDetector, + leaseRefresher, + initialPositionInStream, + cleanupLeasesUponShardCompletion, + garbageCollectLeases, + ignoreUnexpectedChildShards, + shardSyncIdleTimeMillis, + hierarchicalShardSyncer, + metricsFactory); final ConsumerTask metricCollectingTask = new MetricsCollectingTaskDecorator(shardSyncTask, metricsFactory); return metricCollectingTask.call(); } @@ -164,28 +182,27 @@ public class ShardSyncTaskManager { try { TaskResult result = future.get(); if (result.getException() != null) { - log.error("Caught exception running {} task: ", currentTask.taskType(), - result.getException()); + log.error("Caught exception running {} task: ", currentTask.taskType(), result.getException()); } } catch (InterruptedException | ExecutionException e) { log.warn("{} task encountered exception.", currentTask.taskType(), e); } } - currentTask = - new MetricsCollectingTaskDecorator( - new ShardSyncTask(shardDetector, - leaseRefresher, - initialPositionInStream, - cleanupLeasesUponShardCompletion, - garbageCollectLeases, - ignoreUnexpectedChildShards, - shardSyncIdleTimeMillis, - hierarchicalShardSyncer, - metricsFactory), - metricsFactory); + currentTask = new MetricsCollectingTaskDecorator( + new ShardSyncTask( + shardDetector, + leaseRefresher, + initialPositionInStream, + cleanupLeasesUponShardCompletion, + garbageCollectLeases, + ignoreUnexpectedChildShards, + shardSyncIdleTimeMillis, + hierarchicalShardSyncer, + metricsFactory), + metricsFactory); future = CompletableFuture.supplyAsync(() -> currentTask.call(), executorService) - .whenComplete((taskResult, exception) -> 
handlePendingShardSyncs(exception, taskResult)); + .whenComplete((taskResult, exception) -> handlePendingShardSyncs(exception, taskResult)); log.info(new ExecutorStateEvent(executorService).message()); @@ -195,8 +212,10 @@ public class ShardSyncTaskManager { } } else { if (log.isDebugEnabled()) { - log.debug("Previous {} task still pending. Not submitting new task. " - + "Triggered a pending request but will not be executed until the current request completes.", currentTask.taskType()); + log.debug( + "Previous {} task still pending. Not submitting new task. " + + "Triggered a pending request but will not be executed until the current request completes.", + currentTask.taskType()); } shardSyncRequestPending.compareAndSet(false /*expected*/, true /*update*/); } @@ -205,9 +224,10 @@ public class ShardSyncTaskManager { private void handlePendingShardSyncs(Throwable exception, TaskResult taskResult) { if (exception != null || taskResult.getException() != null) { - log.error("Caught exception running {} task: {}", currentTask.taskType(), + log.error( + "Caught exception running {} task: {}", + currentTask.taskType(), exception != null ? exception : taskResult.getException()); } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java index 9461a18e..4ce3f1a9 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/UpdateField.java @@ -22,5 +22,6 @@ package software.amazon.kinesis.leases; * for backfilling while rolling forward to newer versions. 
*/ public enum UpdateField { - CHILD_SHARDS, HASH_KEY_RANGE + CHILD_SHARDS, + HASH_KEY_RANGE } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java index 6c0803f2..bef76ef0 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinator.java @@ -14,8 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import com.google.common.util.concurrent.ThreadFactoryBuilder; - import java.util.Collection; import java.util.Collections; import java.util.List; @@ -30,6 +28,8 @@ import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.leases.Lease; @@ -63,9 +63,13 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { // Time to wait for in-flight Runnables to finish when calling .stop(); private static final long STOP_WAIT_TIME_MILLIS = 2000L; private static final ThreadFactory LEASE_COORDINATOR_THREAD_FACTORY = new ThreadFactoryBuilder() - .setNameFormat("LeaseCoordinator-%04d").setDaemon(true).build(); + .setNameFormat("LeaseCoordinator-%04d") + .setDaemon(true) + .build(); private static final ThreadFactory LEASE_RENEWAL_THREAD_FACTORY = new ThreadFactoryBuilder() - .setNameFormat("LeaseRenewer-%04d").setDaemon(true).build(); + .setNameFormat("LeaseRenewer-%04d") + .setDaemon(true) + .build(); private final LeaseRenewer leaseRenewer; private final LeaseTaker leaseTaker; @@ -105,18 +109,26 @@ public class 
DynamoDBLeaseCoordinator implements LeaseCoordinator { * Used to publish metrics about lease operations */ @Deprecated - public DynamoDBLeaseCoordinator(final LeaseRefresher leaseRefresher, - final String workerIdentifier, - final long leaseDurationMillis, - final long epsilonMillis, - final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, - final int maxLeaseRenewerThreadCount, - final MetricsFactory metricsFactory) { - this(leaseRefresher, workerIdentifier, leaseDurationMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewerThreadCount, + public DynamoDBLeaseCoordinator( + final LeaseRefresher leaseRefresher, + final String workerIdentifier, + final long leaseDurationMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewerThreadCount, + final MetricsFactory metricsFactory) { + this( + leaseRefresher, + workerIdentifier, + leaseDurationMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewerThreadCount, TableConstants.DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, - TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory); + TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, + metricsFactory); } /** @@ -142,21 +154,29 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { * Used to publish metrics about lease operations */ @Deprecated - public DynamoDBLeaseCoordinator(final LeaseRefresher leaseRefresher, - final String workerIdentifier, - final long leaseDurationMillis, - final long epsilonMillis, - final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, - final int maxLeaseRenewerThreadCount, - final long initialLeaseTableReadCapacity, - final long initialLeaseTableWriteCapacity, - final MetricsFactory metricsFactory) { - this(leaseRefresher, workerIdentifier, leaseDurationMillis, - LeaseManagementConfig.DEFAULT_ENABLE_PRIORITY_LEASE_ASSIGNMENT, 
epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewerThreadCount, + public DynamoDBLeaseCoordinator( + final LeaseRefresher leaseRefresher, + final String workerIdentifier, + final long leaseDurationMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewerThreadCount, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final MetricsFactory metricsFactory) { + this( + leaseRefresher, + workerIdentifier, + leaseDurationMillis, + LeaseManagementConfig.DEFAULT_ENABLE_PRIORITY_LEASE_ASSIGNMENT, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewerThreadCount, TableConstants.DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, - TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory); + TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY, + metricsFactory); } /** @@ -183,7 +203,8 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { * @param metricsFactory * Used to publish metrics about lease operations */ - public DynamoDBLeaseCoordinator(final LeaseRefresher leaseRefresher, + public DynamoDBLeaseCoordinator( + final LeaseRefresher leaseRefresher, final String workerIdentifier, final long leaseDurationMillis, final boolean enablePriorityLeaseAssignment, @@ -214,7 +235,8 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { this.initialLeaseTableWriteCapacity = initialLeaseTableWriteCapacity; this.metricsFactory = metricsFactory; - log.info("With failover time {} ms and epsilon {} ms, LeaseCoordinator will renew leases every {} ms, take" + log.info( + "With failover time {} ms and epsilon {} ms, LeaseCoordinator will renew leases every {} ms, take" + "leases every {} ms, process maximum of {} leases and steal {} lease(s) at a time.", leaseDurationMillis, epsilonMillis, @@ -236,7 +258,6 @@ public class DynamoDBLeaseCoordinator implements 
LeaseCoordinator { log.error("Throwable encountered in lease taking thread", t); } } - } private class RenewerRunnable implements Runnable { @@ -251,13 +272,11 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { log.error("Throwable encountered in lease renewing thread", t); } } - } @Override public void initialize() throws ProvisionedThroughputException, DependencyException, IllegalStateException { - final boolean newTableCreated = - leaseRefresher.createLeaseTableIfNotExists(); + final boolean newTableCreated = leaseRefresher.createLeaseTableIfNotExists(); if (newTableCreated) { log.info("Created new lease table for coordinator with pay per request billing mode."); } @@ -278,15 +297,11 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { leaseCoordinatorThreadPool = Executors.newScheduledThreadPool(2, LEASE_COORDINATOR_THREAD_FACTORY); // Taker runs with fixed DELAY because we want it to run slower in the event of performance degredation. - takerFuture = leaseCoordinatorThreadPool.scheduleWithFixedDelay(new TakerRunnable(), - 0L, - takerIntervalMillis, - TimeUnit.MILLISECONDS); + takerFuture = leaseCoordinatorThreadPool.scheduleWithFixedDelay( + new TakerRunnable(), 0L, takerIntervalMillis, TimeUnit.MILLISECONDS); // Renewer runs at fixed INTERVAL because we want it to run at the same rate in the event of degredation. 
- leaseCoordinatorThreadPool.scheduleAtFixedRate(new RenewerRunnable(), - 0L, - renewerIntervalMillis, - TimeUnit.MILLISECONDS); + leaseCoordinatorThreadPool.scheduleAtFixedRate( + new RenewerRunnable(), 0L, renewerIntervalMillis, TimeUnit.MILLISECONDS); running = true; } @@ -350,11 +365,13 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { leaseCoordinatorThreadPool.shutdown(); try { if (leaseCoordinatorThreadPool.awaitTermination(STOP_WAIT_TIME_MILLIS, TimeUnit.MILLISECONDS)) { - log.info("Worker {} has successfully stopped lease-tracking threads", + log.info( + "Worker {} has successfully stopped lease-tracking threads", leaseTaker.getWorkerIdentifier()); } else { leaseCoordinatorThreadPool.shutdownNow(); - log.info("Worker {} stopped lease-tracking threads {} ms after stop", + log.info( + "Worker {} stopped lease-tracking threads {} ms after stop", leaseTaker.getWorkerIdentifier(), STOP_WAIT_TIME_MILLIS); } @@ -394,8 +411,9 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { } @Override - public boolean updateLease(final Lease lease, final UUID concurrencyToken, final String operation, - final String singleStreamShardId) throws DependencyException, InvalidStateException, ProvisionedThroughputException { + public boolean updateLease( + final Lease lease, final UUID concurrencyToken, final String operation, final String singleStreamShardId) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { return leaseRenewer.updateLease(lease, concurrencyToken, operation, singleStreamShardId); } @@ -407,8 +425,13 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { private static ExecutorService getLeaseRenewalExecutorService(int maximumPoolSize) { int coreLeaseCount = Math.max(maximumPoolSize / 4, 2); - return new ThreadPoolExecutor(coreLeaseCount, maximumPoolSize, 60, TimeUnit.SECONDS, - new LinkedTransferQueue<>(), LEASE_RENEWAL_THREAD_FACTORY); + return new ThreadPoolExecutor( + 
coreLeaseCount, + maximumPoolSize, + 60, + TimeUnit.SECONDS, + new LinkedTransferQueue<>(), + LEASE_RENEWAL_THREAD_FACTORY); } @Override @@ -421,7 +444,9 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { if (leases == null) { return Collections.emptyList(); } - return leases.stream().map(DynamoDBLeaseCoordinator::convertLeaseToAssignment).collect(Collectors.toList()); + return leases.stream() + .map(DynamoDBLeaseCoordinator::convertLeaseToAssignment) + .collect(Collectors.toList()); } /** @@ -431,11 +456,15 @@ public class DynamoDBLeaseCoordinator implements LeaseCoordinator { */ public static ShardInfo convertLeaseToAssignment(final Lease lease) { if (lease instanceof MultiStreamLease) { - return new ShardInfo(((MultiStreamLease) lease).shardId(), lease.concurrencyToken().toString(), lease.parentShardIds(), - lease.checkpoint(), ((MultiStreamLease) lease).streamIdentifier()); + return new ShardInfo( + ((MultiStreamLease) lease).shardId(), + lease.concurrencyToken().toString(), + lease.parentShardIds(), + lease.checkpoint(), + ((MultiStreamLease) lease).streamIdentifier()); } else { - return new ShardInfo(lease.leaseKey(), lease.concurrencyToken().toString(), lease.parentShardIds(), - lease.checkpoint()); + return new ShardInfo( + lease.leaseKey(), lease.concurrencyToken().toString(), lease.parentShardIds(), lease.checkpoint()); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseManagementFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseManagementFactory.java index 7d9ebeef..2eb3e707 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseManagementFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseManagementFactory.java @@ -20,6 +20,7 @@ import java.util.Collection; import java.util.concurrent.ExecutorService; import 
java.util.concurrent.Executors; import java.util.function.Function; + import lombok.Data; import lombok.NonNull; import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList; @@ -53,18 +54,25 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { @NonNull private final KinesisAsyncClient kinesisClient; + @NonNull private final DynamoDbAsyncClient dynamoDBClient; + @NonNull private final String tableName; + @NonNull private final String workerIdentifier; + @NonNull private final ExecutorService executorService; + @NonNull private final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer; + @NonNull private final LeaseSerializer leaseSerializer; + @NonNull private StreamConfig streamConfig; @@ -123,21 +131,51 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param cacheMissWarningModulus */ @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus) { - this(kinesisClient, streamName, dynamoDBClient, tableName, workerIdentifier, executorService, - initialPositionInStream, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - 
ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, TableConstants.DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus) { + this( + kinesisClient, + streamName, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + initialPositionInStream, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + TableConstants.DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY, TableConstants.DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY); } @@ -173,23 +211,56 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param initialLeaseTableWriteCapacity */ @Deprecated - public DynamoDBLeaseManagementFactory(final 
KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity) { - this(kinesisClient, streamName, dynamoDBClient, tableName, workerIdentifier, executorService, - initialPositionInStream, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - new HierarchicalShardSyncer(), TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK, + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + 
final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity) { + this( + kinesisClient, + streamName, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + initialPositionInStream, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + new HierarchicalShardSyncer(), + TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); } @@ -223,24 +294,59 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param tableCreatorCallback */ @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long 
listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer hierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback) { - this(kinesisClient, streamName, dynamoDBClient, tableName, workerIdentifier, executorService, - initialPositionInStream, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - hierarchicalShardSyncer, tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final 
HierarchicalShardSyncer hierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback) { + this( + kinesisClient, + streamName, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + initialPositionInStream, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + hierarchicalShardSyncer, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); } /** @@ -274,25 +380,61 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param dynamoDbRequestTimeout */ @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer hierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback, + public 
DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer hierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, Duration dynamoDbRequestTimeout) { - this(kinesisClient, streamName, dynamoDBClient, tableName, workerIdentifier, executorService, - initialPositionInStream, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - hierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, BillingMode.PAY_PER_REQUEST); + this( + kinesisClient, + streamName, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + initialPositionInStream, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + 
cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + hierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + BillingMode.PAY_PER_REQUEST); } /** @@ -327,26 +469,63 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param billingMode */ @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer hierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback, - Duration dynamoDbRequestTimeout, BillingMode billingMode) { + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + 
final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer hierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode) { - this(kinesisClient, new StreamConfig(StreamIdentifier.singleStreamInstance(streamName), initialPositionInStream), dynamoDBClient, tableName, - workerIdentifier, executorService, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - hierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode, new DynamoDBLeaseSerializer()); + this( + kinesisClient, + new StreamConfig(StreamIdentifier.singleStreamInstance(streamName), initialPositionInStream), + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + 
maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + hierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + new DynamoDBLeaseSerializer()); } /** @@ -382,26 +561,64 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param tags */ @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final String streamName, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final InitialPositionInStreamExtended initialPositionInStream, - final long failoverTimeMillis, final long epsilonMillis, final int maxLeasesForWorker, - final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer hierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback, - Duration dynamoDbRequestTimeout, BillingMode billingMode, Collection tags) { + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final String streamName, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final InitialPositionInStreamExtended initialPositionInStream, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int 
maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer hierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode, + Collection tags) { - this(kinesisClient, new StreamConfig(StreamIdentifier.singleStreamInstance(streamName), initialPositionInStream), dynamoDBClient, tableName, - workerIdentifier, executorService, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - hierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode, new DynamoDBLeaseSerializer()); + this( + kinesisClient, + new StreamConfig(StreamIdentifier.singleStreamInstance(streamName), initialPositionInStream), + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + 
cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + hierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + new DynamoDBLeaseSerializer()); } /** @@ -435,25 +652,64 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param billingMode */ @Deprecated - private DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final StreamConfig streamConfig, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final long failoverTimeMillis, final long epsilonMillis, - final int maxLeasesForWorker, final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback, - Duration dynamoDbRequestTimeout, BillingMode billingMode, LeaseSerializer leaseSerializer) { - this(kinesisClient, streamConfig, dynamoDBClient, tableName, - workerIdentifier, executorService, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - 
deprecatedHierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode, false, - DefaultSdkAutoConstructList.getInstance(), leaseSerializer); + private DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final StreamConfig streamConfig, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode, + LeaseSerializer leaseSerializer) { + this( + kinesisClient, + streamConfig, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + deprecatedHierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + false, + DefaultSdkAutoConstructList.getInstance(), + 
leaseSerializer); } /** @@ -489,26 +745,67 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param tags */ @Deprecated - private DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, final StreamConfig streamConfig, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final long failoverTimeMillis, final long epsilonMillis, - final int maxLeasesForWorker, final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback, - Duration dynamoDbRequestTimeout, BillingMode billingMode, final boolean leaseTableDeletionProtectionEnabled, - Collection tags, LeaseSerializer leaseSerializer) { - this(kinesisClient, dynamoDBClient, tableName, - workerIdentifier, executorService, failoverTimeMillis, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - deprecatedHierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode, - leaseTableDeletionProtectionEnabled, tags, leaseSerializer, null, false, + private 
DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final StreamConfig streamConfig, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled, + Collection tags, + LeaseSerializer leaseSerializer) { + this( + kinesisClient, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + failoverTimeMillis, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, + deprecatedHierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + leaseTableDeletionProtectionEnabled, + tags, + leaseSerializer, + null, + false, LeaseManagementConfig.DEFAULT_LEASE_CLEANUP_CONFIG); this.streamConfig = streamConfig; } @@ 
-547,29 +844,70 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param leaseCleanupConfig */ @Deprecated - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final long failoverTimeMillis, final long epsilonMillis, - final int maxLeasesForWorker, final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback, - Duration dynamoDbRequestTimeout, BillingMode billingMode, final boolean leaseTableDeletionProtectionEnabled, - Collection tags, LeaseSerializer leaseSerializer, - Function customShardDetectorProvider, boolean isMultiStreamMode, + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final long failoverTimeMillis, + final long epsilonMillis, + final int maxLeasesForWorker, + final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int 
maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled, + Collection tags, + LeaseSerializer leaseSerializer, + Function customShardDetectorProvider, + boolean isMultiStreamMode, LeaseCleanupConfig leaseCleanupConfig) { - this(kinesisClient, dynamoDBClient, tableName, - workerIdentifier, executorService, failoverTimeMillis, - LeaseManagementConfig.DEFAULT_ENABLE_PRIORITY_LEASE_ASSIGNMENT, epsilonMillis, maxLeasesForWorker, - maxLeasesToStealAtOneTime, maxLeaseRenewalThreads, cleanupLeasesUponShardCompletion, - ignoreUnexpectedChildShards, shardSyncIntervalMillis, consistentReads, listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, maxCacheMissesBeforeReload, listShardsCacheAllowedAgeInSeconds, - cacheMissWarningModulus, initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity, - deprecatedHierarchicalShardSyncer, tableCreatorCallback, dynamoDbRequestTimeout, billingMode, - leaseTableDeletionProtectionEnabled, tags, leaseSerializer, customShardDetectorProvider, isMultiStreamMode, + this( + kinesisClient, + dynamoDBClient, + tableName, + workerIdentifier, + executorService, + failoverTimeMillis, + LeaseManagementConfig.DEFAULT_ENABLE_PRIORITY_LEASE_ASSIGNMENT, + epsilonMillis, + maxLeasesForWorker, + maxLeasesToStealAtOneTime, + maxLeaseRenewalThreads, + cleanupLeasesUponShardCompletion, + ignoreUnexpectedChildShards, + shardSyncIntervalMillis, + consistentReads, + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + maxCacheMissesBeforeReload, + listShardsCacheAllowedAgeInSeconds, + cacheMissWarningModulus, + initialLeaseTableReadCapacity, + initialLeaseTableWriteCapacity, 
+ deprecatedHierarchicalShardSyncer, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + leaseTableDeletionProtectionEnabled, + tags, + leaseSerializer, + customShardDetectorProvider, + isMultiStreamMode, leaseCleanupConfig); } @@ -607,20 +945,38 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @param isMultiStreamMode * @param leaseCleanupConfig */ - public DynamoDBLeaseManagementFactory(final KinesisAsyncClient kinesisClient, - final DynamoDbAsyncClient dynamoDBClient, final String tableName, final String workerIdentifier, - final ExecutorService executorService, final long failoverTimeMillis, - final boolean enablePriorityLeaseAssignment, final long epsilonMillis, - final int maxLeasesForWorker, final int maxLeasesToStealAtOneTime, final int maxLeaseRenewalThreads, - final boolean cleanupLeasesUponShardCompletion, final boolean ignoreUnexpectedChildShards, - final long shardSyncIntervalMillis, final boolean consistentReads, final long listShardsBackoffTimeMillis, - final int maxListShardsRetryAttempts, final int maxCacheMissesBeforeReload, - final long listShardsCacheAllowedAgeInSeconds, final int cacheMissWarningModulus, - final long initialLeaseTableReadCapacity, final long initialLeaseTableWriteCapacity, - final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, final TableCreatorCallback tableCreatorCallback, - Duration dynamoDbRequestTimeout, BillingMode billingMode, final boolean leaseTableDeletionProtectionEnabled, - Collection tags, LeaseSerializer leaseSerializer, - Function customShardDetectorProvider, boolean isMultiStreamMode, + public DynamoDBLeaseManagementFactory( + final KinesisAsyncClient kinesisClient, + final DynamoDbAsyncClient dynamoDBClient, + final String tableName, + final String workerIdentifier, + final ExecutorService executorService, + final long failoverTimeMillis, + final boolean enablePriorityLeaseAssignment, + final long epsilonMillis, + final int maxLeasesForWorker, + 
final int maxLeasesToStealAtOneTime, + final int maxLeaseRenewalThreads, + final boolean cleanupLeasesUponShardCompletion, + final boolean ignoreUnexpectedChildShards, + final long shardSyncIntervalMillis, + final boolean consistentReads, + final long listShardsBackoffTimeMillis, + final int maxListShardsRetryAttempts, + final int maxCacheMissesBeforeReload, + final long listShardsCacheAllowedAgeInSeconds, + final int cacheMissWarningModulus, + final long initialLeaseTableReadCapacity, + final long initialLeaseTableWriteCapacity, + final HierarchicalShardSyncer deprecatedHierarchicalShardSyncer, + final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled, + Collection tags, + LeaseSerializer leaseSerializer, + Function customShardDetectorProvider, + boolean isMultiStreamMode, LeaseCleanupConfig leaseCleanupConfig) { this.kinesisClient = kinesisClient; this.dynamoDBClient = dynamoDBClient; @@ -658,9 +1014,11 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { @Override public LeaseCoordinator createLeaseCoordinator(@NonNull final MetricsFactory metricsFactory) { - return new DynamoDBLeaseCoordinator(this.createLeaseRefresher(), + return new DynamoDBLeaseCoordinator( + this.createLeaseRefresher(), workerIdentifier, - failoverTimeMillis, enablePriorityLeaseAssignment, + failoverTimeMillis, + enablePriorityLeaseAssignment, epsilonMillis, maxLeasesForWorker, maxLeasesToStealAtOneTime, @@ -670,15 +1028,18 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { metricsFactory); } - @Override @Deprecated + @Override + @Deprecated public ShardSyncTaskManager createShardSyncTaskManager(@NonNull final MetricsFactory metricsFactory) { - return new ShardSyncTaskManager(this.createShardDetector(), + return new ShardSyncTaskManager( + this.createShardDetector(), this.createLeaseRefresher(), 
streamConfig.initialPositionInStreamExtended(), cleanupLeasesUponShardCompletion, ignoreUnexpectedChildShards, shardSyncIntervalMillis, - executorService, deprecatedHierarchicalShardSyncer, + executorService, + deprecatedHierarchicalShardSyncer, metricsFactory); } @@ -702,33 +1063,49 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { * @return ShardSyncTaskManager */ @Override - public ShardSyncTaskManager createShardSyncTaskManager(MetricsFactory metricsFactory, StreamConfig streamConfig, + public ShardSyncTaskManager createShardSyncTaskManager( + MetricsFactory metricsFactory, + StreamConfig streamConfig, DeletedStreamListProvider deletedStreamListProvider) { - return new ShardSyncTaskManager(this.createShardDetector(streamConfig), + return new ShardSyncTaskManager( + this.createShardDetector(streamConfig), this.createLeaseRefresher(), streamConfig.initialPositionInStreamExtended(), cleanupLeasesUponShardCompletion, ignoreUnexpectedChildShards, shardSyncIntervalMillis, executorService, - new HierarchicalShardSyncer(isMultiStreamMode, streamConfig.streamIdentifier().toString(), - deletedStreamListProvider), + new HierarchicalShardSyncer( + isMultiStreamMode, streamConfig.streamIdentifier().toString(), deletedStreamListProvider), metricsFactory); } - @Override public DynamoDBLeaseRefresher createLeaseRefresher() { - return new DynamoDBLeaseRefresher(tableName, dynamoDBClient, leaseSerializer, consistentReads, - tableCreatorCallback, dynamoDbRequestTimeout, billingMode, leaseTableDeletionProtectionEnabled, tags); + return new DynamoDBLeaseRefresher( + tableName, + dynamoDBClient, + leaseSerializer, + consistentReads, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + leaseTableDeletionProtectionEnabled, + tags); } @Override @Deprecated public ShardDetector createShardDetector() { - return new KinesisShardDetector(kinesisClient, streamConfig.streamIdentifier(), - listShardsBackoffTimeMillis, maxListShardsRetryAttempts, 
listShardsCacheAllowedAgeInSeconds, - maxCacheMissesBeforeReload, cacheMissWarningModulus, dynamoDbRequestTimeout); + return new KinesisShardDetector( + kinesisClient, + streamConfig.streamIdentifier(), + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + listShardsCacheAllowedAgeInSeconds, + maxCacheMissesBeforeReload, + cacheMissWarningModulus, + dynamoDbRequestTimeout); } /** @@ -739,10 +1116,17 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { */ @Override public ShardDetector createShardDetector(StreamConfig streamConfig) { - return customShardDetectorProvider != null ? customShardDetectorProvider.apply(streamConfig) : - new KinesisShardDetector(kinesisClient, streamConfig.streamIdentifier(), listShardsBackoffTimeMillis, - maxListShardsRetryAttempts, listShardsCacheAllowedAgeInSeconds, maxCacheMissesBeforeReload, - cacheMissWarningModulus, dynamoDbRequestTimeout); + return customShardDetectorProvider != null + ? customShardDetectorProvider.apply(streamConfig) + : new KinesisShardDetector( + kinesisClient, + streamConfig.streamIdentifier(), + listShardsBackoffTimeMillis, + maxListShardsRetryAttempts, + listShardsCacheAllowedAgeInSeconds, + maxCacheMissesBeforeReload, + cacheMissWarningModulus, + dynamoDbRequestTimeout); } /** @@ -753,9 +1137,12 @@ public class DynamoDBLeaseManagementFactory implements LeaseManagementFactory { */ @Override public LeaseCleanupManager createLeaseCleanupManager(MetricsFactory metricsFactory) { - return new LeaseCleanupManager(createLeaseCoordinator(metricsFactory), - metricsFactory, Executors.newSingleThreadScheduledExecutor(), - cleanupLeasesUponShardCompletion, leaseCleanupConfig.leaseCleanupIntervalMillis(), + return new LeaseCleanupManager( + createLeaseCoordinator(metricsFactory), + metricsFactory, + Executors.newSingleThreadScheduledExecutor(), + cleanupLeasesUponShardCompletion, + leaseCleanupConfig.leaseCleanupIntervalMillis(), 
leaseCleanupConfig.completedLeaseCleanupIntervalMillis(), leaseCleanupConfig.garbageLeaseCleanupIntervalMillis()); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java index 11807b9c..838d2d15 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresher.java @@ -14,8 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import com.google.common.collect.ImmutableMap; - import java.time.Duration; import java.util.ArrayList; import java.util.Collection; @@ -24,6 +22,8 @@ import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; + +import com.google.common.collect.ImmutableMap; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.core.util.DefaultSdkAutoConstructList; @@ -101,8 +101,11 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * @param consistentReads */ @Deprecated - public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient, - final LeaseSerializer serializer, final boolean consistentReads) { + public DynamoDBLeaseRefresher( + final String table, + final DynamoDbAsyncClient dynamoDBClient, + final LeaseSerializer serializer, + final boolean consistentReads) { this(table, dynamoDBClient, serializer, consistentReads, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); } @@ -116,10 +119,19 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * @param tableCreatorCallback */ @Deprecated - public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient, - final LeaseSerializer serializer, final boolean 
consistentReads, - @NonNull final TableCreatorCallback tableCreatorCallback) { - this(table, dynamoDBClient, serializer, consistentReads, tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); + public DynamoDBLeaseRefresher( + final String table, + final DynamoDbAsyncClient dynamoDBClient, + final LeaseSerializer serializer, + final boolean consistentReads, + @NonNull final TableCreatorCallback tableCreatorCallback) { + this( + table, + dynamoDBClient, + serializer, + consistentReads, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT); } /** @@ -132,10 +144,22 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * @param dynamoDbRequestTimeout */ @Deprecated - public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient, - final LeaseSerializer serializer, final boolean consistentReads, - @NonNull final TableCreatorCallback tableCreatorCallback, Duration dynamoDbRequestTimeout) { - this(table, dynamoDBClient, serializer, consistentReads, tableCreatorCallback, dynamoDbRequestTimeout, BillingMode.PAY_PER_REQUEST, false); + public DynamoDBLeaseRefresher( + final String table, + final DynamoDbAsyncClient dynamoDBClient, + final LeaseSerializer serializer, + final boolean consistentReads, + @NonNull final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout) { + this( + table, + dynamoDBClient, + serializer, + consistentReads, + tableCreatorCallback, + dynamoDbRequestTimeout, + BillingMode.PAY_PER_REQUEST, + false); } /** @@ -150,12 +174,25 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * @param leaseTableDeletionProtectionEnabled */ @Deprecated - public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient, - final LeaseSerializer serializer, final boolean consistentReads, - @NonNull final TableCreatorCallback tableCreatorCallback, Duration dynamoDbRequestTimeout, - final BillingMode billingMode, final boolean 
leaseTableDeletionProtectionEnabled) { - this(table, dynamoDBClient, serializer, consistentReads, tableCreatorCallback, dynamoDbRequestTimeout, - billingMode, leaseTableDeletionProtectionEnabled, DefaultSdkAutoConstructList.getInstance()); + public DynamoDBLeaseRefresher( + final String table, + final DynamoDbAsyncClient dynamoDBClient, + final LeaseSerializer serializer, + final boolean consistentReads, + @NonNull final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + final BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled) { + this( + table, + dynamoDBClient, + serializer, + consistentReads, + tableCreatorCallback, + dynamoDbRequestTimeout, + billingMode, + leaseTableDeletionProtectionEnabled, + DefaultSdkAutoConstructList.getInstance()); } /** @@ -170,11 +207,16 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * @param leaseTableDeletionProtectionEnabled * @param tags */ - public DynamoDBLeaseRefresher(final String table, final DynamoDbAsyncClient dynamoDBClient, - final LeaseSerializer serializer, final boolean consistentReads, - @NonNull final TableCreatorCallback tableCreatorCallback, Duration dynamoDbRequestTimeout, - final BillingMode billingMode, final boolean leaseTableDeletionProtectionEnabled, - final Collection tags) { + public DynamoDBLeaseRefresher( + final String table, + final DynamoDbAsyncClient dynamoDBClient, + final LeaseSerializer serializer, + final boolean consistentReads, + @NonNull final TableCreatorCallback tableCreatorCallback, + Duration dynamoDbRequestTimeout, + final BillingMode billingMode, + final boolean leaseTableDeletionProtectionEnabled, + final Collection tags) { this.table = table; this.dynamoDBClient = dynamoDBClient; this.serializer = serializer; @@ -194,8 +236,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throws ProvisionedThroughputException, DependencyException { final CreateTableRequest.Builder builder = 
createTableRequestBuilder(); if (BillingMode.PROVISIONED.equals(billingMode)) { - ProvisionedThroughput throughput = ProvisionedThroughput.builder().readCapacityUnits(readCapacity) - .writeCapacityUnits(writeCapacity).build(); + ProvisionedThroughput throughput = ProvisionedThroughput.builder() + .readCapacityUnits(readCapacity) + .writeCapacityUnits(writeCapacity) + .build(); builder.provisionedThroughput(throughput); } return createTableIfNotExists(builder.build()); @@ -205,8 +249,7 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * {@inheritDoc} */ @Override - public boolean createLeaseTableIfNotExists() - throws ProvisionedThroughputException, DependencyException { + public boolean createLeaseTableIfNotExists() throws ProvisionedThroughputException, DependencyException { final CreateTableRequest request = createTableRequestBuilder().build(); return createTableIfNotExists(request); @@ -259,7 +302,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { } private TableStatus tableStatus() throws DependencyException { - DescribeTableRequest request = DescribeTableRequest.builder().tableName(table).build(); + DescribeTableRequest request = + DescribeTableRequest.builder().tableName(table).build(); final AWSExceptionManager exceptionManager = createExceptionManager(); exceptionManager.add(ResourceNotFoundException.class, t -> t); @@ -267,7 +311,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { DescribeTableResponse result; try { try { - result = FutureUtils.resolveOrCancelFuture(dynamoDBClient.describeTable(request), dynamoDbRequestTimeout); + result = FutureUtils.resolveOrCancelFuture( + dynamoDBClient.describeTable(request), dynamoDbRequestTimeout); } catch (ExecutionException e) { throw exceptionManager.apply(e.getCause()); } catch (InterruptedException e) { @@ -332,9 +377,9 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * {@inheritDoc} */ @Override - public List 
listLeasesForStream(StreamIdentifier streamIdentifier) throws DependencyException, - InvalidStateException, ProvisionedThroughputException { - return list( null, streamIdentifier); + public List listLeasesForStream(StreamIdentifier streamIdentifier) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + return list(null, streamIdentifier); } /** @@ -380,8 +425,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { * @throws DependencyException if DynamoDB scan fail in an unexpected way * @throws ProvisionedThroughputException if DynamoDB scan fail due to exceeded capacity */ - private List list(Integer limit, Integer maxPages, StreamIdentifier streamIdentifier) throws DependencyException, InvalidStateException, - ProvisionedThroughputException { + private List list(Integer limit, Integer maxPages, StreamIdentifier streamIdentifier) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { log.debug("Listing leases from table {}", table); @@ -389,9 +434,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { if (streamIdentifier != null) { final Map expressionAttributeValues = ImmutableMap.of( - DDB_STREAM_NAME, AttributeValue.builder().s(streamIdentifier.serialize()).build() - ); - scanRequestBuilder = scanRequestBuilder.filterExpression(STREAM_NAME + " = " + DDB_STREAM_NAME) + DDB_STREAM_NAME, + AttributeValue.builder().s(streamIdentifier.serialize()).build()); + scanRequestBuilder = scanRequestBuilder + .filterExpression(STREAM_NAME + " = " + DDB_STREAM_NAME) .expressionAttributeValues(expressionAttributeValues); } @@ -401,12 +447,13 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { ScanRequest scanRequest = scanRequestBuilder.build(); final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ResourceNotFoundException.class, t -> t); + exceptionManager.add(ResourceNotFoundException.class, t -> t); 
exceptionManager.add(ProvisionedThroughputExceededException.class, t -> t); try { try { - ScanResponse scanResult = FutureUtils.resolveOrCancelFuture(dynamoDBClient.scan(scanRequest), dynamoDbRequestTimeout); + ScanResponse scanResult = + FutureUtils.resolveOrCancelFuture(dynamoDBClient.scan(scanRequest), dynamoDbRequestTimeout); List result = new ArrayList<>(); while (scanResult != null) { @@ -422,9 +469,12 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { log.debug("lastEvaluatedKey was null - scan finished."); } else { // Make another request, picking up where we left off. - scanRequest = scanRequest.toBuilder().exclusiveStartKey(lastEvaluatedKey).build(); + scanRequest = scanRequest.toBuilder() + .exclusiveStartKey(lastEvaluatedKey) + .build(); log.debug("lastEvaluatedKey was {}, continuing scan.", lastEvaluatedKey); - scanResult = FutureUtils.resolveOrCancelFuture(dynamoDBClient.scan(scanRequest), dynamoDbRequestTimeout); + scanResult = FutureUtils.resolveOrCancelFuture( + dynamoDBClient.scan(scanRequest), dynamoDbRequestTimeout); } } log.debug("Listed {} leases from table {}", result.size(), table); @@ -452,8 +502,11 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throws DependencyException, InvalidStateException, ProvisionedThroughputException { log.debug("Creating lease: {}", lease); - PutItemRequest request = PutItemRequest.builder().tableName(table).item(serializer.toDynamoRecord(lease)) - .expected(serializer.getDynamoNonexistantExpectation()).build(); + PutItemRequest request = PutItemRequest.builder() + .tableName(table) + .item(serializer.toDynamoRecord(lease)) + .expected(serializer.getDynamoNonexistantExpectation()) + .build(); final AWSExceptionManager exceptionManager = createExceptionManager(); exceptionManager.add(ConditionalCheckFailedException.class, t -> t); @@ -485,12 +538,16 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throws DependencyException, InvalidStateException, 
ProvisionedThroughputException { log.debug("Getting lease with key {}", leaseKey); - GetItemRequest request = GetItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(leaseKey)) - .consistentRead(consistentReads).build(); + GetItemRequest request = GetItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(leaseKey)) + .consistentRead(consistentReads) + .build(); final AWSExceptionManager exceptionManager = createExceptionManager(); try { try { - GetItemResponse result = FutureUtils.resolveOrCancelFuture(dynamoDBClient.getItem(request), dynamoDbRequestTimeout); + GetItemResponse result = + FutureUtils.resolveOrCancelFuture(dynamoDBClient.getItem(request), dynamoDbRequestTimeout); Map dynamoRecord = result.item(); if (CollectionUtils.isNullOrEmpty(dynamoRecord)) { @@ -520,12 +577,15 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throws DependencyException, InvalidStateException, ProvisionedThroughputException { log.debug("Renewing lease with key {}", lease.leaseKey()); - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) + UpdateItemRequest request = UpdateItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) .expected(serializer.getDynamoLeaseCounterExpectation(lease)) - .attributeUpdates(serializer.getDynamoLeaseCounterUpdate(lease)).build(); + .attributeUpdates(serializer.getDynamoLeaseCounterUpdate(lease)) + .build(); final AWSExceptionManager exceptionManager = createExceptionManager(); - exceptionManager.add(ConditionalCheckFailedException.class, t -> t); + exceptionManager.add(ConditionalCheckFailedException.class, t -> t); try { try { @@ -537,8 +597,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new DependencyException(e); } } catch (ConditionalCheckFailedException e) { - log.debug("Lease renewal failed for lease with key {} because the lease counter was not {}", - 
lease.leaseKey(), lease.leaseCounter()); + log.debug( + "Lease renewal failed for lease with key {} because the lease counter was not {}", + lease.leaseKey(), + lease.leaseCounter()); // If we had a spurious retry during the Dynamo update, then this conditional PUT failure // might be incorrect. So, we get the item straight away and check if the lease owner + lease @@ -546,7 +608,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { String expectedOwner = lease.leaseOwner(); Long expectedCounter = lease.leaseCounter() + 1; final Lease updatedLease = getLease(lease.leaseKey()); - if (updatedLease == null || !expectedOwner.equals(updatedLease.leaseOwner()) + if (updatedLease == null + || !expectedOwner.equals(updatedLease.leaseOwner()) || !expectedCounter.equals(updatedLease.leaseCounter())) { return false; } @@ -569,8 +632,11 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throws DependencyException, InvalidStateException, ProvisionedThroughputException { final String oldOwner = lease.leaseOwner(); - log.debug("Taking lease with leaseKey {} from {} to {}", lease.leaseKey(), - lease.leaseOwner() == null ? "nobody" : lease.leaseOwner(), owner); + log.debug( + "Taking lease with leaseKey {} from {} to {}", + lease.leaseKey(), + lease.leaseOwner() == null ? 
"nobody" : lease.leaseOwner(), + owner); final AWSExceptionManager exceptionManager = createExceptionManager(); exceptionManager.add(ConditionalCheckFailedException.class, t -> t); @@ -578,8 +644,12 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { Map updates = serializer.getDynamoLeaseCounterUpdate(lease); updates.putAll(serializer.getDynamoTakeLeaseUpdate(lease, owner)); - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) - .expected(serializer.getDynamoLeaseCounterExpectation(lease)).attributeUpdates(updates).build(); + UpdateItemRequest request = UpdateItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) + .expected(serializer.getDynamoLeaseCounterExpectation(lease)) + .attributeUpdates(updates) + .build(); try { try { @@ -591,8 +661,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new DependencyException(e); } } catch (ConditionalCheckFailedException e) { - log.debug("Lease renewal failed for lease with key {} because the lease counter was not {}", - lease.leaseKey(), lease.leaseCounter()); + log.debug( + "Lease renewal failed for lease with key {} because the lease counter was not {}", + lease.leaseKey(), + lease.leaseCounter()); return false; } catch (DynamoDbException | TimeoutException e) { throw convertAndRethrowExceptions("take", lease.leaseKey(), e); @@ -623,8 +695,12 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { Map updates = serializer.getDynamoLeaseCounterUpdate(lease); updates.putAll(serializer.getDynamoEvictLeaseUpdate(lease)); - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) - .expected(serializer.getDynamoLeaseOwnerExpectation(lease)).attributeUpdates(updates).build(); + UpdateItemRequest request = UpdateItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) + 
.expected(serializer.getDynamoLeaseOwnerExpectation(lease)) + .attributeUpdates(updates) + .build(); try { try { @@ -636,8 +712,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new DependencyException(e); } } catch (ConditionalCheckFailedException e) { - log.debug("Lease eviction failed for lease with key {} because the lease owner was not {}", - lease.leaseKey(), lease.leaseOwner()); + log.debug( + "Lease eviction failed for lease with key {} because the lease owner was not {}", + lease.leaseKey(), + lease.leaseOwner()); return false; } catch (DynamoDbException | TimeoutException e) { throw convertAndRethrowExceptions("evict", lease.leaseKey(), e); @@ -660,8 +738,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { final AWSExceptionManager exceptionManager = createExceptionManager(); for (final Lease lease : allLeases) { - DeleteItemRequest deleteRequest = DeleteItemRequest.builder().tableName(table) - .key(serializer.getDynamoHashKey(lease)).build(); + DeleteItemRequest deleteRequest = DeleteItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) + .build(); try { try { @@ -687,8 +767,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throws DependencyException, InvalidStateException, ProvisionedThroughputException { log.debug("Deleting lease with leaseKey {}", lease.leaseKey()); - DeleteItemRequest deleteRequest = DeleteItemRequest.builder().tableName(table) - .key(serializer.getDynamoHashKey(lease)).build(); + DeleteItemRequest deleteRequest = DeleteItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) + .build(); final AWSExceptionManager exceptionManager = createExceptionManager(); try { @@ -721,8 +803,12 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { Map updates = serializer.getDynamoLeaseCounterUpdate(lease); updates.putAll(serializer.getDynamoUpdateLeaseUpdate(lease)); - UpdateItemRequest request = 
UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) - .expected(serializer.getDynamoLeaseCounterExpectation(lease)).attributeUpdates(updates).build(); + UpdateItemRequest request = UpdateItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) + .expected(serializer.getDynamoLeaseCounterExpectation(lease)) + .attributeUpdates(updates) + .build(); try { try { @@ -733,8 +819,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new DependencyException(e); } } catch (ConditionalCheckFailedException e) { - log.debug("Lease update failed for lease with key {} because the lease counter was not {}", - lease.leaseKey(), lease.leaseCounter()); + log.debug( + "Lease update failed for lease with key {} because the lease counter was not {}", + lease.leaseKey(), + lease.leaseCounter()); return false; } catch (DynamoDbException | TimeoutException e) { throw convertAndRethrowExceptions("update", lease.leaseKey(), e); @@ -752,9 +840,12 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { final AWSExceptionManager exceptionManager = createExceptionManager(); exceptionManager.add(ConditionalCheckFailedException.class, t -> t); Map updates = serializer.getDynamoUpdateLeaseUpdate(lease, updateField); - UpdateItemRequest request = UpdateItemRequest.builder().tableName(table).key(serializer.getDynamoHashKey(lease)) + UpdateItemRequest request = UpdateItemRequest.builder() + .tableName(table) + .key(serializer.getDynamoHashKey(lease)) .expected(serializer.getDynamoExistentExpectation(lease.leaseKey())) - .attributeUpdates(updates).build(); + .attributeUpdates(updates) + .build(); try { try { FutureUtils.resolveOrCancelFuture(dynamoDBClient.updateItem(request), dynamoDbRequestTimeout); @@ -764,8 +855,10 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new DependencyException(e); } } catch (ConditionalCheckFailedException e) { - log.warn("Lease update failed for lease 
with key {} because the lease did not exist at the time of the update", - lease.leaseKey(), e); + log.warn( + "Lease update failed for lease with key {} because the lease did not exist at the time of the update", + lease.leaseKey(), + e); } catch (DynamoDbException | TimeoutException e) { throw convertAndRethrowExceptions("update", lease.leaseKey(), e); } @@ -800,8 +893,8 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { throw new ProvisionedThroughputException(e); } else if (e instanceof ResourceNotFoundException) { throw new InvalidStateException( - String.format("Cannot %s lease with key %s because table %s does not exist.", - operation, leaseKey, table), + String.format( + "Cannot %s lease with key %s because table %s does not exist.", operation, leaseKey, table), e); } else { return new DependencyException(e); @@ -809,10 +902,12 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { } private CreateTableRequest.Builder createTableRequestBuilder() { - final CreateTableRequest.Builder builder = CreateTableRequest.builder().tableName(table).keySchema(serializer.getKeySchema()) - .attributeDefinitions(serializer.getAttributeDefinitions()) - .deletionProtectionEnabled(leaseTableDeletionProtectionEnabled) - .tags(tags); + final CreateTableRequest.Builder builder = CreateTableRequest.builder() + .tableName(table) + .keySchema(serializer.getKeySchema()) + .attributeDefinitions(serializer.getAttributeDefinitions()) + .deletionProtectionEnabled(leaseTableDeletionProtectionEnabled) + .tags(tags); if (BillingMode.PAY_PER_REQUEST.equals(billingMode)) { builder.billingMode(billingMode); } @@ -826,7 +921,9 @@ public class DynamoDBLeaseRefresher implements LeaseRefresher { } void performPostTableCreationAction() { - tableCreatorCallback.performAction( - TableCreatorCallbackInput.builder().dynamoDbClient(dynamoDBClient).tableName(table).build()); + tableCreatorCallback.performAction(TableCreatorCallbackInput.builder() + 
.dynamoDbClient(dynamoDBClient) + .tableName(table) + .build()); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewer.java index ab2d38c5..88a6f9b2 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewer.java @@ -29,11 +29,10 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; -import org.apache.commons.lang3.StringUtils; - import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.common.StreamIdentifier; @@ -78,8 +77,11 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { * @param executorService * ExecutorService to use for renewing leases in parallel */ - public DynamoDBLeaseRenewer(final LeaseRefresher leaseRefresher, final String workerIdentifier, - final long leaseDurationMillis, final ExecutorService executorService, + public DynamoDBLeaseRenewer( + final LeaseRefresher leaseRefresher, + final String workerIdentifier, + final long leaseDurationMillis, + final ExecutorService executorService, final MetricsFactory metricsFactory) { this.leaseRefresher = leaseRefresher; this.workerIdentifier = workerIdentifier; @@ -108,10 +110,10 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { boolean success = false; try { - /* - * We iterate in descending order here so that the synchronized(lease) inside renewLease doesn't "lead" calls - * to getCurrentlyHeldLeases. 
They'll still cross paths, but they won't interleave their executions. - */ + /* + * We iterate in descending order here so that the synchronized(lease) inside renewLease doesn't "lead" calls + * to getCurrentlyHeldLeases. They'll still cross paths, but they won't interleave their executions. + */ int lostLeases = 0; List> renewLeaseTasks = new ArrayList<>(); for (Lease lease : ownedLeases.descendingMap().values()) { @@ -139,8 +141,10 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { scope.addData("CurrentLeases", ownedLeases.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY); if (leasesInUnknownState > 0) { throw new DependencyException( - String.format("Encountered an exception while renewing leases. The number" - + " of leases which might not have been renewed is %d", leasesInUnknownState), + String.format( + "Encountered an exception while renewing leases. The number" + + " of leases which might not have been renewed is %d", + leasesInUnknownState), lastException); } success = true; @@ -165,7 +169,8 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { return renewLease(lease, false); } - private boolean renewLease(Lease lease, boolean renewEvenIfExpired) throws DependencyException, InvalidStateException { + private boolean renewLease(Lease lease, boolean renewEvenIfExpired) + throws DependencyException, InvalidStateException { String leaseKey = lease.leaseKey(); final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, RENEW_ALL_LEASES_DIMENSION); @@ -201,8 +206,12 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { success = true; break; } catch (ProvisionedThroughputException e) { - log.info("Worker {} could not renew lease with key {} on try {} out of {} due to capacity", - workerIdentifier, leaseKey, i, RENEWAL_RETRIES); + log.info( + "Worker {} could not renew lease with key {} on try {} out of {} due to capacity", + workerIdentifier, + leaseKey, + i, + RENEWAL_RETRIES); } } } finally { @@ -258,8 
+267,8 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { } if (copy.isExpired(leaseDurationNanos, now)) { - log.info("getCurrentlyHeldLease not returning lease with key {} because it is expired", - copy.leaseKey()); + log.info( + "getCurrentlyHeldLease not returning lease with key {} because it is expired", copy.leaseKey()); return null; } else { return copy; @@ -271,8 +280,9 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { * {@inheritDoc} */ @Override - public boolean updateLease(Lease lease, UUID concurrencyToken, @NonNull String operation, String singleStreamShardId) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + public boolean updateLease( + Lease lease, UUID concurrencyToken, @NonNull String operation, String singleStreamShardId) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { verifyNotNull(lease, "lease cannot be null"); verifyNotNull(lease.leaseKey(), "leaseKey cannot be null"); verifyNotNull(concurrencyToken, "concurrencyToken cannot be null"); @@ -281,7 +291,9 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { Lease authoritativeLease = ownedLeases.get(leaseKey); if (authoritativeLease == null) { - log.info("Worker {} could not update lease with key {} because it does not hold it", workerIdentifier, + log.info( + "Worker {} could not update lease with key {} because it does not hold it", + workerIdentifier, leaseKey); return false; } @@ -292,15 +304,17 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { * called update. 
*/ if (!authoritativeLease.concurrencyToken().equals(concurrencyToken)) { - log.info("Worker {} refusing to update lease with key {} because concurrency tokens don't match", - workerIdentifier, leaseKey); + log.info( + "Worker {} refusing to update lease with key {} because concurrency tokens don't match", + workerIdentifier, + leaseKey); return false; } final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, operation); if (lease instanceof MultiStreamLease) { - MetricsUtil.addStreamId(scope, - StreamIdentifier.multiStreamInstance(((MultiStreamLease) lease).streamIdentifier())); + MetricsUtil.addStreamId( + scope, StreamIdentifier.multiStreamInstance(((MultiStreamLease) lease).streamIdentifier())); MetricsUtil.addShardId(scope, ((MultiStreamLease) lease).shardId()); } else if (StringUtils.isNotEmpty(singleStreamShardId)) { MetricsUtil.addShardId(scope, singleStreamShardId); @@ -359,7 +373,8 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { for (Lease lease : newLeases) { if (lease.lastCounterIncrementNanos() == null) { - log.info("addLeasesToRenew ignoring lease with key {} because it does not have lastRenewalNanos set", + log.info( + "addLeasesToRenew ignoring lease with key {} because it does not have lastRenewalNanos set", lease.leaseKey()); continue; } @@ -424,5 +439,4 @@ public class DynamoDBLeaseRenewer implements LeaseRenewer { throw new IllegalArgumentException(message); } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseSerializer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseSerializer.java index 7e12b9a9..c10cf475 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseSerializer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseSerializer.java @@ -20,7 +20,6 @@ import java.util.HashMap; import java.util.List; 
import java.util.Map; - import com.google.common.base.Strings; import software.amazon.awssdk.services.dynamodb.model.AttributeAction; import software.amazon.awssdk.services.dynamodb.model.AttributeDefinition; @@ -71,8 +70,12 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { } result.put(OWNER_SWITCHES_KEY, DynamoUtils.createAttributeValue(lease.ownerSwitchesSinceCheckpoint())); - result.put(CHECKPOINT_SEQUENCE_NUMBER_KEY, DynamoUtils.createAttributeValue(lease.checkpoint().sequenceNumber())); - result.put(CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber())); + result.put( + CHECKPOINT_SEQUENCE_NUMBER_KEY, + DynamoUtils.createAttributeValue(lease.checkpoint().sequenceNumber())); + result.put( + CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, + DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber())); if (lease.parentShardIds() != null && !lease.parentShardIds().isEmpty()) { result.put(PARENT_SHARD_ID_KEY, DynamoUtils.createAttributeValue(lease.parentShardIds())); } @@ -80,18 +83,31 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { result.put(CHILD_SHARD_IDS_KEY, DynamoUtils.createAttributeValue(lease.childShardIds())); } - if (lease.pendingCheckpoint() != null && !lease.pendingCheckpoint().sequenceNumber().isEmpty()) { - result.put(PENDING_CHECKPOINT_SEQUENCE_KEY, DynamoUtils.createAttributeValue(lease.pendingCheckpoint().sequenceNumber())); - result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, DynamoUtils.createAttributeValue(lease.pendingCheckpoint().subSequenceNumber())); + if (lease.pendingCheckpoint() != null + && !lease.pendingCheckpoint().sequenceNumber().isEmpty()) { + result.put( + PENDING_CHECKPOINT_SEQUENCE_KEY, + DynamoUtils.createAttributeValue(lease.pendingCheckpoint().sequenceNumber())); + result.put( + PENDING_CHECKPOINT_SUBSEQUENCE_KEY, + DynamoUtils.createAttributeValue(lease.pendingCheckpoint().subSequenceNumber())); } if (lease.pendingCheckpointState() != 
null) { - result.put(PENDING_CHECKPOINT_STATE_KEY, DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber())); + result.put( + PENDING_CHECKPOINT_STATE_KEY, + DynamoUtils.createAttributeValue(lease.pendingCheckpointState())); } if (lease.hashKeyRangeForLease() != null) { - result.put(STARTING_HASH_KEY, DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedStartingHashKey())); - result.put(ENDING_HASH_KEY, DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedEndingHashKey())); + result.put( + STARTING_HASH_KEY, + DynamoUtils.createAttributeValue( + lease.hashKeyRangeForLease().serializedStartingHashKey())); + result.put( + ENDING_HASH_KEY, + DynamoUtils.createAttributeValue( + lease.hashKeyRangeForLease().serializedEndingHashKey())); } return result; @@ -110,20 +126,16 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { leaseToUpdate.leaseCounter(DynamoUtils.safeGetLong(dynamoRecord, LEASE_COUNTER_KEY)); leaseToUpdate.ownerSwitchesSinceCheckpoint(DynamoUtils.safeGetLong(dynamoRecord, OWNER_SWITCHES_KEY)); - leaseToUpdate.checkpoint( - new ExtendedSequenceNumber( - DynamoUtils.safeGetString(dynamoRecord, CHECKPOINT_SEQUENCE_NUMBER_KEY), - DynamoUtils.safeGetLong(dynamoRecord, CHECKPOINT_SUBSEQUENCE_NUMBER_KEY)) - ); + leaseToUpdate.checkpoint(new ExtendedSequenceNumber( + DynamoUtils.safeGetString(dynamoRecord, CHECKPOINT_SEQUENCE_NUMBER_KEY), + DynamoUtils.safeGetLong(dynamoRecord, CHECKPOINT_SUBSEQUENCE_NUMBER_KEY))); leaseToUpdate.parentShardIds(DynamoUtils.safeGetSS(dynamoRecord, PARENT_SHARD_ID_KEY)); leaseToUpdate.childShardIds(DynamoUtils.safeGetSS(dynamoRecord, CHILD_SHARD_IDS_KEY)); if (!Strings.isNullOrEmpty(DynamoUtils.safeGetString(dynamoRecord, PENDING_CHECKPOINT_SEQUENCE_KEY))) { - leaseToUpdate.pendingCheckpoint( - new ExtendedSequenceNumber( - DynamoUtils.safeGetString(dynamoRecord, PENDING_CHECKPOINT_SEQUENCE_KEY), - DynamoUtils.safeGetLong(dynamoRecord, 
PENDING_CHECKPOINT_SUBSEQUENCE_KEY)) - ); + leaseToUpdate.pendingCheckpoint(new ExtendedSequenceNumber( + DynamoUtils.safeGetString(dynamoRecord, PENDING_CHECKPOINT_SEQUENCE_KEY), + DynamoUtils.safeGetLong(dynamoRecord, PENDING_CHECKPOINT_SUBSEQUENCE_KEY))); } leaseToUpdate.pendingCheckpointState(DynamoUtils.safeGetByteArray(dynamoRecord, PENDING_CHECKPOINT_STATE_KEY)); @@ -159,7 +171,9 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { public Map getDynamoLeaseCounterExpectation(final Long leaseCounter) { Map result = new HashMap<>(); - ExpectedAttributeValue eav = ExpectedAttributeValue.builder().value(DynamoUtils.createAttributeValue(leaseCounter)).build(); + ExpectedAttributeValue eav = ExpectedAttributeValue.builder() + .value(DynamoUtils.createAttributeValue(leaseCounter)) + .build(); result.put(LEASE_COUNTER_KEY, eav); return result; @@ -170,13 +184,13 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { Map result = new HashMap<>(); ExpectedAttributeValue.Builder eavBuilder = ExpectedAttributeValue.builder(); - + if (lease.leaseOwner() == null) { eavBuilder = eavBuilder.exists(false); } else { eavBuilder = eavBuilder.value(DynamoUtils.createAttributeValue(lease.leaseOwner())); } - + result.put(LEASE_OWNER_KEY, eavBuilder.build()); return result; @@ -186,7 +200,8 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { public Map getDynamoNonexistantExpectation() { Map result = new HashMap<>(); - ExpectedAttributeValue expectedAV = ExpectedAttributeValue.builder().exists(false).build(); + ExpectedAttributeValue expectedAV = + ExpectedAttributeValue.builder().exists(false).build(); result.put(LEASE_KEY_KEY, expectedAV); return result; @@ -213,8 +228,10 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { public Map getDynamoLeaseCounterUpdate(Long leaseCounter) { Map result = new HashMap<>(); - AttributeValueUpdate avu = - 
AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(leaseCounter + 1)).action(AttributeAction.PUT).build(); + AttributeValueUpdate avu = AttributeValueUpdate.builder() + .value(DynamoUtils.createAttributeValue(leaseCounter + 1)) + .action(AttributeAction.PUT) + .build(); result.put(LEASE_COUNTER_KEY, avu); return result; @@ -224,13 +241,21 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { public Map getDynamoTakeLeaseUpdate(final Lease lease, String owner) { Map result = new HashMap<>(); - result.put(LEASE_OWNER_KEY, AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(owner)) - .action(AttributeAction.PUT).build()); + result.put( + LEASE_OWNER_KEY, + AttributeValueUpdate.builder() + .value(DynamoUtils.createAttributeValue(owner)) + .action(AttributeAction.PUT) + .build()); String oldOwner = lease.leaseOwner(); if (oldOwner != null && !oldOwner.equals(owner)) { - result.put(OWNER_SWITCHES_KEY, AttributeValueUpdate.builder().value(DynamoUtils.createAttributeValue(1L)) - .action(AttributeAction.ADD).build()); + result.put( + OWNER_SWITCHES_KEY, + AttributeValueUpdate.builder() + .value(DynamoUtils.createAttributeValue(1L)) + .action(AttributeAction.ADD) + .build()); } return result; @@ -241,68 +266,109 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { Map result = new HashMap<>(); AttributeValue value = null; - result.put(LEASE_OWNER_KEY, AttributeValueUpdate.builder().value(value).action(AttributeAction.DELETE).build()); + result.put( + LEASE_OWNER_KEY, + AttributeValueUpdate.builder() + .value(value) + .action(AttributeAction.DELETE) + .build()); return result; } protected AttributeValueUpdate putUpdate(AttributeValue attributeValue) { - return AttributeValueUpdate.builder().value(attributeValue).action(AttributeAction.PUT).build(); + return AttributeValueUpdate.builder() + .value(attributeValue) + .action(AttributeAction.PUT) + .build(); } @Override public Map getDynamoUpdateLeaseUpdate(final 
Lease lease) { Map result = new HashMap<>(); - result.put(CHECKPOINT_SEQUENCE_NUMBER_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.checkpoint().sequenceNumber()))); - result.put(CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber()))); - result.put(OWNER_SWITCHES_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.ownerSwitchesSinceCheckpoint()))); + result.put( + CHECKPOINT_SEQUENCE_NUMBER_KEY, + putUpdate(DynamoUtils.createAttributeValue(lease.checkpoint().sequenceNumber()))); + result.put( + CHECKPOINT_SUBSEQUENCE_NUMBER_KEY, + putUpdate(DynamoUtils.createAttributeValue(lease.checkpoint().subSequenceNumber()))); + result.put( + OWNER_SWITCHES_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.ownerSwitchesSinceCheckpoint()))); - if (lease.pendingCheckpoint() != null && !lease.pendingCheckpoint().sequenceNumber().isEmpty()) { - result.put(PENDING_CHECKPOINT_SEQUENCE_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.pendingCheckpoint().sequenceNumber()))); - result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, putUpdate(DynamoUtils.createAttributeValue( - lease.pendingCheckpoint().subSequenceNumber()))); + if (lease.pendingCheckpoint() != null + && !lease.pendingCheckpoint().sequenceNumber().isEmpty()) { + result.put( + PENDING_CHECKPOINT_SEQUENCE_KEY, + putUpdate(DynamoUtils.createAttributeValue( + lease.pendingCheckpoint().sequenceNumber()))); + result.put( + PENDING_CHECKPOINT_SUBSEQUENCE_KEY, + putUpdate(DynamoUtils.createAttributeValue( + lease.pendingCheckpoint().subSequenceNumber()))); } else { - result.put(PENDING_CHECKPOINT_SEQUENCE_KEY, AttributeValueUpdate.builder().action(AttributeAction.DELETE).build()); - result.put(PENDING_CHECKPOINT_SUBSEQUENCE_KEY, AttributeValueUpdate.builder().action(AttributeAction.DELETE).build()); + result.put( + PENDING_CHECKPOINT_SEQUENCE_KEY, + AttributeValueUpdate.builder() + .action(AttributeAction.DELETE) + .build()); + result.put( + 
PENDING_CHECKPOINT_SUBSEQUENCE_KEY, + AttributeValueUpdate.builder() + .action(AttributeAction.DELETE) + .build()); } if (lease.pendingCheckpointState() != null) { - result.put(PENDING_CHECKPOINT_STATE_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.pendingCheckpointState()))); + result.put( + PENDING_CHECKPOINT_STATE_KEY, + putUpdate(DynamoUtils.createAttributeValue(lease.pendingCheckpointState()))); } else { - result.put(PENDING_CHECKPOINT_STATE_KEY, AttributeValueUpdate.builder().action(AttributeAction.DELETE).build()); + result.put( + PENDING_CHECKPOINT_STATE_KEY, + AttributeValueUpdate.builder() + .action(AttributeAction.DELETE) + .build()); } - if (!CollectionUtils.isNullOrEmpty(lease.childShardIds())) { result.put(CHILD_SHARD_IDS_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.childShardIds()))); } if (lease.hashKeyRangeForLease() != null) { - result.put(STARTING_HASH_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedStartingHashKey()))); - result.put(ENDING_HASH_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedEndingHashKey()))); + result.put( + STARTING_HASH_KEY, + putUpdate(DynamoUtils.createAttributeValue( + lease.hashKeyRangeForLease().serializedStartingHashKey()))); + result.put( + ENDING_HASH_KEY, + putUpdate(DynamoUtils.createAttributeValue( + lease.hashKeyRangeForLease().serializedEndingHashKey()))); } return result; } @Override - public Map getDynamoUpdateLeaseUpdate(Lease lease, - UpdateField updateField) { + public Map getDynamoUpdateLeaseUpdate(Lease lease, UpdateField updateField) { Map result = new HashMap<>(); switch (updateField) { - case CHILD_SHARDS: - if (!CollectionUtils.isNullOrEmpty(lease.childShardIds())) { - result.put(CHILD_SHARD_IDS_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.childShardIds()))); - } - break; - case HASH_KEY_RANGE: - if (lease.hashKeyRangeForLease() != null) { - result.put(STARTING_HASH_KEY, putUpdate( - 
DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedStartingHashKey()))); - result.put(ENDING_HASH_KEY, putUpdate( - DynamoUtils.createAttributeValue(lease.hashKeyRangeForLease().serializedEndingHashKey()))); - } - break; + case CHILD_SHARDS: + if (!CollectionUtils.isNullOrEmpty(lease.childShardIds())) { + result.put(CHILD_SHARD_IDS_KEY, putUpdate(DynamoUtils.createAttributeValue(lease.childShardIds()))); + } + break; + case HASH_KEY_RANGE: + if (lease.hashKeyRangeForLease() != null) { + result.put( + STARTING_HASH_KEY, + putUpdate(DynamoUtils.createAttributeValue( + lease.hashKeyRangeForLease().serializedStartingHashKey()))); + result.put( + ENDING_HASH_KEY, + putUpdate(DynamoUtils.createAttributeValue( + lease.hashKeyRangeForLease().serializedEndingHashKey()))); + } + break; } return result; } @@ -310,7 +376,10 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { @Override public Collection getKeySchema() { List keySchema = new ArrayList<>(); - keySchema.add(KeySchemaElement.builder().attributeName(LEASE_KEY_KEY).keyType(KeyType.HASH).build()); + keySchema.add(KeySchemaElement.builder() + .attributeName(LEASE_KEY_KEY) + .keyType(KeyType.HASH) + .build()); return keySchema; } @@ -318,8 +387,10 @@ public class DynamoDBLeaseSerializer implements LeaseSerializer { @Override public Collection getAttributeDefinitions() { List definitions = new ArrayList<>(); - definitions.add(AttributeDefinition.builder().attributeName(LEASE_KEY_KEY) - .attributeType(ScalarAttributeType.S).build()); + definitions.add(AttributeDefinition.builder() + .attributeName(LEASE_KEY_KEY) + .attributeType(ScalarAttributeType.S) + .build()); return definitions; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java index a4aabafd..7e494204 100644 --- 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTaker.java @@ -14,8 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import com.google.common.annotations.VisibleForTesting; - import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -28,6 +26,8 @@ import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; + +import com.google.common.annotations.VisibleForTesting; import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; import software.amazon.kinesis.annotations.KinesisClientInternalApi; @@ -73,7 +73,10 @@ public class DynamoDBLeaseTaker implements LeaseTaker { private int veryOldLeaseDurationNanosMultiplier = 3; private long lastScanTimeNanos = 0L; - public DynamoDBLeaseTaker(LeaseRefresher leaseRefresher, String workerIdentifier, long leaseDurationMillis, + public DynamoDBLeaseTaker( + LeaseRefresher leaseRefresher, + String workerIdentifier, + long leaseDurationMillis, final MetricsFactory metricsFactory) { this.leaseRefresher = leaseRefresher; this.workerIdentifier = workerIdentifier; @@ -167,7 +170,7 @@ public class DynamoDBLeaseTaker implements LeaseTaker { * @throws InvalidStateException */ synchronized Map takeLeases(Callable timeProvider) - throws DependencyException, InvalidStateException { + throws DependencyException, InvalidStateException { // Key is leaseKey Map takenLeases = new HashMap<>(); @@ -186,7 +189,10 @@ public class DynamoDBLeaseTaker implements LeaseTaker { updateAllLeases(timeProvider); success = true; } catch (ProvisionedThroughputException e) { - log.info("Worker {} could not find available leases on try {} out of {}", workerIdentifier, i, + log.info( + "Worker {} could not find available leases on try {} out of {}", + 
workerIdentifier, + i, TAKE_RETRIES); lastException = e; } @@ -198,8 +204,11 @@ public class DynamoDBLeaseTaker implements LeaseTaker { } if (lastException != null) { - log.error("Worker {} could not scan leases table, aborting TAKE_LEASES_DIMENSION. Exception caught by" - + " last retry:", workerIdentifier, lastException); + log.error( + "Worker {} could not scan leases table, aborting TAKE_LEASES_DIMENSION. Exception caught by" + + " last retry:", + workerIdentifier, + lastException); return takenLeases; } @@ -228,8 +237,13 @@ public class DynamoDBLeaseTaker implements LeaseTaker { success = true; break; } catch (ProvisionedThroughputException e) { - log.info("Could not take lease with key {} for worker {} on try {} out of {} due to" - + " capacity", leaseKey, workerIdentifier, i, TAKE_RETRIES); + log.info( + "Could not take lease with key {} for worker {} on try {} out of {} due to" + + " capacity", + leaseKey, + workerIdentifier, + i, + TAKE_RETRIES); } } } finally { @@ -238,12 +252,18 @@ public class DynamoDBLeaseTaker implements LeaseTaker { } if (takenLeases.size() > 0) { - log.info("Worker {} successfully took {} leases: {}", workerIdentifier, takenLeases.size(), + log.info( + "Worker {} successfully took {} leases: {}", + workerIdentifier, + takenLeases.size(), stringJoin(takenLeases.keySet(), ", ")); } if (untakenLeaseKeys.size() > 0) { - log.info("Worker {} failed to take {} leases: {}", workerIdentifier, untakenLeaseKeys.size(), + log.info( + "Worker {} failed to take {} leases: {}", + workerIdentifier, + untakenLeaseKeys.size(), stringJoin(untakenLeaseKeys, ", ")); } @@ -265,21 +285,25 @@ public class DynamoDBLeaseTaker implements LeaseTaker { * @param updateAllLeasesEndTime How long it takes for update all leases to complete * @return set of leases to take. 
*/ - private Set updateStaleLeasesWithLatestState(long updateAllLeasesEndTime, - Set leasesToTake) { + private Set updateStaleLeasesWithLatestState(long updateAllLeasesEndTime, Set leasesToTake) { if (updateAllLeasesEndTime > leaseRenewalIntervalMillis * RENEWAL_SLACK_PERCENTAGE) { - leasesToTake = leasesToTake.stream().map(lease -> { - if (lease.isMarkedForLeaseSteal()) { - try { - log.debug("Updating stale lease {}.", lease.leaseKey()); - return leaseRefresher.getLease(lease.leaseKey()); - } catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) { - log.warn("Failed to fetch latest state of the lease {} that needs to be stolen, " - + "defaulting to existing lease", lease.leaseKey(), e); - } - } - return lease; - }).collect(Collectors.toSet()); + leasesToTake = leasesToTake.stream() + .map(lease -> { + if (lease.isMarkedForLeaseSteal()) { + try { + log.debug("Updating stale lease {}.", lease.leaseKey()); + return leaseRefresher.getLease(lease.leaseKey()); + } catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) { + log.warn( + "Failed to fetch latest state of the lease {} that needs to be stolen, " + + "defaulting to existing lease", + lease.leaseKey(), + e); + } + } + return lease; + }) + .collect(Collectors.toSet()); } return leasesToTake; } @@ -316,7 +340,7 @@ public class DynamoDBLeaseTaker implements LeaseTaker { * @throws DependencyException if listLeases fails in an unexpected way */ private void updateAllLeases(Callable timeProvider) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { List freshList = leaseRefresher.listLeases(); try { lastScanTimeNanos = timeProvider.call(); @@ -349,14 +373,16 @@ public class DynamoDBLeaseTaker implements LeaseTaker { lease.lastCounterIncrementNanos(0L); if (log.isDebugEnabled()) { - log.debug("Treating new lease with key {} as never 
renewed because it is new and unowned.", + log.debug( + "Treating new lease with key {} as never renewed because it is new and unowned.", leaseKey); } } else { // if this new lease is owned, treat it as renewed as of the scan lease.lastCounterIncrementNanos(lastScanTimeNanos); if (log.isDebugEnabled()) { - log.debug("Treating new lease with key {} as recently renewed because it is new and owned.", + log.debug( + "Treating new lease with key {} as recently renewed because it is new and owned.", leaseKey); } } @@ -374,8 +400,8 @@ public class DynamoDBLeaseTaker implements LeaseTaker { */ private List getAvailableLeases() { return allLeases.values().stream() - .filter(lease->lease.isAvailable(leaseDurationNanos, lastScanTimeNanos)) - .collect(Collectors.toList()); + .filter(lease -> lease.isAvailable(leaseDurationNanos, lastScanTimeNanos)) + .collect(Collectors.toList()); } /** @@ -386,14 +412,15 @@ public class DynamoDBLeaseTaker implements LeaseTaker { * @return set of leases to take. */ @VisibleForTesting - Set computeLeasesToTake(List availableLeases, Callable timeProvider) throws DependencyException { + Set computeLeasesToTake(List availableLeases, Callable timeProvider) + throws DependencyException { Map leaseCounts = computeLeaseCounts(availableLeases); Set leasesToTake = new HashSet<>(); final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, TAKE_LEASES_DIMENSION); MetricsUtil.addWorkerIdentifier(scope, workerIdentifier); final int numAvailableLeases = availableLeases.size(); - final int numLeases = allLeases.size(); + final int numLeases = allLeases.size(); final int numWorkers = leaseCounts.size(); int numLeasesToReachTarget = 0; int leaseSpillover = 0; @@ -425,7 +452,11 @@ public class DynamoDBLeaseTaker implements LeaseTaker { "Worker {} target is {} leases and maxLeasesForWorker is {}. Resetting target to {}," + " lease spillover is {}. 
Note that some shards may not be processed if no other " + "workers are able to pick them up.", - workerIdentifier, target, maxLeasesForWorker, maxLeasesForWorker, leaseSpillover); + workerIdentifier, + target, + maxLeasesForWorker, + maxLeasesForWorker, + leaseSpillover); target = maxLeasesForWorker; } } @@ -445,14 +476,14 @@ public class DynamoDBLeaseTaker implements LeaseTaker { throw new DependencyException("Exception caught from timeProvider", e); } final long nanoThreshold = currentNanoTime - (veryOldLeaseDurationNanosMultiplier * leaseDurationNanos); - final List veryOldLeases = allLeases.values() - .stream() - .filter(lease -> nanoThreshold > lease.lastCounterIncrementNanos()) - .collect(Collectors.toList()); + final List veryOldLeases = allLeases.values().stream() + .filter(lease -> nanoThreshold > lease.lastCounterIncrementNanos()) + .collect(Collectors.toList()); if (!veryOldLeases.isEmpty()) { Collections.shuffle(veryOldLeases); - veryOldLeaseCount = Math.max(0, Math.min(maxLeasesForWorker - currentLeaseCount, veryOldLeases.size())); + veryOldLeaseCount = + Math.max(0, Math.min(maxLeasesForWorker - currentLeaseCount, veryOldLeases.size())); HashSet result = new HashSet<>(veryOldLeases.subList(0, veryOldLeaseCount)); if (veryOldLeaseCount > 0) { log.info("Taking leases that have been expired for a long time: {}", result); @@ -478,8 +509,11 @@ public class DynamoDBLeaseTaker implements LeaseTaker { // If there are no available leases and we need a lease, consider stealing. 
List leasesToSteal = chooseLeasesToSteal(leaseCounts, numLeasesToReachTarget, target); for (Lease leaseToSteal : leasesToSteal) { - log.info("Worker {} needed {} leases but none were available, so it will steal lease {} from {}", - workerIdentifier, numLeasesToReachTarget, leaseToSteal.leaseKey(), + log.info( + "Worker {} needed {} leases but none were available, so it will steal lease {} from {}", + workerIdentifier, + numLeasesToReachTarget, + leaseToSteal.leaseKey(), leaseToSteal.leaseOwner()); leasesToTake.add(leaseToSteal); } @@ -489,14 +523,20 @@ public class DynamoDBLeaseTaker implements LeaseTaker { log.info( "Worker {} saw {} total leases, {} available leases, {} " + "workers. Target is {} leases, I have {} leases, I will take {} leases", - workerIdentifier, numLeases, numAvailableLeases, numWorkers, target, myCount, + workerIdentifier, + numLeases, + numAvailableLeases, + numWorkers, + target, + myCount, leasesToTake.size()); } } finally { scope.addData("ExpiredLeases", numAvailableLeases, StandardUnit.COUNT, MetricsLevel.SUMMARY); scope.addData("LeaseSpillover", leaseSpillover, StandardUnit.COUNT, MetricsLevel.SUMMARY); scope.addData("LeasesToTake", leasesToTake.size(), StandardUnit.COUNT, MetricsLevel.DETAILED); - scope.addData("NeededLeases", Math.max(numLeasesToReachTarget, 0), StandardUnit.COUNT, MetricsLevel.DETAILED); + scope.addData( + "NeededLeases", Math.max(numLeasesToReachTarget, 0), StandardUnit.COUNT, MetricsLevel.DETAILED); scope.addData("NumWorkers", numWorkers, StandardUnit.COUNT, MetricsLevel.SUMMARY); scope.addData("TotalLeases", numLeases, StandardUnit.COUNT, MetricsLevel.DETAILED); scope.addData("VeryOldLeases", veryOldLeaseCount, StandardUnit.COUNT, MetricsLevel.SUMMARY); @@ -544,19 +584,17 @@ public class DynamoDBLeaseTaker implements LeaseTaker { if (numLeasesToSteal <= 0) { if (log.isDebugEnabled()) { - log.debug(String.format("Worker %s not stealing from most loaded worker %s. 
He has %d," - + " target is %d, and I need %d", - workerIdentifier, - mostLoadedWorker.getKey(), - mostLoadedWorker.getValue(), - target, - needed)); + log.debug(String.format( + "Worker %s not stealing from most loaded worker %s. He has %d," + + " target is %d, and I need %d", + workerIdentifier, mostLoadedWorker.getKey(), mostLoadedWorker.getValue(), target, needed)); } return leasesToSteal; } else { if (log.isDebugEnabled()) { - log.debug("Worker {} will attempt to steal {} leases from most loaded worker {}. " - + " He has {} leases, target is {}, I need {}, maxLeasesToStealAtOneTime is {}.", + log.debug( + "Worker {} will attempt to steal {} leases from most loaded worker {}. " + + " He has {} leases, target is {}, I need {}, maxLeasesToStealAtOneTime is {}.", workerIdentifier, numLeasesToSteal, mostLoadedWorker.getKey(), diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBMultiStreamLeaseSerializer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBMultiStreamLeaseSerializer.java index 78c9c6c4..66eb51e6 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBMultiStreamLeaseSerializer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/DynamoDBMultiStreamLeaseSerializer.java @@ -15,6 +15,8 @@ package software.amazon.kinesis.leases.dynamodb; +import java.util.Map; + import lombok.NoArgsConstructor; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; import software.amazon.awssdk.services.dynamodb.model.AttributeValueUpdate; @@ -22,8 +24,6 @@ import software.amazon.kinesis.leases.DynamoUtils; import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.MultiStreamLease; -import java.util.Map; - import static software.amazon.kinesis.leases.MultiStreamLease.validateAndCast; @NoArgsConstructor @@ -44,14 +44,13 @@ public class DynamoDBMultiStreamLeaseSerializer 
extends DynamoDBLeaseSerializer @Override public MultiStreamLease fromDynamoRecord(Map dynamoRecord) { - final MultiStreamLease multiStreamLease = (MultiStreamLease) super - .fromDynamoRecord(dynamoRecord, new MultiStreamLease()); + final MultiStreamLease multiStreamLease = + (MultiStreamLease) super.fromDynamoRecord(dynamoRecord, new MultiStreamLease()); multiStreamLease.streamIdentifier(DynamoUtils.safeGetString(dynamoRecord, STREAM_ID_KEY)); multiStreamLease.shardId(DynamoUtils.safeGetString(dynamoRecord, SHARD_ID_KEY)); return multiStreamLease; } - @Override public Map getDynamoUpdateLeaseUpdate(Lease lease) { final MultiStreamLease multiStreamLease = validateAndCast(lease); diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallback.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallback.java index 23022778..631d0473 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallback.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallback.java @@ -35,5 +35,4 @@ public interface TableCreatorCallback { * Input object for table creator */ void performAction(TableCreatorCallbackInput tableCreatorCallbackInput); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallbackInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallbackInput.java index edb31fdc..aadf6f2d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallbackInput.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/dynamodb/TableCreatorCallbackInput.java @@ -34,6 +34,7 @@ import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; public class TableCreatorCallbackInput { @NonNull private final DynamoDbAsyncClient dynamoDbClient; 
+ @NonNull private final String tableName; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/CustomerApplicationException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/CustomerApplicationException.java index 8f2e8149..ed331e61 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/CustomerApplicationException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/CustomerApplicationException.java @@ -19,9 +19,15 @@ package software.amazon.kinesis.leases.exceptions; */ public class CustomerApplicationException extends Exception { - public CustomerApplicationException(Throwable e) { super(e); } + public CustomerApplicationException(Throwable e) { + super(e); + } - public CustomerApplicationException(String message, Throwable e) { super(message, e); } + public CustomerApplicationException(String message, Throwable e) { + super(message, e); + } - public CustomerApplicationException(String message) { super(message); } + public CustomerApplicationException(String message) { + super(message); + } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/DependencyException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/DependencyException.java index 8895d2cc..2cee9d2c 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/DependencyException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/DependencyException.java @@ -30,5 +30,4 @@ public class DependencyException extends LeasingException { public DependencyException(String message, Throwable e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/InvalidStateException.java 
b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/InvalidStateException.java index 416654ae..d43dd222 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/InvalidStateException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/InvalidStateException.java @@ -33,5 +33,4 @@ public class InvalidStateException extends LeasingException { public InvalidStateException(String message) { super(message); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasePendingDeletion.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasePendingDeletion.java index ab81d7ce..9c47a149 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasePendingDeletion.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasePendingDeletion.java @@ -15,6 +15,11 @@ package software.amazon.kinesis.leases.exceptions; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; + import lombok.EqualsAndHashCode; import lombok.Value; import lombok.experimental.Accessors; @@ -23,11 +28,6 @@ import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.ShardDetector; import software.amazon.kinesis.leases.ShardInfo; -import java.util.Set; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeoutException; -import java.util.stream.Collectors; - /** * Helper class for cleaning up leases. 
*/ @@ -49,6 +49,8 @@ public class LeasePendingDeletion { * @throws TimeoutException */ public Set getChildShardsFromService() throws InterruptedException, ExecutionException, TimeoutException { - return shardDetector.getChildShards(shardInfo.shardId()).stream().map(c -> c.shardId()).collect(Collectors.toSet()); + return shardDetector.getChildShards(shardInfo.shardId()).stream() + .map(c -> c.shardId()) + .collect(Collectors.toSet()); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasingException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasingException.java index d2638882..58577525 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasingException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/LeasingException.java @@ -32,5 +32,4 @@ public class LeasingException extends Exception { } private static final long serialVersionUID = 1L; - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ProvisionedThroughputException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ProvisionedThroughputException.java index da4c6ad7..cedb6de7 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ProvisionedThroughputException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ProvisionedThroughputException.java @@ -28,5 +28,4 @@ public class ProvisionedThroughputException extends LeasingException { public ProvisionedThroughputException(String message, Throwable e) { super(message, e); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ShardSyncer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ShardSyncer.java index 792555d2..d095b018 100644 --- 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ShardSyncer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/leases/exceptions/ShardSyncer.java @@ -34,11 +34,20 @@ public class ShardSyncer { * @throws KinesisClientLibIOException */ @Deprecated - public static synchronized void checkAndCreateLeasesForNewShards(@NonNull final ShardDetector shardDetector, - final LeaseRefresher leaseRefresher, final InitialPositionInStreamExtended initialPosition, - final boolean ignoreUnexpectedChildShards, final MetricsScope scope) - throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException, InterruptedException { - HIERARCHICAL_SHARD_SYNCER.checkAndCreateLeaseForNewShards(shardDetector, leaseRefresher, initialPosition, - scope, ignoreUnexpectedChildShards, leaseRefresher.isLeaseTableEmpty()); + public static synchronized void checkAndCreateLeasesForNewShards( + @NonNull final ShardDetector shardDetector, + final LeaseRefresher leaseRefresher, + final InitialPositionInStreamExtended initialPosition, + final boolean ignoreUnexpectedChildShards, + final MetricsScope scope) + throws DependencyException, InvalidStateException, ProvisionedThroughputException, + KinesisClientLibIOException, InterruptedException { + HIERARCHICAL_SHARD_SYNCER.checkAndCreateLeaseForNewShards( + shardDetector, + leaseRefresher, + initialPosition, + scope, + ignoreUnexpectedChildShards, + leaseRefresher.isLeaseTableEmpty()); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTask.java index 5f1ee18c..697172cb 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTask.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTask.java @@ -40,6 +40,7 @@ import 
software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; public class BlockOnParentShardTask implements ConsumerTask { @NonNull private final ShardInfo shardInfo; + private final LeaseRefresher leaseRefresher; // Sleep for this duration if the parent shards have not completed processing, or we encounter an exception. private final long parentShardPollIntervalMillis; @@ -48,7 +49,7 @@ public class BlockOnParentShardTask implements ConsumerTask { /* * (non-Javadoc) - * + * * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#call() */ @Override @@ -94,12 +95,11 @@ public class BlockOnParentShardTask implements ConsumerTask { /* * (non-Javadoc) - * + * * @see com.amazonaws.services.kinesis.clientlibrary.lib.worker.ConsumerTask#taskType() */ @Override public TaskType taskType() { return taskType; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerState.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerState.java index 1a416b65..3aa03b11 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerState.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerState.java @@ -49,7 +49,7 @@ interface ConsumerState { /** * Provides the next state of the consumer if the task failed. This defaults to no state change. - * + * * @return the state to change to upon a task failure */ default ConsumerState failureTransition() { @@ -97,12 +97,11 @@ interface ConsumerState { /** * Indicates whether a state requires an external event to re-awaken for processing. - * + * * @return true if the state is some external event to restart processing, false if events can be immediately * dispatched. 
*/ default boolean requiresAwake() { return false; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerStates.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerStates.java index 07316390..1ef197bd 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerStates.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerStates.java @@ -24,7 +24,7 @@ import software.amazon.kinesis.retrieval.ThrottlingReporter; * and state transitions is contained within the {@link ConsumerState} objects. * *

    State Diagram

    - * + * *
      *       +-------------------+
      *       | Waiting on Parent |                               +------------------+
    @@ -82,7 +82,7 @@ class ConsumerStates {
             SHUTDOWN_REQUESTED(new ShutdownNotificationState()),
             SHUTTING_DOWN(new ShuttingDownState()),
             SHUTDOWN_COMPLETE(new ShutdownCompleteState());
    -        //@formatter:on
    +        // @formatter:on
     
             @Getter
             @Accessors(fluent = true)
    @@ -120,8 +120,10 @@ class ConsumerStates {
         static class BlockedOnParentState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument consumerArgument, ShardConsumer consumer, ProcessRecordsInput input) {
    -            return new BlockOnParentShardTask(consumerArgument.shardInfo(),
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument consumerArgument, ShardConsumer consumer, ProcessRecordsInput input) {
    +            return new BlockOnParentShardTask(
    +                    consumerArgument.shardInfo(),
                         consumerArgument.leaseCoordinator().leaseRefresher(),
                         consumerArgument.parentShardPollIntervalMillis());
             }
    @@ -184,11 +186,14 @@ class ConsumerStates {
         static class InitializingState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    -            return new InitializeTask(argument.shardInfo(),
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    +            return new InitializeTask(
    +                    argument.shardInfo(),
                         argument.shardRecordProcessor(),
                         argument.checkpoint(),
    -                    argument.recordProcessorCheckpointer(), argument.initialPositionInStream(),
    +                    argument.recordProcessorCheckpointer(),
    +                    argument.initialPositionInStream(),
                         argument.recordsPublisher(),
                         argument.taskBackoffTimeMillis(),
                         argument.metricsFactory());
    @@ -244,9 +249,12 @@ class ConsumerStates {
         static class ProcessingState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    -            ThrottlingReporter throttlingReporter = new ThrottlingReporter(5, argument.shardInfo().shardId());
    -            return new ProcessTask(argument.shardInfo(),
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    +            ThrottlingReporter throttlingReporter =
    +                    new ThrottlingReporter(5, argument.shardInfo().shardId());
    +            return new ProcessTask(
    +                    argument.shardInfo(),
                         argument.shardRecordProcessor(),
                         argument.recordProcessorCheckpointer(),
                         argument.taskBackoffTimeMillis(),
    @@ -258,8 +266,7 @@ class ConsumerStates {
                         argument.idleTimeInMilliseconds(),
                         argument.aggregatorUtil(),
                         argument.metricsFactory(),
    -                    argument.schemaRegistryDecoder()
    -            );
    +                    argument.schemaRegistryDecoder());
             }
     
             @Override
    @@ -322,9 +329,11 @@ class ConsumerStates {
         static class ShutdownNotificationState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
                 // TODO: notify shutdownrequested
    -            return new ShutdownNotificationTask(argument.shardRecordProcessor(),
    +            return new ShutdownNotificationTask(
    +                    argument.shardRecordProcessor(),
                         argument.recordProcessorCheckpointer(),
                         consumer.shutdownNotification(),
                         argument.shardInfo());
    @@ -357,7 +366,6 @@ class ConsumerStates {
             public boolean isTerminal() {
                 return false;
             }
    -
         }
     
         /**
    @@ -394,7 +402,8 @@ class ConsumerStates {
         static class ShutdownNotificationCompletionState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
                 return null;
             }
     
    @@ -471,9 +480,11 @@ class ConsumerStates {
         static class ShuttingDownState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
                 // TODO: set shutdown reason
    -            return new ShutdownTask(argument.shardInfo(),
    +            return new ShutdownTask(
    +                    argument.shardInfo(),
                         argument.shardDetector(),
                         argument.shardRecordProcessor(),
                         argument.recordProcessorCheckpointer(),
    @@ -515,7 +526,6 @@ class ConsumerStates {
             public boolean isTerminal() {
                 return false;
             }
    -
         }
     
         /**
    @@ -556,7 +566,8 @@ class ConsumerStates {
         static class ShutdownCompleteState implements ConsumerState {
     
             @Override
    -        public ConsumerTask createTask(ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
    +        public ConsumerTask createTask(
    +                ShardConsumerArgument argument, ShardConsumer consumer, ProcessRecordsInput input) {
                 return null;
             }
     
    @@ -584,7 +595,5 @@ class ConsumerStates {
             public boolean isTerminal() {
                 return true;
             }
    -
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java
    index 2e607661..ef4b4429 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ConsumerTask.java
    @@ -25,7 +25,7 @@ public interface ConsumerTask extends Callable {
         /**
          * Perform task logic.
          * E.g. perform set up (e.g. fetch records) and invoke a callback (e.g. processRecords() API).
    -     * 
    +     *
          * @return TaskResult (captures any exceptions encountered during execution of the task)
          */
         TaskResult call();
    @@ -34,5 +34,4 @@ public interface ConsumerTask extends Callable {
          * @return TaskType
          */
         TaskType taskType();
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/InitializeTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/InitializeTask.java
    index 7816c1e1..705e1247 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/InitializeTask.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/InitializeTask.java
    @@ -44,19 +44,25 @@ public class InitializeTask implements ConsumerTask {
     
         @NonNull
         private final ShardInfo shardInfo;
    +
         @NonNull
         private final ShardRecordProcessor shardRecordProcessor;
    +
         @NonNull
         private final Checkpointer checkpoint;
    +
         @NonNull
         private final ShardRecordProcessorCheckpointer recordProcessorCheckpointer;
    +
         @NonNull
         private final InitialPositionInStreamExtended initialPositionInStream;
    +
         @NonNull
         private final RecordsPublisher cache;
     
         // Back off for this interval if we encounter a problem (exception)
         private final long backoffTimeMillis;
    +
         @NonNull
         private final MetricsFactory metricsFactory;
     
    @@ -78,7 +84,10 @@ public class InitializeTask implements ConsumerTask {
                 final String leaseKey = ShardInfo.getLeaseKey(shardInfo);
                 Checkpoint initialCheckpointObject = checkpoint.getCheckpointObject(leaseKey);
                 ExtendedSequenceNumber initialCheckpoint = initialCheckpointObject.checkpoint();
    -            log.debug("[{}]: Checkpoint: {} -- Initial Position: {}", leaseKey, initialCheckpoint,
    +            log.debug(
    +                    "[{}]: Checkpoint: {} -- Initial Position: {}",
    +                    leaseKey,
    +                    initialCheckpoint,
                         initialPositionInStream);
     
                 cache.start(initialCheckpoint, initialPositionInStream);
    @@ -94,8 +103,8 @@ public class InitializeTask implements ConsumerTask {
                         .pendingCheckpointState(initialCheckpointObject.pendingCheckpointState())
                         .build();
     
    -            final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory,
    -                    INITIALIZE_TASK_OPERATION);
    +            final MetricsScope scope =
    +                    MetricsUtil.createMetricsWithOperation(metricsFactory, INITIALIZE_TASK_OPERATION);
     
                 final long startTime = System.currentTimeMillis();
                 try {
    @@ -137,5 +146,4 @@ public class InitializeTask implements ConsumerTask {
         public TaskType taskType() {
             return taskType;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NoOpTaskExecutionListener.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NoOpTaskExecutionListener.java
    index 85770dbf..04d3d394 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NoOpTaskExecutionListener.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NoOpTaskExecutionListener.java
    @@ -21,11 +21,8 @@ import software.amazon.kinesis.lifecycle.events.TaskExecutionListenerInput;
      */
     public class NoOpTaskExecutionListener implements TaskExecutionListener {
         @Override
    -    public void beforeTaskExecution(TaskExecutionListenerInput input) {
    -    }
    +    public void beforeTaskExecution(TaskExecutionListenerInput input) {}
     
         @Override
    -    public void afterTaskExecution(TaskExecutionListenerInput input) {
    -    }
    +    public void afterTaskExecution(TaskExecutionListenerInput input) {}
     }
    -
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NotifyingSubscriber.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NotifyingSubscriber.java
    index f3599c71..bd69930b 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NotifyingSubscriber.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/NotifyingSubscriber.java
    @@ -17,9 +17,9 @@ package software.amazon.kinesis.lifecycle;
     
     import org.reactivestreams.Subscriber;
     import org.reactivestreams.Subscription;
    +import software.amazon.kinesis.retrieval.RecordsDeliveryAck;
     import software.amazon.kinesis.retrieval.RecordsPublisher;
     import software.amazon.kinesis.retrieval.RecordsRetrieved;
    -import software.amazon.kinesis.retrieval.RecordsDeliveryAck;
     
     /**
      * Subscriber that notifies its publisher on receipt of the onNext event.
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ProcessTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ProcessTask.java
    index fb398cda..39a6bff6 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ProcessTask.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ProcessTask.java
    @@ -28,8 +28,8 @@ import software.amazon.kinesis.leases.ShardDetector;
     import software.amazon.kinesis.leases.ShardInfo;
     import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput;
     import software.amazon.kinesis.metrics.MetricsFactory;
    -import software.amazon.kinesis.metrics.MetricsScope;
     import software.amazon.kinesis.metrics.MetricsLevel;
    +import software.amazon.kinesis.metrics.MetricsScope;
     import software.amazon.kinesis.metrics.MetricsUtil;
     import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.retrieval.AggregatorUtil;
    @@ -66,19 +66,20 @@ public class ProcessTask implements ConsumerTask {
         private final String shardInfoId;
         private final SchemaRegistryDecoder schemaRegistryDecoder;
     
    -    public ProcessTask(@NonNull ShardInfo shardInfo,
    -                       @NonNull ShardRecordProcessor shardRecordProcessor,
    -                       @NonNull ShardRecordProcessorCheckpointer recordProcessorCheckpointer,
    -                       long backoffTimeMillis,
    -                       boolean skipShardSyncAtWorkerInitializationIfLeasesExist,
    -                       ShardDetector shardDetector,
    -                       @NonNull ThrottlingReporter throttlingReporter,
    -                       ProcessRecordsInput processRecordsInput,
    -                       boolean shouldCallProcessRecordsEvenForEmptyRecordList,
    -                       long idleTimeInMilliseconds,
    -                       @NonNull AggregatorUtil aggregatorUtil,
    -                       @NonNull MetricsFactory metricsFactory,
    -                       SchemaRegistryDecoder schemaRegistryDecoder) {
    +    public ProcessTask(
    +            @NonNull ShardInfo shardInfo,
    +            @NonNull ShardRecordProcessor shardRecordProcessor,
    +            @NonNull ShardRecordProcessorCheckpointer recordProcessorCheckpointer,
    +            long backoffTimeMillis,
    +            boolean skipShardSyncAtWorkerInitializationIfLeasesExist,
    +            ShardDetector shardDetector,
    +            @NonNull ThrottlingReporter throttlingReporter,
    +            ProcessRecordsInput processRecordsInput,
    +            boolean shouldCallProcessRecordsEvenForEmptyRecordList,
    +            long idleTimeInMilliseconds,
    +            @NonNull AggregatorUtil aggregatorUtil,
    +            @NonNull MetricsFactory metricsFactory,
    +            SchemaRegistryDecoder schemaRegistryDecoder) {
             this.shardInfo = shardInfo;
             this.shardInfoId = ShardInfo.getLeaseKey(shardInfo);
             this.shardRecordProcessor = shardRecordProcessor;
    @@ -118,10 +119,13 @@ public class ProcessTask implements ConsumerTask {
              * therefore all data added to appScope, although from different shard consumer, will be sent to the same metric,
              * which is the app-level MillsBehindLatest metric.
              */
    -        final MetricsScope appScope = MetricsUtil.createMetricsWithOperation(metricsFactory, APPLICATION_TRACKER_OPERATION);
    +        final MetricsScope appScope =
    +                MetricsUtil.createMetricsWithOperation(metricsFactory, APPLICATION_TRACKER_OPERATION);
             final MetricsScope shardScope = MetricsUtil.createMetricsWithOperation(metricsFactory, PROCESS_TASK_OPERATION);
    -        shardInfo.streamIdentifierSerOpt()
    -                .ifPresent(streamId -> MetricsUtil.addStreamId(shardScope, StreamIdentifier.multiStreamInstance(streamId)));
    +        shardInfo
    +                .streamIdentifierSerOpt()
    +                .ifPresent(streamId ->
    +                        MetricsUtil.addStreamId(shardScope, StreamIdentifier.multiStreamInstance(streamId)));
             MetricsUtil.addShardId(shardScope, shardInfo.shardId());
             long startTimeMillis = System.currentTimeMillis();
             boolean success = false;
    @@ -132,13 +136,20 @@ public class ProcessTask implements ConsumerTask {
     
                 try {
                     if (processRecordsInput.millisBehindLatest() != null) {
    -                    shardScope.addData(MILLIS_BEHIND_LATEST_METRIC, processRecordsInput.millisBehindLatest(),
    -                            StandardUnit.MILLISECONDS, MetricsLevel.SUMMARY);
    -                    appScope.addData(MILLIS_BEHIND_LATEST_METRIC, processRecordsInput.millisBehindLatest(),
    -                            StandardUnit.MILLISECONDS, MetricsLevel.SUMMARY);
    +                    shardScope.addData(
    +                            MILLIS_BEHIND_LATEST_METRIC,
    +                            processRecordsInput.millisBehindLatest(),
    +                            StandardUnit.MILLISECONDS,
    +                            MetricsLevel.SUMMARY);
    +                    appScope.addData(
    +                            MILLIS_BEHIND_LATEST_METRIC,
    +                            processRecordsInput.millisBehindLatest(),
    +                            StandardUnit.MILLISECONDS,
    +                            MetricsLevel.SUMMARY);
                     }
     
    -                if (processRecordsInput.isAtShardEnd() && processRecordsInput.records().isEmpty()) {
    +                if (processRecordsInput.isAtShardEnd()
    +                        && processRecordsInput.records().isEmpty()) {
                         log.info("Reached end of shard {} and have no records to process", shardInfoId);
                         return new TaskResult(null, true);
                     }
    @@ -151,11 +162,14 @@ public class ProcessTask implements ConsumerTask {
                     }
     
                     if (!records.isEmpty()) {
    -                    shardScope.addData(RECORDS_PROCESSED_METRIC, records.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY);
    +                    shardScope.addData(
    +                            RECORDS_PROCESSED_METRIC, records.size(), StandardUnit.COUNT, MetricsLevel.SUMMARY);
                     }
     
                     recordProcessorCheckpointer.largestPermittedCheckpointValue(filterAndGetMaxExtendedSequenceNumber(
    -                        shardScope, records, recordProcessorCheckpointer.lastCheckpointValue(),
    +                        shardScope,
    +                        records,
    +                        recordProcessorCheckpointer.lastCheckpointValue(),
                             recordProcessorCheckpointer.largestPermittedCheckpointValue()));
     
                     if (shouldCallProcessRecords(records)) {
    @@ -169,7 +183,10 @@ public class ProcessTask implements ConsumerTask {
                 }
     
                 if (processRecordsInput.isAtShardEnd()) {
    -                log.info("Reached end of shard {}, and processed {} records", shardInfoId, processRecordsInput.records().size());
    +                log.info(
    +                        "Reached end of shard {}, and processed {} records",
    +                        shardInfoId,
    +                        processRecordsInput.records().size());
                     return new TaskResult(null, true);
                 }
                 return new TaskResult(exception);
    @@ -184,7 +201,10 @@ public class ProcessTask implements ConsumerTask {
             if (shard == null) {
                 return aggregatorUtil.deaggregate(records);
             } else {
    -            return aggregatorUtil.deaggregate(records, shard.hashKeyRange().startingHashKey(), shard.hashKeyRange().endingHashKey());
    +            return aggregatorUtil.deaggregate(
    +                    records,
    +                    shard.hashKeyRange().startingHashKey(),
    +                    shard.hashKeyRange().endingHashKey());
             }
         }
     
    @@ -209,24 +229,30 @@ public class ProcessTask implements ConsumerTask {
          *            the records to be dispatched. It's possible the records have been adjusted by KPL deaggregation.
          */
         private void callProcessRecords(ProcessRecordsInput input, List records) {
    -        log.debug("Calling application processRecords() with {} records from {}", records.size(),
    -                shardInfoId);
    +        log.debug("Calling application processRecords() with {} records from {}", records.size(), shardInfoId);
     
    -        final ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder().records(records)
    -                .cacheExitTime(input.cacheExitTime()).cacheEntryTime(input.cacheEntryTime())
    -                .isAtShardEnd(input.isAtShardEnd()).checkpointer(recordProcessorCheckpointer)
    -                .millisBehindLatest(input.millisBehindLatest()).build();
    +        final ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder()
    +                .records(records)
    +                .cacheExitTime(input.cacheExitTime())
    +                .cacheEntryTime(input.cacheEntryTime())
    +                .isAtShardEnd(input.isAtShardEnd())
    +                .checkpointer(recordProcessorCheckpointer)
    +                .millisBehindLatest(input.millisBehindLatest())
    +                .build();
     
             final MetricsScope scope = MetricsUtil.createMetricsWithOperation(metricsFactory, PROCESS_TASK_OPERATION);
    -        shardInfo.streamIdentifierSerOpt()
    +        shardInfo
    +                .streamIdentifierSerOpt()
                     .ifPresent(streamId -> MetricsUtil.addStreamId(scope, StreamIdentifier.multiStreamInstance(streamId)));
             MetricsUtil.addShardId(scope, shardInfo.shardId());
             final long startTime = System.currentTimeMillis();
             try {
                 shardRecordProcessor.processRecords(processRecordsInput);
             } catch (Exception e) {
    -            log.error("ShardId {}: Application processRecords() threw an exception when processing shard ",
    -                    shardInfoId, e);
    +            log.error(
    +                    "ShardId {}: Application processRecords() threw an exception when processing shard ",
    +                    shardInfoId,
    +                    e);
                 log.error("ShardId {}: Skipping over the following data records: {}", shardInfoId, records);
             } finally {
                 MetricsUtil.addLatency(scope, RECORD_PROCESSOR_PROCESS_RECORDS_METRIC, startTime, MetricsLevel.SUMMARY);
    @@ -264,21 +290,25 @@ public class ProcessTask implements ConsumerTask {
          *            previous largest permitted checkpoint value
          * @return the largest extended sequence number among the retained records
          */
    -    private ExtendedSequenceNumber filterAndGetMaxExtendedSequenceNumber(final MetricsScope scope,
    -                                                                         final List records,
    -                                                                         final ExtendedSequenceNumber lastCheckpointValue,
    -                                                                         final ExtendedSequenceNumber lastLargestPermittedCheckpointValue) {
    +    private ExtendedSequenceNumber filterAndGetMaxExtendedSequenceNumber(
    +            final MetricsScope scope,
    +            final List records,
    +            final ExtendedSequenceNumber lastCheckpointValue,
    +            final ExtendedSequenceNumber lastLargestPermittedCheckpointValue) {
             ExtendedSequenceNumber largestExtendedSequenceNumber = lastLargestPermittedCheckpointValue;
             ListIterator recordIterator = records.listIterator();
             while (recordIterator.hasNext()) {
                 KinesisClientRecord record = recordIterator.next();
    -            ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(record.sequenceNumber(),
    -                    record.subSequenceNumber());
    +            ExtendedSequenceNumber extendedSequenceNumber =
    +                    new ExtendedSequenceNumber(record.sequenceNumber(), record.subSequenceNumber());
     
                 if (extendedSequenceNumber.compareTo(lastCheckpointValue) <= 0) {
                     recordIterator.remove();
    -                log.debug("{} : removing record with ESN {} because the ESN is <= checkpoint ({})", shardInfoId,
    -                        extendedSequenceNumber, lastCheckpointValue);
    +                log.debug(
    +                        "{} : removing record with ESN {} because the ESN is <= checkpoint ({})",
    +                        shardInfoId,
    +                        extendedSequenceNumber,
    +                        lastCheckpointValue);
                     continue;
                 }
     
    @@ -287,10 +317,8 @@ public class ProcessTask implements ConsumerTask {
                     largestExtendedSequenceNumber = extendedSequenceNumber;
                 }
     
    -            scope.addData(DATA_BYTES_PROCESSED_METRIC, record.data().limit(), StandardUnit.BYTES,
    -                    MetricsLevel.SUMMARY);
    +            scope.addData(DATA_BYTES_PROCESSED_METRIC, record.data().limit(), StandardUnit.BYTES, MetricsLevel.SUMMARY);
             }
             return largestExtendedSequenceNumber;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumer.java
    index d1248384..a4c0a1e0 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumer.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumer.java
    @@ -23,15 +23,13 @@ import java.util.concurrent.ExecutorService;
     import java.util.concurrent.RejectedExecutionException;
     import java.util.function.Function;
     
    -import org.reactivestreams.Subscription;
    -
     import com.google.common.annotations.VisibleForTesting;
    -
     import lombok.AccessLevel;
     import lombok.Getter;
     import lombok.NonNull;
     import lombok.experimental.Accessors;
     import lombok.extern.slf4j.Slf4j;
    +import org.reactivestreams.Subscription;
     import software.amazon.kinesis.annotations.KinesisClientInternalApi;
     import software.amazon.kinesis.exceptions.internal.BlockedOnParentShardException;
     import software.amazon.kinesis.leases.ShardInfo;
    @@ -57,6 +55,7 @@ public class ShardConsumer {
         private final ExecutorService executorService;
         private final ShardInfo shardInfo;
         private final ShardConsumerArgument shardConsumerArgument;
    +
         @NonNull
         private final Optional logWarningForTaskAfterMillis;
     
    @@ -86,8 +85,10 @@ public class ShardConsumer {
         private ConsumerState currentState;
     
         private final Object shutdownLock = new Object();
    +
         @Getter(AccessLevel.PUBLIC)
         private volatile ShutdownReason shutdownReason;
    +
         private volatile ShutdownNotification shutdownNotification;
     
         private final ShardConsumerSubscriber subscriber;
    @@ -95,41 +96,85 @@ public class ShardConsumer {
         private ProcessRecordsInput shardEndProcessRecordsInput;
     
         @Deprecated
    -    public ShardConsumer(RecordsPublisher recordsPublisher, ExecutorService executorService, ShardInfo shardInfo,
    -            Optional logWarningForTaskAfterMillis, ShardConsumerArgument shardConsumerArgument,
    +    public ShardConsumer(
    +            RecordsPublisher recordsPublisher,
    +            ExecutorService executorService,
    +            ShardInfo shardInfo,
    +            Optional logWarningForTaskAfterMillis,
    +            ShardConsumerArgument shardConsumerArgument,
                 TaskExecutionListener taskExecutionListener) {
    -        this(recordsPublisher, executorService, shardInfo, logWarningForTaskAfterMillis, shardConsumerArgument,
    +        this(
    +                recordsPublisher,
    +                executorService,
    +                shardInfo,
    +                logWarningForTaskAfterMillis,
    +                shardConsumerArgument,
                     ConsumerStates.INITIAL_STATE,
    -                ShardConsumer.metricsWrappingFunction(shardConsumerArgument.metricsFactory()), 8, taskExecutionListener,
    +                ShardConsumer.metricsWrappingFunction(shardConsumerArgument.metricsFactory()),
    +                8,
    +                taskExecutionListener,
                     LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
         }
     
    -    public ShardConsumer(RecordsPublisher recordsPublisher, ExecutorService executorService, ShardInfo shardInfo,
    -            Optional logWarningForTaskAfterMillis, ShardConsumerArgument shardConsumerArgument,
    -            TaskExecutionListener taskExecutionListener, int readTimeoutsToIgnoreBeforeWarning) {
    -        this(recordsPublisher, executorService, shardInfo, logWarningForTaskAfterMillis, shardConsumerArgument,
    +    public ShardConsumer(
    +            RecordsPublisher recordsPublisher,
    +            ExecutorService executorService,
    +            ShardInfo shardInfo,
    +            Optional logWarningForTaskAfterMillis,
    +            ShardConsumerArgument shardConsumerArgument,
    +            TaskExecutionListener taskExecutionListener,
    +            int readTimeoutsToIgnoreBeforeWarning) {
    +        this(
    +                recordsPublisher,
    +                executorService,
    +                shardInfo,
    +                logWarningForTaskAfterMillis,
    +                shardConsumerArgument,
                     ConsumerStates.INITIAL_STATE,
    -                ShardConsumer.metricsWrappingFunction(shardConsumerArgument.metricsFactory()), 8, taskExecutionListener,
    +                ShardConsumer.metricsWrappingFunction(shardConsumerArgument.metricsFactory()),
    +                8,
    +                taskExecutionListener,
                     readTimeoutsToIgnoreBeforeWarning);
         }
     
         @Deprecated
    -    public ShardConsumer(RecordsPublisher recordsPublisher, ExecutorService executorService, ShardInfo shardInfo,
    -            Optional logWarningForTaskAfterMillis, ShardConsumerArgument shardConsumerArgument,
    -            ConsumerState initialState, Function taskMetricsDecorator, int bufferSize,
    +    public ShardConsumer(
    +            RecordsPublisher recordsPublisher,
    +            ExecutorService executorService,
    +            ShardInfo shardInfo,
    +            Optional logWarningForTaskAfterMillis,
    +            ShardConsumerArgument shardConsumerArgument,
    +            ConsumerState initialState,
    +            Function taskMetricsDecorator,
    +            int bufferSize,
                 TaskExecutionListener taskExecutionListener) {
    -        this(recordsPublisher, executorService, shardInfo, logWarningForTaskAfterMillis, shardConsumerArgument,
    -                initialState, taskMetricsDecorator, bufferSize, taskExecutionListener,
    +        this(
    +                recordsPublisher,
    +                executorService,
    +                shardInfo,
    +                logWarningForTaskAfterMillis,
    +                shardConsumerArgument,
    +                initialState,
    +                taskMetricsDecorator,
    +                bufferSize,
    +                taskExecutionListener,
                     LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
         }
     
         //
         // TODO: Make bufferSize configurable
         //
    -    public ShardConsumer(RecordsPublisher recordsPublisher, ExecutorService executorService, ShardInfo shardInfo,
    -            Optional logWarningForTaskAfterMillis, ShardConsumerArgument shardConsumerArgument,
    -            ConsumerState initialState, Function taskMetricsDecorator, int bufferSize,
    -            TaskExecutionListener taskExecutionListener, int readTimeoutsToIgnoreBeforeWarning) {
    +    public ShardConsumer(
    +            RecordsPublisher recordsPublisher,
    +            ExecutorService executorService,
    +            ShardInfo shardInfo,
    +            Optional logWarningForTaskAfterMillis,
    +            ShardConsumerArgument shardConsumerArgument,
    +            ConsumerState initialState,
    +            Function taskMetricsDecorator,
    +            int bufferSize,
    +            TaskExecutionListener taskExecutionListener,
    +            int readTimeoutsToIgnoreBeforeWarning) {
             this.recordsPublisher = recordsPublisher;
             this.executorService = executorService;
             this.shardInfo = shardInfo;
    @@ -139,8 +184,8 @@ public class ShardConsumer {
             this.taskExecutionListener = taskExecutionListener;
             this.currentState = initialState;
             this.taskMetricsDecorator = taskMetricsDecorator;
    -        subscriber = new ShardConsumerSubscriber(recordsPublisher, executorService, bufferSize, this,
    -                readTimeoutsToIgnoreBeforeWarning);
    +        subscriber = new ShardConsumerSubscriber(
    +                recordsPublisher, executorService, bufferSize, this, readTimeoutsToIgnoreBeforeWarning);
             this.bufferSize = bufferSize;
     
             if (this.shardInfo.isCompleted()) {
    @@ -221,8 +266,10 @@ public class ShardConsumer {
             }
             Throwable dispatchFailure = subscriber.getAndResetDispatchFailure();
             if (dispatchFailure != null) {
    -            log.warn("{} : Exception occurred while dispatching incoming data.  The incoming data has been skipped",
    -                    streamIdentifier, dispatchFailure);
    +            log.warn(
    +                    "{} : Exception occurred while dispatching incoming data.  The incoming data has been skipped",
    +                    streamIdentifier,
    +                    dispatchFailure);
                 return dispatchFailure;
             }
     
    @@ -238,8 +285,9 @@ public class ShardConsumer {
     
         String longRunningTaskMessage(Duration taken) {
             if (taken != null) {
    -            return String.format("Previous %s task still pending for shard %s since %s ago. ", currentTask.taskType(),
    -                    shardInfo.shardId(), taken);
    +            return String.format(
    +                    "Previous %s task still pending for shard %s since %s ago. ",
    +                    currentTask.taskType(), shardInfo.shardId(), taken);
             }
             return null;
         }
    @@ -296,46 +344,52 @@ public class ShardConsumer {
             if (currentState.state() == ConsumerStates.ShardConsumerState.PROCESSING) {
                 return CompletableFuture.completedFuture(true);
             }
    -        return CompletableFuture.supplyAsync(() -> {
    -            if (isShutdownRequested()) {
    -                throw new IllegalStateException("Shutdown requested while initializing");
    -            }
    -            executeTask(null);
    -            if (isShutdownRequested()) {
    -                throw new IllegalStateException("Shutdown requested while initializing");
    -            }
    -            return false;
    -        }, executorService);
    +        return CompletableFuture.supplyAsync(
    +                () -> {
    +                    if (isShutdownRequested()) {
    +                        throw new IllegalStateException("Shutdown requested while initializing");
    +                    }
    +                    executeTask(null);
    +                    if (isShutdownRequested()) {
    +                        throw new IllegalStateException("Shutdown requested while initializing");
    +                    }
    +                    return false;
    +                },
    +                executorService);
         }
     
         @VisibleForTesting
         CompletableFuture shutdownComplete() {
    -        return CompletableFuture.supplyAsync(() -> {
    -            synchronized (this) {
    -                if (taskOutcome != null) {
    -                    updateState(taskOutcome);
    -                } else {
    -                    //
    -                    // ShardConsumer has been asked to shutdown before the first task even had a chance to run.
    -                    // In this case generate a successful task outcome, and allow the shutdown to continue.
    -                    // This should only happen if the lease was lost before the initial state had a chance to run.
    -                    //
    -                    updateState(TaskOutcome.SUCCESSFUL);
    -                }
    -                if (isShutdown()) {
    -                    return true;
    -                }
    +        return CompletableFuture.supplyAsync(
    +                () -> {
    +                    synchronized (this) {
    +                        if (taskOutcome != null) {
    +                            updateState(taskOutcome);
    +                        } else {
    +                            //
    +                            // ShardConsumer has been asked to shutdown before the first task even had a chance to run.
    +                            // In this case generate a successful task outcome, and allow the shutdown to continue.
    +                            // This should only happen if the lease was lost before the initial state had a chance to
    +                            // run.
    +                            //
    +                            updateState(TaskOutcome.SUCCESSFUL);
    +                        }
    +                        if (isShutdown()) {
    +                            return true;
    +                        }
     
    -                executeTask(shardEndProcessRecordsInput);
    +                        executeTask(shardEndProcessRecordsInput);
     
    -                // call shutdownNotification.shutdownComplete() if shutting down as part of gracefulShutdown
    -                if (currentState.state() == ConsumerStates.ShardConsumerState.SHUTTING_DOWN &&
    -                        taskOutcome == TaskOutcome.SUCCESSFUL && shutdownNotification != null) {
    -                    shutdownNotification.shutdownComplete();
    -                }
    -                return false;
    -            }
    -        }, executorService);
    +                        // call shutdownNotification.shutdownComplete() if shutting down as part of gracefulShutdown
    +                        if (currentState.state() == ConsumerStates.ShardConsumerState.SHUTTING_DOWN
    +                                && taskOutcome == TaskOutcome.SUCCESSFUL
    +                                && shutdownNotification != null) {
    +                            shutdownNotification.shutdownComplete();
    +                        }
    +                        return false;
    +                    }
    +                },
    +                executorService);
         }
     
         private synchronized void processData(ProcessRecordsInput input) {
    @@ -344,7 +398,9 @@ public class ShardConsumer {
     
         private synchronized void executeTask(ProcessRecordsInput input) {
             TaskExecutionListenerInput taskExecutionListenerInput = TaskExecutionListenerInput.builder()
    -                .shardInfo(shardInfo).taskType(currentState.taskType()).build();
    +                .shardInfo(shardInfo)
    +                .taskType(currentState.taskType())
    +                .build();
             taskExecutionListener.beforeTaskExecution(taskExecutionListenerInput);
             ConsumerTask task = currentState.createTask(shardConsumerArgument, ShardConsumer.this, input);
             if (task != null) {
    @@ -358,7 +414,9 @@ public class ShardConsumer {
                     taskIsRunning = false;
                 }
                 taskOutcome = resultToOutcome(result);
    -            taskExecutionListenerInput = taskExecutionListenerInput.toBuilder().taskOutcome(taskOutcome).build();
    +            taskExecutionListenerInput = taskExecutionListenerInput.toBuilder()
    +                    .taskOutcome(taskOutcome)
    +                    .build();
             }
             taskExecutionListener.afterTaskExecution(taskExecutionListenerInput);
         }
    @@ -377,19 +435,19 @@ public class ShardConsumer {
         private synchronized void updateState(TaskOutcome outcome) {
             ConsumerState nextState = currentState;
             switch (outcome) {
    -        case SUCCESSFUL:
    -            nextState = currentState.successTransition();
    -            break;
    -        case END_OF_SHARD:
    -            markForShutdown(ShutdownReason.SHARD_END);
    -            break;
    -        case FAILURE:
    -            nextState = currentState.failureTransition();
    -            break;
    -        default:
    -            log.error("{} : No handler for outcome of {}", streamIdentifier, outcome.name());
    -            nextState = currentState.failureTransition();
    -            break;
    +            case SUCCESSFUL:
    +                nextState = currentState.successTransition();
    +                break;
    +            case END_OF_SHARD:
    +                markForShutdown(ShutdownReason.SHARD_END);
    +                break;
    +            case FAILURE:
    +                nextState = currentState.failureTransition();
    +                break;
    +            default:
    +                log.error("{} : No handler for outcome of {}", streamIdentifier, outcome.name());
    +                nextState = currentState.failureTransition();
    +                break;
             }
     
             nextState = handleShutdownTransition(outcome, nextState);
    @@ -411,9 +469,16 @@ public class ShardConsumer {
                 Exception taskException = taskResult.getException();
                 if (taskException instanceof BlockedOnParentShardException) {
                     // No need to log the stack trace for this exception (it is very specific).
    -                log.debug("{} : Shard {} is blocked on completion of parent shard.", streamIdentifier, shardInfo.shardId());
    +                log.debug(
    +                        "{} : Shard {} is blocked on completion of parent shard.",
    +                        streamIdentifier,
    +                        shardInfo.shardId());
                 } else {
    -                log.debug("{} : Caught exception running {} task: ", streamIdentifier, currentTask.taskType(), taskResult.getException());
    +                log.debug(
    +                        "{} : Caught exception running {} task: ",
    +                        streamIdentifier,
    +                        currentTask.taskType(),
    +                        taskResult.getException());
                 }
             }
         }
    @@ -494,5 +559,4 @@ public class ShardConsumer {
                 }
             };
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerArgument.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerArgument.java
    index 0518b830..bc1b9d20 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerArgument.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerArgument.java
    @@ -15,6 +15,8 @@
     
     package software.amazon.kinesis.lifecycle;
     
    +import java.util.concurrent.ExecutorService;
    +
     import lombok.Data;
     import lombok.NonNull;
     import lombok.experimental.Accessors;
    @@ -22,11 +24,11 @@ import software.amazon.kinesis.annotations.KinesisClientInternalApi;
     import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer;
     import software.amazon.kinesis.common.InitialPositionInStreamExtended;
     import software.amazon.kinesis.common.StreamIdentifier;
    +import software.amazon.kinesis.leases.HierarchicalShardSyncer;
     import software.amazon.kinesis.leases.LeaseCleanupManager;
     import software.amazon.kinesis.leases.LeaseCoordinator;
     import software.amazon.kinesis.leases.ShardDetector;
     import software.amazon.kinesis.leases.ShardInfo;
    -import software.amazon.kinesis.leases.HierarchicalShardSyncer;
     import software.amazon.kinesis.metrics.MetricsFactory;
     import software.amazon.kinesis.processor.Checkpointer;
     import software.amazon.kinesis.processor.ShardRecordProcessor;
    @@ -34,28 +36,34 @@ import software.amazon.kinesis.retrieval.AggregatorUtil;
     import software.amazon.kinesis.retrieval.RecordsPublisher;
     import software.amazon.kinesis.schemaregistry.SchemaRegistryDecoder;
     
    -import java.util.concurrent.ExecutorService;
    -
     @Data
     @Accessors(fluent = true)
     @KinesisClientInternalApi
     public class ShardConsumerArgument {
         @NonNull
         private final ShardInfo shardInfo;
    +
         @NonNull
         private final StreamIdentifier streamIdentifier;
    +
         @NonNull
         private final LeaseCoordinator leaseCoordinator;
    +
         @NonNull
         private final ExecutorService executorService;
    +
         @NonNull
         private final RecordsPublisher recordsPublisher;
    +
         @NonNull
         private final ShardRecordProcessor shardRecordProcessor;
    +
         @NonNull
         private final Checkpointer checkpoint;
    +
         @NonNull
         private final ShardRecordProcessorCheckpointer recordProcessorCheckpointer;
    +
         private final long parentShardPollIntervalMillis;
         private final long taskBackoffTimeMillis;
         private final boolean skipShardSyncAtWorkerInitializationIfLeasesExist;
    @@ -63,16 +71,22 @@ public class ShardConsumerArgument {
         private final int maxListShardsRetryAttempts;
         private final boolean shouldCallProcessRecordsEvenForEmptyRecordList;
         private final long idleTimeInMilliseconds;
    +
         @NonNull
         private final InitialPositionInStreamExtended initialPositionInStream;
    +
         private final boolean cleanupLeasesOfCompletedShards;
         private final boolean ignoreUnexpectedChildShards;
    +
         @NonNull
         private final ShardDetector shardDetector;
    +
         private final AggregatorUtil aggregatorUtil;
         private final HierarchicalShardSyncer hierarchicalShardSyncer;
    +
         @NonNull
         private final MetricsFactory metricsFactory;
    +
         private final LeaseCleanupManager leaseCleanupManager;
         private final SchemaRegistryDecoder schemaRegistryDecoder;
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerNotifyingSubscriber.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerNotifyingSubscriber.java
    index 3ef9fc1d..b97c0a1f 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerNotifyingSubscriber.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerNotifyingSubscriber.java
    @@ -18,9 +18,9 @@ package software.amazon.kinesis.lifecycle;
     import lombok.AllArgsConstructor;
     import org.reactivestreams.Subscriber;
     import software.amazon.kinesis.annotations.KinesisClientInternalApi;
    +import software.amazon.kinesis.retrieval.RecordsDeliveryAck;
     import software.amazon.kinesis.retrieval.RecordsPublisher;
     import software.amazon.kinesis.retrieval.RecordsRetrieved;
    -import software.amazon.kinesis.retrieval.RecordsDeliveryAck;
     
     @KinesisClientInternalApi
     @AllArgsConstructor
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerShutdownNotification.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerShutdownNotification.java
    index c4065049..1fe9fe2b 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerShutdownNotification.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerShutdownNotification.java
    @@ -38,7 +38,7 @@ public class ShardConsumerShutdownNotification implements ShutdownNotification {
     
         /**
          * Creates a new shutdown request object.
    -     * 
    +     *
          * @param leaseCoordinator
          *            the lease coordinator used to drop leases from once the initial shutdown request is completed.
          * @param lease
    @@ -50,10 +50,11 @@ public class ShardConsumerShutdownNotification implements ShutdownNotification {
          * @param shutdownCompleteLatch
          *            used to inform the caller once the record processor is fully shutdown
          */
    -    public ShardConsumerShutdownNotification(final LeaseCoordinator leaseCoordinator,
    -                                             final Lease lease,
    -                                             final CountDownLatch notificationCompleteLatch,
    -                                             final CountDownLatch shutdownCompleteLatch) {
    +    public ShardConsumerShutdownNotification(
    +            final LeaseCoordinator leaseCoordinator,
    +            final Lease lease,
    +            final CountDownLatch notificationCompleteLatch,
    +            final CountDownLatch shutdownCompleteLatch) {
             this.leaseCoordinator = leaseCoordinator;
             this.lease = lease;
             this.notificationCompleteLatch = notificationCompleteLatch;
    @@ -85,5 +86,4 @@ public class ShardConsumerShutdownNotification implements ShutdownNotification {
             shutdownCompleteLatch.countDown();
             allNotificationCompleted = true;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriber.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriber.java
    index 81528c4e..e61a351c 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriber.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriber.java
    @@ -14,6 +14,10 @@
      */
     package software.amazon.kinesis.lifecycle;
     
    +import java.time.Duration;
    +import java.time.Instant;
    +import java.util.concurrent.ExecutorService;
    +
     import com.google.common.annotations.VisibleForTesting;
     import io.reactivex.rxjava3.core.Flowable;
     import io.reactivex.rxjava3.core.Scheduler;
    @@ -29,10 +33,6 @@ import software.amazon.kinesis.retrieval.RecordsPublisher;
     import software.amazon.kinesis.retrieval.RecordsRetrieved;
     import software.amazon.kinesis.retrieval.RetryableRetrievalException;
     
    -import java.time.Duration;
    -import java.time.Instant;
    -import java.util.concurrent.ExecutorService;
    -
     @Slf4j
     @Accessors(fluent = true)
     class ShardConsumerSubscriber implements Subscriber {
    @@ -43,6 +43,7 @@ class ShardConsumerSubscriber implements Subscriber {
         private final int readTimeoutsToIgnoreBeforeWarning;
         private final String shardInfoId;
         private volatile int readTimeoutSinceLastRead = 0;
    +
         @VisibleForTesting
         final Object lockObject = new Object();
         // This holds the last time an attempt of request to upstream service was made including the first try to
    @@ -51,21 +52,36 @@ class ShardConsumerSubscriber implements Subscriber {
         private RecordsRetrieved lastAccepted = null;
     
         private Subscription subscription;
    +
         @Getter
         private volatile Instant lastDataArrival;
    +
         @Getter
         private volatile Throwable dispatchFailure;
    +
         @Getter(AccessLevel.PACKAGE)
         private volatile Throwable retrievalFailure;
     
         @Deprecated
    -    ShardConsumerSubscriber(RecordsPublisher recordsPublisher, ExecutorService executorService, int bufferSize,
    -                            ShardConsumer shardConsumer) {
    -        this(recordsPublisher, executorService, bufferSize, shardConsumer, LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
    +    ShardConsumerSubscriber(
    +            RecordsPublisher recordsPublisher,
    +            ExecutorService executorService,
    +            int bufferSize,
    +            ShardConsumer shardConsumer) {
    +        this(
    +                recordsPublisher,
    +                executorService,
    +                bufferSize,
    +                shardConsumer,
    +                LifecycleConfig.DEFAULT_READ_TIMEOUTS_TO_IGNORE);
         }
     
    -    ShardConsumerSubscriber(RecordsPublisher recordsPublisher, ExecutorService executorService, int bufferSize,
    -            ShardConsumer shardConsumer, int readTimeoutsToIgnoreBeforeWarning) {
    +    ShardConsumerSubscriber(
    +            RecordsPublisher recordsPublisher,
    +            ExecutorService executorService,
    +            int bufferSize,
    +            ShardConsumer shardConsumer,
    +            int readTimeoutsToIgnoreBeforeWarning) {
             this.recordsPublisher = recordsPublisher;
             this.scheduler = Schedulers.from(executorService);
             this.bufferSize = bufferSize;
    @@ -82,7 +98,9 @@ class ShardConsumerSubscriber implements Subscriber {
                 if (lastAccepted != null) {
                     recordsPublisher.restartFrom(lastAccepted);
                 }
    -            Flowable.fromPublisher(recordsPublisher).subscribeOn(scheduler).observeOn(scheduler, true, bufferSize)
    +            Flowable.fromPublisher(recordsPublisher)
    +                    .subscribeOn(scheduler)
    +                    .observeOn(scheduler, true, bufferSize)
                         .subscribe(new ShardConsumerNotifyingSubscriber(this, recordsPublisher));
             }
         }
    @@ -107,8 +125,8 @@ class ShardConsumerSubscriber implements Subscriber {
             Throwable oldFailure = null;
             if (retrievalFailure != null) {
                 synchronized (lockObject) {
    -                String logMessage = String.format("%s: Failure occurred in retrieval.  Restarting data requests",
    -                        shardInfoId);
    +                String logMessage =
    +                        String.format("%s: Failure occurred in retrieval.  Restarting data requests", shardInfoId);
                     if (retrievalFailure instanceof RetryableRetrievalException) {
                         log.debug(logMessage, retrievalFailure.getCause());
                     } else {
    @@ -133,7 +151,11 @@ class ShardConsumerSubscriber implements Subscriber {
                                 // CHECKSTYLE.OFF: LineLength
                                 "{}: Last request was dispatched at {}, but no response as of {} ({}).  Cancelling subscription, and restarting. Last successful request details -- {}",
                                 // CHECKSTYLE.ON: LineLength
    -                            shardInfoId, lastRequestTime, now, timeSinceLastResponse, recordsPublisher.getLastSuccessfulRequestDetails());
    +                            shardInfoId,
    +                            lastRequestTime,
    +                            now,
    +                            timeSinceLastResponse,
    +                            recordsPublisher.getLastSuccessfulRequestDetails());
                         cancel();
     
                         // Start the subscription again which will update the lastRequestTime as well.
    @@ -156,7 +178,10 @@ class ShardConsumerSubscriber implements Subscriber {
                     lastRequestTime = null;
                 }
                 lastDataArrival = Instant.now();
    -            shardConsumer.handleInput(input.processRecordsInput().toBuilder().cacheExitTime(Instant.now()).build(),
    +            shardConsumer.handleInput(
    +                    input.processRecordsInput().toBuilder()
    +                            .cacheExitTime(Instant.now())
    +                            .build(),
                         subscription);
     
             } catch (Throwable t) {
    @@ -196,23 +221,27 @@ class ShardConsumerSubscriber implements Subscriber {
             log.warn(
                     "{}: onError().  Cancelling subscription, and marking self as failed. KCL will "
                             + "recreate the subscription as necessary to continue processing. Last successful request details -- {}",
    -                shardInfoId, recordsPublisher.getLastSuccessfulRequestDetails(), t);
    +                shardInfoId,
    +                recordsPublisher.getLastSuccessfulRequestDetails(),
    +                t);
         }
     
         protected void logOnErrorReadTimeoutWarning(Throwable t) {
    -        log.warn("{}: onError().  Cancelling subscription, and marking self as failed. KCL will"
    -                + " recreate the subscription as necessary to continue processing. If you"
    -                + " are seeing this warning frequently consider increasing the SDK timeouts"
    -                + " by providing an OverrideConfiguration to the kinesis client. Alternatively you"
    -                + " can configure LifecycleConfig.readTimeoutsToIgnoreBeforeWarning to suppress"
    -                + " intermittent ReadTimeout warnings. Last successful request details -- {}",
    -                shardInfoId, recordsPublisher.getLastSuccessfulRequestDetails(), t);
    +        log.warn(
    +                "{}: onError().  Cancelling subscription, and marking self as failed. KCL will"
    +                        + " recreate the subscription as necessary to continue processing. If you"
    +                        + " are seeing this warning frequently consider increasing the SDK timeouts"
    +                        + " by providing an OverrideConfiguration to the kinesis client. Alternatively you"
    +                        + " can configure LifecycleConfig.readTimeoutsToIgnoreBeforeWarning to suppress"
    +                        + " intermittent ReadTimeout warnings. Last successful request details -- {}",
    +                shardInfoId,
    +                recordsPublisher.getLastSuccessfulRequestDetails(),
    +                t);
         }
     
         @Override
         public void onComplete() {
    -        log.debug("{}: onComplete(): Received onComplete.  Activity should be triggered externally",
    -                shardInfoId);
    +        log.debug("{}: onComplete(): Received onComplete.  Activity should be triggered externally", shardInfoId);
         }
     
         public void cancel() {
    @@ -220,5 +249,4 @@ class ShardConsumerSubscriber implements Subscriber {
                 subscription.cancel();
             }
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownInput.java
    index 5757b32a..3885728f 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownInput.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownInput.java
    @@ -19,8 +19,8 @@ import lombok.EqualsAndHashCode;
     import lombok.Getter;
     import lombok.ToString;
     import lombok.experimental.Accessors;
    -import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.processor.RecordProcessorCheckpointer;
    +import software.amazon.kinesis.processor.ShardRecordProcessor;
     
     /**
      * Container for the parameters to the IRecordProcessor's
    @@ -50,5 +50,4 @@ public class ShutdownInput {
          * @return The checkpointer object that the record processor should use to checkpoint
          */
         private final RecordProcessorCheckpointer checkpointer;
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotificationTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotificationTask.java
    index 8b29d4df..5356cd23 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotificationTask.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownNotificationTask.java
    @@ -33,14 +33,16 @@ public class ShutdownNotificationTask implements ConsumerTask {
         private final ShardRecordProcessor shardRecordProcessor;
         private final RecordProcessorCheckpointer recordProcessorCheckpointer;
         private final ShutdownNotification shutdownNotification;
    -//    TODO: remove if not used
    +    //    TODO: remove if not used
         private final ShardInfo shardInfo;
     
         @Override
         public TaskResult call() {
             try {
                 try {
    -                shardRecordProcessor.shutdownRequested(ShutdownRequestedInput.builder().checkpointer(recordProcessorCheckpointer).build());
    +                shardRecordProcessor.shutdownRequested(ShutdownRequestedInput.builder()
    +                        .checkpointer(recordProcessorCheckpointer)
    +                        .build());
                 } catch (Exception ex) {
                     return new TaskResult(ex);
                 }
    @@ -55,5 +57,4 @@ public class ShutdownNotificationTask implements ConsumerTask {
         public TaskType taskType() {
             return TaskType.SHUTDOWN_NOTIFICATION;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownReason.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownReason.java
    index 4a07ed7d..0be5a706 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownReason.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownReason.java
    @@ -21,7 +21,6 @@ import software.amazon.kinesis.processor.ShardRecordProcessor;
     
     import static software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState;
     
    -
     /**
      * Reason the ShardRecordProcessor is being shutdown.
      * Used to distinguish between a fail-over vs. a termination (shard is closed and all records have been delivered).
    @@ -55,6 +54,7 @@ public enum ShutdownReason {
         REQUESTED(1, ShardConsumerState.SHUTDOWN_REQUESTED.consumerState());
     
         private final int rank;
    +
         @Getter(AccessLevel.PACKAGE)
         @Accessors(fluent = true)
         private final ConsumerState shutdownState;
    @@ -66,7 +66,7 @@ public enum ShutdownReason {
     
         /**
          * Indicates whether the given reason can override the current reason.
    -     * 
    +     *
          * @param reason the reason to transition to
          * @return true if the transition is allowed, false if it's not.
          */
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownTask.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownTask.java
    index d1e978a7..4059719f 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownTask.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/ShutdownTask.java
    @@ -14,12 +14,14 @@
      */
     package software.amazon.kinesis.lifecycle;
     
    -import com.google.common.annotations.VisibleForTesting;
    -
     import java.util.List;
     import java.util.Objects;
     import java.util.Optional;
    +import java.util.Random;
    +import java.util.Set;
    +import java.util.stream.Collectors;
     
    +import com.google.common.annotations.VisibleForTesting;
     import lombok.NonNull;
     import lombok.RequiredArgsConstructor;
     import lombok.extern.slf4j.Slf4j;
    @@ -40,8 +42,8 @@ import software.amazon.kinesis.leases.ShardInfo;
     import software.amazon.kinesis.leases.UpdateField;
     import software.amazon.kinesis.leases.exceptions.CustomerApplicationException;
     import software.amazon.kinesis.leases.exceptions.DependencyException;
    -import software.amazon.kinesis.leases.exceptions.LeasePendingDeletion;
     import software.amazon.kinesis.leases.exceptions.InvalidStateException;
    +import software.amazon.kinesis.leases.exceptions.LeasePendingDeletion;
     import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException;
     import software.amazon.kinesis.lifecycle.events.LeaseLostInput;
     import software.amazon.kinesis.lifecycle.events.ShardEndedInput;
    @@ -53,10 +55,6 @@ import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.retrieval.RecordsPublisher;
     import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;
     
    -import java.util.Random;
    -import java.util.Set;
    -import java.util.stream.Collectors;
    -
     /**
      * Task for invoking the ShardRecordProcessor shutdown() callback.
      */
    @@ -70,7 +68,8 @@ public class ShutdownTask implements ConsumerTask {
         /**
          * Reusable, immutable {@link LeaseLostInput}.
          */
    -    private static final LeaseLostInput LEASE_LOST_INPUT = LeaseLostInput.builder().build();
    +    private static final LeaseLostInput LEASE_LOST_INPUT =
    +            LeaseLostInput.builder().build();
     
         private static final Random RANDOM = new Random();
     
    @@ -79,33 +78,46 @@ public class ShutdownTask implements ConsumerTask {
     
         @NonNull
         private final ShardInfo shardInfo;
    +
         @NonNull
         private final ShardDetector shardDetector;
    +
         @NonNull
         private final ShardRecordProcessor shardRecordProcessor;
    +
         @NonNull
         private final ShardRecordProcessorCheckpointer recordProcessorCheckpointer;
    +
         @NonNull
         private final ShutdownReason reason;
    +
         @NonNull
         private final InitialPositionInStreamExtended initialPositionInStream;
    +
         private final boolean cleanupLeasesOfCompletedShards;
         private final boolean ignoreUnexpectedChildShards;
    +
         @NonNull
         private final LeaseCoordinator leaseCoordinator;
    +
         private final long backoffTimeMillis;
    +
         @NonNull
         private final RecordsPublisher recordsPublisher;
    +
         @NonNull
         private final HierarchicalShardSyncer hierarchicalShardSyncer;
    +
         @NonNull
         private final MetricsFactory metricsFactory;
     
         private final TaskType taskType = TaskType.SHUTDOWN;
     
         private final List childShards;
    +
         @NonNull
         private final StreamIdentifier streamIdentifier;
    +
         @NonNull
         private final LeaseCleanupManager leaseCleanupManager;
     
    @@ -123,8 +135,12 @@ public class ShutdownTask implements ConsumerTask {
             final String leaseKey = ShardInfo.getLeaseKey(shardInfo);
             try {
                 try {
    -                log.debug("Invoking shutdown() for shard {} with childShards {}, concurrencyToken {}. Shutdown reason: {}",
    -                        leaseKey, childShards, shardInfo.concurrencyToken(), reason);
    +                log.debug(
    +                        "Invoking shutdown() for shard {} with childShards {}, concurrencyToken {}. Shutdown reason: {}",
    +                        leaseKey,
    +                        childShards,
    +                        shardInfo.concurrencyToken(),
    +                        reason);
     
                     final long startTime = System.currentTimeMillis();
                     final Lease currentShardLease = leaseCoordinator.getCurrentlyHeldLease(leaseKey);
    @@ -137,9 +153,11 @@ public class ShutdownTask implements ConsumerTask {
                             // If InvalidStateException happens, it indicates we have a non recoverable error in short term.
                             // In this scenario, we should shutdown the shardConsumer with LEASE_LOST reason to allow
                             // other worker to take the lease and retry shutting down.
    -                        log.warn("Lease {}: Invalid state encountered while shutting down shardConsumer with SHARD_END reason. " +
    -                                "Dropping the lease and shutting down shardConsumer using LEASE_LOST reason.",
    -                                leaseKey, e);
    +                        log.warn(
    +                                "Lease {}: Invalid state encountered while shutting down shardConsumer with SHARD_END reason. "
    +                                        + "Dropping the lease and shutting down shardConsumer using LEASE_LOST reason.",
    +                                leaseKey,
    +                                e);
                             dropLease(currentShardLease, leaseKey);
                             throwOnApplicationException(leaseKey, leaseLostAction, scope, startTime);
                         }
    @@ -174,26 +192,27 @@ public class ShutdownTask implements ConsumerTask {
         }
     
         // Involves persisting child shard info, attempt to checkpoint and enqueueing lease for cleanup.
    -    private void takeShardEndAction(Lease currentShardLease,
    -            final String leaseKey, MetricsScope scope, long startTime)
    +    private void takeShardEndAction(Lease currentShardLease, final String leaseKey, MetricsScope scope, long startTime)
                 throws DependencyException, ProvisionedThroughputException, InvalidStateException,
    -            CustomerApplicationException {
    +                    CustomerApplicationException {
             // Create new lease for the child shards if they don't exist.
    -        // We have one valid scenario that shutdown task got created with SHARD_END reason and an empty list of childShards.
    -        // This would happen when KinesisDataFetcher(for polling mode) or FanOutRecordsPublisher(for StoS mode) catches ResourceNotFound exception.
    +        // We have one valid scenario that shutdown task got created with SHARD_END reason and an empty list of
    +        // childShards.
    +        // This would happen when KinesisDataFetcher(for polling mode) or FanOutRecordsPublisher(for StoS mode) catches
    +        // ResourceNotFound exception.
             // In this case, KinesisDataFetcher and FanOutRecordsPublisher will send out SHARD_END signal to trigger a
             // shutdown task with empty list of childShards.
             // This scenario could happen when customer deletes the stream while leaving the KCL application running.
             if (currentShardLease == null) {
    -            throw new InvalidStateException(leaseKey
    -                    + " : Lease not owned by the current worker. Leaving ShardEnd handling to new owner.");
    +            throw new InvalidStateException(
    +                    leaseKey + " : Lease not owned by the current worker. Leaving ShardEnd handling to new owner.");
             }
             if (!CollectionUtils.isNullOrEmpty(childShards)) {
                 createLeasesForChildShardsIfNotExist(scope);
                 updateLeaseWithChildShards(currentShardLease);
             }
    -        final LeasePendingDeletion leasePendingDeletion = new LeasePendingDeletion(streamIdentifier, currentShardLease,
    -                shardInfo, shardDetector);
    +        final LeasePendingDeletion leasePendingDeletion =
    +                new LeasePendingDeletion(streamIdentifier, currentShardLease, shardInfo, shardDetector);
             if (!leaseCleanupManager.isEnqueuedForDeletion(leasePendingDeletion)) {
                 boolean isSuccess = false;
                 try {
    @@ -212,39 +231,43 @@ public class ShutdownTask implements ConsumerTask {
     
         private boolean attemptShardEndCheckpointing(final String leaseKey, MetricsScope scope, long startTime)
                 throws DependencyException, ProvisionedThroughputException, InvalidStateException,
    -            CustomerApplicationException {
    -        final Lease leaseFromDdb = Optional.ofNullable(leaseCoordinator.leaseRefresher().getLease(leaseKey))
    +                    CustomerApplicationException {
    +        final Lease leaseFromDdb = Optional.ofNullable(
    +                        leaseCoordinator.leaseRefresher().getLease(leaseKey))
                     .orElseThrow(() -> new InvalidStateException("Lease for shard " + leaseKey + " does not exist."));
             if (!leaseFromDdb.checkpoint().equals(ExtendedSequenceNumber.SHARD_END)) {
                 // Call the shardRecordsProcessor to checkpoint with SHARD_END sequence number.
                 // The shardEnded is implemented by customer. We should validate if the SHARD_END checkpointing is
                 // successful after calling shardEnded.
    -            throwOnApplicationException(leaseKey, () -> applicationCheckpointAndVerification(leaseKey),
    -                    scope, startTime);
    +            throwOnApplicationException(
    +                    leaseKey, () -> applicationCheckpointAndVerification(leaseKey), scope, startTime);
             }
             return true;
         }
     
         private void applicationCheckpointAndVerification(final String leaseKey) {
    -        recordProcessorCheckpointer
    -                .sequenceNumberAtShardEnd(recordProcessorCheckpointer.largestPermittedCheckpointValue());
    +        recordProcessorCheckpointer.sequenceNumberAtShardEnd(
    +                recordProcessorCheckpointer.largestPermittedCheckpointValue());
             recordProcessorCheckpointer.largestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END);
    -        shardRecordProcessor.shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build());
    +        shardRecordProcessor.shardEnded(ShardEndedInput.builder()
    +                .checkpointer(recordProcessorCheckpointer)
    +                .build());
             final ExtendedSequenceNumber lastCheckpointValue = recordProcessorCheckpointer.lastCheckpointValue();
             if (!ExtendedSequenceNumber.SHARD_END.equals(lastCheckpointValue)) {
                 throw new IllegalArgumentException("Application didn't checkpoint at end of shard "
    -                    + leaseKey + ". Application must checkpoint upon shard end. " +
    -                    "See ShardRecordProcessor.shardEnded javadocs for more information.");
    +                    + leaseKey + ". Application must checkpoint upon shard end. "
    +                    + "See ShardRecordProcessor.shardEnded javadocs for more information.");
             }
         }
     
    -    private void throwOnApplicationException(final String leaseKey, Runnable action, MetricsScope metricsScope,
    -            final long startTime)
    +    private void throwOnApplicationException(
    +            final String leaseKey, Runnable action, MetricsScope metricsScope, final long startTime)
                 throws CustomerApplicationException {
             try {
                 action.run();
             } catch (Exception e) {
    -            throw new CustomerApplicationException("Customer application throws exception for shard " + leaseKey + ": ", e);
    +            throw new CustomerApplicationException(
    +                    "Customer application throws exception for shard " + leaseKey + ": ", e);
             } finally {
                 MetricsUtil.addLatency(metricsScope, RECORD_PROCESSOR_SHUTDOWN_METRIC, startTime, MetricsLevel.SUMMARY);
             }
    @@ -259,7 +282,8 @@ public class ShutdownTask implements ConsumerTask {
             if (childShards.size() == 1) {
                 final ChildShard childShard = childShards.get(0);
                 final List parentLeaseKeys = childShard.parentShards().stream()
    -                    .map(parentShardId -> ShardInfo.getLeaseKey(shardInfo, parentShardId)).collect(Collectors.toList());
    +                    .map(parentShardId -> ShardInfo.getLeaseKey(shardInfo, parentShardId))
    +                    .collect(Collectors.toList());
                 if (parentLeaseKeys.size() != 2) {
                     MetricsUtil.addCount(scope, "MissingMergeParent", 1, MetricsLevel.SUMMARY);
                     throw new InvalidStateException("Shard " + shardInfo.shardId() + "'s only child shard " + childShard
    @@ -270,9 +294,10 @@ public class ShutdownTask implements ConsumerTask {
                 final Lease parentLease1 = leaseRefresher.getLease(parentLeaseKeys.get(1));
                 if (Objects.isNull(parentLease0) != Objects.isNull(parentLease1)) {
                     MetricsUtil.addCount(scope, "MissingMergeParentLease", 1, MetricsLevel.SUMMARY);
    -                final String message = "Shard " + shardInfo.shardId() + "'s only child shard " + childShard +
    -                        " has partial parent information in lease table: [parent0=" + parentLease0 +
    -                        ", parent1=" + parentLease1 + "]. Hence deferring lease creation of child shard.";
    +                final String message = "Shard " + shardInfo.shardId() + "'s only child shard " + childShard
    +                        + " has partial parent information in lease table: [parent0="
    +                        + parentLease0 + ", parent1="
    +                        + parentLease1 + "]. Hence deferring lease creation of child shard.";
                     if (isOneInNProbability(RETRY_RANDOM_MAX_RANGE)) {
                         // abort further attempts and drop the lease; lease will
                         // be reassigned
    @@ -288,9 +313,13 @@ public class ShutdownTask implements ConsumerTask {
             for (ChildShard childShard : childShards) {
                 final String leaseKey = ShardInfo.getLeaseKey(shardInfo, childShard.shardId());
                 if (leaseRefresher.getLease(leaseKey) == null) {
    -                log.debug("{} - Shard {} - Attempting to create lease for child shard {}",
    -                        shardDetector.streamIdentifier(), shardInfo.shardId(), leaseKey);
    -                final Lease leaseToCreate = hierarchicalShardSyncer.createLeaseForChildShard(childShard, shardDetector.streamIdentifier());
    +                log.debug(
    +                        "{} - Shard {} - Attempting to create lease for child shard {}",
    +                        shardDetector.streamIdentifier(),
    +                        shardInfo.shardId(),
    +                        leaseKey);
    +                final Lease leaseToCreate =
    +                        hierarchicalShardSyncer.createLeaseForChildShard(childShard, shardDetector.streamIdentifier());
                     final long startTime = System.currentTimeMillis();
                     boolean success = false;
                     try {
    @@ -299,13 +328,18 @@ public class ShutdownTask implements ConsumerTask {
                     } finally {
                         MetricsUtil.addSuccessAndLatency(scope, "CreateLease", success, startTime, MetricsLevel.DETAILED);
                         if (leaseToCreate.checkpoint() != null) {
    -                        final String metricName = leaseToCreate.checkpoint().isSentinelCheckpoint() ?
    -                                leaseToCreate.checkpoint().sequenceNumber() : "SEQUENCE_NUMBER";
    +                        final String metricName = leaseToCreate.checkpoint().isSentinelCheckpoint()
    +                                ? leaseToCreate.checkpoint().sequenceNumber()
    +                                : "SEQUENCE_NUMBER";
                             MetricsUtil.addSuccess(scope, "CreateLease_" + metricName, true, MetricsLevel.DETAILED);
                         }
                     }
     
    -                log.info("{} - Shard {}: Created child shard lease: {}", shardDetector.streamIdentifier(), shardInfo.shardId(), leaseToCreate);
    +                log.info(
    +                        "{} - Shard {}: Created child shard lease: {}",
    +                        shardDetector.streamIdentifier(),
    +                        shardInfo.shardId(),
    +                        leaseToCreate);
                 }
             }
         }
    @@ -320,12 +354,17 @@ public class ShutdownTask implements ConsumerTask {
     
         private void updateLeaseWithChildShards(Lease currentLease)
                 throws DependencyException, InvalidStateException, ProvisionedThroughputException {
    -        Set childShardIds = childShards.stream().map(ChildShard::shardId).collect(Collectors.toSet());
    +        Set childShardIds =
    +                childShards.stream().map(ChildShard::shardId).collect(Collectors.toSet());
     
             final Lease updatedLease = currentLease.copy();
             updatedLease.childShardIds(childShardIds);
             leaseCoordinator.leaseRefresher().updateLeaseWithMetaInfo(updatedLease, UpdateField.CHILD_SHARDS);
    -        log.info("Shard {}: Updated current lease {} with child shard information: {}", shardInfo.shardId(), currentLease.leaseKey(), childShardIds);
    +        log.info(
    +                "Shard {}: Updated current lease {} with child shard information: {}",
    +                shardInfo.shardId(),
    +                currentLease.leaseKey(),
    +                childShardIds);
         }
     
         /*
    @@ -345,7 +384,9 @@ public class ShutdownTask implements ConsumerTask {
     
         private void dropLease(Lease currentLease, final String leaseKey) {
             if (currentLease == null) {
    -            log.warn("Shard {}: Unable to find the lease for shard. Will shutdown the shardConsumer directly.", leaseKey);
    +            log.warn(
    +                    "Shard {}: Unable to find the lease for shard. Will shutdown the shardConsumer directly.",
    +                    leaseKey);
             } else {
                 leaseCoordinator.dropLease(currentLease);
                 log.info("Dropped lease for shutting down ShardConsumer: " + currentLease.leaseKey());
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskResult.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskResult.java
    index b061faa4..a60ed071 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskResult.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/TaskResult.java
    @@ -69,5 +69,4 @@ public class TaskResult {
             this.exception = e;
             this.shardEndReached = isShardEndReached;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/LeaseLostInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/LeaseLostInput.java
    index 877b0a80..8981a0c0 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/LeaseLostInput.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/LeaseLostInput.java
    @@ -33,5 +33,4 @@ import software.amazon.kinesis.processor.ShardRecordProcessor;
     @Builder
     @EqualsAndHashCode
     @ToString
    -public class LeaseLostInput {
    -}
    +public class LeaseLostInput {}
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ProcessRecordsInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ProcessRecordsInput.java
    index 1ce9239b..8cbb9bac 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ProcessRecordsInput.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ProcessRecordsInput.java
    @@ -24,8 +24,8 @@ import lombok.Getter;
     import lombok.ToString;
     import lombok.experimental.Accessors;
     import software.amazon.awssdk.services.kinesis.model.ChildShard;
    -import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.processor.RecordProcessorCheckpointer;
    +import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.retrieval.KinesisClientRecord;
     
     /**
    @@ -76,7 +76,7 @@ public class ProcessRecordsInput {
     
         /**
          * How long the records spent waiting to be dispatched to the {@link ShardRecordProcessor}
    -     * 
    +     *
          * @return the amount of time that records spent waiting before processing.
          */
         public Duration timeSpentInCache() {
    @@ -85,5 +85,4 @@ public class ProcessRecordsInput {
             }
             return Duration.between(cacheEntryTime, cacheExitTime);
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShardEndedInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShardEndedInput.java
    index 407c9880..5ae07a6e 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShardEndedInput.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/ShardEndedInput.java
    @@ -20,8 +20,8 @@ import lombok.EqualsAndHashCode;
     import lombok.Getter;
     import lombok.ToString;
     import lombok.experimental.Accessors;
    -import software.amazon.kinesis.processor.ShardRecordProcessor;
     import software.amazon.kinesis.processor.RecordProcessorCheckpointer;
    +import software.amazon.kinesis.processor.ShardRecordProcessor;
     
     /**
      * Provides a checkpointer that must be used to signal the completion of the shard to the Scheduler.
    @@ -41,5 +41,4 @@ public class ShardEndedInput {
          * shutdown until a successful checkpoint occurs.
          */
         private final RecordProcessorCheckpointer checkpointer;
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/TaskExecutionListenerInput.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/TaskExecutionListenerInput.java
    index 4c01fde3..7ef8f674 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/TaskExecutionListenerInput.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/lifecycle/events/TaskExecutionListenerInput.java
    @@ -18,9 +18,9 @@ import lombok.Builder;
     import lombok.Data;
     import lombok.experimental.Accessors;
     import software.amazon.kinesis.leases.ShardInfo;
    +import software.amazon.kinesis.lifecycle.TaskExecutionListener;
     import software.amazon.kinesis.lifecycle.TaskOutcome;
     import software.amazon.kinesis.lifecycle.TaskType;
    -import software.amazon.kinesis.lifecycle.TaskExecutionListener;
     
     /**
      * Container for the parameters to the TaskExecutionListener's
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulateByNameMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulateByNameMetricsScope.java
    index a293521e..226064d1 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulateByNameMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulateByNameMetricsScope.java
    @@ -18,12 +18,10 @@ package software.amazon.kinesis.metrics;
      * This is a MetricScope with a KeyType of String. It provides the implementation of
      * getting the key based off of the String KeyType.
      */
    -
     public abstract class AccumulateByNameMetricsScope extends AccumulatingMetricsScope {
     
         @Override
         protected String getKey(String name) {
             return name;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulatingMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulatingMetricsScope.java
    index 31891ecf..5ff88525 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulatingMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/AccumulatingMetricsScope.java
    @@ -25,10 +25,10 @@ import software.amazon.awssdk.services.cloudwatch.model.StatisticSet;
      * An IMetricsScope that accumulates data from multiple calls to addData with
      * the same name parameter. It tracks min, max, sample count, and sum for each
      * named metric.
    - * 
    + *
      * @param  can be a class or object defined by the user that stores information about a MetricDatum needed
      *        by the user.
    - * 
    + *
      *        The following is a example of what a KeyType class might look like:
      *        class SampleKeyType {
      *              private long timeKeyCreated;
    @@ -61,9 +61,9 @@ public abstract class AccumulatingMetricsScope extends EndingMetricsSco
         protected abstract KeyType getKey(String name);
     
         /**
    -     * Adds data points to an IMetricsScope. Multiple calls to IMetricsScopes that have the 
    +     * Adds data points to an IMetricsScope. Multiple calls to IMetricsScopes that have the
          * same key will have their data accumulated.
    -     * 
    +     *
          * @param key
          *        data point key
          * @param name
    @@ -79,9 +79,15 @@ public abstract class AccumulatingMetricsScope extends EndingMetricsSco
             final MetricDatum datum = data.get(key);
             final MetricDatum metricDatum;
             if (datum == null) {
    -            metricDatum = MetricDatum.builder().metricName(name).unit(unit)
    -                    .statisticValues(
    -                            StatisticSet.builder().maximum(value).minimum(value).sampleCount(1.0).sum(value).build())
    +            metricDatum = MetricDatum.builder()
    +                    .metricName(name)
    +                    .unit(unit)
    +                    .statisticValues(StatisticSet.builder()
    +                            .maximum(value)
    +                            .minimum(value)
    +                            .sampleCount(1.0)
    +                            .sum(value)
    +                            .build())
                         .build();
             } else {
                 if (!datum.unit().equals(unit)) {
    @@ -91,8 +97,10 @@ public abstract class AccumulatingMetricsScope extends EndingMetricsSco
                 final StatisticSet oldStatisticSet = datum.statisticValues();
                 final StatisticSet statisticSet = oldStatisticSet.toBuilder()
                         .maximum(Math.max(value, oldStatisticSet.maximum()))
    -                    .minimum(Math.min(value, oldStatisticSet.minimum())).sampleCount(oldStatisticSet.sampleCount() + 1)
    -                    .sum(oldStatisticSet.sum() + value).build();
    +                    .minimum(Math.min(value, oldStatisticSet.minimum()))
    +                    .sampleCount(oldStatisticSet.sampleCount() + 1)
    +                    .sum(oldStatisticSet.sum() + value)
    +                    .build();
     
                 metricDatum = datum.toBuilder().statisticValues(statisticSet).build();
             }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricKey.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricKey.java
    index e52893cd..21fc919a 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricKey.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricKey.java
    @@ -21,7 +21,7 @@ import software.amazon.awssdk.services.cloudwatch.model.Dimension;
     import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;
     
     /**
    - * A representation of a key of a MetricDatum. This class is useful when wanting to compare 
    + * A representation of a key of a MetricDatum. This class is useful when wanting to compare
      * whether 2 keys have the same MetricDatum. This feature will be used in MetricAccumulatingQueue
      * where we aggregate metrics across multiple MetricScopes.
      */
    @@ -29,11 +29,10 @@ public class CloudWatchMetricKey {
     
         private List dimensions;
         private String metricName;
    -    
    +
         /**
          * @param datum data point
          */
    -
         public CloudWatchMetricKey(MetricDatum datum) {
             this.dimensions = datum.dimensions();
             this.metricName = datum.metricName();
    @@ -58,5 +57,4 @@ public class CloudWatchMetricKey {
             CloudWatchMetricKey other = (CloudWatchMetricKey) obj;
             return Objects.equals(other.dimensions, dimensions) && Objects.equals(other.metricName, metricName);
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsFactory.java
    index 59e5cac1..11d77359 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsFactory.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsFactory.java
    @@ -17,7 +17,6 @@ package software.amazon.kinesis.metrics;
     import java.util.Set;
     
     import com.google.common.collect.ImmutableSet;
    -
     import lombok.NonNull;
     import software.amazon.awssdk.core.exception.AbortedException;
     import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
    @@ -33,6 +32,7 @@ public class CloudWatchMetricsFactory implements MetricsFactory {
          * immediately instead of waiting for the next scheduled call.
          */
         private final CloudWatchPublisherRunnable runnable;
    +
         private final Thread publicationThread;
     
         /**
    @@ -62,16 +62,20 @@ public class CloudWatchMetricsFactory implements MetricsFactory {
          * @param flushSize
          *            size of batch that can be published
          */
    -    public CloudWatchMetricsFactory(@NonNull final CloudWatchAsyncClient cloudWatchClient,
    -            @NonNull final String namespace, final long bufferTimeMillis, final int maxQueueSize,
    -            @NonNull final MetricsLevel metricsLevel, @NonNull final Set metricsEnabledDimensions,
    +    public CloudWatchMetricsFactory(
    +            @NonNull final CloudWatchAsyncClient cloudWatchClient,
    +            @NonNull final String namespace,
    +            final long bufferTimeMillis,
    +            final int maxQueueSize,
    +            @NonNull final MetricsLevel metricsLevel,
    +            @NonNull final Set metricsEnabledDimensions,
                 final int flushSize) {
             this.metricsLevel = metricsLevel;
    -        this.metricsEnabledDimensions = (metricsEnabledDimensions == null ? ImmutableSet.of()
    -                : ImmutableSet.copyOf(metricsEnabledDimensions));
    +        this.metricsEnabledDimensions =
    +                (metricsEnabledDimensions == null ? ImmutableSet.of() : ImmutableSet.copyOf(metricsEnabledDimensions));
     
    -        runnable = new CloudWatchPublisherRunnable(new CloudWatchMetricsPublisher(cloudWatchClient, namespace),
    -                bufferTimeMillis, maxQueueSize, flushSize);
    +        runnable = new CloudWatchPublisherRunnable(
    +                new CloudWatchMetricsPublisher(cloudWatchClient, namespace), bufferTimeMillis, maxQueueSize, flushSize);
             publicationThread = new Thread(runnable);
             publicationThread.setName("cw-metrics-publisher");
             publicationThread.start();
    @@ -90,5 +94,4 @@ public class CloudWatchMetricsFactory implements MetricsFactory {
                 throw AbortedException.builder().message(e.getMessage()).cause(e).build();
             }
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisher.java
    index 24a6e464..82ad1876 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisher.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisher.java
    @@ -14,6 +14,12 @@
      */
     package software.amazon.kinesis.metrics;
     
    +import java.util.ArrayList;
    +import java.util.List;
    +import java.util.concurrent.CompletableFuture;
    +import java.util.concurrent.ExecutionException;
    +import java.util.concurrent.TimeoutException;
    +
     import lombok.extern.slf4j.Slf4j;
     import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
     import software.amazon.awssdk.services.cloudwatch.model.CloudWatchException;
    @@ -21,12 +27,6 @@ import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;
     import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest;
     import software.amazon.kinesis.retrieval.AWSExceptionManager;
     
    -import java.util.ArrayList;
    -import java.util.List;
    -import java.util.concurrent.CompletableFuture;
    -import java.util.concurrent.ExecutionException;
    -import java.util.concurrent.TimeoutException;
    -
     import static java.util.concurrent.TimeUnit.MILLISECONDS;
     
     /**
    @@ -38,6 +38,7 @@ public class CloudWatchMetricsPublisher {
         private static final int BATCH_SIZE = 20;
         private static final int PUT_TIMEOUT_MILLIS = 5000;
         private static final AWSExceptionManager CW_EXCEPTION_MANAGER = new AWSExceptionManager();
    +
         static {
             CW_EXCEPTION_MANAGER.add(CloudWatchException.class, t -> t);
         }
    @@ -71,9 +72,11 @@ public class CloudWatchMetricsPublisher {
                 try {
                     PutMetricDataRequest.Builder finalRequest = request;
                     // This needs to be blocking. Making it asynchronous leads to increased throttling.
    -                blockingExecute(cloudWatchAsyncClient.putMetricData(finalRequest.build()), PUT_TIMEOUT_MILLIS,
    +                blockingExecute(
    +                        cloudWatchAsyncClient.putMetricData(finalRequest.build()),
    +                        PUT_TIMEOUT_MILLIS,
                             CW_EXCEPTION_MANAGER);
    -            } catch(CloudWatchException | TimeoutException e) {
    +            } catch (CloudWatchException | TimeoutException e) {
                     log.warn("Could not publish {} datums to CloudWatch", endIndex - startIndex, e);
                 } catch (Exception e) {
                     log.error("Unknown exception while publishing {} datums to CloudWatch", endIndex - startIndex, e);
    @@ -81,8 +84,9 @@ public class CloudWatchMetricsPublisher {
             }
         }
     
    -    private static  void blockingExecute(CompletableFuture future, long timeOutMillis,
    -            AWSExceptionManager exceptionManager) throws TimeoutException {
    +    private static  void blockingExecute(
    +            CompletableFuture future, long timeOutMillis, AWSExceptionManager exceptionManager)
    +            throws TimeoutException {
             try {
                 future.get(timeOutMillis, MILLISECONDS);
             } catch (ExecutionException e) {
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsScope.java
    index c2d38526..0346d9a1 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchMetricsScope.java
    @@ -18,7 +18,6 @@ import java.util.List;
     import java.util.Set;
     import java.util.stream.Collectors;
     
    -
     /**
      * Metrics scope for CloudWatch metrics.
      */
    @@ -32,8 +31,8 @@ public class CloudWatchMetricsScope extends FilteringMetricsScope implements Met
          * @param metricsLevel Metrics level to enable. All data with level below this will be dropped.
          * @param metricsEnabledDimensions Enabled dimensions for CloudWatch metrics.
          */
    -    public CloudWatchMetricsScope(CloudWatchPublisherRunnable publisher,
    -                                  MetricsLevel metricsLevel, Set metricsEnabledDimensions) {
    +    public CloudWatchMetricsScope(
    +            CloudWatchPublisherRunnable publisher, MetricsLevel metricsLevel, Set metricsEnabledDimensions) {
             super(metricsLevel, metricsEnabledDimensions);
             this.publisher = publisher;
         }
    @@ -49,11 +48,11 @@ public class CloudWatchMetricsScope extends FilteringMetricsScope implements Met
             super.end();
     
             final List> dataWithKeys = data.values().stream()
    -                .map(metricDatum -> metricDatum.toBuilder().dimensions(getDimensions()).build())
    +                .map(metricDatum ->
    +                        metricDatum.toBuilder().dimensions(getDimensions()).build())
                     .map(metricDatum -> new MetricDatumWithKey<>(new CloudWatchMetricKey(metricDatum), metricDatum))
                     .collect(Collectors.toList());
     
             publisher.enqueue(dataWithKeys);
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnable.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnable.java
    index aeb40b45..5bb41c73 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnable.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnable.java
    @@ -43,27 +43,26 @@ public class CloudWatchPublisherRunnable implements Runnable {
     
         /**
          * Constructor.
    -     * 
    +     *
          * @param metricsPublisher publishes metrics
          * @param bufferTimeMillis time between publishing metrics
          * @param maxQueueSize max size of metrics to publish
          * @param batchSize size of batch that can be published at a time
          */
    -
    -    public CloudWatchPublisherRunnable(CloudWatchMetricsPublisher metricsPublisher,
    -                                       long bufferTimeMillis,
    -                                       int maxQueueSize,
    -                                       int batchSize) {
    +    public CloudWatchPublisherRunnable(
    +            CloudWatchMetricsPublisher metricsPublisher, long bufferTimeMillis, int maxQueueSize, int batchSize) {
             this(metricsPublisher, bufferTimeMillis, maxQueueSize, batchSize, 0);
         }
     
    -    public CloudWatchPublisherRunnable(CloudWatchMetricsPublisher metricsPublisher,
    -                                       long bufferTimeMillis,
    -                                       int maxQueueSize,
    -                                       int batchSize,
    -                                       int maxJitter) {
    +    public CloudWatchPublisherRunnable(
    +            CloudWatchMetricsPublisher metricsPublisher,
    +            long bufferTimeMillis,
    +            int maxQueueSize,
    +            int batchSize,
    +            int maxJitter) {
             if (log.isDebugEnabled()) {
    -            log.debug("Constructing CloudWatchPublisherRunnable with maxBufferTimeMillis {} maxQueueSize {} batchSize {} maxJitter {}",
    +            log.debug(
    +                    "Constructing CloudWatchPublisherRunnable with maxBufferTimeMillis {} maxQueueSize {} batchSize {} maxJitter {}",
                         bufferTimeMillis,
                         maxQueueSize,
                         batchSize,
    @@ -98,7 +97,7 @@ public class CloudWatchPublisherRunnable implements Runnable {
             synchronized (queue) {
                 /*
                  * We should send if:
    -             * 
    +             *
                  * it's been maxBufferTimeMillis since our last send
                  * or if the queue contains > batchSize elements
                  * or if we're shutting down
    @@ -121,8 +120,7 @@ public class CloudWatchPublisherRunnable implements Runnable {
                 } else {
                     long waitTime = bufferTimeMillis - timeSinceFlush;
                     if (log.isDebugEnabled()) {
    -                    log.debug("Waiting up to {} ms for {} more datums to appear.", waitTime, flushSize
    -                            - queue.size());
    +                    log.debug("Waiting up to {} ms for {} more datums to appear.", waitTime, flushSize - queue.size());
                     }
     
                     try {
    @@ -169,7 +167,7 @@ public class CloudWatchPublisherRunnable implements Runnable {
     
         /**
          * Enqueues metric data for publication.
    -     * 
    +     *
          * @param data collection of MetricDatum to enqueue
          */
         public void enqueue(Collection> data) {
    @@ -197,5 +195,4 @@ public class CloudWatchPublisherRunnable implements Runnable {
                 queue.notify();
             }
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/DimensionTrackingMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/DimensionTrackingMetricsScope.java
    index 9b6390ad..708be460 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/DimensionTrackingMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/DimensionTrackingMetricsScope.java
    @@ -14,25 +14,23 @@
      */
     package software.amazon.kinesis.metrics;
     
    -import software.amazon.awssdk.services.cloudwatch.model.Dimension;
    -
     import java.util.HashSet;
     import java.util.Set;
     
    +import software.amazon.awssdk.services.cloudwatch.model.Dimension;
     
     /**
      * DimensionTrackingMetricsScope is where we provide functionality for dimensions.
      * Dimensions allow the user to be able view their metrics based off of the parameters they specify.
    - * 
    + *
      * The following examples show how to add dimensions if they would like to view their all metrics
      * pertaining to a particular stream or for a specific date.
    - * 
    + *
      * myScope.addDimension("StreamName", "myStreamName");
      * myScope.addDimension("Date", "Dec012013");
    - * 
    - * 
    + *
    + *
      */
    -
     public abstract class DimensionTrackingMetricsScope implements MetricsScope {
     
         private Set dimensions = new HashSet<>();
    @@ -45,9 +43,7 @@ public abstract class DimensionTrackingMetricsScope implements MetricsScope {
         /**
          * @return a set of dimensions for an IMetricsScope
          */
    -
         protected Set getDimensions() {
             return dimensions;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/FilteringMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/FilteringMetricsScope.java
    index 96849850..7a118af9 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/FilteringMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/FilteringMetricsScope.java
    @@ -54,10 +54,10 @@ public class FilteringMetricsScope extends AccumulateByNameMetricsScope {
          * @param metricsEnabledDimensions Enabled dimensions.
          */
         public FilteringMetricsScope(MetricsLevel metricsLevel, Set metricsEnabledDimensions) {
    -          this.metricsLevel = metricsLevel;
    -          this.metricsEnabledDimensions = metricsEnabledDimensions;
    -          this.metricsEnabledDimensionsAll = (metricsEnabledDimensions != null &&
    -                  metricsEnabledDimensions.contains(METRICS_DIMENSIONS_ALL));
    +        this.metricsLevel = metricsLevel;
    +        this.metricsEnabledDimensions = metricsEnabledDimensions;
    +        this.metricsEnabledDimensionsAll =
    +                (metricsEnabledDimensions != null && metricsEnabledDimensions.contains(METRICS_DIMENSIONS_ALL));
         }
     
         /**
    @@ -95,8 +95,8 @@ public class FilteringMetricsScope extends AccumulateByNameMetricsScope {
          */
         @Override
         public void addDimension(String name, String value) {
    -        if (!metricsEnabledDimensionsAll &&
    -                (metricsEnabledDimensions == null || !metricsEnabledDimensions.contains(name))) {
    +        if (!metricsEnabledDimensionsAll
    +                && (metricsEnabledDimensions == null || !metricsEnabledDimensions.contains(name))) {
                 // Drop dimension.
                 return;
             }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/InterceptingMetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/InterceptingMetricsFactory.java
    index d05b61ad..795b1f25 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/InterceptingMetricsFactory.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/InterceptingMetricsFactory.java
    @@ -14,7 +14,6 @@
      */
     package software.amazon.kinesis.metrics;
     
    -
     import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
     
     public abstract class InterceptingMetricsFactory implements MetricsFactory {
    @@ -40,7 +39,8 @@ public abstract class InterceptingMetricsFactory implements MetricsFactory {
             scope.addData(name, value, unit);
         }
     
    -    protected void interceptAddData(String name, double value, StandardUnit unit, MetricsLevel level, MetricsScope scope) {
    +    protected void interceptAddData(
    +            String name, double value, StandardUnit unit, MetricsLevel level, MetricsScope scope) {
             scope.addData(name, value, unit, level);
         }
     
    @@ -79,7 +79,5 @@ public abstract class InterceptingMetricsFactory implements MetricsFactory {
             public void end() {
                 interceptEnd(other);
             }
    -
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsFactory.java
    index 8b879be0..5c0f3ca9 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsFactory.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsFactory.java
    @@ -23,5 +23,4 @@ public class LogMetricsFactory implements MetricsFactory {
         public LogMetricsScope createMetrics() {
             return new LogMetricsScope();
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsScope.java
    index 16b86f3d..8879c574 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/LogMetricsScope.java
    @@ -14,8 +14,6 @@
      */
     package software.amazon.kinesis.metrics;
     
    -
    -
     import lombok.extern.slf4j.Slf4j;
     import software.amazon.awssdk.services.cloudwatch.model.Dimension;
     import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;
    @@ -41,7 +39,8 @@ public class LogMetricsScope extends AccumulateByNameMetricsScope {
     
             for (MetricDatum datum : data.values()) {
                 StatisticSet statistics = datum.statisticValues();
    -            output.append(String.format("Name=%25s\tMin=%.2f\tMax=%.2f\tCount=%.2f\tSum=%.2f\tAvg=%.2f\tUnit=%s\n",
    +            output.append(String.format(
    +                    "Name=%25s\tMin=%.2f\tMax=%.2f\tCount=%.2f\tSum=%.2f\tAvg=%.2f\tUnit=%s\n",
                         datum.metricName(),
                         statistics.minimum(),
                         statistics.maximum(),
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricAccumulatingQueue.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricAccumulatingQueue.java
    index 603558d0..b9b89e1b 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricAccumulatingQueue.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricAccumulatingQueue.java
    @@ -24,13 +24,12 @@ import java.util.concurrent.LinkedBlockingQueue;
     import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;
     import software.amazon.awssdk.services.cloudwatch.model.StatisticSet;
     
    -
     /**
      * Helper class for accumulating MetricDatums with the same name and dimensions.
    - * 
    + *
      * @param  can be a class or object defined by the user that stores information about a MetricDatum needed
      *        by the user.
    - * 
    + *
      *        The following is a example of what a KeyType class might look like:
      *        class SampleKeyType {
      *              private long timeKeyCreated;
    @@ -75,7 +74,7 @@ public class MetricAccumulatingQueue {
         /**
          * We use a queue and a map in this method. The reason for this is because, the queue will keep our metrics in
          * FIFO order and the map will provide us with constant time lookup to get the appropriate MetricDatum.
    -     * 
    +     *
          * @param key metric key to be inserted into queue
          * @param datum metric to be inserted into queue
          * @return a boolean depending on whether the datum was inserted into the queue
    @@ -106,10 +105,12 @@ public class MetricAccumulatingQueue {
             StatisticSet oldStats = oldDatum.statisticValues();
             StatisticSet newStats = newDatum.statisticValues();
     
    -        StatisticSet statisticSet = oldStats.toBuilder().sum(oldStats.sum() + newStats.sum())
    +        StatisticSet statisticSet = oldStats.toBuilder()
    +                .sum(oldStats.sum() + newStats.sum())
                     .minimum(Math.min(oldStats.minimum(), newStats.minimum()))
                     .maximum(Math.max(oldStats.maximum(), newStats.maximum()))
    -                .sampleCount(oldStats.sampleCount() + newStats.sampleCount()).build();
    +                .sampleCount(oldStats.sampleCount() + newStats.sampleCount())
    +                .build();
     
             MetricDatum datum = oldDatum.toBuilder().statisticValues(statisticSet).build();
             metricDatumWithKey.datum(datum);
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricDatumWithKey.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricDatumWithKey.java
    index e94c8730..3e542496 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricDatumWithKey.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricDatumWithKey.java
    @@ -14,17 +14,17 @@
      */
     package software.amazon.kinesis.metrics;
     
    +import java.util.Objects;
    +
     import lombok.AllArgsConstructor;
     import lombok.Setter;
     import lombok.experimental.Accessors;
     import software.amazon.awssdk.services.cloudwatch.model.MetricDatum;
     
    -import java.util.Objects;
    -
     /**
      * This class is used to store a MetricDatum as well as KeyType which stores specific information about
      * that particular MetricDatum.
    - * 
    + *
      * @param  is a class that stores information about a MetricDatum. This is useful
      *        to compare MetricDatums, aggregate similar MetricDatums or store information about a datum
      *        that may be relevant to the user (i.e. MetricName, CustomerId, TimeStamp, etc).
    @@ -69,5 +69,4 @@ public class MetricDatumWithKey {
             MetricDatumWithKey other = (MetricDatumWithKey) obj;
             return Objects.equals(other.key, key) && Objects.equals(other.datum, datum);
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsCollectingTaskDecorator.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsCollectingTaskDecorator.java
    index 224562a4..7cc679a4 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsCollectingTaskDecorator.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsCollectingTaskDecorator.java
    @@ -30,7 +30,7 @@ public class MetricsCollectingTaskDecorator implements ConsumerTask {
     
         /**
          * Constructor.
    -     * 
    +     *
          * @param other
          *            task to report metrics on
          * @param factory
    @@ -46,14 +46,15 @@ public class MetricsCollectingTaskDecorator implements ConsumerTask {
          */
         @Override
         public TaskResult call() {
    -        MetricsScope scope = MetricsUtil.createMetricsWithOperation(factory, other.getClass().getSimpleName());
    +        MetricsScope scope =
    +                MetricsUtil.createMetricsWithOperation(factory, other.getClass().getSimpleName());
             TaskResult result = null;
             final long startTimeMillis = System.currentTimeMillis();
             try {
                 result = other.call();
             } finally {
    -            MetricsUtil.addSuccessAndLatency(scope, result != null && result.getException() == null, startTimeMillis,
    -                    MetricsLevel.SUMMARY);
    +            MetricsUtil.addSuccessAndLatency(
    +                    scope, result != null && result.getException() == null, startTimeMillis, MetricsLevel.SUMMARY);
                 MetricsUtil.endScope(scope);
             }
             return result;
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsConfig.java
    index 2027192f..b97bfb10 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsConfig.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsConfig.java
    @@ -19,7 +19,6 @@ import java.util.HashSet;
     import java.util.Set;
     
     import com.google.common.collect.ImmutableSet;
    -
     import lombok.Data;
     import lombok.experimental.Accessors;
     import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient;
    @@ -33,14 +32,16 @@ public class MetricsConfig {
         /**
          * Metrics dimensions that always will be enabled regardless of the config provided by user.
          */
    -    public static final Set METRICS_ALWAYS_ENABLED_DIMENSIONS = ImmutableSet
    -            .of(MetricsUtil.OPERATION_DIMENSION_NAME);
    +    public static final Set METRICS_ALWAYS_ENABLED_DIMENSIONS =
    +            ImmutableSet.of(MetricsUtil.OPERATION_DIMENSION_NAME);
     
         /**
          * Allowed dimensions for CloudWatch metrics. By default, worker ID dimension will be disabled.
          */
    -    public static final Set METRICS_ALWAYS_ENABLED_DIMENSIONS_WITH_SHARD_ID = ImmutableSet. builder()
    -            .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS).add(MetricsUtil.SHARD_ID_DIMENSION_NAME).build();
    +    public static final Set METRICS_ALWAYS_ENABLED_DIMENSIONS_WITH_SHARD_ID = ImmutableSet.builder()
    +            .addAll(METRICS_ALWAYS_ENABLED_DIMENSIONS)
    +            .add(MetricsUtil.SHARD_ID_DIMENSION_NAME)
    +            .build();
     
         /**
          * Metrics dimensions that signify all possible dimensions.
    @@ -110,8 +111,14 @@ public class MetricsConfig {
     
         public MetricsFactory metricsFactory() {
             if (metricsFactory == null) {
    -            metricsFactory = new CloudWatchMetricsFactory(cloudWatchClient(), namespace(), metricsBufferTimeMillis(),
    -                    metricsMaxQueueSize(), metricsLevel(), metricsEnabledDimensions(), publisherFlushBuffer());
    +            metricsFactory = new CloudWatchMetricsFactory(
    +                    cloudWatchClient(),
    +                    namespace(),
    +                    metricsBufferTimeMillis(),
    +                    metricsMaxQueueSize(),
    +                    metricsLevel(),
    +                    metricsEnabledDimensions(),
    +                    publisherFlushBuffer());
             }
             return metricsFactory;
         }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsScope.java
    index 2efabcd1..ed18d45c 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsScope.java
    @@ -30,7 +30,7 @@ public interface MetricsScope {
         /**
          * Adds a data point to this IMetricsScope. Multiple calls against the same IMetricsScope with the same name
          * parameter will result in accumulation.
    -     * 
    +     *
          * @param name data point name
          * @param value data point value
          * @param unit unit of data point
    @@ -40,7 +40,7 @@ public interface MetricsScope {
         /**
          * Adds a data point to this IMetricsScope if given metrics level is enabled. Multiple calls against the same
          * IMetricsScope with the same name parameter will result in accumulation.
    -     * 
    +     *
          * @param name data point name
          * @param value data point value
          * @param unit unit of data point
    @@ -50,7 +50,7 @@ public interface MetricsScope {
     
         /**
          * Adds a dimension that applies to all metrics in this IMetricsScope.
    -     * 
    +     *
          * @param name dimension name
          * @param value dimension value
          */
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsUtil.java
    index 20c7c244..8098109b 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsUtil.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/MetricsUtil.java
    @@ -15,9 +15,8 @@
     
     package software.amazon.kinesis.metrics;
     
    -import org.apache.commons.lang3.StringUtils;
    -
     import lombok.NonNull;
    +import org.apache.commons.lang3.StringUtils;
     import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
     import software.amazon.kinesis.common.StreamIdentifier;
     
    @@ -36,8 +35,8 @@ public class MetricsUtil {
             return createMetricScope(metricsFactory, null);
         }
     
    -    public static MetricsScope createMetricsWithOperation(@NonNull final MetricsFactory metricsFactory,
    -                                                          @NonNull final String operation) {
    +    public static MetricsScope createMetricsWithOperation(
    +            @NonNull final MetricsFactory metricsFactory, @NonNull final String operation) {
             return createMetricScope(metricsFactory, operation);
         }
     
    @@ -58,44 +57,60 @@ public class MetricsUtil {
                     .ifPresent(acc -> addOperation(metricsScope, STREAM_IDENTIFIER, streamId.serialize()));
         }
     
    -    public static void addWorkerIdentifier(@NonNull final MetricsScope metricsScope,
    -            @NonNull final String workerIdentifier) {
    +    public static void addWorkerIdentifier(
    +            @NonNull final MetricsScope metricsScope, @NonNull final String workerIdentifier) {
             addOperation(metricsScope, WORKER_IDENTIFIER_DIMENSION, workerIdentifier);
         }
     
    -    public static void addOperation(@NonNull final MetricsScope metricsScope, @NonNull final String dimension,
    -                                    @NonNull final String value) {
    +    public static void addOperation(
    +            @NonNull final MetricsScope metricsScope, @NonNull final String dimension, @NonNull final String value) {
             metricsScope.addDimension(dimension, value);
         }
     
    -    public static void addSuccessAndLatency(@NonNull final MetricsScope metricsScope, final boolean success,
    -                                            final long startTime, @NonNull final MetricsLevel metricsLevel) {
    +    public static void addSuccessAndLatency(
    +            @NonNull final MetricsScope metricsScope,
    +            final boolean success,
    +            final long startTime,
    +            @NonNull final MetricsLevel metricsLevel) {
             addSuccessAndLatency(metricsScope, null, success, startTime, metricsLevel);
         }
     
    -    public static void addSuccessAndLatency(@NonNull final MetricsScope metricsScope, final String dimension,
    -                                            final boolean success, final long startTime, @NonNull final MetricsLevel metricsLevel) {
    +    public static void addSuccessAndLatency(
    +            @NonNull final MetricsScope metricsScope,
    +            final String dimension,
    +            final boolean success,
    +            final long startTime,
    +            @NonNull final MetricsLevel metricsLevel) {
             addSuccess(metricsScope, dimension, success, metricsLevel);
             addLatency(metricsScope, dimension, startTime, metricsLevel);
         }
     
    -    public static void addLatency(@NonNull final MetricsScope metricsScope, final String dimension,
    -                                  final long startTime, @NonNull final MetricsLevel metricsLevel) {
    -        final String metricName = StringUtils.isEmpty(dimension) ? TIME_METRIC
    -                : String.format("%s.%s", dimension, TIME_METRIC);
    -        metricsScope.addData(metricName, System.currentTimeMillis() - startTime, StandardUnit.MILLISECONDS,
    -                metricsLevel);
    +    public static void addLatency(
    +            @NonNull final MetricsScope metricsScope,
    +            final String dimension,
    +            final long startTime,
    +            @NonNull final MetricsLevel metricsLevel) {
    +        final String metricName =
    +                StringUtils.isEmpty(dimension) ? TIME_METRIC : String.format("%s.%s", dimension, TIME_METRIC);
    +        metricsScope.addData(
    +                metricName, System.currentTimeMillis() - startTime, StandardUnit.MILLISECONDS, metricsLevel);
         }
     
    -    public static void addSuccess(@NonNull final MetricsScope metricsScope, final String dimension,
    -                                  final boolean success, @NonNull final MetricsLevel metricsLevel) {
    -        final String metricName = StringUtils.isEmpty(dimension) ? SUCCESS_METRIC
    -                : String.format("%s.%s", dimension, SUCCESS_METRIC);
    +    public static void addSuccess(
    +            @NonNull final MetricsScope metricsScope,
    +            final String dimension,
    +            final boolean success,
    +            @NonNull final MetricsLevel metricsLevel) {
    +        final String metricName =
    +                StringUtils.isEmpty(dimension) ? SUCCESS_METRIC : String.format("%s.%s", dimension, SUCCESS_METRIC);
             metricsScope.addData(metricName, success ? 1 : 0, StandardUnit.COUNT, metricsLevel);
         }
     
    -    public static void addCount(@NonNull final MetricsScope metricsScope, final String dimension,
    -            final long count, @NonNull final MetricsLevel metricsLevel) {
    +    public static void addCount(
    +            @NonNull final MetricsScope metricsScope,
    +            final String dimension,
    +            final long count,
    +            @NonNull final MetricsLevel metricsLevel) {
             metricsScope.addData(dimension, count, StandardUnit.COUNT, metricsLevel);
         }
     
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsFactory.java
    index 793b90d4..cb4e4a12 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsFactory.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsFactory.java
    @@ -22,5 +22,4 @@ public class NullMetricsFactory implements MetricsFactory {
         public MetricsScope createMetrics() {
             return SCOPE;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsScope.java
    index a872ab56..e99f4d9c 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/NullMetricsScope.java
    @@ -19,22 +19,14 @@ import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
     public class NullMetricsScope implements MetricsScope {
     
         @Override
    -    public void addData(String name, double value, StandardUnit unit) {
    -
    -    }
    +    public void addData(String name, double value, StandardUnit unit) {}
     
         @Override
    -    public void addData(String name, double value, StandardUnit unit, MetricsLevel level) {
    -
    -    }
    +    public void addData(String name, double value, StandardUnit unit, MetricsLevel level) {}
     
         @Override
    -    public void addDimension(String name, String value) {
    -
    -    }
    +    public void addDimension(String name, String value) {}
     
         @Override
    -    public void end() {
    -
    -    }
    +    public void end() {}
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingScope.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingScope.java
    index 7a79e510..94ccdb96 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingScope.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/metrics/ThreadSafeMetricsDelegatingScope.java
    @@ -14,7 +14,6 @@
      */
     package software.amazon.kinesis.metrics;
     
    -
     import software.amazon.awssdk.services.cloudwatch.model.StandardUnit;
     
     /**
    @@ -65,5 +64,4 @@ public class ThreadSafeMetricsDelegatingScope implements MetricsScope {
         public synchronized void end() {
             delegate.end();
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/Checkpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/Checkpointer.java
    index 2ffadc06..ec53e7cc 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/Checkpointer.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/Checkpointer.java
    @@ -14,8 +14,8 @@
      */
     package software.amazon.kinesis.processor;
     
    -import software.amazon.kinesis.exceptions.KinesisClientLibException;
     import software.amazon.kinesis.checkpoint.Checkpoint;
    +import software.amazon.kinesis.exceptions.KinesisClientLibException;
     import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;
     
     /**
    @@ -24,18 +24,18 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber;
     public interface Checkpointer {
     
         /**
    -     * Record a checkpoint for a shard (e.g. sequence and subsequence numbers of last record processed 
    +     * Record a checkpoint for a shard (e.g. sequence and subsequence numbers of last record processed
          * by application). Upon failover, record processing is resumed from this point.
    -     * 
    +     *
          * @param leaseKey Checkpoint is specified for this shard.
          * @param checkpointValue Value of the checkpoint (e.g. Kinesis sequence number and subsequence number)
          * @param concurrencyToken Used with conditional writes to prevent stale updates
    -     *        (e.g. if there was a fail over to a different record processor, we don't want to 
    +     *        (e.g. if there was a fail over to a different record processor, we don't want to
          *        overwrite it's checkpoint)
          * @throws KinesisClientLibException Thrown if we were unable to save the checkpoint
          */
         void setCheckpoint(String leaseKey, ExtendedSequenceNumber checkpointValue, String concurrencyToken)
    -        throws KinesisClientLibException;
    +            throws KinesisClientLibException;
     
         /**
          * Get the current checkpoint stored for the specified shard. Useful for checking that the parent shard
    @@ -58,7 +58,6 @@ public interface Checkpointer {
          */
         Checkpoint getCheckpointObject(String leaseKey) throws KinesisClientLibException;
     
    -
         /**
          * Record intent to checkpoint for a shard. Upon failover, the pendingCheckpointValue will be passed to the new
          * ShardRecordProcessor's initialize() method.
    @@ -71,7 +70,7 @@ public interface Checkpointer {
          * @throws KinesisClientLibException Thrown if we were unable to save the checkpoint
          */
         void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken)
    -        throws KinesisClientLibException;
    +            throws KinesisClientLibException;
     
         /**
          * Record intent to checkpoint for a shard. Upon failover, the pendingCheckpoint and pendingCheckpointState will be
    @@ -86,11 +85,14 @@ public interface Checkpointer {
          *
          * @throws KinesisClientLibException Thrown if we were unable to save the checkpoint
          */
    -    void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken, byte[] pendingCheckpointState)
    +    void prepareCheckpoint(
    +            String leaseKey,
    +            ExtendedSequenceNumber pendingCheckpoint,
    +            String concurrencyToken,
    +            byte[] pendingCheckpointState)
                 throws KinesisClientLibException;
     
         void operation(String operation);
     
         String operation();
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/FormerStreamsLeasesDeletionStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/FormerStreamsLeasesDeletionStrategy.java
    index 232c428d..608aab50 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/FormerStreamsLeasesDeletionStrategy.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/FormerStreamsLeasesDeletionStrategy.java
    @@ -15,11 +15,11 @@
     
     package software.amazon.kinesis.processor;
     
    -import software.amazon.kinesis.common.StreamIdentifier;
    -
     import java.time.Duration;
     import java.util.List;
     
    +import software.amazon.kinesis.common.StreamIdentifier;
    +
     /**
      * Strategy for cleaning up the leases for former streams.
      */
    @@ -101,9 +101,4 @@ public interface FormerStreamsLeasesDeletionStrategy {
                 return StreamsLeasesDeletionType.PROVIDED_STREAMS_DEFERRED_DELETION;
             }
         }
    -
     }
    -
    -
    -
    -
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/MultiStreamTracker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/MultiStreamTracker.java
    index ead38333..cfff520d 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/MultiStreamTracker.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/MultiStreamTracker.java
    @@ -24,5 +24,4 @@ public interface MultiStreamTracker extends StreamTracker {
         default boolean isMultiStream() {
             return true;
         }
    -
     }
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/PreparedCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/PreparedCheckpointer.java
    index a87f536a..ef4a40a1 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/PreparedCheckpointer.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/PreparedCheckpointer.java
    @@ -50,6 +50,5 @@ public interface PreparedCheckpointer {
          */
         void checkpoint()
                 throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException,
    -            IllegalArgumentException;
    -
    -}
    \ No newline at end of file
    +                    IllegalArgumentException;
    +}
    diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ProcessorConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ProcessorConfig.java
    index 7641bc44..d5366f3a 100644
    --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ProcessorConfig.java
    +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ProcessorConfig.java
    @@ -37,5 +37,4 @@ public class ProcessorConfig {
          * 

    Default value: false

    */ private boolean callProcessRecordsEvenForEmptyRecordList = false; - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/RecordProcessorCheckpointer.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/RecordProcessorCheckpointer.java index 34b2930c..2a868951 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/RecordProcessorCheckpointer.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/RecordProcessorCheckpointer.java @@ -34,7 +34,7 @@ public interface RecordProcessorCheckpointer { * In steady state, applications should checkpoint periodically (e.g. once every 5 minutes). * Calling this API too frequently can slow down the application (because it puts pressure on the underlying * checkpoint storage layer). - * + * * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. * Consider increasing the throughput/capacity of the checkpoint store or reducing checkpoint frequency. * @throws ShutdownException The record processor instance has been shutdown. Another instance may have @@ -46,13 +46,13 @@ public interface RecordProcessorCheckpointer { * backoff and retry. */ void checkpoint() - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; /** * This method will checkpoint the progress at the provided record. This method is analogous to * {@link #checkpoint()} but provides the ability to specify the record at which to * checkpoint. - * + * * @param record A record at which to checkpoint in this shard. Upon failover, * the Kinesis Client Library will start fetching records after this record's sequence number. * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. 
@@ -66,13 +66,13 @@ public interface RecordProcessorCheckpointer { * backoff and retry. */ void checkpoint(Record record) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException; /** * This method will checkpoint the progress at the provided sequenceNumber. This method is analogous to * {@link #checkpoint()} but provides the ability to specify the sequence number at which to * checkpoint. - * + * * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover, * the Kinesis Client Library will start fetching records after this sequence number. * @throws ThrottlingException Can't store checkpoint. Can be caused by checkpointing too frequently. @@ -90,14 +90,14 @@ public interface RecordProcessorCheckpointer { * 2.) It is not a valid sequence number for a record in this shard. */ void checkpoint(String sequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException; /** * This method will checkpoint the progress at the provided sequenceNumber and subSequenceNumber, the latter for - * aggregated records produced with the Producer Library. This method is analogous to {@link #checkpoint()} + * aggregated records produced with the Producer Library. This method is analogous to {@link #checkpoint()} * but provides the ability to specify the sequence and subsequence numbers at which to checkpoint. - * + * * @param sequenceNumber A sequence number at which to checkpoint in this shard. Upon failover, the Kinesis * Client Library will start fetching records after the given sequence and subsequence numbers. 
* @param subSequenceNumber A subsequence number at which to checkpoint within this shard. Upon failover, the @@ -117,8 +117,8 @@ public interface RecordProcessorCheckpointer { * 2.) It is not a valid sequence number for a record in this shard. */ void checkpoint(String sequenceNumber, long subSequenceNumber) - throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, + IllegalArgumentException; /** * This method will record a pending checkpoint at the last data record that was delivered to the record processor. @@ -236,7 +236,7 @@ public interface RecordProcessorCheckpointer { * {@link #prepareCheckpoint()} but provides the ability to specify the sequence number at which to checkpoint. * * @param sequenceNumber A sequence number at which to prepare checkpoint in this shard. - + * * @return an PreparedCheckpointer object that can be called later to persist the checkpoint. * * @throws ThrottlingException Can't store pending checkpoint. Can be caused by checkpointing too frequently. @@ -255,7 +255,7 @@ public interface RecordProcessorCheckpointer { */ PreparedCheckpointer prepareCheckpoint(String sequenceNumber) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + IllegalArgumentException; /** * This method will record a pending checkpoint at the provided sequenceNumber. 
This method is analogous to @@ -284,7 +284,7 @@ public interface RecordProcessorCheckpointer { */ PreparedCheckpointer prepareCheckpoint(String sequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + IllegalArgumentException; /** * This method will record a pending checkpoint at the provided sequenceNumber and subSequenceNumber, the latter for @@ -312,7 +312,7 @@ public interface RecordProcessorCheckpointer { */ PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + IllegalArgumentException; /** * This method will record a pending checkpoint at the provided sequenceNumber and subSequenceNumber, the latter for @@ -343,7 +343,7 @@ public interface RecordProcessorCheckpointer { */ PreparedCheckpointer prepareCheckpoint(String sequenceNumber, long subSequenceNumber, byte[] applicationState) throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException, - IllegalArgumentException; + IllegalArgumentException; Checkpointer checkpointer(); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessor.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessor.java index 9b8b6946..34cb3314 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessor.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShardRecordProcessor.java @@ -30,7 +30,7 @@ public interface ShardRecordProcessor { * Invoked by the Amazon Kinesis Client Library before data records are delivered to the ShardRecordProcessor instance * (via processRecords). 
* - * @param initializationInput Provides information related to initialization + * @param initializationInput Provides information related to initialization */ void initialize(InitializationInput initializationInput); @@ -48,7 +48,7 @@ public interface ShardRecordProcessor { /** * Called when the lease that tied to this record processor has been lost. Once the lease has been lost the record * processor can no longer checkpoint. - * + * * @param leaseLostInput * access to functions and data related to the loss of the lease. Currently this has no functionality. */ @@ -60,7 +60,7 @@ public interface ShardRecordProcessor { * * When this is called the record processor must call {@link RecordProcessorCheckpointer#checkpoint()}, * otherwise an exception will be thrown and the all child shards of this shard will not make progress. - * + * * @param shardEndedInput * provides access to a checkpointer method for completing processing of the shard. */ @@ -76,5 +76,4 @@ public interface ShardRecordProcessor { * completed. */ void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShutdownNotificationAware.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShutdownNotificationAware.java index 0b3de8ab..3a22c1c7 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShutdownNotificationAware.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/ShutdownNotificationAware.java @@ -23,9 +23,8 @@ public interface ShutdownNotificationAware { * Called when the worker has been requested to shutdown, and gives the record processor a chance to checkpoint. * * The record processor will still have shutdown called. - * + * * @param checkpointer the checkpointer that can be used to save progress. 
*/ void shutdownRequested(RecordProcessorCheckpointer checkpointer); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/SingleStreamTracker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/SingleStreamTracker.java index 9b5f85c3..04075efa 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/SingleStreamTracker.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/SingleStreamTracker.java @@ -58,14 +58,11 @@ public class SingleStreamTracker implements StreamTracker { } public SingleStreamTracker( - StreamIdentifier streamIdentifier, - @NonNull InitialPositionInStreamExtended initialPosition) { + StreamIdentifier streamIdentifier, @NonNull InitialPositionInStreamExtended initialPosition) { this(streamIdentifier, new StreamConfig(streamIdentifier, initialPosition)); } - public SingleStreamTracker( - String streamName, - @NonNull InitialPositionInStreamExtended initialPosition) { + public SingleStreamTracker(String streamName, @NonNull InitialPositionInStreamExtended initialPosition) { this(StreamIdentifier.singleStreamInstance(streamName), initialPosition); } @@ -88,5 +85,4 @@ public class SingleStreamTracker implements StreamTracker { public boolean isMultiStream() { return false; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/StreamTracker.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/StreamTracker.java index befa3709..48b5c88d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/StreamTracker.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/processor/StreamTracker.java @@ -15,13 +15,13 @@ package software.amazon.kinesis.processor; +import java.util.List; + import software.amazon.kinesis.common.InitialPositionInStream; import software.amazon.kinesis.common.InitialPositionInStreamExtended; import 
software.amazon.kinesis.common.StreamConfig; import software.amazon.kinesis.common.StreamIdentifier; -import java.util.List; - /** * Interface for stream trackers. * KCL will periodically probe this interface to learn about the new and old streams. @@ -81,5 +81,4 @@ public interface StreamTracker { *

    */ boolean isMultiStream(); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AWSExceptionManager.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AWSExceptionManager.java index 8081b946..c5368ad4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AWSExceptionManager.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AWSExceptionManager.java @@ -32,14 +32,15 @@ import software.amazon.kinesis.annotations.KinesisClientInternalApi; */ @KinesisClientInternalApi public class AWSExceptionManager { - private final Map, Function> map = new HashMap<>(); + private final Map, Function> map = + new HashMap<>(); @Setter @Accessors(fluent = true) private Function defaultFunction = RuntimeException::new; - public void add(@NonNull final Class clazz, - @NonNull final Function function) { + public void add( + @NonNull final Class clazz, @NonNull final Function function) { map.put(clazz, function); } @@ -66,5 +67,4 @@ public class AWSExceptionManager { (Function) handleFor(t); return f.apply(t); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AggregatorUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AggregatorUtil.java index 533f47ab..fc28274a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AggregatorUtil.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/AggregatorUtil.java @@ -24,12 +24,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; -import org.apache.commons.lang3.StringUtils; - import com.google.protobuf.InvalidProtocolBufferException; - import lombok.NonNull; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import software.amazon.kinesis.retrieval.kpl.Messages; /** @@ -37,7 +35,7 @@ import software.amazon.kinesis.retrieval.kpl.Messages; */ @Slf4j 
public class AggregatorUtil { - public static final byte[] AGGREGATED_RECORD_MAGIC = new byte[]{-13, -119, -102, -62}; + public static final byte[] AGGREGATED_RECORD_MAGIC = new byte[] {-13, -119, -102, -62}; private static final int DIGEST_SIZE = 16; private static final BigInteger STARTING_HASH_KEY = new BigInteger("0"); // largest hash key = 2^128-1 @@ -58,7 +56,7 @@ public class AggregatorUtil { /** * Deaggregate any KPL records found. This method converts the starting and ending hash keys to {@link BigInteger}s * before passing them on to {@link #deaggregate(List, BigInteger, BigInteger)} - * + * * @param records * the records to potentially deaggreate * @param startingHashKey @@ -67,8 +65,8 @@ public class AggregatorUtil { * the ending hash key of the shard * @return A list of records with any aggregate records deaggregated */ - public List deaggregate(List records, String startingHashKey, - String endingHashKey) { + public List deaggregate( + List records, String startingHashKey, String endingHashKey) { return deaggregate(records, new BigInteger(startingHashKey), new BigInteger(endingHashKey)); } @@ -91,9 +89,8 @@ public class AggregatorUtil { * the endingHashKey. */ // CHECKSTYLE:OFF NPathComplexity - public List deaggregate(List records, - BigInteger startingHashKey, - BigInteger endingHashKey) { + public List deaggregate( + List records, BigInteger startingHashKey, BigInteger endingHashKey) { List result = new ArrayList<>(); byte[] magic = new byte[AGGREGATED_RECORD_MAGIC.length]; byte[] digest = new byte[DIGEST_SIZE]; @@ -130,7 +127,8 @@ public class AggregatorUtil { List pks = ar.getPartitionKeyTableList(); List ehks = ar.getExplicitHashKeyTableList(); long aat = r.approximateArrivalTimestamp() == null - ? -1 : r.approximateArrivalTimestamp().toEpochMilli(); + ? 
-1 + : r.approximateArrivalTimestamp().toEpochMilli(); try { int recordsInCurrRecord = 0; for (Messages.Record mr : ar.getRecordsList()) { @@ -157,7 +155,8 @@ public class AggregatorUtil { .partitionKey(partitionKey) .explicitHashKey(explicitHashKey) .build(); - result.add(convertRecordToKinesisClientRecord(record, true, subSeqNum++, explicitHashKey)); + result.add( + convertRecordToKinesisClientRecord(record, true, subSeqNum++, explicitHashKey)); } } catch (Exception e) { StringBuilder sb = new StringBuilder(); @@ -171,14 +170,25 @@ public class AggregatorUtil { sb.append(s).append("\n"); } for (Messages.Record mr : ar.getRecordsList()) { - sb.append("Record: [hasEhk=").append(mr.hasExplicitHashKeyIndex()).append(", ") - .append("ehkIdx=").append(mr.getExplicitHashKeyIndex()).append(", ") - .append("pkIdx=").append(mr.getPartitionKeyIndex()).append(", ") - .append("dataLen=").append(mr.getData().toByteArray().length).append("]\n"); + sb.append("Record: [hasEhk=") + .append(mr.hasExplicitHashKeyIndex()) + .append(", ") + .append("ehkIdx=") + .append(mr.getExplicitHashKeyIndex()) + .append(", ") + .append("pkIdx=") + .append(mr.getPartitionKeyIndex()) + .append(", ") + .append("dataLen=") + .append(mr.getData().toByteArray().length) + .append("]\n"); } - sb.append("Sequence number: ").append(r.sequenceNumber()).append("\n") + sb.append("Sequence number: ") + .append(r.sequenceNumber()) + .append("\n") .append("Raw data: ") - .append(javax.xml.bind.DatatypeConverter.printBase64Binary(messageData)).append("\n"); + .append(javax.xml.bind.DatatypeConverter.printBase64Binary(messageData)) + .append("\n"); log.error(sb.toString(), e); } } catch (InvalidProtocolBufferException e) { @@ -199,7 +209,8 @@ public class AggregatorUtil { return md5(data); } - protected BigInteger effectiveHashKey(String partitionKey, String explicitHashKey) throws UnsupportedEncodingException { + protected BigInteger effectiveHashKey(String partitionKey, String explicitHashKey) + throws 
UnsupportedEncodingException { if (explicitHashKey == null) { return new BigInteger(1, md5(partitionKey.getBytes("UTF-8"))); } @@ -215,10 +226,11 @@ public class AggregatorUtil { } } - public KinesisClientRecord convertRecordToKinesisClientRecord(@NonNull final KinesisClientRecord record, - final boolean aggregated, - final long subSequenceNumber, - final String explicitHashKey) { + public KinesisClientRecord convertRecordToKinesisClientRecord( + @NonNull final KinesisClientRecord record, + final boolean aggregated, + final long subSequenceNumber, + final String explicitHashKey) { return KinesisClientRecord.builder() .data(record.data()) .partitionKey(record.partitionKey()) diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherProviderConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherProviderConfig.java index b5c7b23e..d8395c9a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherProviderConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherProviderConfig.java @@ -16,19 +16,20 @@ package software.amazon.kinesis.retrieval; import java.time.Duration; + import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.metrics.MetricsFactory; public interface DataFetcherProviderConfig { /** - * Gets stream identifier for dataFetcher. - */ + * Gets stream identifier for dataFetcher. + */ StreamIdentifier getStreamIdentifier(); /** - * Gets shard id. - */ + * Gets shard id. 
+ */ String getShardId(); /** diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherResult.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherResult.java index 5ed68765..82e9da36 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherResult.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetcherResult.java @@ -22,7 +22,7 @@ import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; public interface DataFetcherResult { /** * The result of the request to Kinesis - * + * * @return The result of the request, this can be null if the request failed. */ GetRecordsResponse getResult(); @@ -30,14 +30,14 @@ public interface DataFetcherResult { /** * Accepts the result, and advances the shard iterator. A result from the data fetcher must be accepted before any * further progress can be made. - * + * * @return the result of the request, this can be null if the request failed. 
*/ GetRecordsResponse accept(); /** * Indicates whether this result is at the end of the shard or not - * + * * @return true if the result is at the end of a shard, false otherwise */ boolean isShardEnd(); diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetchingStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetchingStrategy.java index 529016ee..ba0ec587 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetchingStrategy.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataFetchingStrategy.java @@ -18,5 +18,6 @@ package software.amazon.kinesis.retrieval; * */ public enum DataFetchingStrategy { - DEFAULT, PREFETCH_CACHED; + DEFAULT, + PREFETCH_CACHED; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataRetrievalUtil.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataRetrievalUtil.java index ba743e61..72ecd19a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataRetrievalUtil.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/DataRetrievalUtil.java @@ -15,21 +15,23 @@ package software.amazon.kinesis.retrieval; +import java.util.List; + import software.amazon.awssdk.services.kinesis.model.ChildShard; import software.amazon.awssdk.utils.CollectionUtils; -import java.util.List; - public class DataRetrievalUtil { public static boolean isValidResult(String shardEndIndicator, List childShards) { - // shardEndIndicator is nextShardIterator for GetRecordsResponse, and is continuationSequenceNumber for SubscribeToShardEvent + // shardEndIndicator is nextShardIterator for GetRecordsResponse, and is continuationSequenceNumber for + // SubscribeToShardEvent // There are two valid scenarios for the shardEndIndicator and childShards combination. // 1. 
ShardEnd scenario: shardEndIndicator should be null and childShards should be a non-empty list. - // 2. Non-ShardEnd scenario: shardEndIndicator should be non-null and childShards should be null or an empty list. + // 2. Non-ShardEnd scenario: shardEndIndicator should be non-null and childShards should be null or an empty + // list. // Otherwise, the retrieval result is invalid. - if (shardEndIndicator == null && CollectionUtils.isNullOrEmpty(childShards) || - shardEndIndicator != null && !CollectionUtils.isNullOrEmpty(childShards)) { + if (shardEndIndicator == null && CollectionUtils.isNullOrEmpty(childShards) + || shardEndIndicator != null && !CollectionUtils.isNullOrEmpty(childShards)) { return false; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetrievalStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetrievalStrategy.java index 3ff8e620..ca0cfbe8 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetrievalStrategy.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetrievalStrategy.java @@ -15,6 +15,7 @@ package software.amazon.kinesis.retrieval; import java.util.Optional; + import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; import software.amazon.kinesis.retrieval.polling.DataFetcher; import software.amazon.kinesis.retrieval.polling.KinesisDataFetcher; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetriever.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetriever.java index 158413f9..42942813 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetriever.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/GetRecordsRetriever.java @@ -17,7 +17,7 @@ package software.amazon.kinesis.retrieval; import 
software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; /** - * This class uses the GetRecordsRetrievalStrategy class to retrieve the next set of records and update the cache. + * This class uses the GetRecordsRetrievalStrategy class to retrieve the next set of records and update the cache. */ public interface GetRecordsRetriever { GetRecordsResponse getNextRecords(int maxRecords); diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/IteratorBuilder.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/IteratorBuilder.java index 9e9adf91..bead706d 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/IteratorBuilder.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/IteratorBuilder.java @@ -16,28 +16,43 @@ import software.amazon.kinesis.common.InitialPositionInStreamExtended; @KinesisClientInternalApi public class IteratorBuilder { - public static SubscribeToShardRequest.Builder request(SubscribeToShardRequest.Builder builder, - String sequenceNumber, InitialPositionInStreamExtended initialPosition) { - return builder.startingPosition(request(StartingPosition.builder(), sequenceNumber, initialPosition).build()); - } - - public static SubscribeToShardRequest.Builder reconnectRequest(SubscribeToShardRequest.Builder builder, - String sequenceNumber, InitialPositionInStreamExtended initialPosition) { - return builder.startingPosition( - reconnectRequest(StartingPosition.builder(), sequenceNumber, initialPosition).build()); - } - - public static StartingPosition.Builder request(StartingPosition.Builder builder, String sequenceNumber, + public static SubscribeToShardRequest.Builder request( + SubscribeToShardRequest.Builder builder, + String sequenceNumber, InitialPositionInStreamExtended initialPosition) { - return apply(builder, StartingPosition.Builder::type, StartingPosition.Builder::timestamp, - StartingPosition.Builder::sequenceNumber, 
initialPosition, sequenceNumber, + return builder.startingPosition(request(StartingPosition.builder(), sequenceNumber, initialPosition) + .build()); + } + + public static SubscribeToShardRequest.Builder reconnectRequest( + SubscribeToShardRequest.Builder builder, + String sequenceNumber, + InitialPositionInStreamExtended initialPosition) { + return builder.startingPosition(reconnectRequest(StartingPosition.builder(), sequenceNumber, initialPosition) + .build()); + } + + public static StartingPosition.Builder request( + StartingPosition.Builder builder, String sequenceNumber, InitialPositionInStreamExtended initialPosition) { + return apply( + builder, + StartingPosition.Builder::type, + StartingPosition.Builder::timestamp, + StartingPosition.Builder::sequenceNumber, + initialPosition, + sequenceNumber, ShardIteratorType.AT_SEQUENCE_NUMBER); } - public static StartingPosition.Builder reconnectRequest(StartingPosition.Builder builder, String sequenceNumber, - InitialPositionInStreamExtended initialPosition) { - return apply(builder, StartingPosition.Builder::type, StartingPosition.Builder::timestamp, - StartingPosition.Builder::sequenceNumber, initialPosition, sequenceNumber, + public static StartingPosition.Builder reconnectRequest( + StartingPosition.Builder builder, String sequenceNumber, InitialPositionInStreamExtended initialPosition) { + return apply( + builder, + StartingPosition.Builder::type, + StartingPosition.Builder::timestamp, + StartingPosition.Builder::sequenceNumber, + initialPosition, + sequenceNumber, ShardIteratorType.AFTER_SEQUENCE_NUMBER); } @@ -49,11 +64,11 @@ public class IteratorBuilder { * @param initialPosition One of LATEST, TRIM_HORIZON, or AT_TIMESTAMP. * @return An updated GetShardIteratorRequest.Builder. 
*/ - public static GetShardIteratorRequest.Builder request(GetShardIteratorRequest.Builder builder, - String sequenceNumber, - InitialPositionInStreamExtended initialPosition) { + public static GetShardIteratorRequest.Builder request( + GetShardIteratorRequest.Builder builder, + String sequenceNumber, + InitialPositionInStreamExtended initialPosition) { return getShardIteratorRequest(builder, sequenceNumber, initialPosition, ShardIteratorType.AT_SEQUENCE_NUMBER); - } /** @@ -64,22 +79,30 @@ public class IteratorBuilder { * @param initialPosition One of LATEST, TRIM_HORIZON, or AT_TIMESTAMP. * @return An updated GetShardIteratorRequest.Builder. */ - public static GetShardIteratorRequest.Builder reconnectRequest(GetShardIteratorRequest.Builder builder, - String sequenceNumber, - InitialPositionInStreamExtended initialPosition) { - return getShardIteratorRequest(builder, sequenceNumber, initialPosition, ShardIteratorType.AFTER_SEQUENCE_NUMBER); + public static GetShardIteratorRequest.Builder reconnectRequest( + GetShardIteratorRequest.Builder builder, + String sequenceNumber, + InitialPositionInStreamExtended initialPosition) { + return getShardIteratorRequest( + builder, sequenceNumber, initialPosition, ShardIteratorType.AFTER_SEQUENCE_NUMBER); } - private static GetShardIteratorRequest.Builder getShardIteratorRequest(GetShardIteratorRequest.Builder builder, - String sequenceNumber, - InitialPositionInStreamExtended initialPosition, - ShardIteratorType shardIteratorType) { - return apply(builder, GetShardIteratorRequest.Builder::shardIteratorType, GetShardIteratorRequest.Builder::timestamp, - GetShardIteratorRequest.Builder::startingSequenceNumber, initialPosition, sequenceNumber, + private static GetShardIteratorRequest.Builder getShardIteratorRequest( + GetShardIteratorRequest.Builder builder, + String sequenceNumber, + InitialPositionInStreamExtended initialPosition, + ShardIteratorType shardIteratorType) { + return apply( + builder, + 
GetShardIteratorRequest.Builder::shardIteratorType, + GetShardIteratorRequest.Builder::timestamp, + GetShardIteratorRequest.Builder::startingSequenceNumber, + initialPosition, + sequenceNumber, shardIteratorType); } - private final static Map SHARD_ITERATOR_MAPPING; + private static final Map SHARD_ITERATOR_MAPPING; static { Map map = new HashMap<>(); @@ -95,22 +118,25 @@ public class IteratorBuilder { R apply(R updated, T value); } - private static R apply(R initial, UpdatingFunction shardIterFunc, - UpdatingFunction dateFunc, UpdatingFunction sequenceFunction, - InitialPositionInStreamExtended initialPositionInStreamExtended, String sequenceNumber, + private static R apply( + R initial, + UpdatingFunction shardIterFunc, + UpdatingFunction dateFunc, + UpdatingFunction sequenceFunction, + InitialPositionInStreamExtended initialPositionInStreamExtended, + String sequenceNumber, ShardIteratorType defaultIteratorType) { - ShardIteratorType iteratorType = SHARD_ITERATOR_MAPPING.getOrDefault( - sequenceNumber, defaultIteratorType); + ShardIteratorType iteratorType = SHARD_ITERATOR_MAPPING.getOrDefault(sequenceNumber, defaultIteratorType); R result = shardIterFunc.apply(initial, iteratorType); switch (iteratorType) { - case AT_TIMESTAMP: - return dateFunc.apply(result, initialPositionInStreamExtended.getTimestamp().toInstant()); - case AT_SEQUENCE_NUMBER: - case AFTER_SEQUENCE_NUMBER: - return sequenceFunction.apply(result, sequenceNumber); - default: - return result; + case AT_TIMESTAMP: + return dateFunc.apply( + result, initialPositionInStreamExtended.getTimestamp().toInstant()); + case AT_SEQUENCE_NUMBER: + case AFTER_SEQUENCE_NUMBER: + return sequenceFunction.apply(result, sequenceNumber); + default: + return result; } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisClientRecord.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisClientRecord.java index 8a3d4d13..5e8018f9 100644 --- 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisClientRecord.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisClientRecord.java @@ -47,8 +47,12 @@ public class KinesisClientRecord { private final Schema schema; public static KinesisClientRecord fromRecord(Record record) { - return KinesisClientRecord.builder().sequenceNumber(record.sequenceNumber()) - .approximateArrivalTimestamp(record.approximateArrivalTimestamp()).data(record.data().asByteBuffer()) - .partitionKey(record.partitionKey()).encryptionType(record.encryptionType()).build(); + return KinesisClientRecord.builder() + .sequenceNumber(record.sequenceNumber()) + .approximateArrivalTimestamp(record.approximateArrivalTimestamp()) + .data(record.data().asByteBuffer()) + .partitionKey(record.partitionKey()) + .encryptionType(record.encryptionType()) + .build(); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisDataFetcherProviderConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisDataFetcherProviderConfig.java index 7cf6cdcf..68f8f7c2 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisDataFetcherProviderConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/KinesisDataFetcherProviderConfig.java @@ -16,12 +16,12 @@ package software.amazon.kinesis.retrieval; import java.time.Duration; + import lombok.Data; import lombok.NonNull; import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.metrics.MetricsFactory; - /** * Configuration needed for custom data fetchers */ diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsDeliveryAck.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsDeliveryAck.java index 487e1637..a4b255fb 100644 --- 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsDeliveryAck.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsDeliveryAck.java @@ -25,5 +25,4 @@ public interface RecordsDeliveryAck { * @return id that uniquely determines a record batch and its source. */ BatchUniqueIdentifier batchUniqueIdentifier(); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsFetcherFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsFetcherFactory.java index 328273b2..eb62a98e 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsFetcherFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsFetcherFactory.java @@ -27,11 +27,14 @@ public interface RecordsFetcherFactory { * @param shardId ShardId of the shard that the fetcher will retrieve records for * @param metricsFactory MetricsFactory used to create metricScope * @param maxRecords Max number of records to be returned in a single get call - * + * * @return RecordsPublisher used to get records from Kinesis. 
*/ - RecordsPublisher createRecordsFetcher(GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, String shardId, - MetricsFactory metricsFactory, int maxRecords); + RecordsPublisher createRecordsFetcher( + GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, + String shardId, + MetricsFactory metricsFactory, + int maxRecords); /** * Sets the maximum number of ProcessRecordsInput objects the RecordsPublisher can hold, before further requests are @@ -82,5 +85,4 @@ public interface RecordsFetcherFactory { void idleMillisBetweenCalls(long idleMillisBetweenCalls); long idleMillisBetweenCalls(); - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsPublisher.java index 98c0375e..60507c25 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsPublisher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsPublisher.java @@ -16,7 +16,6 @@ package software.amazon.kinesis.retrieval; import org.reactivestreams.Publisher; - import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.RequestDetails; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; @@ -35,7 +34,9 @@ public interface RecordsPublisher extends Publisher { * @param initialPositionInStreamExtended * if there is no sequence number the initial position to use */ - void start(ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended); + void start( + ExtendedSequenceNumber extendedSequenceNumber, + InitialPositionInStreamExtended initialPositionInStreamExtended); /** * Restart from the last accepted and processed diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsRetrieved.java 
b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsRetrieved.java index f6f5bb7f..d2a3ddc2 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsRetrieved.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RecordsRetrieved.java @@ -20,7 +20,7 @@ public interface RecordsRetrieved { /** * Retrieves the records that have been received via one of the publishers - * + * * @return the processRecordsInput received */ ProcessRecordsInput processRecordsInput(); diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalConfig.java index d50e7d19..fdd6c445 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalConfig.java @@ -111,29 +111,34 @@ public class RetrievalConfig { * @see StreamTracker#createStreamConfig(StreamIdentifier) */ @Deprecated - private InitialPositionInStreamExtended initialPositionInStreamExtended = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.LATEST); + private InitialPositionInStreamExtended initialPositionInStreamExtended = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); private RetrievalSpecificConfig retrievalSpecificConfig; private RetrievalFactory retrievalFactory; - public RetrievalConfig(@NonNull KinesisAsyncClient kinesisAsyncClient, @NonNull String streamName, - @NonNull String applicationName) { + public RetrievalConfig( + @NonNull KinesisAsyncClient kinesisAsyncClient, + @NonNull String streamName, + @NonNull String applicationName) { this(kinesisAsyncClient, new SingleStreamTracker(streamName), applicationName); } - public RetrievalConfig(@NonNull KinesisAsyncClient kinesisAsyncClient, @NonNull Arn streamArn, - @NonNull 
String applicationName) { + public RetrievalConfig( + @NonNull KinesisAsyncClient kinesisAsyncClient, @NonNull Arn streamArn, @NonNull String applicationName) { this(kinesisAsyncClient, new SingleStreamTracker(streamArn), applicationName); } - public RetrievalConfig(@NonNull KinesisAsyncClient kinesisAsyncClient, @NonNull StreamTracker streamTracker, - @NonNull String applicationName) { + public RetrievalConfig( + @NonNull KinesisAsyncClient kinesisAsyncClient, + @NonNull StreamTracker streamTracker, + @NonNull String applicationName) { this.kinesisClient = kinesisAsyncClient; this.streamTracker = streamTracker; this.applicationName = applicationName; - this.appStreamTracker = DeprecationUtils.convert(streamTracker, + this.appStreamTracker = DeprecationUtils.convert( + streamTracker, singleStreamTracker -> singleStreamTracker.streamConfigList().get(0)); } @@ -148,7 +153,8 @@ public class RetrievalConfig { * @see StreamTracker#createStreamConfig(StreamIdentifier) */ @Deprecated - public RetrievalConfig initialPositionInStreamExtended(InitialPositionInStreamExtended initialPositionInStreamExtended) { + public RetrievalConfig initialPositionInStreamExtended( + InitialPositionInStreamExtended initialPositionInStreamExtended) { if (streamTracker().isMultiStream()) { throw new IllegalArgumentException( "Cannot set initialPositionInStreamExtended when multiStreamTracker is set"); @@ -170,8 +176,7 @@ public class RetrievalConfig { public RetrievalFactory retrievalFactory() { if (retrievalFactory == null) { if (retrievalSpecificConfig == null) { - final FanOutConfig fanOutConfig = new FanOutConfig(kinesisClient()) - .applicationName(applicationName()); + final FanOutConfig fanOutConfig = new FanOutConfig(kinesisClient()).applicationName(applicationName()); if (!streamTracker.isMultiStream()) { final String streamName = getSingleStreamIdentifier().streamName(); fanOutConfig.streamName(streamName); @@ -190,5 +195,4 @@ public class RetrievalConfig { private StreamIdentifier 
getSingleStreamIdentifier() { return streamTracker.streamConfigList().get(0).streamIdentifier(); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalFactory.java index 04727294..72b75074 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalFactory.java @@ -51,7 +51,8 @@ public interface RetrievalFactory { * @param metricsFactory The {@link MetricsFactory} for recording metrics. * @return A {@link RecordsPublisher} instance for retrieving records from the shard. */ - default RecordsPublisher createGetRecordsCache(ShardInfo shardInfo, StreamConfig streamConfig, MetricsFactory metricsFactory) { + default RecordsPublisher createGetRecordsCache( + ShardInfo shardInfo, StreamConfig streamConfig, MetricsFactory metricsFactory) { return createGetRecordsCache(shardInfo, metricsFactory); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalSpecificConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalSpecificConfig.java index d38fe054..4aa2114a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalSpecificConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/RetrievalSpecificConfig.java @@ -40,5 +40,4 @@ public interface RetrievalSpecificConfig { default void validateState(boolean isMultiStream) { // TODO convert this to a non-default implementation in a "major" release } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ThrottlingReporter.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ThrottlingReporter.java index 91192ad3..01890354 100644 --- 
a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ThrottlingReporter.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/ThrottlingReporter.java @@ -14,10 +14,9 @@ */ package software.amazon.kinesis.retrieval; -import org.slf4j.Logger; - import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import org.slf4j.Logger; @RequiredArgsConstructor @Slf4j @@ -30,15 +29,13 @@ public class ThrottlingReporter { public void throttled() { consecutiveThrottles++; - String message = "Shard '" + shardId + "' has been throttled " - + consecutiveThrottles + " consecutively"; + String message = "Shard '" + shardId + "' has been throttled " + consecutiveThrottles + " consecutively"; if (consecutiveThrottles > maxConsecutiveWarnThrottles) { getLog().error(message); } else { getLog().warn(message); } - } public void success() { @@ -48,5 +45,4 @@ public class ThrottlingReporter { protected Logger getLog() { return log; } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConfig.java index 16307377..346f30f4 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConfig.java @@ -15,13 +15,11 @@ package software.amazon.kinesis.retrieval.fanout; -import org.apache.commons.lang3.ObjectUtils; - import com.google.common.base.Preconditions; - import lombok.Data; import lombok.NonNull; import lombok.experimental.Accessors; +import org.apache.commons.lang3.ObjectUtils; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.kinesis.leases.exceptions.DependencyException; import software.amazon.kinesis.retrieval.RetrievalFactory; @@ -106,17 +104,22 @@ public class FanOutConfig implements RetrievalSpecificConfig { 
private FanOutConsumerRegistration createConsumerRegistration(String streamName) { String consumerToCreate = ObjectUtils.firstNonNull(consumerName(), applicationName()); - return createConsumerRegistration(kinesisClient(), + return createConsumerRegistration( + kinesisClient(), Preconditions.checkNotNull(streamName, "streamName must be set for consumer creation"), - Preconditions.checkNotNull(consumerToCreate, - "applicationName or consumerName must be set for consumer creation")); - + Preconditions.checkNotNull( + consumerToCreate, "applicationName or consumerName must be set for consumer creation")); } - protected FanOutConsumerRegistration createConsumerRegistration(KinesisAsyncClient client, String stream, - String consumerToCreate) { - return new FanOutConsumerRegistration(client, stream, consumerToCreate, maxDescribeStreamSummaryRetries(), - maxDescribeStreamConsumerRetries(), registerStreamConsumerRetries(), retryBackoffMillis()); + protected FanOutConsumerRegistration createConsumerRegistration( + KinesisAsyncClient client, String stream, String consumerToCreate) { + return new FanOutConsumerRegistration( + client, + stream, + consumerToCreate, + maxDescribeStreamSummaryRetries(), + maxDescribeStreamConsumerRetries(), + registerStreamConsumerRetries(), + retryBackoffMillis()); } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistration.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistration.java index 9bcdd83c..eaf699a3 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistration.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistration.java @@ -17,14 +17,13 @@ package software.amazon.kinesis.retrieval.fanout; import java.util.concurrent.ExecutionException; -import org.apache.commons.lang3.StringUtils; - import lombok.AccessLevel; 
import lombok.NonNull; import lombok.RequiredArgsConstructor; import lombok.Setter; import lombok.experimental.Accessors; import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.model.ConsumerStatus; import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest; @@ -52,15 +51,19 @@ import software.amazon.kinesis.retrieval.ConsumerRegistration; public class FanOutConsumerRegistration implements ConsumerRegistration { @NonNull private final KinesisAsyncClient kinesisClient; + private final String streamName; + @NonNull private final String streamConsumerName; + private final int maxDescribeStreamSummaryRetries; private final int maxDescribeStreamConsumerRetries; private final int registerStreamConsumerRetries; private final long retryBackoffMillis; private String streamArn; + @Setter(AccessLevel.PRIVATE) private String streamConsumerArn; @@ -104,7 +107,9 @@ public class FanOutConsumerRegistration implements ConsumerRegistration { } } catch (ResourceInUseException e) { // Consumer is present, call DescribeStreamConsumer - log.debug("{} : Got ResourceInUseException consumer exists, will call DescribeStreamConsumer again.", streamName); + log.debug( + "{} : Got ResourceInUseException consumer exists, will call DescribeStreamConsumer again.", + streamName); response = describeStreamConsumer(); } } @@ -123,9 +128,10 @@ public class FanOutConsumerRegistration implements ConsumerRegistration { private RegisterStreamConsumerResponse registerStreamConsumer() throws DependencyException { final AWSExceptionManager exceptionManager = createExceptionManager(); try { - final RegisterStreamConsumerRequest request = KinesisRequestsBuilder - .registerStreamConsumerRequestBuilder().streamARN(streamArn()) - .consumerName(streamConsumerName).build(); + final RegisterStreamConsumerRequest request = 
KinesisRequestsBuilder.registerStreamConsumerRequestBuilder() + .streamARN(streamArn()) + .consumerName(streamConsumerName) + .build(); return kinesisClient.registerStreamConsumer(request).get(); } catch (ExecutionException e) { throw exceptionManager.apply(e.getCause()); @@ -135,18 +141,21 @@ public class FanOutConsumerRegistration implements ConsumerRegistration { } private DescribeStreamConsumerResponse describeStreamConsumer() throws DependencyException { - final DescribeStreamConsumerRequest.Builder requestBuilder = KinesisRequestsBuilder - .describeStreamConsumerRequestBuilder(); + final DescribeStreamConsumerRequest.Builder requestBuilder = + KinesisRequestsBuilder.describeStreamConsumerRequestBuilder(); final DescribeStreamConsumerRequest request; if (StringUtils.isEmpty(streamConsumerArn)) { - request = requestBuilder.streamARN(streamArn()).consumerName(streamConsumerName).build(); + request = requestBuilder + .streamARN(streamArn()) + .consumerName(streamConsumerName) + .build(); } else { request = requestBuilder.consumerARN(streamConsumerArn).build(); } - final ServiceCallerSupplier dsc = () -> kinesisClient - .describeStreamConsumer(request).get(); + final ServiceCallerSupplier dsc = + () -> kinesisClient.describeStreamConsumer(request).get(); return retryWhenThrottled(dsc, maxDescribeStreamConsumerRetries, "DescribeStreamConsumer"); } @@ -178,10 +187,14 @@ public class FanOutConsumerRegistration implements ConsumerRegistration { private String streamArn() throws DependencyException { if (StringUtils.isEmpty(streamArn)) { - final DescribeStreamSummaryRequest request = KinesisRequestsBuilder - .describeStreamSummaryRequestBuilder().streamName(streamName).build(); - final ServiceCallerSupplier dss = () -> kinesisClient.describeStreamSummary(request).get() - .streamDescriptionSummary().streamARN(); + final DescribeStreamSummaryRequest request = KinesisRequestsBuilder.describeStreamSummaryRequestBuilder() + .streamName(streamName) + .build(); + final 
ServiceCallerSupplier dss = () -> kinesisClient + .describeStreamSummary(request) + .get() + .streamDescriptionSummary() + .streamARN(); streamArn = retryWhenThrottled(dss, maxDescribeStreamSummaryRetries, "DescribeStreamSummary"); } @@ -194,8 +207,9 @@ public class FanOutConsumerRegistration implements ConsumerRegistration { T get() throws ExecutionException, InterruptedException; } - private T retryWhenThrottled(@NonNull final ServiceCallerSupplier retriever, final int maxRetries, - @NonNull final String apiName) throws DependencyException { + private T retryWhenThrottled( + @NonNull final ServiceCallerSupplier retriever, final int maxRetries, @NonNull final String apiName) + throws DependencyException { final AWSExceptionManager exceptionManager = createExceptionManager(); LimitExceededException finalException = null; @@ -223,8 +237,8 @@ public class FanOutConsumerRegistration implements ConsumerRegistration { } if (finalException == null) { - throw new IllegalStateException( - String.format("%s : Finished all retries and no exception was caught while calling %s", streamName, apiName)); + throw new IllegalStateException(String.format( + "%s : Finished all retries and no exception was caught while calling %s", streamName, apiName)); } throw finalException; diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisher.java index a17e5e82..3206e759 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisher.java @@ -15,6 +15,15 @@ package software.amazon.kinesis.retrieval.fanout; +import java.time.Instant; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.BlockingQueue; +import 
java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + import com.google.common.annotations.VisibleForTesting; import lombok.AccessLevel; import lombok.Data; @@ -49,23 +58,14 @@ import software.amazon.kinesis.retrieval.RecordsRetrieved; import software.amazon.kinesis.retrieval.RetryableRetrievalException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import java.time.Instant; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; - import static software.amazon.kinesis.common.DiagnosticUtils.takeDelayedDeliveryActionIfRequired; import static software.amazon.kinesis.retrieval.DataRetrievalUtil.isValidResult; @Slf4j @KinesisClientInternalApi public class FanOutRecordsPublisher implements RecordsPublisher { - private static final ThrowableCategory ACQUIRE_TIMEOUT_CATEGORY = new ThrowableCategory( - ThrowableType.ACQUIRE_TIMEOUT); + private static final ThrowableCategory ACQUIRE_TIMEOUT_CATEGORY = + new ThrowableCategory(ThrowableType.ACQUIRE_TIMEOUT); private static final ThrowableCategory READ_TIMEOUT_CATEGORY = new ThrowableCategory(ThrowableType.READ_TIMEOUT); // Max burst of 10 payload events + 1 terminal event (onError/onComplete) from the service. 
private static final int MAX_EVENT_BURST_FROM_SERVICE = 10 + 1; @@ -78,16 +78,19 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private final AtomicInteger subscribeToShardId = new AtomicInteger(0); private RecordFlow flow; - @Getter @VisibleForTesting + + @Getter + @VisibleForTesting private String currentSequenceNumber; + private InitialPositionInStreamExtended initialPositionInStreamExtended; private boolean isFirstConnection = true; private Subscriber subscriber; private long availableQueueSpace = 0; - private BlockingQueue recordsDeliveryQueue = new LinkedBlockingQueue<>( - MAX_EVENT_BURST_FROM_SERVICE); + private BlockingQueue recordsDeliveryQueue = + new LinkedBlockingQueue<>(MAX_EVENT_BURST_FROM_SERVICE); private RequestDetails lastSuccessfulRequestDetails = new RequestDetails(); @@ -98,7 +101,8 @@ public class FanOutRecordsPublisher implements RecordsPublisher { this.streamAndShardId = shardId; } - public FanOutRecordsPublisher(KinesisAsyncClient kinesis, String shardId, String consumerArn, String streamIdentifierSer) { + public FanOutRecordsPublisher( + KinesisAsyncClient kinesis, String shardId, String consumerArn, String streamIdentifierSer) { this.kinesis = kinesis; this.shardId = shardId; this.consumerArn = consumerArn; @@ -106,11 +110,15 @@ public class FanOutRecordsPublisher implements RecordsPublisher { } @Override - public void start(ExtendedSequenceNumber extendedSequenceNumber, + public void start( + ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended) { synchronized (lockObject) { - log.debug("[{}] Initializing Publisher @ Sequence: {} -- Initial Position: {}", streamAndShardId, - extendedSequenceNumber, initialPositionInStreamExtended); + log.debug( + "[{}] Initializing Publisher @ Sequence: {} -- Initial Position: {}", + streamAndShardId, + extendedSequenceNumber, + initialPositionInStreamExtended); this.initialPositionInStreamExtended = 
initialPositionInStreamExtended; this.currentSequenceNumber = extendedSequenceNumber.sequenceNumber(); this.isFirstConnection = true; @@ -178,12 +186,12 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // RecordFlow of the current event that needs to be returned RecordFlow flowToBeReturned = null; - final RecordsRetrieved recordsRetrieved = recordsRetrievedContext != null ? - recordsRetrievedContext.getRecordsRetrieved() : null; + final RecordsRetrieved recordsRetrieved = + recordsRetrievedContext != null ? recordsRetrievedContext.getRecordsRetrieved() : null; // Check if the ack corresponds to the head of the delivery queue. - if (recordsRetrieved != null && recordsRetrieved.batchUniqueIdentifier() - .equals(recordsDeliveryAck.batchUniqueIdentifier())) { + if (recordsRetrieved != null + && recordsRetrieved.batchUniqueIdentifier().equals(recordsDeliveryAck.batchUniqueIdentifier())) { // It is now safe to remove the element recordsDeliveryQueue.poll(); // Take action based on the time spent by the event in queue. @@ -200,16 +208,22 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // Check if the mismatched event belongs to active flow. If publisher receives an ack for a // missing event in active flow, then it means the event was already acked or cleared // from the queue due to a potential bug. - if (flow != null && recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier() - .equals(flow.getSubscribeToShardId())) { + if (flow != null + && recordsDeliveryAck + .batchUniqueIdentifier() + .getFlowIdentifier() + .equals(flow.getSubscribeToShardId())) { log.error( "{}: Received unexpected ack for the active subscription {}. 
Throwing.", - streamAndShardId, recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier()); + streamAndShardId, + recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier()); throw new IllegalStateException("Unexpected ack for the active subscription"); } // Otherwise publisher received a stale ack. else { - log.info("{}: Publisher received an ack for stale subscription {}. Ignoring.", streamAndShardId, + log.info( + "{}: Publisher received an ack for stale subscription {}. Ignoring.", + streamAndShardId, recordsDeliveryAck.batchUniqueIdentifier().getFlowIdentifier()); } } @@ -231,9 +245,12 @@ public class FanOutRecordsPublisher implements RecordsPublisher { } } catch (IllegalStateException e) { // CHECKSTYLE.OFF: LineLength - log.warn("{}: Unable to enqueue the payload due to capacity restrictions in delivery queue with remaining capacity {}. Last successful request details -- {}", + log.warn( + "{}: Unable to enqueue the payload due to capacity restrictions in delivery queue with remaining capacity {}. Last successful request details -- {}", // CHECKSTYLE.ON: LineLength - streamAndShardId, recordsDeliveryQueue.remainingCapacity(), lastSuccessfulRequestDetails); + streamAndShardId, + recordsDeliveryQueue.remainingCapacity(), + lastSuccessfulRequestDetails); throw e; } catch (Throwable t) { log.error("{}: Unable to deliver event to the shard consumer.", streamAndShardId, t); @@ -245,6 +262,7 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private static final class RecordsRetrievedContext { @Getter(AccessLevel.NONE) private final Either recordsOrShutdownEvent; + private final RecordFlow recordFlow; private final Instant enqueueTimestamp; @@ -254,8 +272,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // This method is not thread-safe. You need to acquire a lock in the caller in order to execute this. 
void executeEventAction(Subscriber subscriber) { - recordsOrShutdownEvent.apply(recordsEvent -> subscriber.onNext(recordsEvent), - shutdownEvent -> shutdownEvent.getSubscriptionShutdownAction().run()); + recordsOrShutdownEvent.apply(recordsEvent -> subscriber.onNext(recordsEvent), shutdownEvent -> shutdownEvent + .getSubscriptionShutdownAction() + .run()); } } @@ -265,7 +284,8 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private final String eventIdentifier; private final Throwable shutdownEventThrowableOptional; - SubscriptionShutdownEvent(Runnable subscriptionShutdownAction, String eventIdentifier, Throwable shutdownEventThrowableOptional) { + SubscriptionShutdownEvent( + Runnable subscriptionShutdownAction, String eventIdentifier, Throwable shutdownEventThrowableOptional) { this.subscriptionShutdownAction = subscriptionShutdownAction; this.eventIdentifier = eventIdentifier; this.shutdownEventThrowableOptional = shutdownEventThrowableOptional; @@ -289,10 +309,12 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // Clear the delivery queue so that any stale entries from previous subscription are discarded. 
resetRecordsDeliveryStateOnSubscriptionOnInit(); SubscribeToShardRequest.Builder builder = KinesisRequestsBuilder.subscribeToShardRequestBuilder() - .shardId(shardId).consumerARN(consumerArn); + .shardId(shardId) + .consumerARN(consumerArn); SubscribeToShardRequest request; if (isFirstConnection) { - request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStreamExtended).build(); + request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStreamExtended) + .build(); } else { request = IteratorBuilder.reconnectRequest(builder, sequenceNumber, initialPositionInStreamExtended) .build(); @@ -303,7 +325,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { String instanceId = shardId + "-" + subscribeInvocationId; log.debug( "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#subscribeToShard) @ {} id: {} -- Starting subscribe to shard", - streamAndShardId, connectionStart, instanceId); + streamAndShardId, + connectionStart, + instanceId); flow = new RecordFlow(this, connectionStart, instanceId); kinesis.subscribeToShard(request, flow); } @@ -311,17 +335,21 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private void errorOccurred(RecordFlow triggeringFlow, Throwable t) { synchronized (lockObject) { - if (!hasValidSubscriber()) { if (hasValidFlow()) { log.warn( - "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- Subscriber is null." + - " Last successful request details -- {}", streamAndShardId, flow.connectionStartedAt, - flow.subscribeToShardId, lastSuccessfulRequestDetails); + "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- Subscriber is null." + + " Last successful request details -- {}", + streamAndShardId, + flow.connectionStartedAt, + flow.subscribeToShardId, + lastSuccessfulRequestDetails); } else { log.warn( - "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) -- Subscriber and flow are null." 
+ - " Last successful request details -- {}", streamAndShardId, lastSuccessfulRequestDetails); + "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) -- Subscriber and flow are null." + + " Last successful request details -- {}", + streamAndShardId, + lastSuccessfulRequestDetails); } return; } @@ -332,23 +360,26 @@ public class FanOutRecordsPublisher implements RecordsPublisher { if (isActiveFlow(triggeringFlow)) { if (flow != null) { String logMessage = String.format( - "%s: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ %s id: %s -- %s." + - " Last successful request details -- %s", streamAndShardId, flow.connectionStartedAt, - flow.subscribeToShardId, category.throwableTypeString, lastSuccessfulRequestDetails); + "%s: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ %s id: %s -- %s." + + " Last successful request details -- %s", + streamAndShardId, + flow.connectionStartedAt, + flow.subscribeToShardId, + category.throwableTypeString, + lastSuccessfulRequestDetails); switch (category.throwableType) { - case READ_TIMEOUT: - log.debug(logMessage, propagationThrowable); - propagationThrowable = new RetryableRetrievalException(category.throwableTypeString, - (Exception) propagationThrowable.getCause()); - break; - case ACQUIRE_TIMEOUT: - logAcquireTimeoutMessage(t); - // - // Fall through is intentional here as we still want to log the details of the exception - // - default: - log.warn(logMessage, propagationThrowable); - + case READ_TIMEOUT: + log.debug(logMessage, propagationThrowable); + propagationThrowable = new RetryableRetrievalException( + category.throwableTypeString, (Exception) propagationThrowable.getCause()); + break; + case ACQUIRE_TIMEOUT: + logAcquireTimeoutMessage(t); + // + // Fall through is intentional here as we still want to log the details of the exception + // + default: + log.warn(logMessage, propagationThrowable); } flow.cancel(); } @@ -358,8 +389,11 @@ public class FanOutRecordsPublisher 
implements RecordsPublisher { try { handleFlowError(propagationThrowable, triggeringFlow); } catch (Throwable innerThrowable) { - log.warn("{}: Exception while calling subscriber.onError. Last successful request details -- {}", - streamAndShardId, lastSuccessfulRequestDetails, innerThrowable); + log.warn( + "{}: Exception while calling subscriber.onError. Last successful request details -- {}", + streamAndShardId, + lastSuccessfulRequestDetails, + innerThrowable); } subscriber = null; flow = null; @@ -369,12 +403,13 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime] - (FanOutRecordsPublisher#errorOccurred) @ {} id: {} -- {} -> triggeringFlow wasn't the active flow. Didn't dispatch error", // CHECKSTYLE.ON: LineLength - streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId, + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId, category.throwableTypeString); triggeringFlow.cancel(); } } - } } @@ -382,18 +417,21 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private void resetRecordsDeliveryStateOnSubscriptionOnInit() { // Clear any lingering records in the queue. if (!recordsDeliveryQueue.isEmpty()) { - log.warn("{}: Found non-empty queue while starting subscription. This indicates unsuccessful clean up of " - + "previous subscription - {}. Last successful request details -- {}", - streamAndShardId, subscribeToShardId, lastSuccessfulRequestDetails); + log.warn( + "{}: Found non-empty queue while starting subscription. This indicates unsuccessful clean up of " + + "previous subscription - {}. 
Last successful request details -- {}", + streamAndShardId, + subscribeToShardId, + lastSuccessfulRequestDetails); recordsDeliveryQueue.clear(); } } protected void logAcquireTimeoutMessage(Throwable t) { - log.error("An acquire timeout occurred which usually indicates that the KinesisAsyncClient supplied has a " + - "low maximum streams limit. " + - "Please use the software.amazon.kinesis.common.KinesisClientUtil to setup the client, " + - "or refer to the class to setup the client manually."); + log.error("An acquire timeout occurred which usually indicates that the KinesisAsyncClient supplied has a " + + "low maximum streams limit. " + + "Please use the software.amazon.kinesis.common.KinesisClientUtil to setup the client, " + + "or refer to the class to setup the client manually."); } private void handleFlowError(Throwable t, RecordFlow triggeringFlow) { @@ -404,8 +442,12 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // The ack received for this onNext event will be ignored by the publisher as the global flow object should // be either null or renewed when the ack's flow identifier is evaluated. FanoutRecordsRetrieved response = new FanoutRecordsRetrieved( - ProcessRecordsInput.builder().records(Collections.emptyList()).isAtShardEnd(true) - .childShards(Collections.emptyList()).build(), null, + ProcessRecordsInput.builder() + .records(Collections.emptyList()) + .isAtShardEnd(true) + .childShards(Collections.emptyList()) + .build(), + null, triggeringFlow != null ? 
triggeringFlow.getSubscribeToShardId() : shardId + "-no-flow-found"); subscriber.onNext(response); subscriber.onComplete(); @@ -415,7 +457,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { } private enum ThrowableType { - ACQUIRE_TIMEOUT("AcquireTimeout"), READ_TIMEOUT("ReadTimeout"), OTHER("Other"); + ACQUIRE_TIMEOUT("AcquireTimeout"), + READ_TIMEOUT("ReadTimeout"), + OTHER("Other"); String value; @@ -427,6 +471,7 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private static class ThrowableCategory { @NonNull final ThrowableType throwableType; + @NonNull final String throwableTypeString; @@ -470,7 +515,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { if (!hasValidSubscriber()) { log.debug( "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Subscriber is null.", - streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); triggeringFlow.cancel(); if (flow != null) { flow.cancel(); @@ -480,36 +527,45 @@ public class FanOutRecordsPublisher implements RecordsPublisher { if (!isActiveFlow(triggeringFlow)) { log.debug( "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Received records for an inactive flow.", - streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); return; } try { - // If recordBatchEvent is not valid event, RuntimeException will be thrown here and trigger the errorOccurred call. + // If recordBatchEvent is not valid event, RuntimeException will be thrown here and trigger the + // errorOccurred call. // Since the triggeringFlow is active flow, it will then trigger the handleFlowError call. 
- // Since the exception is not ResourceNotFoundException, it will trigger onError in the ShardConsumerSubscriber. + // Since the exception is not ResourceNotFoundException, it will trigger onError in the + // ShardConsumerSubscriber. // The ShardConsumerSubscriber will finally cancel the subscription. if (!isValidResult(recordBatchEvent.continuationSequenceNumber(), recordBatchEvent.childShards())) { - throw new InvalidStateException("RecordBatchEvent for flow " + triggeringFlow.toString() + " is invalid." - + " event.continuationSequenceNumber: " + recordBatchEvent.continuationSequenceNumber() - + ". event.childShards: " + recordBatchEvent.childShards()); + throw new InvalidStateException("RecordBatchEvent for flow " + triggeringFlow.toString() + + " is invalid." + + " event.continuationSequenceNumber: " + recordBatchEvent.continuationSequenceNumber() + + ". event.childShards: " + recordBatchEvent.childShards()); } - List records = recordBatchEvent.records().stream().map(KinesisClientRecord::fromRecord) - .collect(Collectors.toList()); + List records = recordBatchEvent.records().stream() + .map(KinesisClientRecord::fromRecord) + .collect(Collectors.toList()); ProcessRecordsInput input = ProcessRecordsInput.builder() - .cacheEntryTime(Instant.now()) - .millisBehindLatest(recordBatchEvent.millisBehindLatest()) - .isAtShardEnd(recordBatchEvent.continuationSequenceNumber() == null) - .records(records) - .childShards(recordBatchEvent.childShards()) - .build(); - FanoutRecordsRetrieved recordsRetrieved = new FanoutRecordsRetrieved(input, - recordBatchEvent.continuationSequenceNumber(), triggeringFlow.subscribeToShardId); + .cacheEntryTime(Instant.now()) + .millisBehindLatest(recordBatchEvent.millisBehindLatest()) + .isAtShardEnd(recordBatchEvent.continuationSequenceNumber() == null) + .records(records) + .childShards(recordBatchEvent.childShards()) + .build(); + FanoutRecordsRetrieved recordsRetrieved = new FanoutRecordsRetrieved( + input, 
recordBatchEvent.continuationSequenceNumber(), triggeringFlow.subscribeToShardId); bufferCurrentEventAndScheduleIfRequired(recordsRetrieved, triggeringFlow); } catch (Throwable t) { - log.warn("{}: Unable to buffer or schedule onNext for subscriber. Failing publisher." + - " Last successful request details -- {}", streamAndShardId, lastSuccessfulRequestDetails); + log.warn( + "{}: Unable to buffer or schedule onNext for subscriber. Failing publisher." + + " Last successful request details -- {}", + streamAndShardId, + lastSuccessfulRequestDetails); errorOccurred(triggeringFlow, t); } } @@ -521,7 +577,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime] (FanOutRecordsPublisher#recordsReceived) @ {} id: {} -- Attempted to decrement availableQueueSpace to below 0", // CHECKSTYLE.ON: LineLength - streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); } else { availableQueueSpace--; if (availableQueueSpace > 0) { @@ -536,14 +594,19 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private void onComplete(RecordFlow triggeringFlow) { synchronized (lockObject) { - log.debug("{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {}", streamAndShardId, - triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); + log.debug( + "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {}", + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); triggeringFlow.cancel(); if (!hasValidSubscriber()) { - log.debug("{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {}", + log.debug( + "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {}", streamAndShardId, - triggeringFlow.connectionStartedAt, 
triggeringFlow.subscribeToShardId); + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); return; } @@ -552,7 +615,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher#onComplete) @ {} id: {} -- Received spurious onComplete from unexpected flow. Ignoring.", // CHECKSTYLE.ON: LineLength - streamAndShardId, triggeringFlow.connectionStartedAt, triggeringFlow.subscribeToShardId); + streamAndShardId, + triggeringFlow.connectionStartedAt, + triggeringFlow.subscribeToShardId); return; } @@ -613,7 +678,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // CHECKSTYLE.OFF: LineLength "{}: (FanOutRecordsPublisher/Subscription#request) - Rejected an attempt to request({}), because subscribers don't match. Last successful request details -- {}", // CHECKSTYLE.ON: LineLength - streamAndShardId, n, lastSuccessfulRequestDetails); + streamAndShardId, + n, + lastSuccessfulRequestDetails); return; } if (flow == null) { @@ -642,7 +709,8 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // CHECKSTYLE.OFF: LineLength "{}: (FanOutRecordsPublisher/Subscription#cancel) - Rejected attempt to cancel subscription, because subscribers don't match. Last successful request details -- {}", // CHECKSTYLE.ON: LineLength - streamAndShardId, lastSuccessfulRequestDetails); + streamAndShardId, + lastSuccessfulRequestDetails); return; } if (!hasValidSubscriber()) { @@ -650,13 +718,16 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // CHECKSTYLE.OFF: LineLength "{}: (FanOutRecordsPublisher/Subscription#cancel) - Cancelled called even with an invalid subscriber. 
Last successful request details -- {}", // CHECKSTYLE.ON: LineLength - streamAndShardId, lastSuccessfulRequestDetails); + streamAndShardId, + lastSuccessfulRequestDetails); } subscriber = null; if (flow != null) { log.debug( "{}: [SubscriptionLifetime]: (FanOutRecordsPublisher/Subscription#cancel) @ {} id: {}", - streamAndShardId, flow.connectionStartedAt, flow.subscribeToShardId); + streamAndShardId, + flow.connectionStartedAt, + flow.subscribeToShardId); flow.cancel(); availableQueueSpace = 0; } @@ -733,7 +804,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private final FanOutRecordsPublisher parent; private final Instant connectionStartedAt; - @Getter @VisibleForTesting + + @Getter + @VisibleForTesting private final String subscribeToShardId; private RecordSubscription subscription; @@ -744,13 +817,18 @@ public class FanOutRecordsPublisher implements RecordsPublisher { @Override public void onEventStream(SdkPublisher publisher) { synchronized (parent.lockObject) { - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- Subscribe", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + log.debug( + "{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- Subscribe", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); if (!parent.isActiveFlow(this)) { this.isDisposed = true; log.debug( "{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- parent is disposed", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); parent.rejectSubscription(publisher); return; } @@ -758,7 +836,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { try { log.debug( "{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- creating record subscription", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + 
connectionStartedAt, + subscribeToShardId); subscription = new RecordSubscription(parent, this, connectionStartedAt, subscribeToShardId); publisher.subscribe(subscription); @@ -769,7 +849,10 @@ public class FanOutRecordsPublisher implements RecordsPublisher { } catch (Throwable t) { log.debug( "{}: [SubscriptionLifetime]: (RecordFlow#onEventStream) @ {} id: {} -- throwable during record subscription: {}", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, t.getMessage()); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + t.getMessage()); parent.errorOccurred(this, t); } } @@ -777,10 +860,15 @@ public class FanOutRecordsPublisher implements RecordsPublisher { @Override public void responseReceived(SubscribeToShardResponse response) { - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#responseReceived) @ {} id: {} -- Response received. Request id - {}", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, response.responseMetadata().requestId()); + log.debug( + "{}: [SubscriptionLifetime]: (RecordFlow#responseReceived) @ {} id: {} -- Response received. 
Request id - {}", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + response.responseMetadata().requestId()); - final RequestDetails requestDetails = new RequestDetails(response.responseMetadata().requestId(), connectionStartedAt.toString()); + final RequestDetails requestDetails = + new RequestDetails(response.responseMetadata().requestId(), connectionStartedAt.toString()); parent.setLastSuccessfulRequestDetails(requestDetails); } @@ -794,8 +882,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { () -> { parent.recordsDeliveryQueue.poll(); executeExceptionOccurred(throwable); - }, - "onError", throwable); + }, + "onError", + throwable); tryEnqueueSubscriptionShutdownEvent(subscriptionShutdownEvent); } } @@ -803,15 +892,22 @@ public class FanOutRecordsPublisher implements RecordsPublisher { private void executeExceptionOccurred(Throwable throwable) { synchronized (parent.lockObject) { - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- {}: {}", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(), + log.debug( + "{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- {}: {}", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + throwable.getClass().getName(), throwable.getMessage()); if (this.isDisposed) { log.debug( // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- This flow has been disposed, not dispatching error. 
{}: {}", // CHECKSTYLE.ON: LineLength - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(), + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + throwable.getClass().getName(), throwable.getMessage()); this.isErrorDispatched = true; } @@ -824,7 +920,10 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordFlow#exceptionOccurred) @ {} id: {} -- An error has previously been dispatched, not dispatching this error {}: {}", // CHECKSTYLE.OFF: LineLength - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, throwable.getClass().getName(), + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + throwable.getClass().getName(), throwable.getMessage()); } } @@ -840,7 +939,7 @@ public class FanOutRecordsPublisher implements RecordsPublisher { () -> { parent.recordsDeliveryQueue.poll(); executeComplete(); - }, + }, "onComplete"); tryEnqueueSubscriptionShutdownEvent(subscriptionShutdownEvent); } @@ -850,22 +949,28 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // This method is not thread safe. This needs to be executed after acquiring lock on parent.lockObject private void tryEnqueueSubscriptionShutdownEvent(SubscriptionShutdownEvent subscriptionShutdownEvent) { try { - parent.recordsDeliveryQueue - .add(new RecordsRetrievedContext(Either.right(subscriptionShutdownEvent), this, Instant.now())); + parent.recordsDeliveryQueue.add( + new RecordsRetrievedContext(Either.right(subscriptionShutdownEvent), this, Instant.now())); } catch (Exception e) { log.warn( // CHECKSTYLE.OFF: LineLength "{}: Unable to enqueue the {} shutdown event due to capacity restrictions in delivery queue with remaining capacity {}. Ignoring. 
Last successful request details -- {}", // CHECKSTYLE.ON: LineLength - parent.streamAndShardId, subscriptionShutdownEvent.getEventIdentifier(), parent.recordsDeliveryQueue.remainingCapacity(), - parent.lastSuccessfulRequestDetails, subscriptionShutdownEvent.getShutdownEventThrowableOptional()); + parent.streamAndShardId, + subscriptionShutdownEvent.getEventIdentifier(), + parent.recordsDeliveryQueue.remainingCapacity(), + parent.lastSuccessfulRequestDetails, + subscriptionShutdownEvent.getShutdownEventThrowableOptional()); } } private void executeComplete() { synchronized (parent.lockObject) { - log.debug("{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- Connection completed", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + log.debug( + "{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- Connection completed", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); if (isCancelled) { // @@ -874,8 +979,10 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // the // subscription, which was cancelled for a reason (usually queue overflow). // - log.warn("{}: complete called on a cancelled subscription. Ignoring completion. Last successful request details -- {}", - parent.streamAndShardId, parent.lastSuccessfulRequestDetails); + log.warn( + "{}: complete called on a cancelled subscription. Ignoring completion. Last successful request details -- {}", + parent.streamAndShardId, + parent.lastSuccessfulRequestDetails); return; } if (this.isDisposed) { @@ -883,7 +990,10 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- This flow has been disposed not dispatching completion. 
Last successful request details -- {}", // CHECKSTYLE.ON: LineLength - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, parent.lastSuccessfulRequestDetails); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + parent.lastSuccessfulRequestDetails); return; } @@ -903,7 +1013,11 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordFlow#complete) @ {} id: {} -- Exception while trying to cancel failed subscription: {}", // CHECKSTYLE.ON: LineLength - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, t.getMessage(), t); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + t.getMessage(), + t); } } } @@ -943,15 +1057,20 @@ public class FanOutRecordsPublisher implements RecordsPublisher { public void cancel() { synchronized (parent.lockObject) { - log.debug("{}: [SubscriptionLifetime]: (RecordSubscription#cancel) @ {} id: {} -- Cancel called", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + log.debug( + "{}: [SubscriptionLifetime]: (RecordSubscription#cancel) @ {} id: {} -- Cancel called", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); flow.isCancelled = true; if (subscription != null) { subscription.cancel(); } else { log.debug( "{}: [SubscriptionLifetime]: (RecordSubscription#cancel) @ {} id: {} -- SDK subscription is null", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); } } } @@ -967,23 +1086,32 @@ public class FanOutRecordsPublisher implements RecordsPublisher { // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- Subscription was cancelled before onSubscribe", // CHECKSTYLE.ON: LineLength - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + 
subscribeToShardId); } if (flow.isDisposed) { log.debug( // CHECKSTYLE.OFF: LineLength "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- RecordFlow has been disposed cancelling subscribe", // CHECKSTYLE.ON: LineLength - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); } log.debug( "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- RecordFlow requires cancelling", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); cancel(); } log.debug( "{}: [SubscriptionLifetime]: (RecordSubscription#onSubscribe) @ {} id: {} -- Outstanding: {} items so requesting an item", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId, parent.availableQueueSpace); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + parent.availableQueueSpace); if (parent.availableQueueSpace > 0) { request(1); } @@ -996,7 +1124,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { if (flow.shouldSubscriptionCancel()) { log.debug( "{}: [SubscriptionLifetime]: (RecordSubscription#onNext) @ {} id: {} -- RecordFlow requires cancelling", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); cancel(); return; } @@ -1011,8 +1141,13 @@ public class FanOutRecordsPublisher implements RecordsPublisher { @Override public void onError(Throwable t) { - log.debug("{}: [SubscriptionLifetime]: (RecordSubscription#onError) @ {} id: {} -- {}: {}", parent.streamAndShardId, - connectionStartedAt, subscribeToShardId, t.getClass().getName(), t.getMessage()); + log.debug( + "{}: [SubscriptionLifetime]: (RecordSubscription#onError) @ {} id: {} -- {}: {}", + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId, + t.getClass().getName(), + 
t.getMessage()); // // We don't propagate the throwable, as the SDK will call @@ -1024,7 +1159,9 @@ public class FanOutRecordsPublisher implements RecordsPublisher { public void onComplete() { log.debug( "{}: [SubscriptionLifetime]: (RecordSubscription#onComplete) @ {} id: {} -- Allowing RecordFlow to call onComplete", - parent.streamAndShardId, connectionStartedAt, subscribeToShardId); + parent.streamAndShardId, + connectionStartedAt, + subscribeToShardId); } } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRetrievalFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRetrievalFactory.java index a83d0370..05a42c0a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRetrievalFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/FanOutRetrievalFactory.java @@ -15,6 +15,12 @@ package software.amazon.kinesis.retrieval.fanout; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; +import javax.annotation.Nullable; + import lombok.NonNull; import lombok.RequiredArgsConstructor; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; @@ -26,12 +32,6 @@ import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.retrieval.RetrievalFactory; -import javax.annotation.Nullable; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import java.util.function.Function; - @RequiredArgsConstructor @KinesisClientInternalApi public class FanOutRetrievalFactory implements RetrievalFactory { @@ -44,22 +44,29 @@ public class FanOutRetrievalFactory implements RetrievalFactory { private final Map implicitConsumerArnTracker = new HashMap<>(); @Override - public RecordsPublisher createGetRecordsCache(@NonNull final ShardInfo 
shardInfo, + public RecordsPublisher createGetRecordsCache( + @NonNull final ShardInfo shardInfo, @NonNull final StreamConfig streamConfig, @Nullable final MetricsFactory metricsFactory) { final Optional streamIdentifierStr = shardInfo.streamIdentifierSerOpt(); if (streamIdentifierStr.isPresent()) { - return new FanOutRecordsPublisher(kinesisClient, shardInfo.shardId(), + return new FanOutRecordsPublisher( + kinesisClient, + shardInfo.shardId(), getOrCreateConsumerArn(streamConfig.streamIdentifier(), streamConfig.consumerArn()), streamIdentifierStr.get()); } else { - return new FanOutRecordsPublisher(kinesisClient, shardInfo.shardId(), + return new FanOutRecordsPublisher( + kinesisClient, + shardInfo.shardId(), getOrCreateConsumerArn(streamConfig.streamIdentifier(), defaultConsumerArn)); } } private String getOrCreateConsumerArn(StreamIdentifier streamIdentifier, String consumerArn) { - return consumerArn != null ? consumerArn : implicitConsumerArnTracker - .computeIfAbsent(streamIdentifier, sId -> consumerArnCreator.apply(sId.streamName())); + return consumerArn != null + ? 
consumerArn + : implicitConsumerArnTracker.computeIfAbsent( + streamIdentifier, sId -> consumerArnCreator.apply(sId.streamName())); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/MultipleSubscriberException.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/MultipleSubscriberException.java index f48adaa7..4d2ef80a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/MultipleSubscriberException.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/fanout/MultipleSubscriberException.java @@ -15,5 +15,4 @@ package software.amazon.kinesis.retrieval.fanout; -public class MultipleSubscriberException extends RuntimeException { -} +public class MultipleSubscriberException extends RuntimeException {} diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumber.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumber.java index fed58739..fa52ed2a 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumber.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumber.java @@ -26,7 +26,7 @@ import software.amazon.kinesis.checkpoint.SentinelCheckpoint; /** * Represents a two-part sequence number for records aggregated by the Kinesis * Producer Library. - * + * *

    * The KPL combines multiple user records into a single Kinesis record. Each * user record therefore has an integer sub-sequence number, in addition to the @@ -48,17 +48,17 @@ public class ExtendedSequenceNumber implements Comparable SENTINEL_VALUES = Collections.unmodifiableSet( - Arrays.stream(SentinelCheckpoint.values()).map(SentinelCheckpoint::name).collect(Collectors.toSet())); + private static final Set SENTINEL_VALUES = + Collections.unmodifiableSet(Arrays.stream(SentinelCheckpoint.values()) + .map(SentinelCheckpoint::name) + .collect(Collectors.toSet())); /** * Construct an ExtendedSequenceNumber. The sub-sequence number defaults to * 0. - * + * * @param sequenceNumber * Sequence number of the Kinesis record */ @@ -89,7 +91,7 @@ public class ExtendedSequenceNumber implements Comparableoptional string value = 2; + */ + boolean hasValue(); + /** + * optional string value = 2; + */ + java.lang.String getValue(); + /** + * optional string value = 2; + */ + com.google.protobuf.ByteString getValueBytes(); } /** * Protobuf type {@code Tag} */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder implements - // @@protoc_insertion_point(builder_implements:Tag) - Messages.TagOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return Messages.internal_static_Tag_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return Messages.internal_static_Tag_fieldAccessorTable - .ensureFieldAccessorsInitialized( - Messages.Tag.class, Messages.Tag.Builder.class); - } - - // Construct using Messages.Tag.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if 
(com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + public static final class Tag extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:Tag) + TagOrBuilder { + // Use Tag.newBuilder() to construct. + private Tag(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); } - } - private static Builder create() { - return new Builder(); - } - public Builder clear() { - super.clear(); - key_ = ""; - bitField0_ = (bitField0_ & ~0x00000001); - value_ = ""; - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return Messages.internal_static_Tag_descriptor; - } - - public Messages.Tag getDefaultInstanceForType() { - return Messages.Tag.getDefaultInstance(); - } - - public Messages.Tag build() { - Messages.Tag result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); + private Tag(boolean noInit) { + this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - return result; - } - public Messages.Tag buildPartial() { - Messages.Tag result = new Messages.Tag(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + private static final Tag defaultInstance; + + public static Tag getDefaultInstance() { + return defaultInstance; } - result.key_ = key_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; + + public Tag getDefaultInstanceForType() { + return defaultInstance; } - result.value_ = value_; - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof Messages.Tag) { - 
return mergeFrom((Messages.Tag)other); - } else { - super.mergeFrom(other); - return this; + private final com.google.protobuf.UnknownFieldSet unknownFields; + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; } - } - public Builder mergeFrom(Messages.Tag other) { - if (other == Messages.Tag.getDefaultInstance()) return this; - if (other.hasKey()) { - bitField0_ |= 0x00000001; - key_ = other.key_; - onChanged(); - } - if (other.hasValue()) { - bitField0_ |= 0x00000002; - value_ = other.value_; - onChanged(); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasKey()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Messages.Tag parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (Messages.Tag) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; - - private java.lang.Object key_ = ""; - /** - * required string key = 1; - */ - public boolean hasKey() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required string key = 1; - */ - public java.lang.String getKey() { - java.lang.Object ref = key_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * required string key = 1; - */ - public com.google.protobuf.ByteString - getKeyBytes() { - java.lang.Object ref = key_; - if (ref 
instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - key_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * required string key = 1; - */ - public Builder setKey( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - key_ = value; - onChanged(); - return this; - } - /** - * required string key = 1; - */ - public Builder clearKey() { - bitField0_ = (bitField0_ & ~0x00000001); - key_ = getDefaultInstance().getKey(); - onChanged(); - return this; - } - /** - * required string key = 1; - */ - public Builder setKeyBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000001; - key_ = value; - onChanged(); - return this; - } - - private java.lang.Object value_ = ""; - /** - * optional string value = 2; - */ - public boolean hasValue() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional string value = 2; - */ - public java.lang.String getValue() { - java.lang.Object ref = value_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - value_ = s; - } - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * optional string value = 2; - */ - public com.google.protobuf.ByteString - getValueBytes() { - java.lang.Object ref = value_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8( - (java.lang.String) ref); - value_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * optional string value = 2; - */ - public Builder setValue( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 
0x00000002; - value_ = value; - onChanged(); - return this; - } - /** - * optional string value = 2; - */ - public Builder clearValue() { - bitField0_ = (bitField0_ & ~0x00000002); - value_ = getDefaultInstance().getValue(); - onChanged(); - return this; - } - /** - * optional string value = 2; - */ - public Builder setValueBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000002; - value_ = value; - onChanged(); - return this; - } - - // @@protoc_insertion_point(builder_scope:Tag) - } - - static { - defaultInstance = new Tag(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:Tag) - } - - public interface RecordOrBuilder extends - // @@protoc_insertion_point(interface_extends:Record) - com.google.protobuf.MessageOrBuilder { - - /** - * required uint64 partition_key_index = 1; - */ - boolean hasPartitionKeyIndex(); - /** - * required uint64 partition_key_index = 1; - */ - long getPartitionKeyIndex(); - - /** - * optional uint64 explicit_hash_key_index = 2; - */ - boolean hasExplicitHashKeyIndex(); - /** - * optional uint64 explicit_hash_key_index = 2; - */ - long getExplicitHashKeyIndex(); - - /** - * required bytes data = 3; - */ - boolean hasData(); - /** - * required bytes data = 3; - */ - com.google.protobuf.ByteString getData(); - - /** - * repeated .Tag tags = 4; - */ - java.util.List - getTagsList(); - /** - * repeated .Tag tags = 4; - */ - Messages.Tag getTags(int index); - /** - * repeated .Tag tags = 4; - */ - int getTagsCount(); - /** - * repeated .Tag tags = 4; - */ - java.util.List - getTagsOrBuilderList(); - /** - * repeated .Tag tags = 4; - */ - Messages.TagOrBuilder getTagsOrBuilder( - int index); - } - /** - * Protobuf type {@code Record} - */ - public static final class Record extends - com.google.protobuf.GeneratedMessage implements - // @@protoc_insertion_point(message_implements:Record) - RecordOrBuilder { - // Use 
Record.newBuilder() to construct. - private Record(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private Record(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final Record defaultInstance; - public static Record getDefaultInstance() { - return defaultInstance; - } - - public Record getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private Record( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; + private Tag( + com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField( + input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000001; + key_ = 
bs; + break; + } + case 18: { + com.google.protobuf.ByteString bs = input.readBytes(); + bitField0_ |= 0x00000002; + value_ = bs; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); } - case 8: { - bitField0_ |= 0x00000001; - partitionKeyIndex_ = input.readUInt64(); - break; - } - case 16: { - bitField0_ |= 0x00000002; - explicitHashKeyIndex_ = input.readUInt64(); - break; - } - case 26: { - bitField0_ |= 0x00000004; - data_ = input.readBytes(); - break; - } - case 34: { - if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - tags_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000008; - } - tags_.add(input.readMessage(Messages.Tag.PARSER, extensionRegistry)); - break; - } - } } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { - tags_ = java.util.Collections.unmodifiableList(tags_); + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Messages.internal_static_Tag_descriptor; } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return Messages.internal_static_Record_descriptor; - } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return Messages.internal_static_Record_fieldAccessorTable - .ensureFieldAccessorsInitialized( - Messages.Record.class, 
Messages.Record.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public Record parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Record(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - private int bitField0_; - public static final int PARTITION_KEY_INDEX_FIELD_NUMBER = 1; - private long partitionKeyIndex_; - /** - * required uint64 partition_key_index = 1; - */ - public boolean hasPartitionKeyIndex() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required uint64 partition_key_index = 1; - */ - public long getPartitionKeyIndex() { - return partitionKeyIndex_; - } - - public static final int EXPLICIT_HASH_KEY_INDEX_FIELD_NUMBER = 2; - private long explicitHashKeyIndex_; - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public boolean hasExplicitHashKeyIndex() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public long getExplicitHashKeyIndex() { - return explicitHashKeyIndex_; - } - - public static final int DATA_FIELD_NUMBER = 3; - private com.google.protobuf.ByteString data_; - /** - * required bytes data = 3; - */ - public boolean hasData() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required bytes data = 3; - */ - public com.google.protobuf.ByteString getData() { - return data_; - } - - public static final int TAGS_FIELD_NUMBER = 4; - private java.util.List tags_; - /** - * repeated .Tag tags = 4; - */ - public java.util.List getTagsList() { - return tags_; - } - /** - * repeated .Tag tags = 4; - */ - public java.util.List - getTagsOrBuilderList() { - return tags_; - } - /** - * repeated .Tag tags = 4; - */ - public int 
getTagsCount() { - return tags_.size(); - } - /** - * repeated .Tag tags = 4; - */ - public Messages.Tag getTags(int index) { - return tags_.get(index); - } - /** - * repeated .Tag tags = 4; - */ - public Messages.TagOrBuilder getTagsOrBuilder( - int index) { - return tags_.get(index); - } - - private void initFields() { - partitionKeyIndex_ = 0L; - explicitHashKeyIndex_ = 0L; - data_ = com.google.protobuf.ByteString.EMPTY; - tags_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - if (!hasPartitionKeyIndex()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasData()) { - memoizedIsInitialized = 0; - return false; - } - for (int i = 0; i < getTagsCount(); i++) { - if (!getTags(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { + return Messages.internal_static_Tag_fieldAccessorTable.ensureFieldAccessorsInitialized( + Messages.Tag.class, Messages.Tag.Builder.class); } - } - memoizedIsInitialized = 1; - return true; + + public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { + public Tag parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Tag(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + public static final int KEY_FIELD_NUMBER = 1; + private java.lang.Object key_; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public 
java.lang.String getKey() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VALUE_FIELD_NUMBER = 2; + private java.lang.Object value_; + /** + * optional string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + value_ = s; + } + return s; + } + } + /** + * optional string value = 2; + */ + public com.google.protobuf.ByteString getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private void initFields() { + key_ = ""; + value_ = ""; + } + + private byte memoizedIsInitialized = -1; + + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (!hasKey()) { + memoizedIsInitialized = 0; + return false; 
+ } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getValueBytes()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, getKeyBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(2, getValueBytes()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + + @java.lang.Override + protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + public static Messages.Tag parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Messages.Tag parseFrom( + com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Messages.Tag parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Messages.Tag parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Messages.Tag 
parseFrom(java.io.InputStream input) throws java.io.IOException { + return PARSER.parseFrom(input); + } + + public static Messages.Tag parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Messages.Tag parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + + public static Messages.Tag parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + + public static Messages.Tag parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return PARSER.parseFrom(input); + } + + public static Messages.Tag parseFrom( + com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { + return Builder.create(); + } + + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder(Messages.Tag prototype) { + return newBuilder().mergeFrom(prototype); + } + + public Builder toBuilder() { + return newBuilder(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code Tag} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:Tag) + Messages.TagOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Messages.internal_static_Tag_descriptor; + } + + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { + return Messages.internal_static_Tag_fieldAccessorTable.ensureFieldAccessorsInitialized( + Messages.Tag.class, Messages.Tag.Builder.class); + } + + // Construct using Messages.Tag.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {} + } + + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + key_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + value_ = ""; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Messages.internal_static_Tag_descriptor; + } + + public Messages.Tag getDefaultInstanceForType() { + return Messages.Tag.getDefaultInstance(); + } + + public Messages.Tag build() { + Messages.Tag result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public Messages.Tag buildPartial() { + Messages.Tag result = new Messages.Tag(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.key_ = key_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.value_ = value_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Messages.Tag) { + return mergeFrom((Messages.Tag) other); + } else { + super.mergeFrom(other); + return 
this; + } + } + + public Builder mergeFrom(Messages.Tag other) { + if (other == Messages.Tag.getDefaultInstance()) return this; + if (other.hasKey()) { + bitField0_ |= 0x00000001; + key_ = other.key_; + onChanged(); + } + if (other.hasValue()) { + bitField0_ |= 0x00000002; + value_ = other.value_; + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasKey()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Messages.Tag parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (Messages.Tag) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.lang.Object key_ = ""; + /** + * required string key = 1; + */ + public boolean hasKey() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string key = 1; + */ + public java.lang.String getKey() { + java.lang.Object ref = key_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + key_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string key = 1; + */ + public com.google.protobuf.ByteString getKeyBytes() { + java.lang.Object ref = key_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + key_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string key = 1; + */ + public Builder 
setKey(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder clearKey() { + bitField0_ = (bitField0_ & ~0x00000001); + key_ = getDefaultInstance().getKey(); + onChanged(); + return this; + } + /** + * required string key = 1; + */ + public Builder setKeyBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + key_ = value; + onChanged(); + return this; + } + + private java.lang.Object value_ = ""; + /** + * optional string value = 2; + */ + public boolean hasValue() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional string value = 2; + */ + public java.lang.String getValue() { + java.lang.Object ref = value_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + value_ = s; + } + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string value = 2; + */ + public com.google.protobuf.ByteString getValueBytes() { + java.lang.Object ref = value_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + value_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string value = 2; + */ + public Builder setValue(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + /** + * optional string value = 2; + */ + public Builder clearValue() { + bitField0_ = (bitField0_ & ~0x00000002); + value_ = getDefaultInstance().getValue(); + onChanged(); + return this; + } + /** + * optional string value = 2; + 
*/ + public Builder setValueBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + value_ = value; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:Tag) + } + + static { + defaultInstance = new Tag(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Tag) } - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeUInt64(1, partitionKeyIndex_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeUInt64(2, explicitHashKeyIndex_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - output.writeBytes(3, data_); - } - for (int i = 0; i < tags_.size(); i++) { - output.writeMessage(4, tags_.get(i)); - } - getUnknownFields().writeTo(output); - } + public interface RecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:Record) + com.google.protobuf.MessageOrBuilder { - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; + /** + * required uint64 partition_key_index = 1; + */ + boolean hasPartitionKeyIndex(); + /** + * required uint64 partition_key_index = 1; + */ + long getPartitionKeyIndex(); - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(1, partitionKeyIndex_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeUInt64Size(2, explicitHashKeyIndex_); - } - if (((bitField0_ & 0x00000004) == 0x00000004)) { - size += com.google.protobuf.CodedOutputStream - .computeBytesSize(3, data_); - } - for (int i = 0; i < tags_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(4, tags_.get(i)); - } - size 
+= getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } + /** + * optional uint64 explicit_hash_key_index = 2; + */ + boolean hasExplicitHashKeyIndex(); + /** + * optional uint64 explicit_hash_key_index = 2; + */ + long getExplicitHashKeyIndex(); - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } + /** + * required bytes data = 3; + */ + boolean hasData(); + /** + * required bytes data = 3; + */ + com.google.protobuf.ByteString getData(); - public static Messages.Record parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static Messages.Record parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static Messages.Record parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static Messages.Record parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static Messages.Record parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static Messages.Record parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static Messages.Record parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static 
Messages.Record parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static Messages.Record parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static Messages.Record parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(Messages.Record prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; + /** + * repeated .Tag tags = 4; + */ + java.util.List getTagsList(); + /** + * repeated .Tag tags = 4; + */ + Messages.Tag getTags(int index); + /** + * repeated .Tag tags = 4; + */ + int getTagsCount(); + /** + * repeated .Tag tags = 4; + */ + java.util.List getTagsOrBuilderList(); + /** + * repeated .Tag tags = 4; + */ + Messages.TagOrBuilder getTagsOrBuilder(int index); } /** * Protobuf type {@code Record} */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder implements - // @@protoc_insertion_point(builder_implements:Record) - Messages.RecordOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return Messages.internal_static_Record_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return 
Messages.internal_static_Record_fieldAccessorTable - .ensureFieldAccessorsInitialized( - Messages.Record.class, Messages.Record.Builder.class); - } - - // Construct using Messages.Record.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getTagsFieldBuilder(); + public static final class Record extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:Record) + RecordOrBuilder { + // Use Record.newBuilder() to construct. + private Record(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); } - } - private static Builder create() { - return new Builder(); - } - public Builder clear() { - super.clear(); - partitionKeyIndex_ = 0L; - bitField0_ = (bitField0_ & ~0x00000001); - explicitHashKeyIndex_ = 0L; - bitField0_ = (bitField0_ & ~0x00000002); - data_ = com.google.protobuf.ByteString.EMPTY; - bitField0_ = (bitField0_ & ~0x00000004); - if (tagsBuilder_ == null) { - tags_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - } else { - tagsBuilder_.clear(); + private Record(boolean noInit) { + this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - return this; - } - public Builder clone() { - return create().mergeFrom(buildPartial()); - } + private static final Record defaultInstance; - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return Messages.internal_static_Record_descriptor; - } + public static Record getDefaultInstance() { + return defaultInstance; + } - public Messages.Record getDefaultInstanceForType() { - return Messages.Record.getDefaultInstance(); - } + public Record 
getDefaultInstanceForType() { + return defaultInstance; + } - public Messages.Record build() { - Messages.Record result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } + private final com.google.protobuf.UnknownFieldSet unknownFields; - public Messages.Record buildPartial() { - Messages.Record result = new Messages.Record(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; } - result.partitionKeyIndex_ = partitionKeyIndex_; - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - result.explicitHashKeyIndex_ = explicitHashKeyIndex_; - if (((from_bitField0_ & 0x00000004) == 0x00000004)) { - to_bitField0_ |= 0x00000004; - } - result.data_ = data_; - if (tagsBuilder_ == null) { - if (((bitField0_ & 0x00000008) == 0x00000008)) { - tags_ = java.util.Collections.unmodifiableList(tags_); - bitField0_ = (bitField0_ & ~0x00000008); - } - result.tags_ = tags_; - } else { - result.tags_ = tagsBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof Messages.Record) { - return mergeFrom((Messages.Record)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(Messages.Record other) { - if (other == Messages.Record.getDefaultInstance()) return this; - if (other.hasPartitionKeyIndex()) { - setPartitionKeyIndex(other.getPartitionKeyIndex()); - } - if (other.hasExplicitHashKeyIndex()) { - setExplicitHashKeyIndex(other.getExplicitHashKeyIndex()); - } - if (other.hasData()) { - setData(other.getData()); - } - if (tagsBuilder_ == null) { - if (!other.tags_.isEmpty()) { - if 
(tags_.isEmpty()) { - tags_ = other.tags_; - bitField0_ = (bitField0_ & ~0x00000008); - } else { - ensureTagsIsMutable(); - tags_.addAll(other.tags_); + private Record( + com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField( + input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + partitionKeyIndex_ = input.readUInt64(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + explicitHashKeyIndex_ = input.readUInt64(); + break; + } + case 26: { + bitField0_ |= 0x00000004; + data_ = input.readBytes(); + break; + } + case 34: { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tags_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000008; + } + tags_.add(input.readMessage(Messages.Tag.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { + tags_ = java.util.Collections.unmodifiableList(tags_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); } - onChanged(); - } - } else { - if (!other.tags_.isEmpty()) { - if (tagsBuilder_.isEmpty()) { - tagsBuilder_.dispose(); - tagsBuilder_ = null; - tags_ = other.tags_; - bitField0_ = (bitField0_ & ~0x00000008); - tagsBuilder_ = - 
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? - getTagsFieldBuilder() : null; - } else { - tagsBuilder_.addAllMessages(other.tags_); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Messages.internal_static_Record_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { + return Messages.internal_static_Record_fieldAccessorTable.ensureFieldAccessorsInitialized( + Messages.Record.class, Messages.Record.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = new com.google.protobuf.AbstractParser() { + public Record parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Record(input, extensionRegistry); } - } - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } + }; - public final boolean isInitialized() { - if (!hasPartitionKeyIndex()) { - - return false; + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; } - if (!hasData()) { - - return false; - } - for (int i = 0; i < getTagsCount(); i++) { - if (!getTags(i).isInitialized()) { - - return false; - } - } - return true; - } - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Messages.Record parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (Messages.Record) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } + private int bitField0_; + public static final int PARTITION_KEY_INDEX_FIELD_NUMBER = 1; + private long partitionKeyIndex_; + /** + * required 
uint64 partition_key_index = 1; + */ + public boolean hasPartitionKeyIndex() { + return ((bitField0_ & 0x00000001) == 0x00000001); } - return this; - } - private int bitField0_; - - private long partitionKeyIndex_ ; - /** - * required uint64 partition_key_index = 1; - */ - public boolean hasPartitionKeyIndex() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - /** - * required uint64 partition_key_index = 1; - */ - public long getPartitionKeyIndex() { - return partitionKeyIndex_; - } - /** - * required uint64 partition_key_index = 1; - */ - public Builder setPartitionKeyIndex(long value) { - bitField0_ |= 0x00000001; - partitionKeyIndex_ = value; - onChanged(); - return this; - } - /** - * required uint64 partition_key_index = 1; - */ - public Builder clearPartitionKeyIndex() { - bitField0_ = (bitField0_ & ~0x00000001); - partitionKeyIndex_ = 0L; - onChanged(); - return this; - } - - private long explicitHashKeyIndex_ ; - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public boolean hasExplicitHashKeyIndex() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public long getExplicitHashKeyIndex() { - return explicitHashKeyIndex_; - } - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public Builder setExplicitHashKeyIndex(long value) { - bitField0_ |= 0x00000002; - explicitHashKeyIndex_ = value; - onChanged(); - return this; - } - /** - * optional uint64 explicit_hash_key_index = 2; - */ - public Builder clearExplicitHashKeyIndex() { - bitField0_ = (bitField0_ & ~0x00000002); - explicitHashKeyIndex_ = 0L; - onChanged(); - return this; - } - - private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY; - /** - * required bytes data = 3; - */ - public boolean hasData() { - return ((bitField0_ & 0x00000004) == 0x00000004); - } - /** - * required bytes data = 3; - */ - public com.google.protobuf.ByteString getData() { - return data_; - } - /** - 
* required bytes data = 3; - */ - public Builder setData(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - bitField0_ |= 0x00000004; - data_ = value; - onChanged(); - return this; - } - /** - * required bytes data = 3; - */ - public Builder clearData() { - bitField0_ = (bitField0_ & ~0x00000004); - data_ = getDefaultInstance().getData(); - onChanged(); - return this; - } - - private java.util.List tags_ = - java.util.Collections.emptyList(); - private void ensureTagsIsMutable() { - if (!((bitField0_ & 0x00000008) == 0x00000008)) { - tags_ = new java.util.ArrayList(tags_); - bitField0_ |= 0x00000008; - } - } - - private com.google.protobuf.RepeatedFieldBuilder< - Messages.Tag, Messages.Tag.Builder, Messages.TagOrBuilder> tagsBuilder_; - - /** - * repeated .Tag tags = 4; - */ - public java.util.List getTagsList() { - if (tagsBuilder_ == null) { - return java.util.Collections.unmodifiableList(tags_); - } else { - return tagsBuilder_.getMessageList(); + /** + * required uint64 partition_key_index = 1; + */ + public long getPartitionKeyIndex() { + return partitionKeyIndex_; } - } - /** - * repeated .Tag tags = 4; - */ - public int getTagsCount() { - if (tagsBuilder_ == null) { - return tags_.size(); - } else { - return tagsBuilder_.getCount(); - } - } - /** - * repeated .Tag tags = 4; - */ - public Messages.Tag getTags(int index) { - if (tagsBuilder_ == null) { - return tags_.get(index); - } else { - return tagsBuilder_.getMessage(index); - } - } - /** - * repeated .Tag tags = 4; - */ - public Builder setTags( - int index, Messages.Tag value) { - if (tagsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTagsIsMutable(); - tags_.set(index, value); - onChanged(); - } else { - tagsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder setTags( - int index, Messages.Tag.Builder builderForValue) { - if (tagsBuilder_ == 
null) { - ensureTagsIsMutable(); - tags_.set(index, builderForValue.build()); - onChanged(); - } else { - tagsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder addTags(Messages.Tag value) { - if (tagsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTagsIsMutable(); - tags_.add(value); - onChanged(); - } else { - tagsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder addTags( - int index, Messages.Tag value) { - if (tagsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureTagsIsMutable(); - tags_.add(index, value); - onChanged(); - } else { - tagsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder addTags( - Messages.Tag.Builder builderForValue) { - if (tagsBuilder_ == null) { - ensureTagsIsMutable(); - tags_.add(builderForValue.build()); - onChanged(); - } else { - tagsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder addTags( - int index, Messages.Tag.Builder builderForValue) { - if (tagsBuilder_ == null) { - ensureTagsIsMutable(); - tags_.add(index, builderForValue.build()); - onChanged(); - } else { - tagsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder addAllTags( - java.lang.Iterable values) { - if (tagsBuilder_ == null) { - ensureTagsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, tags_); - onChanged(); - } else { - tagsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder clearTags() { - if (tagsBuilder_ == null) { - tags_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000008); - onChanged(); - } else { - 
tagsBuilder_.clear(); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Builder removeTags(int index) { - if (tagsBuilder_ == null) { - ensureTagsIsMutable(); - tags_.remove(index); - onChanged(); - } else { - tagsBuilder_.remove(index); - } - return this; - } - /** - * repeated .Tag tags = 4; - */ - public Messages.Tag.Builder getTagsBuilder( - int index) { - return getTagsFieldBuilder().getBuilder(index); - } - /** - * repeated .Tag tags = 4; - */ - public Messages.TagOrBuilder getTagsOrBuilder( - int index) { - if (tagsBuilder_ == null) { - return tags_.get(index); } else { - return tagsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .Tag tags = 4; - */ - public java.util.List - getTagsOrBuilderList() { - if (tagsBuilder_ != null) { - return tagsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(tags_); - } - } - /** - * repeated .Tag tags = 4; - */ - public Messages.Tag.Builder addTagsBuilder() { - return getTagsFieldBuilder().addBuilder( - Messages.Tag.getDefaultInstance()); - } - /** - * repeated .Tag tags = 4; - */ - public Messages.Tag.Builder addTagsBuilder( - int index) { - return getTagsFieldBuilder().addBuilder( - index, Messages.Tag.getDefaultInstance()); - } - /** - * repeated .Tag tags = 4; - */ - public java.util.List - getTagsBuilderList() { - return getTagsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - Messages.Tag, Messages.Tag.Builder, Messages.TagOrBuilder> - getTagsFieldBuilder() { - if (tagsBuilder_ == null) { - tagsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - Messages.Tag, Messages.Tag.Builder, Messages.TagOrBuilder>( - tags_, - ((bitField0_ & 0x00000008) == 0x00000008), - getParentForChildren(), - isClean()); - tags_ = null; - } - return tagsBuilder_; - } - // @@protoc_insertion_point(builder_scope:Record) - } + public static final int EXPLICIT_HASH_KEY_INDEX_FIELD_NUMBER = 2; + private long 
explicitHashKeyIndex_; + /** + * optional uint64 explicit_hash_key_index = 2; + */ + public boolean hasExplicitHashKeyIndex() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 explicit_hash_key_index = 2; + */ + public long getExplicitHashKeyIndex() { + return explicitHashKeyIndex_; + } - static { - defaultInstance = new Record(true); - defaultInstance.initFields(); - } + public static final int DATA_FIELD_NUMBER = 3; + private com.google.protobuf.ByteString data_; + /** + * required bytes data = 3; + */ + public boolean hasData() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes data = 3; + */ + public com.google.protobuf.ByteString getData() { + return data_; + } - // @@protoc_insertion_point(class_scope:Record) - } + public static final int TAGS_FIELD_NUMBER = 4; + private java.util.List tags_; + /** + * repeated .Tag tags = 4; + */ + public java.util.List getTagsList() { + return tags_; + } + /** + * repeated .Tag tags = 4; + */ + public java.util.List getTagsOrBuilderList() { + return tags_; + } + /** + * repeated .Tag tags = 4; + */ + public int getTagsCount() { + return tags_.size(); + } + /** + * repeated .Tag tags = 4; + */ + public Messages.Tag getTags(int index) { + return tags_.get(index); + } + /** + * repeated .Tag tags = 4; + */ + public Messages.TagOrBuilder getTagsOrBuilder(int index) { + return tags_.get(index); + } - public interface AggregatedRecordOrBuilder extends - // @@protoc_insertion_point(interface_extends:AggregatedRecord) - com.google.protobuf.MessageOrBuilder { + private void initFields() { + partitionKeyIndex_ = 0L; + explicitHashKeyIndex_ = 0L; + data_ = com.google.protobuf.ByteString.EMPTY; + tags_ = java.util.Collections.emptyList(); + } - /** - * repeated string partition_key_table = 1; - */ - com.google.protobuf.ProtocolStringList - getPartitionKeyTableList(); - /** - * repeated string partition_key_table = 1; - */ - int getPartitionKeyTableCount(); - /** - * 
repeated string partition_key_table = 1; - */ - java.lang.String getPartitionKeyTable(int index); - /** - * repeated string partition_key_table = 1; - */ - com.google.protobuf.ByteString - getPartitionKeyTableBytes(int index); + private byte memoizedIsInitialized = -1; - /** - * repeated string explicit_hash_key_table = 2; - */ - com.google.protobuf.ProtocolStringList - getExplicitHashKeyTableList(); - /** - * repeated string explicit_hash_key_table = 2; - */ - int getExplicitHashKeyTableCount(); - /** - * repeated string explicit_hash_key_table = 2; - */ - java.lang.String getExplicitHashKeyTable(int index); - /** - * repeated string explicit_hash_key_table = 2; - */ - com.google.protobuf.ByteString - getExplicitHashKeyTableBytes(int index); + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; - /** - * repeated .Record records = 3; - */ - java.util.List - getRecordsList(); - /** - * repeated .Record records = 3; - */ - Messages.Record getRecords(int index); - /** - * repeated .Record records = 3; - */ - int getRecordsCount(); - /** - * repeated .Record records = 3; - */ - java.util.List - getRecordsOrBuilderList(); - /** - * repeated .Record records = 3; - */ - Messages.RecordOrBuilder getRecordsOrBuilder( - int index); - } - /** - * Protobuf type {@code AggregatedRecord} - */ - public static final class AggregatedRecord extends - com.google.protobuf.GeneratedMessage implements - // @@protoc_insertion_point(message_implements:AggregatedRecord) - AggregatedRecordOrBuilder { - // Use AggregatedRecord.newBuilder() to construct. 
- private AggregatedRecord(com.google.protobuf.GeneratedMessage.Builder builder) { - super(builder); - this.unknownFields = builder.getUnknownFields(); - } - private AggregatedRecord(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - - private static final AggregatedRecord defaultInstance; - public static AggregatedRecord getDefaultInstance() { - return defaultInstance; - } - - public AggregatedRecord getDefaultInstanceForType() { - return defaultInstance; - } - - private final com.google.protobuf.UnknownFieldSet unknownFields; - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { - return this.unknownFields; - } - private AggregatedRecord( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - initFields(); - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - done = true; - } - break; + if (!hasPartitionKeyIndex()) { + memoizedIsInitialized = 0; + return false; } - case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); - if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - partitionKeyTable_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000001; - } - partitionKeyTable_.add(bs); - break; + if (!hasData()) { + memoizedIsInitialized = 0; + return false; } - case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); - if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - explicitHashKeyTable_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000002; - } - 
explicitHashKeyTable_.add(bs); - break; + for (int i = 0; i < getTagsCount(); i++) { + if (!getTags(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } - case 26: { - if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - records_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000004; - } - records_.add(input.readMessage(Messages.Record.PARSER, extensionRegistry)); - break; + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, partitionKeyIndex_); } - } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, explicitHashKeyIndex_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, data_); + } + for (int i = 0; i < tags_.size(); i++) { + output.writeMessage(4, tags_.get(i)); + } + getUnknownFields().writeTo(output); } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException( - e.getMessage()).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { - partitionKeyTable_ = partitionKeyTable_.getUnmodifiableView(); + + private int memoizedSerializedSize = -1; + + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(1, partitionKeyIndex_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream.computeUInt64Size(2, explicitHashKeyIndex_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(3, data_); + } + for (int i = 0; i < 
tags_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, tags_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; } - if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { - explicitHashKeyTable_ = explicitHashKeyTable_.getUnmodifiableView(); + + private static final long serialVersionUID = 0L; + + @java.lang.Override + protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { + return super.writeReplace(); } - if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { - records_ = java.util.Collections.unmodifiableList(records_); + + public static Messages.Record parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return Messages.internal_static_AggregatedRecord_descriptor; - } - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return Messages.internal_static_AggregatedRecord_fieldAccessorTable - .ensureFieldAccessorsInitialized( - Messages.AggregatedRecord.class, Messages.AggregatedRecord.Builder.class); - } - - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - public AggregatedRecord parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new AggregatedRecord(input, extensionRegistry); - } - }; - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - public static final int PARTITION_KEY_TABLE_FIELD_NUMBER = 1; - private com.google.protobuf.LazyStringList partitionKeyTable_; - /** - * repeated string 
partition_key_table = 1; - */ - public com.google.protobuf.ProtocolStringList - getPartitionKeyTableList() { - return partitionKeyTable_; - } - /** - * repeated string partition_key_table = 1; - */ - public int getPartitionKeyTableCount() { - return partitionKeyTable_.size(); - } - /** - * repeated string partition_key_table = 1; - */ - public java.lang.String getPartitionKeyTable(int index) { - return partitionKeyTable_.get(index); - } - /** - * repeated string partition_key_table = 1; - */ - public com.google.protobuf.ByteString - getPartitionKeyTableBytes(int index) { - return partitionKeyTable_.getByteString(index); - } - - public static final int EXPLICIT_HASH_KEY_TABLE_FIELD_NUMBER = 2; - private com.google.protobuf.LazyStringList explicitHashKeyTable_; - /** - * repeated string explicit_hash_key_table = 2; - */ - public com.google.protobuf.ProtocolStringList - getExplicitHashKeyTableList() { - return explicitHashKeyTable_; - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public int getExplicitHashKeyTableCount() { - return explicitHashKeyTable_.size(); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public java.lang.String getExplicitHashKeyTable(int index) { - return explicitHashKeyTable_.get(index); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public com.google.protobuf.ByteString - getExplicitHashKeyTableBytes(int index) { - return explicitHashKeyTable_.getByteString(index); - } - - public static final int RECORDS_FIELD_NUMBER = 3; - private java.util.List records_; - /** - * repeated .Record records = 3; - */ - public java.util.List getRecordsList() { - return records_; - } - /** - * repeated .Record records = 3; - */ - public java.util.List - getRecordsOrBuilderList() { - return records_; - } - /** - * repeated .Record records = 3; - */ - public int getRecordsCount() { - return records_.size(); - } - /** - * repeated .Record records = 3; - */ - public Messages.Record getRecords(int index) { - 
return records_.get(index); - } - /** - * repeated .Record records = 3; - */ - public Messages.RecordOrBuilder getRecordsOrBuilder( - int index) { - return records_.get(index); - } - - private void initFields() { - partitionKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - explicitHashKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - records_ = java.util.Collections.emptyList(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - for (int i = 0; i < getRecordsCount(); i++) { - if (!getRecords(i).isInitialized()) { - memoizedIsInitialized = 0; - return false; + public static Messages.Record parseFrom( + com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - } - memoizedIsInitialized = 1; - return true; - } - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - for (int i = 0; i < partitionKeyTable_.size(); i++) { - output.writeBytes(1, partitionKeyTable_.getByteString(i)); - } - for (int i = 0; i < explicitHashKeyTable_.size(); i++) { - output.writeBytes(2, explicitHashKeyTable_.getByteString(i)); - } - for (int i = 0; i < records_.size(); i++) { - output.writeMessage(3, records_.get(i)); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - for (int i = 0; i < partitionKeyTable_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(partitionKeyTable_.getByteString(i)); + public static Messages.Record parseFrom(byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - size += dataSize; - size += 1 * getPartitionKeyTableList().size(); - } - { - int dataSize = 0; - for (int i = 0; i < explicitHashKeyTable_.size(); i++) { - dataSize += com.google.protobuf.CodedOutputStream - .computeBytesSizeNoTag(explicitHashKeyTable_.getByteString(i)); + + public static Messages.Record parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); } - size += dataSize; - size += 1 * getExplicitHashKeyTableList().size(); - } - for (int i = 0; i < records_.size(); i++) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, records_.get(i)); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; + + public static Messages.Record parseFrom(java.io.InputStream input) throws java.io.IOException { + return PARSER.parseFrom(input); + } + + public static Messages.Record parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Messages.Record parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + + public static Messages.Record parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + + public static Messages.Record parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return PARSER.parseFrom(input); + } + + public static Messages.Record parseFrom( + com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { + return Builder.create(); + } + + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder(Messages.Record prototype) { + return newBuilder().mergeFrom(prototype); + } + + public Builder toBuilder() { + return newBuilder(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code Record} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:Record) + Messages.RecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Messages.internal_static_Record_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { + return Messages.internal_static_Record_fieldAccessorTable.ensureFieldAccessorsInitialized( + Messages.Record.class, Messages.Record.Builder.class); + } + + // Construct using Messages.Record.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTagsFieldBuilder(); + } + } + + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + partitionKeyIndex_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + explicitHashKeyIndex_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + data_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000004); + if (tagsBuilder_ == null) { + 
tags_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + } else { + tagsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Messages.internal_static_Record_descriptor; + } + + public Messages.Record getDefaultInstanceForType() { + return Messages.Record.getDefaultInstance(); + } + + public Messages.Record build() { + Messages.Record result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public Messages.Record buildPartial() { + Messages.Record result = new Messages.Record(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.partitionKeyIndex_ = partitionKeyIndex_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.explicitHashKeyIndex_ = explicitHashKeyIndex_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.data_ = data_; + if (tagsBuilder_ == null) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { + tags_ = java.util.Collections.unmodifiableList(tags_); + bitField0_ = (bitField0_ & ~0x00000008); + } + result.tags_ = tags_; + } else { + result.tags_ = tagsBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Messages.Record) { + return mergeFrom((Messages.Record) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Messages.Record other) { + if (other == Messages.Record.getDefaultInstance()) return this; + if (other.hasPartitionKeyIndex()) { + setPartitionKeyIndex(other.getPartitionKeyIndex()); + } + if 
(other.hasExplicitHashKeyIndex()) { + setExplicitHashKeyIndex(other.getExplicitHashKeyIndex()); + } + if (other.hasData()) { + setData(other.getData()); + } + if (tagsBuilder_ == null) { + if (!other.tags_.isEmpty()) { + if (tags_.isEmpty()) { + tags_ = other.tags_; + bitField0_ = (bitField0_ & ~0x00000008); + } else { + ensureTagsIsMutable(); + tags_.addAll(other.tags_); + } + onChanged(); + } + } else { + if (!other.tags_.isEmpty()) { + if (tagsBuilder_.isEmpty()) { + tagsBuilder_.dispose(); + tagsBuilder_ = null; + tags_ = other.tags_; + bitField0_ = (bitField0_ & ~0x00000008); + tagsBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? getTagsFieldBuilder() + : null; + } else { + tagsBuilder_.addAllMessages(other.tags_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasPartitionKeyIndex()) { + + return false; + } + if (!hasData()) { + + return false; + } + for (int i = 0; i < getTagsCount(); i++) { + if (!getTags(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Messages.Record parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (Messages.Record) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private long partitionKeyIndex_; + /** + * required uint64 partition_key_index = 1; + */ + public boolean hasPartitionKeyIndex() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 partition_key_index = 1; + */ + public long getPartitionKeyIndex() { + return partitionKeyIndex_; + } + /** + * required 
uint64 partition_key_index = 1; + */ + public Builder setPartitionKeyIndex(long value) { + bitField0_ |= 0x00000001; + partitionKeyIndex_ = value; + onChanged(); + return this; + } + /** + * required uint64 partition_key_index = 1; + */ + public Builder clearPartitionKeyIndex() { + bitField0_ = (bitField0_ & ~0x00000001); + partitionKeyIndex_ = 0L; + onChanged(); + return this; + } + + private long explicitHashKeyIndex_; + /** + * optional uint64 explicit_hash_key_index = 2; + */ + public boolean hasExplicitHashKeyIndex() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 explicit_hash_key_index = 2; + */ + public long getExplicitHashKeyIndex() { + return explicitHashKeyIndex_; + } + /** + * optional uint64 explicit_hash_key_index = 2; + */ + public Builder setExplicitHashKeyIndex(long value) { + bitField0_ |= 0x00000002; + explicitHashKeyIndex_ = value; + onChanged(); + return this; + } + /** + * optional uint64 explicit_hash_key_index = 2; + */ + public Builder clearExplicitHashKeyIndex() { + bitField0_ = (bitField0_ & ~0x00000002); + explicitHashKeyIndex_ = 0L; + onChanged(); + return this; + } + + private com.google.protobuf.ByteString data_ = com.google.protobuf.ByteString.EMPTY; + /** + * required bytes data = 3; + */ + public boolean hasData() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required bytes data = 3; + */ + public com.google.protobuf.ByteString getData() { + return data_; + } + /** + * required bytes data = 3; + */ + public Builder setData(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000004; + data_ = value; + onChanged(); + return this; + } + /** + * required bytes data = 3; + */ + public Builder clearData() { + bitField0_ = (bitField0_ & ~0x00000004); + data_ = getDefaultInstance().getData(); + onChanged(); + return this; + } + + private java.util.List tags_ = java.util.Collections.emptyList(); + + private void 
ensureTagsIsMutable() { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { + tags_ = new java.util.ArrayList(tags_); + bitField0_ |= 0x00000008; + } + } + + private com.google.protobuf.RepeatedFieldBuilder + tagsBuilder_; + + /** + * repeated .Tag tags = 4; + */ + public java.util.List getTagsList() { + if (tagsBuilder_ == null) { + return java.util.Collections.unmodifiableList(tags_); + } else { + return tagsBuilder_.getMessageList(); + } + } + /** + * repeated .Tag tags = 4; + */ + public int getTagsCount() { + if (tagsBuilder_ == null) { + return tags_.size(); + } else { + return tagsBuilder_.getCount(); + } + } + /** + * repeated .Tag tags = 4; + */ + public Messages.Tag getTags(int index) { + if (tagsBuilder_ == null) { + return tags_.get(index); + } else { + return tagsBuilder_.getMessage(index); + } + } + /** + * repeated .Tag tags = 4; + */ + public Builder setTags(int index, Messages.Tag value) { + if (tagsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTagsIsMutable(); + tags_.set(index, value); + onChanged(); + } else { + tagsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .Tag tags = 4; + */ + public Builder setTags(int index, Messages.Tag.Builder builderForValue) { + if (tagsBuilder_ == null) { + ensureTagsIsMutable(); + tags_.set(index, builderForValue.build()); + onChanged(); + } else { + tagsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .Tag tags = 4; + */ + public Builder addTags(Messages.Tag value) { + if (tagsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTagsIsMutable(); + tags_.add(value); + onChanged(); + } else { + tagsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .Tag tags = 4; + */ + public Builder addTags(int index, Messages.Tag value) { + if (tagsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureTagsIsMutable(); 
+ tags_.add(index, value); + onChanged(); + } else { + tagsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .Tag tags = 4; + */ + public Builder addTags(Messages.Tag.Builder builderForValue) { + if (tagsBuilder_ == null) { + ensureTagsIsMutable(); + tags_.add(builderForValue.build()); + onChanged(); + } else { + tagsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .Tag tags = 4; + */ + public Builder addTags(int index, Messages.Tag.Builder builderForValue) { + if (tagsBuilder_ == null) { + ensureTagsIsMutable(); + tags_.add(index, builderForValue.build()); + onChanged(); + } else { + tagsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .Tag tags = 4; + */ + public Builder addAllTags(java.lang.Iterable values) { + if (tagsBuilder_ == null) { + ensureTagsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, tags_); + onChanged(); + } else { + tagsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .Tag tags = 4; + */ + public Builder clearTags() { + if (tagsBuilder_ == null) { + tags_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000008); + onChanged(); + } else { + tagsBuilder_.clear(); + } + return this; + } + /** + * repeated .Tag tags = 4; + */ + public Builder removeTags(int index) { + if (tagsBuilder_ == null) { + ensureTagsIsMutable(); + tags_.remove(index); + onChanged(); + } else { + tagsBuilder_.remove(index); + } + return this; + } + /** + * repeated .Tag tags = 4; + */ + public Messages.Tag.Builder getTagsBuilder(int index) { + return getTagsFieldBuilder().getBuilder(index); + } + /** + * repeated .Tag tags = 4; + */ + public Messages.TagOrBuilder getTagsOrBuilder(int index) { + if (tagsBuilder_ == null) { + return tags_.get(index); + } else { + return tagsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .Tag tags = 4; + */ + public java.util.List 
getTagsOrBuilderList() { + if (tagsBuilder_ != null) { + return tagsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(tags_); + } + } + /** + * repeated .Tag tags = 4; + */ + public Messages.Tag.Builder addTagsBuilder() { + return getTagsFieldBuilder().addBuilder(Messages.Tag.getDefaultInstance()); + } + /** + * repeated .Tag tags = 4; + */ + public Messages.Tag.Builder addTagsBuilder(int index) { + return getTagsFieldBuilder().addBuilder(index, Messages.Tag.getDefaultInstance()); + } + /** + * repeated .Tag tags = 4; + */ + public java.util.List getTagsBuilderList() { + return getTagsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder + getTagsFieldBuilder() { + if (tagsBuilder_ == null) { + tagsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + Messages.Tag, Messages.Tag.Builder, Messages.TagOrBuilder>( + tags_, ((bitField0_ & 0x00000008) == 0x00000008), getParentForChildren(), isClean()); + tags_ = null; + } + return tagsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:Record) + } + + static { + defaultInstance = new Record(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Record) } - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } + public interface AggregatedRecordOrBuilder + extends + // @@protoc_insertion_point(interface_extends:AggregatedRecord) + com.google.protobuf.MessageOrBuilder { - public static Messages.AggregatedRecord parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static Messages.AggregatedRecord parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static Messages.AggregatedRecord parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static Messages.AggregatedRecord parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - public static Messages.AggregatedRecord parseFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static Messages.AggregatedRecord parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } - public static Messages.AggregatedRecord parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input); - } - public static Messages.AggregatedRecord parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseDelimitedFrom(input, extensionRegistry); - } - public static Messages.AggregatedRecord parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return PARSER.parseFrom(input); - } - public static Messages.AggregatedRecord parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return PARSER.parseFrom(input, extensionRegistry); - } + /** + * repeated string partition_key_table = 1; + */ + com.google.protobuf.ProtocolStringList getPartitionKeyTableList(); + /** + * repeated string partition_key_table = 1; + */ + int getPartitionKeyTableCount(); + /** + * repeated string partition_key_table 
= 1; + */ + java.lang.String getPartitionKeyTable(int index); + /** + * repeated string partition_key_table = 1; + */ + com.google.protobuf.ByteString getPartitionKeyTableBytes(int index); - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(Messages.AggregatedRecord prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } + /** + * repeated string explicit_hash_key_table = 2; + */ + com.google.protobuf.ProtocolStringList getExplicitHashKeyTableList(); + /** + * repeated string explicit_hash_key_table = 2; + */ + int getExplicitHashKeyTableCount(); + /** + * repeated string explicit_hash_key_table = 2; + */ + java.lang.String getExplicitHashKeyTable(int index); + /** + * repeated string explicit_hash_key_table = 2; + */ + com.google.protobuf.ByteString getExplicitHashKeyTableBytes(int index); - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; + /** + * repeated .Record records = 3; + */ + java.util.List getRecordsList(); + /** + * repeated .Record records = 3; + */ + Messages.Record getRecords(int index); + /** + * repeated .Record records = 3; + */ + int getRecordsCount(); + /** + * repeated .Record records = 3; + */ + java.util.List getRecordsOrBuilderList(); + /** + * repeated .Record records = 3; + */ + Messages.RecordOrBuilder getRecordsOrBuilder(int index); } /** * Protobuf type {@code AggregatedRecord} */ - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder implements - // @@protoc_insertion_point(builder_implements:AggregatedRecord) - Messages.AggregatedRecordOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return Messages.internal_static_AggregatedRecord_descriptor; - } - 
- protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return Messages.internal_static_AggregatedRecord_fieldAccessorTable - .ensureFieldAccessorsInitialized( - Messages.AggregatedRecord.class, Messages.AggregatedRecord.Builder.class); - } - - // Construct using Messages.AggregatedRecord.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getRecordsFieldBuilder(); + public static final class AggregatedRecord extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:AggregatedRecord) + AggregatedRecordOrBuilder { + // Use AggregatedRecord.newBuilder() to construct. + private AggregatedRecord(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); } - } - private static Builder create() { - return new Builder(); - } - public Builder clear() { - super.clear(); - partitionKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - explicitHashKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - if (recordsBuilder_ == null) { - records_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - } else { - recordsBuilder_.clear(); + private AggregatedRecord(boolean noInit) { + this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - return this; - } - public Builder clone() { - return create().mergeFrom(buildPartial()); - } + private static final AggregatedRecord defaultInstance; - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return 
Messages.internal_static_AggregatedRecord_descriptor; - } + public static AggregatedRecord getDefaultInstance() { + return defaultInstance; + } - public Messages.AggregatedRecord getDefaultInstanceForType() { - return Messages.AggregatedRecord.getDefaultInstance(); - } + public AggregatedRecord getDefaultInstanceForType() { + return defaultInstance; + } - public Messages.AggregatedRecord build() { - Messages.AggregatedRecord result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } + private final com.google.protobuf.UnknownFieldSet unknownFields; - public Messages.AggregatedRecord buildPartial() { - Messages.AggregatedRecord result = new Messages.AggregatedRecord(this); - int from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - partitionKeyTable_ = partitionKeyTable_.getUnmodifiableView(); - bitField0_ = (bitField0_ & ~0x00000001); + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; } - result.partitionKeyTable_ = partitionKeyTable_; - if (((bitField0_ & 0x00000002) == 0x00000002)) { - explicitHashKeyTable_ = explicitHashKeyTable_.getUnmodifiableView(); - bitField0_ = (bitField0_ & ~0x00000002); - } - result.explicitHashKeyTable_ = explicitHashKeyTable_; - if (recordsBuilder_ == null) { - if (((bitField0_ & 0x00000004) == 0x00000004)) { - records_ = java.util.Collections.unmodifiableList(records_); - bitField0_ = (bitField0_ & ~0x00000004); - } - result.records_ = records_; - } else { - result.records_ = recordsBuilder_.build(); - } - onBuilt(); - return result; - } - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof Messages.AggregatedRecord) { - return mergeFrom((Messages.AggregatedRecord)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(Messages.AggregatedRecord other) { - if (other == 
Messages.AggregatedRecord.getDefaultInstance()) return this; - if (!other.partitionKeyTable_.isEmpty()) { - if (partitionKeyTable_.isEmpty()) { - partitionKeyTable_ = other.partitionKeyTable_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensurePartitionKeyTableIsMutable(); - partitionKeyTable_.addAll(other.partitionKeyTable_); - } - onChanged(); - } - if (!other.explicitHashKeyTable_.isEmpty()) { - if (explicitHashKeyTable_.isEmpty()) { - explicitHashKeyTable_ = other.explicitHashKeyTable_; - bitField0_ = (bitField0_ & ~0x00000002); - } else { - ensureExplicitHashKeyTableIsMutable(); - explicitHashKeyTable_.addAll(other.explicitHashKeyTable_); - } - onChanged(); - } - if (recordsBuilder_ == null) { - if (!other.records_.isEmpty()) { - if (records_.isEmpty()) { - records_ = other.records_; - bitField0_ = (bitField0_ & ~0x00000004); - } else { - ensureRecordsIsMutable(); - records_.addAll(other.records_); + private AggregatedRecord( + com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField( + input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + com.google.protobuf.ByteString bs = input.readBytes(); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + partitionKeyTable_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + partitionKeyTable_.add(bs); + break; + } + case 18: { + com.google.protobuf.ByteString bs = input.readBytes(); + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + explicitHashKeyTable_ = new 
com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000002; + } + explicitHashKeyTable_.add(bs); + break; + } + case 26: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + records_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + records_.add(input.readMessage(Messages.Record.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + partitionKeyTable_ = partitionKeyTable_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + explicitHashKeyTable_ = explicitHashKeyTable_.getUnmodifiableView(); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + records_ = java.util.Collections.unmodifiableList(records_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); } - onChanged(); - } - } else { - if (!other.records_.isEmpty()) { - if (recordsBuilder_.isEmpty()) { - recordsBuilder_.dispose(); - recordsBuilder_ = null; - records_ = other.records_; - bitField0_ = (bitField0_ & ~0x00000004); - recordsBuilder_ = - com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
- getRecordsFieldBuilder() : null; - } else { - recordsBuilder_.addAllMessages(other.records_); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Messages.internal_static_AggregatedRecord_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { + return Messages.internal_static_AggregatedRecord_fieldAccessorTable.ensureFieldAccessorsInitialized( + Messages.AggregatedRecord.class, Messages.AggregatedRecord.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AggregatedRecord parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AggregatedRecord(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + public static final int PARTITION_KEY_TABLE_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList partitionKeyTable_; + /** + * repeated string partition_key_table = 1; + */ + public com.google.protobuf.ProtocolStringList getPartitionKeyTableList() { + return partitionKeyTable_; + } + /** + * repeated string partition_key_table = 1; + */ + public int getPartitionKeyTableCount() { + return partitionKeyTable_.size(); + } + /** + * repeated string partition_key_table = 1; + */ + public java.lang.String getPartitionKeyTable(int index) { + return partitionKeyTable_.get(index); + } + /** + * repeated string partition_key_table = 1; + */ + public com.google.protobuf.ByteString getPartitionKeyTableBytes(int index) { + return partitionKeyTable_.getByteString(index); + } + + public static final int EXPLICIT_HASH_KEY_TABLE_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList explicitHashKeyTable_; + /** + * repeated string explicit_hash_key_table = 2; 
+ */ + public com.google.protobuf.ProtocolStringList getExplicitHashKeyTableList() { + return explicitHashKeyTable_; + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public int getExplicitHashKeyTableCount() { + return explicitHashKeyTable_.size(); + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public java.lang.String getExplicitHashKeyTable(int index) { + return explicitHashKeyTable_.get(index); + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public com.google.protobuf.ByteString getExplicitHashKeyTableBytes(int index) { + return explicitHashKeyTable_.getByteString(index); + } + + public static final int RECORDS_FIELD_NUMBER = 3; + private java.util.List records_; + /** + * repeated .Record records = 3; + */ + public java.util.List getRecordsList() { + return records_; + } + /** + * repeated .Record records = 3; + */ + public java.util.List getRecordsOrBuilderList() { + return records_; + } + /** + * repeated .Record records = 3; + */ + public int getRecordsCount() { + return records_.size(); + } + /** + * repeated .Record records = 3; + */ + public Messages.Record getRecords(int index) { + return records_.get(index); + } + /** + * repeated .Record records = 3; + */ + public Messages.RecordOrBuilder getRecordsOrBuilder(int index) { + return records_.get(index); + } + + private void initFields() { + partitionKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; + explicitHashKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; + records_ = java.util.Collections.emptyList(); + } + + private byte memoizedIsInitialized = -1; + + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + for (int i = 0; i < getRecordsCount(); i++) { + if (!getRecords(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } } - } + memoizedIsInitialized = 1; + return true; } - 
this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - public final boolean isInitialized() { - for (int i = 0; i < getRecordsCount(); i++) { - if (!getRecords(i).isInitialized()) { - - return false; - } + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < partitionKeyTable_.size(); i++) { + output.writeBytes(1, partitionKeyTable_.getByteString(i)); + } + for (int i = 0; i < explicitHashKeyTable_.size(); i++) { + output.writeBytes(2, explicitHashKeyTable_.getByteString(i)); + } + for (int i = 0; i < records_.size(); i++) { + output.writeMessage(3, records_.get(i)); + } + getUnknownFields().writeTo(output); } - return true; - } - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Messages.AggregatedRecord parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (Messages.AggregatedRecord) e.getUnfinishedMessage(); - throw e; - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - private int bitField0_; + private int memoizedSerializedSize = -1; - private com.google.protobuf.LazyStringList partitionKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensurePartitionKeyTableIsMutable() { - if (!((bitField0_ & 0x00000001) == 0x00000001)) { - partitionKeyTable_ = new com.google.protobuf.LazyStringArrayList(partitionKeyTable_); - bitField0_ |= 0x00000001; - } - } - /** - * repeated string partition_key_table = 1; - */ - public com.google.protobuf.ProtocolStringList - getPartitionKeyTableList() { - return partitionKeyTable_.getUnmodifiableView(); - } - /** - * repeated string partition_key_table = 1; - */ - public int getPartitionKeyTableCount() { - return 
partitionKeyTable_.size(); - } - /** - * repeated string partition_key_table = 1; - */ - public java.lang.String getPartitionKeyTable(int index) { - return partitionKeyTable_.get(index); - } - /** - * repeated string partition_key_table = 1; - */ - public com.google.protobuf.ByteString - getPartitionKeyTableBytes(int index) { - return partitionKeyTable_.getByteString(index); - } - /** - * repeated string partition_key_table = 1; - */ - public Builder setPartitionKeyTable( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensurePartitionKeyTableIsMutable(); - partitionKeyTable_.set(index, value); - onChanged(); - return this; - } - /** - * repeated string partition_key_table = 1; - */ - public Builder addPartitionKeyTable( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensurePartitionKeyTableIsMutable(); - partitionKeyTable_.add(value); - onChanged(); - return this; - } - /** - * repeated string partition_key_table = 1; - */ - public Builder addAllPartitionKeyTable( - java.lang.Iterable values) { - ensurePartitionKeyTableIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, partitionKeyTable_); - onChanged(); - return this; - } - /** - * repeated string partition_key_table = 1; - */ - public Builder clearPartitionKeyTable() { - partitionKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - /** - * repeated string partition_key_table = 1; - */ - public Builder addPartitionKeyTableBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensurePartitionKeyTableIsMutable(); - partitionKeyTable_.add(value); - onChanged(); - return this; - } + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; - private com.google.protobuf.LazyStringList explicitHashKeyTable_ 
= com.google.protobuf.LazyStringArrayList.EMPTY; - private void ensureExplicitHashKeyTableIsMutable() { - if (!((bitField0_ & 0x00000002) == 0x00000002)) { - explicitHashKeyTable_ = new com.google.protobuf.LazyStringArrayList(explicitHashKeyTable_); - bitField0_ |= 0x00000002; - } - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public com.google.protobuf.ProtocolStringList - getExplicitHashKeyTableList() { - return explicitHashKeyTable_.getUnmodifiableView(); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public int getExplicitHashKeyTableCount() { - return explicitHashKeyTable_.size(); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public java.lang.String getExplicitHashKeyTable(int index) { - return explicitHashKeyTable_.get(index); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public com.google.protobuf.ByteString - getExplicitHashKeyTableBytes(int index) { - return explicitHashKeyTable_.getByteString(index); - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public Builder setExplicitHashKeyTable( - int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureExplicitHashKeyTableIsMutable(); - explicitHashKeyTable_.set(index, value); - onChanged(); - return this; - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public Builder addExplicitHashKeyTable( - java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureExplicitHashKeyTableIsMutable(); - explicitHashKeyTable_.add(value); - onChanged(); - return this; - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public Builder addAllExplicitHashKeyTable( - java.lang.Iterable values) { - ensureExplicitHashKeyTableIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, explicitHashKeyTable_); - onChanged(); - return this; - } - /** - * repeated string explicit_hash_key_table = 2; - */ - 
public Builder clearExplicitHashKeyTable() { - explicitHashKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000002); - onChanged(); - return this; - } - /** - * repeated string explicit_hash_key_table = 2; - */ - public Builder addExplicitHashKeyTableBytes( - com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensureExplicitHashKeyTableIsMutable(); - explicitHashKeyTable_.add(value); - onChanged(); - return this; - } + size = 0; + { + int dataSize = 0; + for (int i = 0; i < partitionKeyTable_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream.computeBytesSizeNoTag( + partitionKeyTable_.getByteString(i)); + } + size += dataSize; + size += 1 * getPartitionKeyTableList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < explicitHashKeyTable_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream.computeBytesSizeNoTag( + explicitHashKeyTable_.getByteString(i)); + } + size += dataSize; + size += 1 * getExplicitHashKeyTableList().size(); + } + for (int i = 0; i < records_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, records_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } - private java.util.List records_ = - java.util.Collections.emptyList(); - private void ensureRecordsIsMutable() { - if (!((bitField0_ & 0x00000004) == 0x00000004)) { - records_ = new java.util.ArrayList(records_); - bitField0_ |= 0x00000004; - } - } + private static final long serialVersionUID = 0L; - private com.google.protobuf.RepeatedFieldBuilder< - Messages.Record, Messages.Record.Builder, Messages.RecordOrBuilder> recordsBuilder_; + @java.lang.Override + protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { + return super.writeReplace(); + } - /** - * repeated .Record records = 3; - */ - public java.util.List getRecordsList() { - if 
(recordsBuilder_ == null) { - return java.util.Collections.unmodifiableList(records_); - } else { - return recordsBuilder_.getMessageList(); + public static Messages.AggregatedRecord parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); } - } - /** - * repeated .Record records = 3; - */ - public int getRecordsCount() { - if (recordsBuilder_ == null) { - return records_.size(); - } else { - return recordsBuilder_.getCount(); - } - } - /** - * repeated .Record records = 3; - */ - public Messages.Record getRecords(int index) { - if (recordsBuilder_ == null) { - return records_.get(index); - } else { - return recordsBuilder_.getMessage(index); - } - } - /** - * repeated .Record records = 3; - */ - public Builder setRecords( - int index, Messages.Record value) { - if (recordsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRecordsIsMutable(); - records_.set(index, value); - onChanged(); - } else { - recordsBuilder_.setMessage(index, value); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder setRecords( - int index, Messages.Record.Builder builderForValue) { - if (recordsBuilder_ == null) { - ensureRecordsIsMutable(); - records_.set(index, builderForValue.build()); - onChanged(); - } else { - recordsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder addRecords(Messages.Record value) { - if (recordsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureRecordsIsMutable(); - records_.add(value); - onChanged(); - } else { - recordsBuilder_.addMessage(value); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder addRecords( - int index, Messages.Record value) { - if (recordsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - 
ensureRecordsIsMutable(); - records_.add(index, value); - onChanged(); - } else { - recordsBuilder_.addMessage(index, value); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder addRecords( - Messages.Record.Builder builderForValue) { - if (recordsBuilder_ == null) { - ensureRecordsIsMutable(); - records_.add(builderForValue.build()); - onChanged(); - } else { - recordsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder addRecords( - int index, Messages.Record.Builder builderForValue) { - if (recordsBuilder_ == null) { - ensureRecordsIsMutable(); - records_.add(index, builderForValue.build()); - onChanged(); - } else { - recordsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder addAllRecords( - java.lang.Iterable values) { - if (recordsBuilder_ == null) { - ensureRecordsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, records_); - onChanged(); - } else { - recordsBuilder_.addAllMessages(values); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder clearRecords() { - if (recordsBuilder_ == null) { - records_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000004); - onChanged(); - } else { - recordsBuilder_.clear(); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Builder removeRecords(int index) { - if (recordsBuilder_ == null) { - ensureRecordsIsMutable(); - records_.remove(index); - onChanged(); - } else { - recordsBuilder_.remove(index); - } - return this; - } - /** - * repeated .Record records = 3; - */ - public Messages.Record.Builder getRecordsBuilder( - int index) { - return getRecordsFieldBuilder().getBuilder(index); - } - /** - * repeated .Record records = 3; - */ - public Messages.RecordOrBuilder getRecordsOrBuilder( - int index) { - if 
(recordsBuilder_ == null) { - return records_.get(index); } else { - return recordsBuilder_.getMessageOrBuilder(index); - } - } - /** - * repeated .Record records = 3; - */ - public java.util.List - getRecordsOrBuilderList() { - if (recordsBuilder_ != null) { - return recordsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(records_); - } - } - /** - * repeated .Record records = 3; - */ - public Messages.Record.Builder addRecordsBuilder() { - return getRecordsFieldBuilder().addBuilder( - Messages.Record.getDefaultInstance()); - } - /** - * repeated .Record records = 3; - */ - public Messages.Record.Builder addRecordsBuilder( - int index) { - return getRecordsFieldBuilder().addBuilder( - index, Messages.Record.getDefaultInstance()); - } - /** - * repeated .Record records = 3; - */ - public java.util.List - getRecordsBuilderList() { - return getRecordsFieldBuilder().getBuilderList(); - } - private com.google.protobuf.RepeatedFieldBuilder< - Messages.Record, Messages.Record.Builder, Messages.RecordOrBuilder> - getRecordsFieldBuilder() { - if (recordsBuilder_ == null) { - recordsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< - Messages.Record, Messages.Record.Builder, Messages.RecordOrBuilder>( - records_, - ((bitField0_ & 0x00000004) == 0x00000004), - getParentForChildren(), - isClean()); - records_ = null; - } - return recordsBuilder_; - } - // @@protoc_insertion_point(builder_scope:AggregatedRecord) + public static Messages.AggregatedRecord parseFrom( + com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Messages.AggregatedRecord parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static Messages.AggregatedRecord parseFrom( + byte[] data, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static Messages.AggregatedRecord parseFrom(java.io.InputStream input) throws java.io.IOException { + return PARSER.parseFrom(input); + } + + public static Messages.AggregatedRecord parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Messages.AggregatedRecord parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + + public static Messages.AggregatedRecord parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + + public static Messages.AggregatedRecord parseFrom(com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + + public static Messages.AggregatedRecord parseFrom( + com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { + return Builder.create(); + } + + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder(Messages.AggregatedRecord prototype) { + return newBuilder().mergeFrom(prototype); + } + + public Builder toBuilder() { + return newBuilder(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code AggregatedRecord} + */ + public static final class Builder 
extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:AggregatedRecord) + Messages.AggregatedRecordOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return Messages.internal_static_AggregatedRecord_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { + return Messages.internal_static_AggregatedRecord_fieldAccessorTable.ensureFieldAccessorsInitialized( + Messages.AggregatedRecord.class, Messages.AggregatedRecord.Builder.class); + } + + // Construct using Messages.AggregatedRecord.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getRecordsFieldBuilder(); + } + } + + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + partitionKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + explicitHashKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + if (recordsBuilder_ == null) { + records_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + } else { + recordsBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return Messages.internal_static_AggregatedRecord_descriptor; + } + + public Messages.AggregatedRecord getDefaultInstanceForType() { + return Messages.AggregatedRecord.getDefaultInstance(); + } + + public Messages.AggregatedRecord build() { + Messages.AggregatedRecord result = 
buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public Messages.AggregatedRecord buildPartial() { + Messages.AggregatedRecord result = new Messages.AggregatedRecord(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + partitionKeyTable_ = partitionKeyTable_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.partitionKeyTable_ = partitionKeyTable_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + explicitHashKeyTable_ = explicitHashKeyTable_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.explicitHashKeyTable_ = explicitHashKeyTable_; + if (recordsBuilder_ == null) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { + records_ = java.util.Collections.unmodifiableList(records_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.records_ = records_; + } else { + result.records_ = recordsBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof Messages.AggregatedRecord) { + return mergeFrom((Messages.AggregatedRecord) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(Messages.AggregatedRecord other) { + if (other == Messages.AggregatedRecord.getDefaultInstance()) return this; + if (!other.partitionKeyTable_.isEmpty()) { + if (partitionKeyTable_.isEmpty()) { + partitionKeyTable_ = other.partitionKeyTable_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensurePartitionKeyTableIsMutable(); + partitionKeyTable_.addAll(other.partitionKeyTable_); + } + onChanged(); + } + if (!other.explicitHashKeyTable_.isEmpty()) { + if (explicitHashKeyTable_.isEmpty()) { + explicitHashKeyTable_ = other.explicitHashKeyTable_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureExplicitHashKeyTableIsMutable(); + 
explicitHashKeyTable_.addAll(other.explicitHashKeyTable_); + } + onChanged(); + } + if (recordsBuilder_ == null) { + if (!other.records_.isEmpty()) { + if (records_.isEmpty()) { + records_ = other.records_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureRecordsIsMutable(); + records_.addAll(other.records_); + } + onChanged(); + } + } else { + if (!other.records_.isEmpty()) { + if (recordsBuilder_.isEmpty()) { + recordsBuilder_.dispose(); + recordsBuilder_ = null; + records_ = other.records_; + bitField0_ = (bitField0_ & ~0x00000004); + recordsBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders + ? getRecordsFieldBuilder() + : null; + } else { + recordsBuilder_.addAllMessages(other.records_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getRecordsCount(); i++) { + if (!getRecords(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + Messages.AggregatedRecord parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (Messages.AggregatedRecord) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringList partitionKeyTable_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensurePartitionKeyTableIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + partitionKeyTable_ = new com.google.protobuf.LazyStringArrayList(partitionKeyTable_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated string partition_key_table = 1; + */ + public 
com.google.protobuf.ProtocolStringList getPartitionKeyTableList() { + return partitionKeyTable_.getUnmodifiableView(); + } + /** + * repeated string partition_key_table = 1; + */ + public int getPartitionKeyTableCount() { + return partitionKeyTable_.size(); + } + /** + * repeated string partition_key_table = 1; + */ + public java.lang.String getPartitionKeyTable(int index) { + return partitionKeyTable_.get(index); + } + /** + * repeated string partition_key_table = 1; + */ + public com.google.protobuf.ByteString getPartitionKeyTableBytes(int index) { + return partitionKeyTable_.getByteString(index); + } + /** + * repeated string partition_key_table = 1; + */ + public Builder setPartitionKeyTable(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionKeyTableIsMutable(); + partitionKeyTable_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string partition_key_table = 1; + */ + public Builder addPartitionKeyTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionKeyTableIsMutable(); + partitionKeyTable_.add(value); + onChanged(); + return this; + } + /** + * repeated string partition_key_table = 1; + */ + public Builder addAllPartitionKeyTable(java.lang.Iterable values) { + ensurePartitionKeyTableIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, partitionKeyTable_); + onChanged(); + return this; + } + /** + * repeated string partition_key_table = 1; + */ + public Builder clearPartitionKeyTable() { + partitionKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * repeated string partition_key_table = 1; + */ + public Builder addPartitionKeyTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensurePartitionKeyTableIsMutable(); + 
partitionKeyTable_.add(value); + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList explicitHashKeyTable_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureExplicitHashKeyTableIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + explicitHashKeyTable_ = new com.google.protobuf.LazyStringArrayList(explicitHashKeyTable_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public com.google.protobuf.ProtocolStringList getExplicitHashKeyTableList() { + return explicitHashKeyTable_.getUnmodifiableView(); + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public int getExplicitHashKeyTableCount() { + return explicitHashKeyTable_.size(); + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public java.lang.String getExplicitHashKeyTable(int index) { + return explicitHashKeyTable_.get(index); + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public com.google.protobuf.ByteString getExplicitHashKeyTableBytes(int index) { + return explicitHashKeyTable_.getByteString(index); + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public Builder setExplicitHashKeyTable(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureExplicitHashKeyTableIsMutable(); + explicitHashKeyTable_.set(index, value); + onChanged(); + return this; + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public Builder addExplicitHashKeyTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureExplicitHashKeyTableIsMutable(); + explicitHashKeyTable_.add(value); + onChanged(); + return this; + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public Builder addAllExplicitHashKeyTable(java.lang.Iterable values) { + ensureExplicitHashKeyTableIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, 
explicitHashKeyTable_); + onChanged(); + return this; + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public Builder clearExplicitHashKeyTable() { + explicitHashKeyTable_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + /** + * repeated string explicit_hash_key_table = 2; + */ + public Builder addExplicitHashKeyTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureExplicitHashKeyTableIsMutable(); + explicitHashKeyTable_.add(value); + onChanged(); + return this; + } + + private java.util.List records_ = java.util.Collections.emptyList(); + + private void ensureRecordsIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + records_ = new java.util.ArrayList(records_); + bitField0_ |= 0x00000004; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + Messages.Record, Messages.Record.Builder, Messages.RecordOrBuilder> + recordsBuilder_; + + /** + * repeated .Record records = 3; + */ + public java.util.List getRecordsList() { + if (recordsBuilder_ == null) { + return java.util.Collections.unmodifiableList(records_); + } else { + return recordsBuilder_.getMessageList(); + } + } + /** + * repeated .Record records = 3; + */ + public int getRecordsCount() { + if (recordsBuilder_ == null) { + return records_.size(); + } else { + return recordsBuilder_.getCount(); + } + } + /** + * repeated .Record records = 3; + */ + public Messages.Record getRecords(int index) { + if (recordsBuilder_ == null) { + return records_.get(index); + } else { + return recordsBuilder_.getMessage(index); + } + } + /** + * repeated .Record records = 3; + */ + public Builder setRecords(int index, Messages.Record value) { + if (recordsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRecordsIsMutable(); + records_.set(index, value); + onChanged(); + } else { + 
recordsBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .Record records = 3; + */ + public Builder setRecords(int index, Messages.Record.Builder builderForValue) { + if (recordsBuilder_ == null) { + ensureRecordsIsMutable(); + records_.set(index, builderForValue.build()); + onChanged(); + } else { + recordsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .Record records = 3; + */ + public Builder addRecords(Messages.Record value) { + if (recordsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRecordsIsMutable(); + records_.add(value); + onChanged(); + } else { + recordsBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .Record records = 3; + */ + public Builder addRecords(int index, Messages.Record value) { + if (recordsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureRecordsIsMutable(); + records_.add(index, value); + onChanged(); + } else { + recordsBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .Record records = 3; + */ + public Builder addRecords(Messages.Record.Builder builderForValue) { + if (recordsBuilder_ == null) { + ensureRecordsIsMutable(); + records_.add(builderForValue.build()); + onChanged(); + } else { + recordsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .Record records = 3; + */ + public Builder addRecords(int index, Messages.Record.Builder builderForValue) { + if (recordsBuilder_ == null) { + ensureRecordsIsMutable(); + records_.add(index, builderForValue.build()); + onChanged(); + } else { + recordsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .Record records = 3; + */ + public Builder addAllRecords(java.lang.Iterable values) { + if (recordsBuilder_ == null) { + ensureRecordsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, 
records_); + onChanged(); + } else { + recordsBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .Record records = 3; + */ + public Builder clearRecords() { + if (recordsBuilder_ == null) { + records_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + } else { + recordsBuilder_.clear(); + } + return this; + } + /** + * repeated .Record records = 3; + */ + public Builder removeRecords(int index) { + if (recordsBuilder_ == null) { + ensureRecordsIsMutable(); + records_.remove(index); + onChanged(); + } else { + recordsBuilder_.remove(index); + } + return this; + } + /** + * repeated .Record records = 3; + */ + public Messages.Record.Builder getRecordsBuilder(int index) { + return getRecordsFieldBuilder().getBuilder(index); + } + /** + * repeated .Record records = 3; + */ + public Messages.RecordOrBuilder getRecordsOrBuilder(int index) { + if (recordsBuilder_ == null) { + return records_.get(index); + } else { + return recordsBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .Record records = 3; + */ + public java.util.List getRecordsOrBuilderList() { + if (recordsBuilder_ != null) { + return recordsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(records_); + } + } + /** + * repeated .Record records = 3; + */ + public Messages.Record.Builder addRecordsBuilder() { + return getRecordsFieldBuilder().addBuilder(Messages.Record.getDefaultInstance()); + } + /** + * repeated .Record records = 3; + */ + public Messages.Record.Builder addRecordsBuilder(int index) { + return getRecordsFieldBuilder().addBuilder(index, Messages.Record.getDefaultInstance()); + } + /** + * repeated .Record records = 3; + */ + public java.util.List getRecordsBuilderList() { + return getRecordsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilder< + Messages.Record, Messages.Record.Builder, Messages.RecordOrBuilder> + 
getRecordsFieldBuilder() { + if (recordsBuilder_ == null) { + recordsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + Messages.Record, Messages.Record.Builder, Messages.RecordOrBuilder>( + records_, ((bitField0_ & 0x00000004) == 0x00000004), getParentForChildren(), isClean()); + records_ = null; + } + return recordsBuilder_; + } + + // @@protoc_insertion_point(builder_scope:AggregatedRecord) + } + + static { + defaultInstance = new AggregatedRecord(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:AggregatedRecord) } + private static final com.google.protobuf.Descriptors.Descriptor internal_static_Tag_descriptor; + private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_Tag_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor internal_static_Record_descriptor; + private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_Record_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor internal_static_AggregatedRecord_descriptor; + private static com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_AggregatedRecord_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + static { - defaultInstance = new AggregatedRecord(true); - defaultInstance.initFields(); + java.lang.String[] descriptorData = { + "\n\016messages.proto\"!\n\003Tag\022\013\n\003key\030\001 \002(\t\022\r\n\005" + + "value\030\002 \001(\t\"h\n\006Record\022\033\n\023partition_key_i" + + "ndex\030\001 \002(\004\022\037\n\027explicit_hash_key_index\030\002 " + + "\001(\004\022\014\n\004data\030\003 \002(\014\022\022\n\004tags\030\004 \003(\0132\004.Tag\"j\n" + + "\020AggregatedRecord\022\033\n\023partition_key_table" + + "\030\001 
\003(\t\022\037\n\027explicit_hash_key_table\030\002 \003(\t\022" + + "\030\n\007records\030\003 \003(\0132\007.Record" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}, assigner); + internal_static_Tag_descriptor = getDescriptor().getMessageTypes().get(0); + internal_static_Tag_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Tag_descriptor, new java.lang.String[] { + "Key", "Value", + }); + internal_static_Record_descriptor = getDescriptor().getMessageTypes().get(1); + internal_static_Record_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Record_descriptor, new java.lang.String[] { + "PartitionKeyIndex", "ExplicitHashKeyIndex", "Data", "Tags", + }); + internal_static_AggregatedRecord_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_AggregatedRecord_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AggregatedRecord_descriptor, new java.lang.String[] { + "PartitionKeyTable", "ExplicitHashKeyTable", "Records", + }); } - // @@protoc_insertion_point(class_scope:AggregatedRecord) - } - - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_Tag_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_Tag_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_Record_descriptor; - private static - 
com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_Record_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_AggregatedRecord_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_AggregatedRecord_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor - getDescriptor() { - return descriptor; - } - private static com.google.protobuf.Descriptors.FileDescriptor - descriptor; - static { - java.lang.String[] descriptorData = { - "\n\016messages.proto\"!\n\003Tag\022\013\n\003key\030\001 \002(\t\022\r\n\005" + - "value\030\002 \001(\t\"h\n\006Record\022\033\n\023partition_key_i" + - "ndex\030\001 \002(\004\022\037\n\027explicit_hash_key_index\030\002 " + - "\001(\004\022\014\n\004data\030\003 \002(\014\022\022\n\004tags\030\004 \003(\0132\004.Tag\"j\n" + - "\020AggregatedRecord\022\033\n\023partition_key_table" + - "\030\001 \003(\t\022\037\n\027explicit_hash_key_table\030\002 \003(\t\022" + - "\030\n\007records\030\003 \003(\0132\007.Record" - }; - com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = - new com.google.protobuf.Descriptors.FileDescriptor. 
InternalDescriptorAssigner() { - public com.google.protobuf.ExtensionRegistry assignDescriptors( - com.google.protobuf.Descriptors.FileDescriptor root) { - descriptor = root; - return null; - } - }; - com.google.protobuf.Descriptors.FileDescriptor - .internalBuildGeneratedFileFrom(descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - }, assigner); - internal_static_Tag_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_Tag_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_Tag_descriptor, - new java.lang.String[] { "Key", "Value", }); - internal_static_Record_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_Record_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_Record_descriptor, - new java.lang.String[] { "PartitionKeyIndex", "ExplicitHashKeyIndex", "Data", "Tags", }); - internal_static_AggregatedRecord_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_AggregatedRecord_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_AggregatedRecord_descriptor, - new java.lang.String[] { "PartitionKeyTable", "ExplicitHashKeyTable", "Records", }); - } - - // @@protoc_insertion_point(outer_class_scope) + // @@protoc_insertion_point(outer_class_scope) } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategy.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategy.java index c142d8be..1501bb19 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategy.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategy.java @@ -29,7 +29,6 @@ import 
java.util.concurrent.TimeUnit; import java.util.function.Supplier; import com.google.common.util.concurrent.ThreadFactoryBuilder; - import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; @@ -53,19 +52,32 @@ public class AsynchronousGetRecordsRetrievalStrategy implements GetRecordsRetrie private final String shardId; final Supplier> completionServiceSupplier; - public AsynchronousGetRecordsRetrievalStrategy(@NonNull final KinesisDataFetcher dataFetcher, - final int retryGetRecordsInSeconds, final int maxGetRecordsThreadPool, String shardId) { + public AsynchronousGetRecordsRetrievalStrategy( + @NonNull final KinesisDataFetcher dataFetcher, + final int retryGetRecordsInSeconds, + final int maxGetRecordsThreadPool, + String shardId) { this(dataFetcher, buildExector(maxGetRecordsThreadPool, shardId), retryGetRecordsInSeconds, shardId); } - public AsynchronousGetRecordsRetrievalStrategy(final KinesisDataFetcher dataFetcher, - final ExecutorService executorService, final int retryGetRecordsInSeconds, String shardId) { - this(dataFetcher, executorService, retryGetRecordsInSeconds, () -> new ExecutorCompletionService<>(executorService), + public AsynchronousGetRecordsRetrievalStrategy( + final KinesisDataFetcher dataFetcher, + final ExecutorService executorService, + final int retryGetRecordsInSeconds, + String shardId) { + this( + dataFetcher, + executorService, + retryGetRecordsInSeconds, + () -> new ExecutorCompletionService<>(executorService), shardId); } - AsynchronousGetRecordsRetrievalStrategy(KinesisDataFetcher dataFetcher, ExecutorService executorService, - int retryGetRecordsInSeconds, Supplier> completionServiceSupplier, + AsynchronousGetRecordsRetrievalStrategy( + KinesisDataFetcher dataFetcher, + ExecutorService executorService, + int retryGetRecordsInSeconds, + Supplier> completionServiceSupplier, String shardId) { this.dataFetcher = dataFetcher; this.executorService = 
executorService; @@ -92,8 +104,8 @@ public class AsynchronousGetRecordsRetrievalStrategy implements GetRecordsRetrie } try { - Future resultFuture = completionService.poll(retryGetRecordsInSeconds, - TimeUnit.SECONDS); + Future resultFuture = + completionService.poll(retryGetRecordsInSeconds, TimeUnit.SECONDS); if (resultFuture != null) { // // Fix to ensure that we only let the shard iterator advance when we intend to return the result @@ -135,9 +147,16 @@ public class AsynchronousGetRecordsRetrievalStrategy implements GetRecordsRetrie private static ExecutorService buildExector(int maxGetRecordsThreadPool, String shardId) { String threadNameFormat = "get-records-worker-" + shardId + "-%d"; - return new ThreadPoolExecutor(CORE_THREAD_POOL_COUNT, maxGetRecordsThreadPool, TIME_TO_KEEP_ALIVE, - TimeUnit.SECONDS, new LinkedBlockingQueue<>(1), - new ThreadFactoryBuilder().setDaemon(true).setNameFormat(threadNameFormat).build(), + return new ThreadPoolExecutor( + CORE_THREAD_POOL_COUNT, + maxGetRecordsThreadPool, + TIME_TO_KEEP_ALIVE, + TimeUnit.SECONDS, + new LinkedBlockingQueue<>(1), + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat(threadNameFormat) + .build(), new ThreadPoolExecutor.AbortPolicy()); } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/BlockingRecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/BlockingRecordsPublisher.java index 33be11d4..80d4ae61 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/BlockingRecordsPublisher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/BlockingRecordsPublisher.java @@ -20,10 +20,9 @@ import java.util.List; import java.util.stream.Collectors; import org.reactivestreams.Subscriber; +import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import 
software.amazon.kinesis.common.InitialPositionInStreamExtended; - -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; import software.amazon.kinesis.common.RequestDetails; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; @@ -44,14 +43,15 @@ public class BlockingRecordsPublisher implements RecordsPublisher { private Subscriber subscriber; private RequestDetails lastSuccessfulRequestDetails = new RequestDetails(); - public BlockingRecordsPublisher(final int maxRecordsPerCall, - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy) { + public BlockingRecordsPublisher( + final int maxRecordsPerCall, final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy) { this.maxRecordsPerCall = maxRecordsPerCall; this.getRecordsRetrievalStrategy = getRecordsRetrievalStrategy; } @Override - public void start(ExtendedSequenceNumber extendedSequenceNumber, + public void start( + ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended) { // // Nothing to do here @@ -60,10 +60,12 @@ public class BlockingRecordsPublisher implements RecordsPublisher { public ProcessRecordsInput getNextResult() { GetRecordsResponse getRecordsResult = getRecordsRetrievalStrategy.getRecords(maxRecordsPerCall); - final RequestDetails getRecordsRequestDetails = new RequestDetails(getRecordsResult.responseMetadata().requestId(), Instant.now().toString()); + final RequestDetails getRecordsRequestDetails = new RequestDetails( + getRecordsResult.responseMetadata().requestId(), Instant.now().toString()); setLastSuccessfulRequestDetails(getRecordsRequestDetails); List records = getRecordsResult.records().stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); + .map(KinesisClientRecord::fromRecord) + .collect(Collectors.toList()); return ProcessRecordsInput.builder() .records(records) 
.millisBehindLatest(getRecordsResult.millisBehindLatest()) diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/DataFetcher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/DataFetcher.java index ae1c6f30..ac71b4c7 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/DataFetcher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/DataFetcher.java @@ -17,6 +17,7 @@ package software.amazon.kinesis.retrieval.polling; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; + import lombok.NonNull; import software.amazon.awssdk.services.kinesis.model.GetRecordsRequest; import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; @@ -40,8 +41,7 @@ public interface DataFetcher { * @param initialCheckpoint Current checkpoint sequence number for this shard. * @param initialPositionInStream The initialPositionInStream. */ - void initialize(String initialCheckpoint, - InitialPositionInStreamExtended initialPositionInStream); + void initialize(String initialCheckpoint, InitialPositionInStreamExtended initialPositionInStream); /** * Initializes this KinesisDataFetcher's iterator based on the checkpointed sequence number as an @@ -50,8 +50,7 @@ public interface DataFetcher { * @param initialCheckpoint Current checkpoint sequence number for this shard. * @param initialPositionInStream The initialPositionInStream. */ - void initialize(ExtendedSequenceNumber initialCheckpoint, - InitialPositionInStreamExtended initialPositionInStream); + void initialize(ExtendedSequenceNumber initialCheckpoint, InitialPositionInStreamExtended initialPositionInStream); /** * Advances this KinesisDataFetcher's internal iterator to be at the passed-in sequence number. @@ -59,8 +58,7 @@ public interface DataFetcher { * @param sequenceNumber advance the iterator to the record at this sequence number. 
* @param initialPositionInStream The initialPositionInStream. */ - void advanceIteratorTo(String sequenceNumber, - InitialPositionInStreamExtended initialPositionInStream); + void advanceIteratorTo(String sequenceNumber, InitialPositionInStreamExtended initialPositionInStream); /** * Gets a new iterator from the last known sequence number i.e. the sequence number of the last record from the last @@ -75,7 +73,8 @@ public interface DataFetcher { * @param sequenceNumber reset the iterator to the record at this sequence number. * @param initialPositionInStream the current position in the stream to reset the iterator to. */ - void resetIterator(String shardIterator, String sequenceNumber, InitialPositionInStreamExtended initialPositionInStream); + void resetIterator( + String shardIterator, String sequenceNumber, InitialPositionInStreamExtended initialPositionInStream); /** * Retrieves the response based on the request. @@ -99,7 +98,8 @@ public interface DataFetcher { * @param request used to obtain the next shard iterator * @return next iterator string */ - String getNextIterator(GetShardIteratorRequest request) throws ExecutionException, InterruptedException, TimeoutException; + String getNextIterator(GetShardIteratorRequest request) + throws ExecutionException, InterruptedException, TimeoutException; /** * Gets the next set of records based on the iterator. 
diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcher.java index 495dcfb1..85260e49 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcher.java @@ -14,12 +14,12 @@ */ package software.amazon.kinesis.retrieval.polling; -import com.google.common.collect.Iterables; - import java.time.Duration; import java.util.Collections; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; + +import com.google.common.collect.Iterables; import lombok.AccessLevel; import lombok.Data; import lombok.Getter; @@ -73,25 +73,37 @@ public class KinesisDataFetcher implements DataFetcher { @NonNull private final KinesisAsyncClient kinesisClient; - @NonNull @Getter + + @NonNull + @Getter private final StreamIdentifier streamIdentifier; + @NonNull private final String shardId; + private final int maxRecords; + @NonNull private final MetricsFactory metricsFactory; + private final Duration maxFutureWait; private final String streamAndShardId; @Deprecated - public KinesisDataFetcher(KinesisAsyncClient kinesisClient, String streamName, String shardId, int maxRecords, MetricsFactory metricsFactory) { - this(kinesisClient, new KinesisDataFetcherProviderConfig( - StreamIdentifier.singleStreamInstance(streamName), - shardId, - metricsFactory, - maxRecords, - PollingConfig.DEFAULT_REQUEST_TIMEOUT - )); + public KinesisDataFetcher( + KinesisAsyncClient kinesisClient, + String streamName, + String shardId, + int maxRecords, + MetricsFactory metricsFactory) { + this( + kinesisClient, + new KinesisDataFetcherProviderConfig( + StreamIdentifier.singleStreamInstance(streamName), + shardId, + metricsFactory, + maxRecords, + 
PollingConfig.DEFAULT_REQUEST_TIMEOUT)); } /** @@ -106,7 +118,8 @@ public class KinesisDataFetcher implements DataFetcher { * @param kinesisClient * @param kinesisDataFetcherProviderConfig */ - public KinesisDataFetcher(KinesisAsyncClient kinesisClient, DataFetcherProviderConfig kinesisDataFetcherProviderConfig) { + public KinesisDataFetcher( + KinesisAsyncClient kinesisClient, DataFetcherProviderConfig kinesisDataFetcherProviderConfig) { this.kinesisClient = kinesisClient; this.maxFutureWait = kinesisDataFetcherProviderConfig.getKinesisRequestTimeout(); this.maxRecords = kinesisDataFetcherProviderConfig.getMaxRecords(); @@ -118,6 +131,7 @@ public class KinesisDataFetcher implements DataFetcher { @Getter private boolean isShardEndReached; + private boolean isInitialized; private String lastKnownSequenceNumber; private InitialPositionInStreamExtended initialPositionInStream; @@ -147,15 +161,15 @@ public class KinesisDataFetcher implements DataFetcher { // CHECKSTYLE.OFF: MemberName final DataFetcherResult TERMINAL_RESULT = new DataFetcherResult() { - // CHECKSTYLE.ON: MemberName + // CHECKSTYLE.ON: MemberName @Override public GetRecordsResponse getResult() { return GetRecordsResponse.builder() - .millisBehindLatest(null) - .records(Collections.emptyList()) - .nextShardIterator(null) - .childShards(Collections.emptyList()) - .build(); + .millisBehindLatest(null) + .records(Collections.emptyList()) + .nextShardIterator(null) + .childShards(Collections.emptyList()) + .build(); } @Override @@ -204,16 +218,17 @@ public class KinesisDataFetcher implements DataFetcher { * @param initialPositionInStream The initialPositionInStream. 
*/ @Override - public void initialize(final String initialCheckpoint, - final InitialPositionInStreamExtended initialPositionInStream) { + public void initialize( + final String initialCheckpoint, final InitialPositionInStreamExtended initialPositionInStream) { log.info("Initializing shard {} with {}", streamAndShardId, initialCheckpoint); advanceIteratorTo(initialCheckpoint, initialPositionInStream); isInitialized = true; } @Override - public void initialize(final ExtendedSequenceNumber initialCheckpoint, - final InitialPositionInStreamExtended initialPositionInStream) { + public void initialize( + final ExtendedSequenceNumber initialCheckpoint, + final InitialPositionInStreamExtended initialPositionInStream) { log.info("Initializing shard {} with {}", streamAndShardId, initialCheckpoint.sequenceNumber()); advanceIteratorTo(initialCheckpoint.sequenceNumber(), initialPositionInStream); isInitialized = true; @@ -226,27 +241,31 @@ public class KinesisDataFetcher implements DataFetcher { * @param initialPositionInStream The initialPositionInStream. 
*/ @Override - public void advanceIteratorTo(final String sequenceNumber, - final InitialPositionInStreamExtended initialPositionInStream) { + public void advanceIteratorTo( + final String sequenceNumber, final InitialPositionInStreamExtended initialPositionInStream) { advanceIteratorTo(sequenceNumber, initialPositionInStream, false); } - private void advanceIteratorTo(final String sequenceNumber, - final InitialPositionInStreamExtended initialPositionInStream, - boolean isIteratorRestart) { + private void advanceIteratorTo( + final String sequenceNumber, + final InitialPositionInStreamExtended initialPositionInStream, + boolean isIteratorRestart) { if (sequenceNumber == null) { throw new IllegalArgumentException("SequenceNumber should not be null: shardId " + shardId); } GetShardIteratorRequest.Builder builder = KinesisRequestsBuilder.getShardIteratorRequestBuilder() - .streamName(streamIdentifier.streamName()).shardId(shardId); + .streamName(streamIdentifier.streamName()) + .shardId(shardId); streamIdentifier.streamArnOptional().ifPresent(arn -> builder.streamARN(arn.toString())); GetShardIteratorRequest request; if (isIteratorRestart) { - request = IteratorBuilder.reconnectRequest(builder, sequenceNumber, initialPositionInStream).build(); + request = IteratorBuilder.reconnectRequest(builder, sequenceNumber, initialPositionInStream) + .build(); } else { - request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStream).build(); + request = IteratorBuilder.request(builder, sequenceNumber, initialPositionInStream) + .build(); } log.debug("[GetShardIterator] Request has parameters {}", request); @@ -273,8 +292,12 @@ public class KinesisDataFetcher implements DataFetcher { log.info("Caught ResourceNotFoundException when getting an iterator for shard {}", streamAndShardId, e); nextIterator = null; } finally { - MetricsUtil.addSuccessAndLatency(metricsScope, String.format("%s.%s", METRICS_PREFIX, "getShardIterator"), - success, startTime, 
MetricsLevel.DETAILED); + MetricsUtil.addSuccessAndLatency( + metricsScope, + String.format("%s.%s", METRICS_PREFIX, "getShardIterator"), + success, + startTime, + MetricsLevel.DETAILED); MetricsUtil.endScope(metricsScope); } @@ -295,26 +318,29 @@ public class KinesisDataFetcher implements DataFetcher { throw new IllegalStateException( "Make sure to initialize the KinesisDataFetcher before restarting the iterator."); } - log.debug("Restarting iterator for sequence number {} on shard id {}", - lastKnownSequenceNumber, streamAndShardId); + log.debug( + "Restarting iterator for sequence number {} on shard id {}", lastKnownSequenceNumber, streamAndShardId); advanceIteratorTo(lastKnownSequenceNumber, initialPositionInStream, true); } @Override - public void resetIterator(String shardIterator, String sequenceNumber, InitialPositionInStreamExtended initialPositionInStream) { + public void resetIterator( + String shardIterator, String sequenceNumber, InitialPositionInStreamExtended initialPositionInStream) { this.nextIterator = shardIterator; this.lastKnownSequenceNumber = sequenceNumber; this.initialPositionInStream = initialPositionInStream; } @Override - public GetRecordsResponse getGetRecordsResponse(GetRecordsRequest request) throws ExecutionException, InterruptedException, TimeoutException { - final GetRecordsResponse response = FutureUtils.resolveOrCancelFuture(kinesisClient.getRecords(request), - maxFutureWait); + public GetRecordsResponse getGetRecordsResponse(GetRecordsRequest request) + throws ExecutionException, InterruptedException, TimeoutException { + final GetRecordsResponse response = + FutureUtils.resolveOrCancelFuture(kinesisClient.getRecords(request), maxFutureWait); if (!isValidResult(response.nextShardIterator(), response.childShards())) { throw new RetryableRetrievalException("GetRecords response is not valid for shard: " + streamAndShardId + ". nextShardIterator: " + response.nextShardIterator() - + ". childShards: " + response.childShards() + ". 
Will retry GetRecords with the same nextIterator."); + + ". childShards: " + response.childShards() + + ". Will retry GetRecords with the same nextIterator."); } return response; } @@ -322,15 +348,17 @@ public class KinesisDataFetcher implements DataFetcher { @Override public GetRecordsRequest getGetRecordsRequest(String nextIterator) { GetRecordsRequest.Builder builder = KinesisRequestsBuilder.getRecordsRequestBuilder() - .shardIterator(nextIterator).limit(maxRecords); + .shardIterator(nextIterator) + .limit(maxRecords); streamIdentifier.streamArnOptional().ifPresent(arn -> builder.streamARN(arn.toString())); return builder.build(); } @Override - public String getNextIterator(GetShardIteratorRequest request) throws ExecutionException, InterruptedException, TimeoutException { - final GetShardIteratorResponse result = FutureUtils - .resolveOrCancelFuture(kinesisClient.getShardIterator(request), maxFutureWait); + public String getNextIterator(GetShardIteratorRequest request) + throws ExecutionException, InterruptedException, TimeoutException { + final GetShardIteratorResponse result = + FutureUtils.resolveOrCancelFuture(kinesisClient.getShardIterator(request), maxFutureWait); return result.shardIterator(); } @@ -341,7 +369,7 @@ public class KinesisDataFetcher implements DataFetcher { final MetricsScope metricsScope = MetricsUtil.createMetricsWithOperation(metricsFactory, OPERATION); MetricsUtil.addStreamId(metricsScope, streamIdentifier); MetricsUtil.addShardId(metricsScope, shardId); - boolean success = false ; + boolean success = false; long startTime = System.currentTimeMillis(); try { final GetRecordsResponse response = getGetRecordsResponse(request); @@ -356,8 +384,12 @@ public class KinesisDataFetcher implements DataFetcher { } catch (TimeoutException e) { throw new RetryableRetrievalException(e.getMessage(), e); } finally { - MetricsUtil.addSuccessAndLatency(metricsScope, String.format("%s.%s", METRICS_PREFIX, "getRecords"), - success, startTime, 
MetricsLevel.DETAILED); + MetricsUtil.addSuccessAndLatency( + metricsScope, + String.format("%s.%s", METRICS_PREFIX, "getRecords"), + success, + startTime, + MetricsLevel.DETAILED); MetricsUtil.endScope(metricsScope); } } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PollingConfig.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PollingConfig.java index cbbf8d11..1fe924d7 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PollingConfig.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PollingConfig.java @@ -148,8 +148,8 @@ public class PollingConfig implements RetrievalSpecificConfig { public PollingConfig maxRecords(int maxRecords) { if (maxRecords > DEFAULT_MAX_RECORDS) { - throw new IllegalArgumentException( - "maxRecords must be less than or equal to " + DEFAULT_MAX_RECORDS + " but current value is " + maxRecords()); + throw new IllegalArgumentException("maxRecords must be less than or equal to " + DEFAULT_MAX_RECORDS + + " but current value is " + maxRecords()); } this.maxRecords = maxRecords; return this; @@ -166,8 +166,13 @@ public class PollingConfig implements RetrievalSpecificConfig { if (usePollingConfigIdleTimeValue) { recordsFetcherFactory.idleMillisBetweenCalls(idleTimeBetweenReadsInMillis); } - return new SynchronousBlockingRetrievalFactory(streamName(), kinesisClient(), recordsFetcherFactory, - maxRecords(), kinesisRequestTimeout, dataFetcherProvider); + return new SynchronousBlockingRetrievalFactory( + streamName(), + kinesisClient(), + recordsFetcherFactory, + maxRecords(), + kinesisRequestTimeout, + dataFetcherProvider); } @Override diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java index b94c7cf1..02e2f7f5 
100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisher.java @@ -15,8 +15,6 @@ package software.amazon.kinesis.retrieval.polling; -import com.google.common.annotations.VisibleForTesting; - import java.time.Duration; import java.time.Instant; import java.util.List; @@ -27,6 +25,8 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; + +import com.google.common.annotations.VisibleForTesting; import lombok.AccessLevel; import lombok.Data; import lombok.Getter; @@ -62,6 +62,7 @@ import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.retrieval.RecordsRetrieved; import software.amazon.kinesis.retrieval.RetryableRetrievalException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + import static software.amazon.kinesis.common.DiagnosticUtils.takeDelayedDeliveryActionIfRequired; /** @@ -99,8 +100,11 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { private final String streamAndShardId; private final long awaitTerminationTimeoutMillis; private Subscriber subscriber; - @VisibleForTesting @Getter + + @VisibleForTesting + @Getter private final PublisherSession publisherSession; + private final ReentrantReadWriteLock resetLock = new ReentrantReadWriteLock(); private boolean wasReset = false; private Instant lastEventDeliveryTime = Instant.EPOCH; @@ -110,15 +114,19 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { @Accessors(fluent = true) static final class PublisherSession { private final AtomicLong requestedResponses = new AtomicLong(0); - @VisibleForTesting @Getter + + @VisibleForTesting + @Getter private final LinkedBlockingQueue prefetchRecordsQueue; + private final 
PrefetchCounters prefetchCounters; private final DataFetcher dataFetcher; private InitialPositionInStreamExtended initialPositionInStreamExtended; private String highestSequenceNumber; // Initialize the session on publisher start. - void init(ExtendedSequenceNumber extendedSequenceNumber, + void init( + ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended) { this.initialPositionInStreamExtended = initialPositionInStreamExtended; this.highestSequenceNumber = extendedSequenceNumber.sequenceNumber(); @@ -134,16 +142,18 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { prefetchRecordsQueue.clear(); prefetchCounters.reset(); highestSequenceNumber = prefetchRecordsRetrieved.lastBatchSequenceNumber(); - dataFetcher.resetIterator(prefetchRecordsRetrieved.shardIterator(), highestSequenceNumber, - initialPositionInStreamExtended); + dataFetcher.resetIterator( + prefetchRecordsRetrieved.shardIterator(), highestSequenceNumber, initialPositionInStreamExtended); } // Handle records delivery ack and execute nextEventDispatchAction. // This method is not thread-safe and needs to be called after acquiring a monitor. - void handleRecordsDeliveryAck(RecordsDeliveryAck recordsDeliveryAck, String streamAndShardId, Runnable nextEventDispatchAction) { + void handleRecordsDeliveryAck( + RecordsDeliveryAck recordsDeliveryAck, String streamAndShardId, Runnable nextEventDispatchAction) { final PrefetchRecordsRetrieved recordsToCheck = peekNextRecord(); // Verify if the ack matches the head of the queue and evict it. 
- if (recordsToCheck != null && recordsToCheck.batchUniqueIdentifier().equals(recordsDeliveryAck.batchUniqueIdentifier())) { + if (recordsToCheck != null + && recordsToCheck.batchUniqueIdentifier().equals(recordsDeliveryAck.batchUniqueIdentifier())) { evictPublishedRecordAndUpdateDemand(streamAndShardId); nextEventDispatchAction.run(); } else { @@ -152,8 +162,12 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { // to happen. final BatchUniqueIdentifier peekedBatchUniqueIdentifier = recordsToCheck == null ? null : recordsToCheck.batchUniqueIdentifier(); - log.info("{} : Received a stale notification with id {} instead of expected id {} at {}. Will ignore.", - streamAndShardId, recordsDeliveryAck.batchUniqueIdentifier(), peekedBatchUniqueIdentifier, Instant.now()); + log.info( + "{} : Received a stale notification with id {} instead of expected id {} at {}. Will ignore.", + streamAndShardId, + recordsDeliveryAck.batchUniqueIdentifier(), + peekedBatchUniqueIdentifier, + Instant.now()); } } @@ -167,7 +181,8 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { } else { log.info( "{}: No record batch found while evicting from the prefetch queue. 
This indicates the prefetch buffer" - + " was reset.", streamAndShardId); + + " was reset.", + streamAndShardId); } return result; } @@ -180,7 +195,8 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { return prefetchRecordsQueue.peek(); } - boolean offerRecords(PrefetchRecordsRetrieved recordsRetrieved, long idleMillisBetweenCalls) throws InterruptedException { + boolean offerRecords(PrefetchRecordsRetrieved recordsRetrieved, long idleMillisBetweenCalls) + throws InterruptedException { return prefetchRecordsQueue.offer(recordsRetrieved, idleMillisBetweenCalls, TimeUnit.MILLISECONDS); } @@ -188,15 +204,14 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { prefetchCounters.removed(result.processRecordsInput); requestedResponses.decrementAndGet(); } - } /** * Constructor for the PrefetchRecordsPublisher. This cache prefetches records from Kinesis and stores them in a * LinkedBlockingQueue. - * + * * @see PrefetchRecordsPublisher - * + * * @param maxPendingProcessRecordsInput Max number of ProcessRecordsInput that can be held in the cache before * blocking * @param maxByteSize Max byte size of the queue before blocking next get records call @@ -207,22 +222,27 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { * @param idleMillisBetweenCalls maximum time to wait before dispatching the next get records call * @param awaitTerminationTimeoutMillis maximum time to wait for graceful shutdown of executorService */ - public PrefetchRecordsPublisher(final int maxPendingProcessRecordsInput, final int maxByteSize, final int maxRecordsCount, - final int maxRecordsPerCall, - @NonNull final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, - @NonNull final ExecutorService executorService, - final long idleMillisBetweenCalls, - @NonNull final MetricsFactory metricsFactory, - @NonNull final String operation, - @NonNull final String shardId, - final long awaitTerminationTimeoutMillis) { + public 
PrefetchRecordsPublisher( + final int maxPendingProcessRecordsInput, + final int maxByteSize, + final int maxRecordsCount, + final int maxRecordsPerCall, + @NonNull final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, + @NonNull final ExecutorService executorService, + final long idleMillisBetweenCalls, + @NonNull final MetricsFactory metricsFactory, + @NonNull final String operation, + @NonNull final String shardId, + final long awaitTerminationTimeoutMillis) { this.getRecordsRetrievalStrategy = getRecordsRetrievalStrategy; this.maxRecordsPerCall = maxRecordsPerCall; this.maxPendingProcessRecordsInput = maxPendingProcessRecordsInput; this.maxByteSize = maxByteSize; this.maxRecordsCount = maxRecordsCount; - this.publisherSession = new PublisherSession(new LinkedBlockingQueue<>(this.maxPendingProcessRecordsInput), - new PrefetchCounters(), this.getRecordsRetrievalStrategy.dataFetcher()); + this.publisherSession = new PublisherSession( + new LinkedBlockingQueue<>(this.maxPendingProcessRecordsInput), + new PrefetchCounters(), + this.getRecordsRetrievalStrategy.dataFetcher()); this.executorService = executorService; this.metricsFactory = new ThreadSafeMetricsDelegatingFactory(metricsFactory); this.idleMillisBetweenCalls = idleMillisBetweenCalls; @@ -249,22 +269,35 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { * @param executorService Executor service for the cache * @param idleMillisBetweenCalls maximum time to wait before dispatching the next get records call */ - public PrefetchRecordsPublisher(final int maxPendingProcessRecordsInput, final int maxByteSize, final int maxRecordsCount, - final int maxRecordsPerCall, - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, - final ExecutorService executorService, - final long idleMillisBetweenCalls, - final MetricsFactory metricsFactory, - final String operation, - final String shardId) { - this(maxPendingProcessRecordsInput, maxByteSize, maxRecordsCount, maxRecordsPerCall, - 
getRecordsRetrievalStrategy, executorService, idleMillisBetweenCalls, - metricsFactory, operation, shardId, - DEFAULT_AWAIT_TERMINATION_TIMEOUT_MILLIS); + public PrefetchRecordsPublisher( + final int maxPendingProcessRecordsInput, + final int maxByteSize, + final int maxRecordsCount, + final int maxRecordsPerCall, + final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, + final ExecutorService executorService, + final long idleMillisBetweenCalls, + final MetricsFactory metricsFactory, + final String operation, + final String shardId) { + this( + maxPendingProcessRecordsInput, + maxByteSize, + maxRecordsCount, + maxRecordsPerCall, + getRecordsRetrievalStrategy, + executorService, + idleMillisBetweenCalls, + metricsFactory, + operation, + shardId, + DEFAULT_AWAIT_TERMINATION_TIMEOUT_MILLIS); } @Override - public void start(ExtendedSequenceNumber extendedSequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended) { + public void start( + ExtendedSequenceNumber extendedSequenceNumber, + InitialPositionInStreamExtended initialPositionInStreamExtended) { if (executorService.isShutdown()) { throw new IllegalStateException("ExecutorService has been shutdown."); } @@ -410,11 +443,17 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { final String lastBatchSequenceNumber; final String shardIterator; final BatchUniqueIdentifier batchUniqueIdentifier; - @Accessors(fluent = false) @Setter(AccessLevel.NONE) boolean dispatched = false; + + @Accessors(fluent = false) + @Setter(AccessLevel.NONE) + boolean dispatched = false; PrefetchRecordsRetrieved prepareForPublish() { - return new PrefetchRecordsRetrieved(processRecordsInput.toBuilder().cacheExitTime(Instant.now()).build(), - lastBatchSequenceNumber, shardIterator, batchUniqueIdentifier); + return new PrefetchRecordsRetrieved( + processRecordsInput.toBuilder().cacheExitTime(Instant.now()).build(), + lastBatchSequenceNumber, + shardIterator, + batchUniqueIdentifier); } @Override @@ 
-423,30 +462,32 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { } // Indicates if this record batch was already dispatched for delivery. - void dispatched() { dispatched = true; } + void dispatched() { + dispatched = true; + } /** * Generate batch unique identifier for PrefetchRecordsRetrieved, where flow will be empty. * @return BatchUniqueIdentifier */ public static BatchUniqueIdentifier generateBatchUniqueIdentifier() { - return new BatchUniqueIdentifier(UUID.randomUUID().toString(), - StringUtils.EMPTY); + return new BatchUniqueIdentifier(UUID.randomUUID().toString(), StringUtils.EMPTY); } - } private String calculateHighestSequenceNumber(ProcessRecordsInput processRecordsInput) { String result = publisherSession.highestSequenceNumber(); - if (processRecordsInput.records() != null && !processRecordsInput.records().isEmpty()) { - result = processRecordsInput.records().get(processRecordsInput.records().size() - 1).sequenceNumber(); + if (processRecordsInput.records() != null + && !processRecordsInput.records().isEmpty()) { + result = processRecordsInput + .records() + .get(processRecordsInput.records().size() - 1) + .sequenceNumber(); } return result; } - private static class PositionResetException extends RuntimeException { - - } + private static class PositionResetException extends RuntimeException {} private class DefaultGetRecordsCacheDaemon implements Runnable { volatile boolean isShutdown = false; @@ -462,16 +503,19 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { try { resetLock.readLock().lock(); makeRetrievalAttempt(); - } catch(PositionResetException pre) { + } catch (PositionResetException pre) { log.debug("{} : Position was reset while attempting to add item to queue.", streamAndShardId); } catch (Throwable e) { if (e instanceof InterruptedException) { Thread.currentThread().interrupt(); } - log.error("{} : Unexpected exception was thrown. This could probably be an issue or a bug." 
+ - " Please search for the exception/error online to check what is going on. If the " + - "issue persists or is a recurring problem, feel free to open an issue on, " + - "https://github.com/awslabs/amazon-kinesis-client.", streamAndShardId, e); + log.error( + "{} : Unexpected exception was thrown. This could probably be an issue or a bug." + + " Please search for the exception/error online to check what is going on. If the " + + "issue persists or is a recurring problem, feel free to open an issue on, " + + "https://github.com/awslabs/amazon-kinesis-client.", + streamAndShardId, + e); } finally { resetLock.readLock().unlock(); } @@ -488,36 +532,52 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { lastSuccessfulCall = Instant.now(); final List records = getRecordsResult.records().stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); + .map(KinesisClientRecord::fromRecord) + .collect(Collectors.toList()); ProcessRecordsInput processRecordsInput = ProcessRecordsInput.builder() .records(records) .millisBehindLatest(getRecordsResult.millisBehindLatest()) .cacheEntryTime(lastSuccessfulCall) - .isAtShardEnd(getRecordsRetrievalStrategy.dataFetcher().isShardEndReached()) + .isAtShardEnd( + getRecordsRetrievalStrategy.dataFetcher().isShardEndReached()) .childShards(getRecordsResult.childShards()) .build(); - PrefetchRecordsRetrieved recordsRetrieved = new PrefetchRecordsRetrieved(processRecordsInput, - calculateHighestSequenceNumber(processRecordsInput), getRecordsResult.nextShardIterator(), + PrefetchRecordsRetrieved recordsRetrieved = new PrefetchRecordsRetrieved( + processRecordsInput, + calculateHighestSequenceNumber(processRecordsInput), + getRecordsResult.nextShardIterator(), PrefetchRecordsRetrieved.generateBatchUniqueIdentifier()); publisherSession.highestSequenceNumber(recordsRetrieved.lastBatchSequenceNumber); - log.debug("Last sequence number retrieved for streamAndShardId {} is {}", streamAndShardId, + log.debug( + 
"Last sequence number retrieved for streamAndShardId {} is {}", + streamAndShardId, recordsRetrieved.lastBatchSequenceNumber); addArrivedRecordsInput(recordsRetrieved); drainQueueForRequests(); } catch (PositionResetException pse) { throw pse; } catch (RetryableRetrievalException rre) { - log.info("{} : Timeout occurred while waiting for response from Kinesis. Will retry the request.", streamAndShardId); + log.info( + "{} : Timeout occurred while waiting for response from Kinesis. Will retry the request.", + streamAndShardId); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - log.info("{} : Thread was interrupted, indicating shutdown was called on the cache.", streamAndShardId); + log.info( + "{} : Thread was interrupted, indicating shutdown was called on the cache.", + streamAndShardId); } catch (InvalidArgumentException e) { - log.info("{} : records threw InvalidArgumentException - iterator will be refreshed before retrying", streamAndShardId, e); + log.info( + "{} : records threw InvalidArgumentException - iterator will be refreshed before retrying", + streamAndShardId, + e); publisherSession.dataFetcher().restartIterator(); } catch (ExpiredIteratorException e) { - log.info("{} : records threw ExpiredIteratorException - restarting" - + " after greatest seqNum passed to customer", streamAndShardId, e); + log.info( + "{} : records threw ExpiredIteratorException - restarting" + + " after greatest seqNum passed to customer", + streamAndShardId, + e); MetricsUtil.addStreamId(scope, streamId); scope.addData(EXPIRED_ITERATOR_METRIC, 1, StandardUnit.COUNT, MetricsLevel.SUMMARY); @@ -541,8 +601,10 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { publisherSession.prefetchCounters().waitForConsumer(); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); - log.info("{} : Thread was interrupted while waiting for the consumer. 
" + - "Shutdown has probably been started", streamAndShardId); + log.info( + "{} : Thread was interrupted while waiting for the consumer. " + + "Shutdown has probably been started", + streamAndShardId); } } } @@ -563,7 +625,8 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { Thread.sleep(idleMillisBetweenCalls); return; } - long timeSinceLastCall = Duration.between(lastSuccessfulCall, Instant.now()).abs().toMillis(); + long timeSinceLastCall = + Duration.between(lastSuccessfulCall, Instant.now()).abs().toMillis(); if (timeSinceLastCall < idleMillisBetweenCalls) { Thread.sleep(idleMillisBetweenCalls - timeSinceLastCall); } @@ -593,12 +656,15 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { } private long getByteSize(final ProcessRecordsInput result) { - return result.records().stream().mapToLong(record -> record.data().limit()).sum(); + return result.records().stream() + .mapToLong(record -> record.data().limit()) + .sum(); } public synchronized void waitForConsumer() throws InterruptedException { if (!shouldGetNewRecords()) { - log.debug("{} : Queue is full waiting for consumer for {} ms", streamAndShardId, idleMillisBetweenCalls); + log.debug( + "{} : Queue is full waiting for consumer for {} ms", streamAndShardId, idleMillisBetweenCalls); this.wait(idleMillisBetweenCalls); } } @@ -617,9 +683,9 @@ public class PrefetchRecordsPublisher implements RecordsPublisher { @Override public String toString() { - return String.format("{ Requests: %d, Records: %d, Bytes: %d }", publisherSession.prefetchRecordsQueue().size(), size, - byteSize); + return String.format( + "{ Requests: %d, Records: %d, Bytes: %d }", + publisherSession.prefetchRecordsQueue().size(), size, byteSize); } } - } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SimpleRecordsFetcherFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SimpleRecordsFetcherFactory.java index 
a74e3f31..2f1dea62 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SimpleRecordsFetcherFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SimpleRecordsFetcherFactory.java @@ -17,7 +17,6 @@ package software.amazon.kinesis.retrieval.polling; import java.util.concurrent.Executors; import com.google.common.util.concurrent.ThreadFactoryBuilder; - import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.annotations.KinesisClientInternalApi; import software.amazon.kinesis.metrics.MetricsFactory; @@ -36,26 +35,37 @@ public class SimpleRecordsFetcherFactory implements RecordsFetcherFactory { private DataFetchingStrategy dataFetchingStrategy = DataFetchingStrategy.DEFAULT; @Override - public RecordsPublisher createRecordsFetcher(GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, String shardId, - MetricsFactory metricsFactory, int maxRecords) { + public RecordsPublisher createRecordsFetcher( + GetRecordsRetrievalStrategy getRecordsRetrievalStrategy, + String shardId, + MetricsFactory metricsFactory, + int maxRecords) { - return new PrefetchRecordsPublisher(maxPendingProcessRecordsInput, maxByteSize, maxRecordsCount, maxRecords, + return new PrefetchRecordsPublisher( + maxPendingProcessRecordsInput, + maxByteSize, + maxRecordsCount, + maxRecords, getRecordsRetrievalStrategy, - Executors - .newFixedThreadPool(1, - new ThreadFactoryBuilder().setDaemon(true) - .setNameFormat("prefetch-cache-" + shardId + "-%04d").build()), - idleMillisBetweenCalls, metricsFactory, "ProcessTask", shardId); - + Executors.newFixedThreadPool( + 1, + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("prefetch-cache-" + shardId + "-%04d") + .build()), + idleMillisBetweenCalls, + metricsFactory, + "ProcessTask", + shardId); } @Override - public void maxPendingProcessRecordsInput(int maxPendingProcessRecordsInput){ + public void maxPendingProcessRecordsInput(int 
maxPendingProcessRecordsInput) { this.maxPendingProcessRecordsInput = maxPendingProcessRecordsInput; } @Override - public void maxByteSize(int maxByteSize){ + public void maxByteSize(int maxByteSize) { this.maxByteSize = maxByteSize; } @@ -65,7 +75,7 @@ public class SimpleRecordsFetcherFactory implements RecordsFetcherFactory { } @Override - public void dataFetchingStrategy(DataFetchingStrategy dataFetchingStrategy){ + public void dataFetchingStrategy(DataFetchingStrategy dataFetchingStrategy) { this.dataFetchingStrategy = dataFetchingStrategy; } diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousBlockingRetrievalFactory.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousBlockingRetrievalFactory.java index 9b3190d5..509e261f 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousBlockingRetrievalFactory.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/retrieval/polling/SynchronousBlockingRetrievalFactory.java @@ -17,6 +17,7 @@ package software.amazon.kinesis.retrieval.polling; import java.time.Duration; import java.util.function.Function; + import lombok.Data; import lombok.NonNull; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; @@ -41,8 +42,10 @@ public class SynchronousBlockingRetrievalFactory implements RetrievalFactory { @NonNull private final String streamName; + @NonNull private final KinesisAsyncClient kinesisClient; + @NonNull private final RecordsFetcherFactory recordsFetcherFactory; @@ -51,19 +54,20 @@ public class SynchronousBlockingRetrievalFactory implements RetrievalFactory { private final Function dataFetcherProvider; - public SynchronousBlockingRetrievalFactory(String streamName, - KinesisAsyncClient kinesisClient, - RecordsFetcherFactory recordsFetcherFactory, - int maxRecords, - Duration kinesisRequestTimeout, - Function dataFetcherProvider) { + public 
SynchronousBlockingRetrievalFactory( + String streamName, + KinesisAsyncClient kinesisClient, + RecordsFetcherFactory recordsFetcherFactory, + int maxRecords, + Duration kinesisRequestTimeout, + Function dataFetcherProvider) { this.streamName = streamName; this.kinesisClient = kinesisClient; this.recordsFetcherFactory = recordsFetcherFactory; this.maxRecords = maxRecords; this.kinesisRequestTimeout = kinesisRequestTimeout; - this.dataFetcherProvider = dataFetcherProvider == null ? - defaultDataFetcherProvider(kinesisClient) : dataFetcherProvider; + this.dataFetcherProvider = + dataFetcherProvider == null ? defaultDataFetcherProvider(kinesisClient) : dataFetcherProvider; } private static Function defaultDataFetcherProvider( @@ -71,15 +75,12 @@ public class SynchronousBlockingRetrievalFactory implements RetrievalFactory { return dataFetcherProviderConfig -> new KinesisDataFetcher(kinesisClient, dataFetcherProviderConfig); } - private GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy(@NonNull final ShardInfo shardInfo, + private GetRecordsRetrievalStrategy createGetRecordsRetrievalStrategy( + @NonNull final ShardInfo shardInfo, @NonNull final StreamIdentifier streamIdentifier, @NonNull final MetricsFactory metricsFactory) { final DataFetcherProviderConfig kinesisDataFetcherProviderConfig = new KinesisDataFetcherProviderConfig( - streamIdentifier, - shardInfo.shardId(), - metricsFactory, - maxRecords, - kinesisRequestTimeout); + streamIdentifier, shardInfo.shardId(), metricsFactory, maxRecords, kinesisRequestTimeout); final DataFetcher dataFetcher = this.dataFetcherProvider.apply(kinesisDataFetcherProviderConfig); @@ -87,7 +88,8 @@ public class SynchronousBlockingRetrievalFactory implements RetrievalFactory { } @Override - public RecordsPublisher createGetRecordsCache(@NonNull final ShardInfo shardInfo, + public RecordsPublisher createGetRecordsCache( + @NonNull final ShardInfo shardInfo, @NonNull final StreamConfig streamConfig, @NonNull final 
MetricsFactory metricsFactory) { return recordsFetcherFactory.createRecordsFetcher( diff --git a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/schemaregistry/SchemaRegistryDecoder.java b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/schemaregistry/SchemaRegistryDecoder.java index 76415a85..56742a5e 100644 --- a/amazon-kinesis-client/src/main/java/software/amazon/kinesis/schemaregistry/SchemaRegistryDecoder.java +++ b/amazon-kinesis-client/src/main/java/software/amazon/kinesis/schemaregistry/SchemaRegistryDecoder.java @@ -1,15 +1,15 @@ package software.amazon.kinesis.schemaregistry; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + import com.amazonaws.services.schemaregistry.common.Schema; import com.amazonaws.services.schemaregistry.deserializers.GlueSchemaRegistryDeserializer; import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.common.KinesisClientLibraryPackage; import software.amazon.kinesis.retrieval.KinesisClientRecord; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; - /** * Identifies and decodes Glue Schema Registry data from incoming KinesisClientRecords. 
*/ @@ -18,8 +18,7 @@ public class SchemaRegistryDecoder { private static final String USER_AGENT_APP_NAME = "kcl" + "-" + KinesisClientLibraryPackage.VERSION; private final GlueSchemaRegistryDeserializer glueSchemaRegistryDeserializer; - public SchemaRegistryDecoder( - GlueSchemaRegistryDeserializer glueSchemaRegistryDeserializer) { + public SchemaRegistryDecoder(GlueSchemaRegistryDeserializer glueSchemaRegistryDeserializer) { this.glueSchemaRegistryDeserializer = glueSchemaRegistryDeserializer; this.glueSchemaRegistryDeserializer.overrideUserAgentApp(USER_AGENT_APP_NAME); } @@ -29,8 +28,7 @@ public class SchemaRegistryDecoder { * @param records List * @return List */ - public List decode( - final List records) { + public List decode(final List records) { final List decodedRecords = new ArrayList<>(); for (final KinesisClientRecord record : records) { @@ -58,15 +56,10 @@ public class SchemaRegistryDecoder { final Schema schema = glueSchemaRegistryDeserializer.getSchema(data); final ByteBuffer recordData = ByteBuffer.wrap(glueSchemaRegistryDeserializer.getData(data)); - return - record.toBuilder() - .schema(schema) - .data(recordData) - .build(); + return record.toBuilder().schema(schema).data(recordData).build(); } catch (Exception e) { - log.warn("Unable to decode Glue Schema Registry information from record {}: ", - record.sequenceNumber(), e); - //We ignore Glue Schema Registry failures and return the record. + log.warn("Unable to decode Glue Schema Registry information from record {}: ", record.sequenceNumber(), e); + // We ignore Glue Schema Registry failures and return the record. 
return record; } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestConsumer.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestConsumer.java index 47e87904..bca8284b 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestConsumer.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestConsumer.java @@ -1,11 +1,23 @@ package software.amazon.kinesis.application; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + import com.fasterxml.jackson.databind.ObjectMapper; import lombok.Data; import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.RandomStringUtils; -import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.core.SdkBytes; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; @@ -32,18 +44,6 @@ import software.amazon.kinesis.utils.RecordValidationStatus; import software.amazon.kinesis.utils.ReshardOptions; import software.amazon.kinesis.utils.StreamExistenceManager; -import java.math.BigInteger; -import java.nio.ByteBuffer; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import 
java.util.concurrent.TimeoutException; - import static org.junit.Assume.assumeTrue; @Slf4j @@ -104,7 +104,9 @@ public class TestConsumer { // Sleep to allow the producer/consumer to run and then end the test case. // If non-reshard sleep 3 minutes, else sleep 4 minutes per scale. - final int sleepMinutes = (consumerConfig.getReshardFactorList() == null) ? 3 : (4 * consumerConfig.getReshardFactorList().size()); + final int sleepMinutes = (consumerConfig.getReshardFactorList() == null) + ? 3 + : (4 * consumerConfig.getReshardFactorList().size()); Thread.sleep(TimeUnit.MINUTES.toMillis(sleepMinutes)); // Stops sending dummy data. @@ -129,7 +131,8 @@ public class TestConsumer { } } - private void cleanTestResources(StreamExistenceManager streamExistenceManager, LeaseTableManager leaseTableManager) throws Exception { + private void cleanTestResources(StreamExistenceManager streamExistenceManager, LeaseTableManager leaseTableManager) + throws Exception { log.info("----------Before starting, Cleaning test environment----------"); log.info("----------Deleting all lease tables in account----------"); leaseTableManager.deleteAllResource(); @@ -149,12 +152,11 @@ public class TestConsumer { log.info("----Reshard Config found: {}", consumerConfig.getReshardFactorList()); for (String streamName : consumerConfig.getStreamNames()) { - final StreamScaler streamScaler = new StreamScaler(kinesisClientForStreamOwner, streamName, - consumerConfig.getReshardFactorList(), consumerConfig); + final StreamScaler streamScaler = new StreamScaler( + kinesisClientForStreamOwner, streamName, consumerConfig.getReshardFactorList(), consumerConfig); // Schedule the stream scales 4 minutes apart with 2 minute starting delay - for (int i = 0; i < consumerConfig.getReshardFactorList() - .size(); i++) { + for (int i = 0; i < consumerConfig.getReshardFactorList().size(); i++) { producerExecutor.schedule(streamScaler, (4 * i) + 2, TimeUnit.MINUTES); } } @@ -171,19 +173,19 @@ public class TestConsumer { 
if (consumerConfig.getRetrievalMode().equals(RetrievalMode.POLLING)) { retrievalConfig = consumerConfig.getRetrievalConfig(configsBuilder, null); } else if (consumerConfig.isCrossAccount()) { - retrievalConfig = consumerConfig.getRetrievalConfig(configsBuilder, - streamToConsumerArnsMap); + retrievalConfig = consumerConfig.getRetrievalConfig(configsBuilder, streamToConsumerArnsMap); } else { retrievalConfig = configsBuilder.retrievalConfig(); } checkpointConfig = configsBuilder.checkpointConfig(); coordinatorConfig = configsBuilder.coordinatorConfig(); - leaseManagementConfig = configsBuilder.leaseManagementConfig() + leaseManagementConfig = configsBuilder + .leaseManagementConfig() .initialPositionInStream( - InitialPositionInStreamExtended.newInitialPosition(consumerConfig.getInitialPosition()) - ) - .initialLeaseTableReadCapacity(50).initialLeaseTableWriteCapacity(50); + InitialPositionInStreamExtended.newInitialPosition(consumerConfig.getInitialPosition())) + .initialLeaseTableReadCapacity(50) + .initialLeaseTableWriteCapacity(50); lifecycleConfig = configsBuilder.lifecycleConfig(); processorConfig = configsBuilder.processorConfig(); metricsConfig = configsBuilder.metricsConfig(); @@ -196,8 +198,7 @@ public class TestConsumer { lifecycleConfig, metricsConfig, processorConfig, - retrievalConfig - ); + retrievalConfig); } private void startConsumer() { @@ -220,19 +221,20 @@ public class TestConsumer { for (String streamName : consumerConfig.getStreamNames()) { try { final PutRecordRequest request = PutRecordRequest.builder() - .partitionKey(RandomStringUtils.randomAlphabetic(5, 20)) - .streamName(streamName) - .data(SdkBytes.fromByteBuffer(wrapWithCounter(5, payloadCounter))) // 1024 - // is 1 KB - .build(); - kinesisClientForStreamOwner.putRecord(request) - .get(); + .partitionKey(RandomStringUtils.randomAlphabetic(5, 20)) + .streamName(streamName) + .data(SdkBytes.fromByteBuffer(wrapWithCounter(5, payloadCounter))) // 1024 + // is 1 KB + .build(); + 
kinesisClientForStreamOwner.putRecord(request).get(); // Increment the payload counter if the putRecord call was successful payloadCounter = payloadCounter.add(new BigInteger("1")); successfulPutRecords += 1; - log.info("---------Record published for stream {}, successfulPutRecords is now: {}", - streamName, successfulPutRecords); + log.info( + "---------Record published for stream {}, successfulPutRecords is now: {}", + streamName, + successfulPutRecords); } catch (InterruptedException e) { log.info("Interrupted, assuming shutdown. ", e); } catch (ExecutionException | RuntimeException e) { @@ -267,14 +269,17 @@ public class TestConsumer { private void validateRecordProcessor() throws Exception { log.info("The number of expected records is: {}", successfulPutRecords); - final RecordValidationStatus errorVal = consumerConfig.getRecordValidator().validateRecords(successfulPutRecords); + final RecordValidationStatus errorVal = + consumerConfig.getRecordValidator().validateRecords(successfulPutRecords); if (errorVal != RecordValidationStatus.NO_ERROR) { - throw new RuntimeException("There was an error validating the records that were processed: " + errorVal.toString()); + throw new RuntimeException( + "There was an error validating the records that were processed: " + errorVal.toString()); } log.info("---------Completed validation of processed records.---------"); } - private void deleteResources(StreamExistenceManager streamExistenceManager, LeaseTableManager leaseTableManager) throws Exception { + private void deleteResources(StreamExistenceManager streamExistenceManager, LeaseTableManager leaseTableManager) + throws Exception { log.info("-------------Start deleting streams.---------"); for (String streamName : consumerConfig.getStreamNames()) { log.info("Deleting stream {}", streamName); @@ -295,17 +300,26 @@ public class TestConsumer { private DescribeStreamSummaryRequest describeStreamSummaryRequest; private synchronized void scaleStream() throws 
InterruptedException, ExecutionException { - final DescribeStreamSummaryResponse response = client.describeStreamSummary(describeStreamSummaryRequest).get(); + final DescribeStreamSummaryResponse response = + client.describeStreamSummary(describeStreamSummaryRequest).get(); final int openShardCount = response.streamDescriptionSummary().openShardCount(); final int targetShardCount = scalingFactors.get(scalingFactorIdx).calculateShardCount(openShardCount); - log.info("Scaling stream {} from {} shards to {} shards w/ scaling factor {}", - streamName, openShardCount, targetShardCount, scalingFactors.get(scalingFactorIdx)); + log.info( + "Scaling stream {} from {} shards to {} shards w/ scaling factor {}", + streamName, + openShardCount, + targetShardCount, + scalingFactors.get(scalingFactorIdx)); final UpdateShardCountRequest updateShardCountRequest = UpdateShardCountRequest.builder() - .streamName(streamName).targetShardCount(targetShardCount).scalingType(ScalingType.UNIFORM_SCALING).build(); - final UpdateShardCountResponse shardCountResponse = client.updateShardCount(updateShardCountRequest).get(); + .streamName(streamName) + .targetShardCount(targetShardCount) + .scalingType(ScalingType.UNIFORM_SCALING) + .build(); + final UpdateShardCountResponse shardCountResponse = + client.updateShardCount(updateShardCountRequest).get(); log.info("Executed shard scaling request. 
Response Details : {}", shardCountResponse.toString()); scalingFactorIdx++; @@ -320,7 +334,9 @@ public class TestConsumer { log.info("Starting stream scaling with params : {}", this); if (describeStreamSummaryRequest == null) { - describeStreamSummaryRequest = DescribeStreamSummaryRequest.builder().streamName(streamName).build(); + describeStreamSummaryRequest = DescribeStreamSummaryRequest.builder() + .streamName(streamName) + .build(); } try { scaleStream(); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessor.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessor.java index 037f180b..6d04afcf 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessor.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessor.java @@ -1,21 +1,21 @@ package software.amazon.kinesis.application; +import java.nio.ByteBuffer; + import lombok.extern.slf4j.Slf4j; import org.slf4j.MDC; import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.exceptions.InvalidStateException; import software.amazon.kinesis.exceptions.ShutdownException; +import software.amazon.kinesis.lifecycle.events.InitializationInput; import software.amazon.kinesis.lifecycle.events.LeaseLostInput; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import software.amazon.kinesis.lifecycle.events.ShardEndedInput; import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput; import software.amazon.kinesis.processor.ShardRecordProcessor; -import software.amazon.kinesis.lifecycle.events.InitializationInput; import software.amazon.kinesis.retrieval.KinesisClientRecord; import software.amazon.kinesis.utils.RecordValidatorQueue; -import java.nio.ByteBuffer; - /** * Implement initialization and deletion of shards and shard record processing */ @@ -46,7 +46,6 @@ public class 
TestRecordProcessor implements ShardRecordProcessor { } } - @Override public void processRecords(ProcessRecordsInput processRecordsInput) { MDC.put(SHARD_ID_MDC_KEY, shardId); @@ -109,6 +108,4 @@ public class TestRecordProcessor implements ShardRecordProcessor { MDC.remove(SHARD_ID_MDC_KEY); } } - - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessorFactory.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessorFactory.java index 98f50ca0..e36d9ba2 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessorFactory.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/application/TestRecordProcessorFactory.java @@ -22,5 +22,4 @@ public class TestRecordProcessorFactory implements ShardRecordProcessorFactory { public ShardRecordProcessor shardRecordProcessor(StreamIdentifier streamIdentifier) { return new TestRecordProcessor(streamIdentifier, this.recordValidator); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/CheckpointerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/CheckpointerTest.java index eb341238..b618aa7a 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/CheckpointerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/CheckpointerTest.java @@ -17,7 +17,6 @@ package software.amazon.kinesis.checkpoint; import org.junit.Assert; import org.junit.Before; import org.junit.Test; - import software.amazon.kinesis.processor.Checkpointer; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; @@ -44,7 +43,7 @@ public class CheckpointerTest { ExtendedSequenceNumber registeredCheckpoint = checkpoint.getCheckpoint(shardId); Assert.assertEquals(extendedSequenceNumber, registeredCheckpoint); } - + @Test public final void testAdvancingSetCheckpoint() throws 
Exception { String shardId = "myShardId"; @@ -70,7 +69,8 @@ public class CheckpointerTest { ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(checkpointValue); checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken); Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedSequenceNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); } @@ -86,11 +86,14 @@ public class CheckpointerTest { checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), testConcurrencyToken); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); } - @Test public final void testInitialPrepareCheckpointWithApplicationState() throws Exception { String sequenceNumber = "1"; @@ -101,13 +104,18 @@ public class CheckpointerTest { checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); ExtendedSequenceNumber extendedPendingCheckpointNumber = new ExtendedSequenceNumber(pendingCheckpointValue); - checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), testConcurrencyToken, - applicationState); + checkpoint.prepareCheckpoint( + shardId, new ExtendedSequenceNumber(pendingCheckpointValue), testConcurrencyToken, 
applicationState); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - Assert.assertEquals(applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); } @Test @@ -122,8 +130,12 @@ public class CheckpointerTest { ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedSequenceNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); } } @@ -138,12 +150,17 @@ public class CheckpointerTest { for (Integer i = 0; i < 10; i++) { String sequenceNumber = i.toString(); ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber(sequenceNumber); - checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken, - applicationState); + checkpoint.prepareCheckpoint( + shardId, new ExtendedSequenceNumber(sequenceNumber), testConcurrencyToken, 
applicationState); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedSequenceNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - Assert.assertEquals(applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedSequenceNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); } } @@ -158,20 +175,28 @@ public class CheckpointerTest { ExtendedSequenceNumber extendedCheckpointNumber = new ExtendedSequenceNumber(checkpointValue); checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); // prepare checkpoint ExtendedSequenceNumber extendedPendingCheckpointNumber = new ExtendedSequenceNumber(pendingCheckpointValue); checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + extendedCheckpointNumber, + 
checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); // do checkpoint checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken); Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); } @@ -187,21 +212,31 @@ public class CheckpointerTest { ExtendedSequenceNumber extendedCheckpointNumber = new ExtendedSequenceNumber(checkpointValue); checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(checkpointValue), concurrencyToken); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); // prepare checkpoint ExtendedSequenceNumber extendedPendingCheckpointNumber = new ExtendedSequenceNumber(pendingCheckpointValue); - checkpoint.prepareCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken, applicationState); + checkpoint.prepareCheckpoint( + shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken, applicationState); Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); - Assert.assertEquals(extendedPendingCheckpointNumber, 
checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); - Assert.assertEquals(applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); + Assert.assertEquals( + extendedCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); + Assert.assertEquals( + applicationState, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); // do checkpoint checkpoint.setCheckpoint(shardId, new ExtendedSequenceNumber(pendingCheckpointValue), concurrencyToken); Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpoint(shardId)); - Assert.assertEquals(extendedPendingCheckpointNumber, checkpoint.getCheckpointObject(shardId).checkpoint()); + Assert.assertEquals( + extendedPendingCheckpointNumber, + checkpoint.getCheckpointObject(shardId).checkpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpoint()); Assert.assertEquals(null, checkpoint.getCheckpointObject(shardId).pendingCheckpointState()); } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/InMemoryCheckpointer.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/InMemoryCheckpointer.java index 2a93e83d..635678f0 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/InMemoryCheckpointer.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/InMemoryCheckpointer.java @@ -17,11 +17,10 @@ package software.amazon.kinesis.checkpoint; import java.util.HashMap; import java.util.Map; +import lombok.extern.slf4j.Slf4j; import software.amazon.kinesis.processor.Checkpointer; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import lombok.extern.slf4j.Slf4j; - /** * Everything is stored in memory and there is no fault-tolerance. 
*/ @@ -65,7 +64,10 @@ public class InMemoryCheckpointer implements Checkpointer { } @Override - public void prepareCheckpoint(String leaseKey, ExtendedSequenceNumber pendingCheckpoint, String concurrencyToken, + public void prepareCheckpoint( + String leaseKey, + ExtendedSequenceNumber pendingCheckpoint, + String concurrencyToken, byte[] pendingCheckpointState) { pendingCheckpoints.put(leaseKey, pendingCheckpoint); pendingCheckpointStates.put(leaseKey, pendingCheckpointState); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/SequenceNumberValidatorTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/SequenceNumberValidatorTest.java index ab23e0b4..d2ab2601 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/SequenceNumberValidatorTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/SequenceNumberValidatorTest.java @@ -14,11 +14,11 @@ */ package software.amazon.kinesis.checkpoint; +import java.util.Optional; + import org.junit.Before; import org.junit.Test; -import java.util.Optional; - import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; import static org.junit.Assert.assertThat; @@ -32,7 +32,6 @@ public class SequenceNumberValidatorTest { validator = new SequenceNumberValidator(); } - @Test public void matchingSequenceNumberTest() { String sequenceNumber = "49587497311274533994574834252742144236107130636007899138"; @@ -44,7 +43,8 @@ public class SequenceNumberValidatorTest { Optional shardId = validator.shardIdFor(sequenceNumber); assertThat(shardId, equalTo(Optional.of(expectedShardId))); - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.of(true))); + assertThat( + validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.of(true))); } @Test @@ -58,7 +58,8 @@ public class SequenceNumberValidatorTest { 
Optional shardId = validator.shardIdFor(sequenceNumber); assertThat(shardId, not(equalTo(invalidShardId))); - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, invalidShardId), equalTo(Optional.of(false))); + assertThat( + validator.validateSequenceNumberForShard(sequenceNumber, invalidShardId), equalTo(Optional.of(false))); } @Test @@ -72,7 +73,8 @@ public class SequenceNumberValidatorTest { Optional shardId = validator.shardIdFor(sequenceNumber); assertThat(shardId, equalTo(Optional.empty())); - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); + assertThat( + validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); } @Test @@ -83,7 +85,8 @@ public class SequenceNumberValidatorTest { assertThat(validator.versionFor(sequenceNumber), equalTo(Optional.empty())); assertThat(validator.shardIdFor(sequenceNumber), equalTo(Optional.empty())); - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); + assertThat( + validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); } @Test @@ -94,8 +97,7 @@ public class SequenceNumberValidatorTest { assertThat(validator.versionFor(sequenceNumber), equalTo(Optional.empty())); assertThat(validator.shardIdFor(sequenceNumber), equalTo(Optional.empty())); - assertThat(validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); + assertThat( + validator.validateSequenceNumberForShard(sequenceNumber, expectedShardId), equalTo(Optional.empty())); } - - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointerTest.java index e51616d9..523324d2 100644 --- 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardPreparedCheckpointerTest.java @@ -14,12 +14,12 @@ */ package software.amazon.kinesis.checkpoint; -import software.amazon.kinesis.processor.PreparedCheckpointer; -import software.amazon.kinesis.processor.RecordProcessorCheckpointer; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; +import software.amazon.kinesis.processor.PreparedCheckpointer; +import software.amazon.kinesis.processor.RecordProcessorCheckpointer; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; public class ShardPreparedCheckpointerTest { @@ -60,4 +60,4 @@ public class ShardPreparedCheckpointerTest { // nothing happens here checkpointer.checkpoint(); } -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardShardRecordProcessorCheckpointerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardShardRecordProcessorCheckpointerTest.java index 98ce1dc5..a198dcef 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardShardRecordProcessorCheckpointerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/checkpoint/ShardShardRecordProcessorCheckpointerTest.java @@ -14,11 +14,6 @@ */ package software.amazon.kinesis.checkpoint; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; @@ -28,13 +23,17 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.runners.MockitoJUnitRunner; - import 
software.amazon.awssdk.services.kinesis.model.Record; import software.amazon.kinesis.leases.ShardInfo; import software.amazon.kinesis.processor.Checkpointer; import software.amazon.kinesis.processor.PreparedCheckpointer; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.fail; + /** * */ @@ -88,7 +87,7 @@ public class ShardShardRecordProcessorCheckpointerTest { /** * Test method for * {@link ShardRecordProcessorCheckpointer#checkpoint(Record record)}. - */ + */ @Test public final void testCheckpointRecord() throws Exception { ShardRecordProcessorCheckpointer processingCheckpointer = @@ -100,7 +99,7 @@ public class ShardShardRecordProcessorCheckpointerTest { processingCheckpointer.checkpoint(record); assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); } - + /** * Test method for * {@link ShardRecordProcessorCheckpointer#checkpoint(Record record)}. @@ -112,12 +111,12 @@ public class ShardShardRecordProcessorCheckpointerTest { processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5030"); Record record = makeRecord("5030"); - //UserRecord subRecord = new UserRecord(record); + // UserRecord subRecord = new UserRecord(record); processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); processingCheckpointer.checkpoint(record); assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); } - + /** * Test method for * {@link ShardRecordProcessorCheckpointer#checkpoint(String sequenceNumber)}. 
@@ -132,7 +131,7 @@ public class ShardShardRecordProcessorCheckpointerTest { processingCheckpointer.checkpoint("5035"); assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); } - + /** * Test method for * {@link ShardRecordProcessorCheckpointer#checkpoint(String sequenceNumber, long subSequenceNumber)}. @@ -162,7 +161,6 @@ public class ShardShardRecordProcessorCheckpointerTest { assertThat(checkpoint.getCheckpoint(shardId), equalTo(extendedSequenceNumber)); } - /** * Test method for * {@link ShardRecordProcessorCheckpointer#prepareCheckpoint()}. @@ -231,7 +229,7 @@ public class ShardShardRecordProcessorCheckpointerTest { processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5030"); Record record = makeRecord("5030"); - //UserRecord subRecord = new UserRecord(record); + // UserRecord subRecord = new UserRecord(record); processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); PreparedCheckpointer preparedCheckpoint = processingCheckpointer.prepareCheckpoint(record); assertThat(checkpoint.getCheckpoint(shardId), equalTo(startingExtendedSequenceNumber)); @@ -252,7 +250,8 @@ public class ShardShardRecordProcessorCheckpointerTest { */ @Test public final void testPrepareCheckpointSequenceNumber() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5035"); processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); @@ -275,7 +274,8 @@ public class ShardShardRecordProcessorCheckpointerTest { */ @Test public final void 
testPrepareCheckpointExtendedSequenceNumber() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); ExtendedSequenceNumber extendedSequenceNumber = new ExtendedSequenceNumber("5040"); processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); @@ -297,11 +297,13 @@ public class ShardShardRecordProcessorCheckpointerTest { */ @Test public final void testPrepareCheckpointAtShardEnd() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); ExtendedSequenceNumber extendedSequenceNumber = ExtendedSequenceNumber.SHARD_END; processingCheckpointer.largestPermittedCheckpointValue(extendedSequenceNumber); - PreparedCheckpointer preparedCheckpoint = processingCheckpointer.prepareCheckpoint(ExtendedSequenceNumber.SHARD_END.sequenceNumber()); + PreparedCheckpointer preparedCheckpoint = + processingCheckpointer.prepareCheckpoint(ExtendedSequenceNumber.SHARD_END.sequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(startingExtendedSequenceNumber)); assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(startingExtendedSequenceNumber)); assertThat(preparedCheckpoint.pendingCheckpoint(), equalTo(extendedSequenceNumber)); @@ -314,13 +316,13 @@ public class ShardShardRecordProcessorCheckpointerTest { assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); } - /** * Test that having multiple outstanding prepared checkpointers works 
if they are redeemed in the right order. */ @Test public final void testMultipleOutstandingCheckpointersHappyCase() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber("6040")); @@ -351,7 +353,8 @@ public class ShardShardRecordProcessorCheckpointerTest { */ @Test public final void testMultipleOutstandingCheckpointersOutOfOrder() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); processingCheckpointer.setInitialCheckpointValue(startingExtendedSequenceNumber); processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber("7040")); @@ -404,7 +407,8 @@ public class ShardShardRecordProcessorCheckpointerTest { */ @Test public final void testClientSpecifiedCheckpoint() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); // Several checkpoints we're gonna hit ExtendedSequenceNumber tooSmall = new ExtendedSequenceNumber("2"); @@ -426,55 +430,68 @@ public class ShardShardRecordProcessorCheckpointerTest { } // advance to first - processingCheckpointer.checkpoint(firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); 
assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); - processingCheckpointer.checkpoint(firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); // advance to second - processingCheckpointer.checkpoint(secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(secondSequenceNumber)); - ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt = - { tooSmall, // Shouldn't be able to move before the first value we ever checkpointed - firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number - tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer - lastSequenceNumberOfShard, // Just another big value that we will use later - null, // Not a valid sequence number - new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string - ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max - ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value - ExtendedSequenceNumber.LATEST, // Can't go back to an initial sentinel value + ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt = { + tooSmall, // Shouldn't be able to move before the first value we ever checkpointed + firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number + tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer + lastSequenceNumberOfShard, // Just another big value that we will use later + null, // Not a valid sequence number + new 
ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string + ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max + ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value + ExtendedSequenceNumber.LATEST, // Can't go back to an initial sentinel value }; for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) { try { - processingCheckpointer.checkpoint(badCheckpointValue.sequenceNumber(), badCheckpointValue.subSequenceNumber()); + processingCheckpointer.checkpoint( + badCheckpointValue.sequenceNumber(), badCheckpointValue.subSequenceNumber()); fail("checkpointing at bad or out of order sequence didn't throw exception"); } catch (IllegalArgumentException e) { } catch (NullPointerException e) { - + } - assertThat("Checkpoint value should not have changed", checkpoint.getCheckpoint(shardId), + assertThat( + "Checkpoint value should not have changed", + checkpoint.getCheckpoint(shardId), equalTo(secondSequenceNumber)); - assertThat("Last checkpoint value should not have changed", processingCheckpointer.lastCheckpointValue(), + assertThat( + "Last checkpoint value should not have changed", + processingCheckpointer.lastCheckpointValue(), equalTo(secondSequenceNumber)); - assertThat("Largest sequence number should not have changed", - processingCheckpointer.largestPermittedCheckpointValue(), equalTo(thirdSequenceNumber)); + assertThat( + "Largest sequence number should not have changed", + processingCheckpointer.largestPermittedCheckpointValue(), + equalTo(thirdSequenceNumber)); } // advance to third number - processingCheckpointer.checkpoint(thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(thirdSequenceNumber)); // Testing a feature that prevents 
checkpointing at SHARD_END twice processingCheckpointer.largestPermittedCheckpointValue(lastSequenceNumberOfShard); processingCheckpointer.sequenceNumberAtShardEnd(processingCheckpointer.largestPermittedCheckpointValue()); processingCheckpointer.largestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END); - processingCheckpointer.checkpoint(lastSequenceNumberOfShard.sequenceNumber(), lastSequenceNumberOfShard.subSequenceNumber()); + processingCheckpointer.checkpoint( + lastSequenceNumberOfShard.sequenceNumber(), lastSequenceNumberOfShard.subSequenceNumber()); assertThat( "Checkpoing at the sequence number at the end of a shard should be the same as checkpointing at SHARD_END", - processingCheckpointer.lastCheckpointValue(), equalTo(ExtendedSequenceNumber.SHARD_END)); + processingCheckpointer.lastCheckpointValue(), + equalTo(ExtendedSequenceNumber.SHARD_END)); } /** @@ -485,7 +502,8 @@ public class ShardShardRecordProcessorCheckpointerTest { */ @Test public final void testClientSpecifiedTwoPhaseCheckpoint() throws Exception { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); // Several checkpoints we're gonna hit ExtendedSequenceNumber tooSmall = new ExtendedSequenceNumber("2"); @@ -514,12 +532,13 @@ public class ShardShardRecordProcessorCheckpointerTest { } // advance to first - processingCheckpointer.checkpoint(firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); // prepare checkpoint at initial checkpoint value - PreparedCheckpointer doesNothingPreparedCheckpoint = - processingCheckpointer.prepareCheckpoint(firstSequenceNumber.sequenceNumber(), 
firstSequenceNumber.subSequenceNumber()); + PreparedCheckpointer doesNothingPreparedCheckpoint = processingCheckpointer.prepareCheckpoint( + firstSequenceNumber.sequenceNumber(), firstSequenceNumber.subSequenceNumber()); assertThat(doesNothingPreparedCheckpoint instanceof DoesNothingPreparedCheckpointer, equalTo(true)); assertThat(doesNothingPreparedCheckpoint.pendingCheckpoint(), equalTo(firstSequenceNumber)); assertThat(checkpoint.getCheckpoint(shardId), equalTo(firstSequenceNumber)); @@ -533,63 +552,80 @@ public class ShardShardRecordProcessorCheckpointerTest { assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); // advance to second - processingCheckpointer.prepareCheckpoint(secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); + processingCheckpointer.prepareCheckpoint( + secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(secondSequenceNumber)); - processingCheckpointer.checkpoint(secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + secondSequenceNumber.sequenceNumber(), secondSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(secondSequenceNumber)); assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); - ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt = - { tooSmall, // Shouldn't be able to move before the first value we ever checkpointed - firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number - tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer - lastSequenceNumberOfShard, // Just another big value that we will use later - null, // Not a valid sequence number - new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string - 
ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max - ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value - ExtendedSequenceNumber.LATEST, // Can't go back to an initial sentinel value - }; + ExtendedSequenceNumber[] valuesWeShouldNotBeAbleToCheckpointAt = { + tooSmall, // Shouldn't be able to move before the first value we ever checkpointed + firstSequenceNumber, // Shouldn't even be able to move back to a once used sequence number + tooBigSequenceNumber, // Can't exceed the max sequence number in the checkpointer + lastSequenceNumberOfShard, // Just another big value that we will use later + null, // Not a valid sequence number + new ExtendedSequenceNumber("bogus-checkpoint-value"), // Can't checkpoint at non-numeric string + ExtendedSequenceNumber.SHARD_END, // Can't go to the end unless it is set as the max + ExtendedSequenceNumber.TRIM_HORIZON, // Can't go back to an initial sentinel value + ExtendedSequenceNumber.LATEST, // Can't go back to an initial sentinel value + }; for (ExtendedSequenceNumber badCheckpointValue : valuesWeShouldNotBeAbleToCheckpointAt) { try { - processingCheckpointer.prepareCheckpoint(badCheckpointValue.sequenceNumber(), badCheckpointValue.subSequenceNumber()); + processingCheckpointer.prepareCheckpoint( + badCheckpointValue.sequenceNumber(), badCheckpointValue.subSequenceNumber()); fail("checkpointing at bad or out of order sequence didn't throw exception"); } catch (IllegalArgumentException e) { } catch (NullPointerException e) { } - assertThat("Checkpoint value should not have changed", checkpoint.getCheckpoint(shardId), + assertThat( + "Checkpoint value should not have changed", + checkpoint.getCheckpoint(shardId), equalTo(secondSequenceNumber)); - assertThat("Last checkpoint value should not have changed", processingCheckpointer.lastCheckpointValue(), + assertThat( + "Last checkpoint value should not have changed", + processingCheckpointer.lastCheckpointValue(), 
equalTo(secondSequenceNumber)); - assertThat("Largest sequence number should not have changed", - processingCheckpointer.largestPermittedCheckpointValue(), equalTo(thirdSequenceNumber)); + assertThat( + "Largest sequence number should not have changed", + processingCheckpointer.largestPermittedCheckpointValue(), + equalTo(thirdSequenceNumber)); assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); } // advance to third number - processingCheckpointer.prepareCheckpoint(thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); + processingCheckpointer.prepareCheckpoint( + thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(thirdSequenceNumber)); - processingCheckpointer.checkpoint(thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); + processingCheckpointer.checkpoint( + thirdSequenceNumber.sequenceNumber(), thirdSequenceNumber.subSequenceNumber()); assertThat(checkpoint.getCheckpoint(shardId), equalTo(thirdSequenceNumber)); // Testing a feature that prevents checkpointing at SHARD_END twice processingCheckpointer.largestPermittedCheckpointValue(lastSequenceNumberOfShard); processingCheckpointer.sequenceNumberAtShardEnd(processingCheckpointer.largestPermittedCheckpointValue()); processingCheckpointer.largestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END); - processingCheckpointer.prepareCheckpoint(lastSequenceNumberOfShard.sequenceNumber(), lastSequenceNumberOfShard.subSequenceNumber()); + processingCheckpointer.prepareCheckpoint( + lastSequenceNumberOfShard.sequenceNumber(), lastSequenceNumberOfShard.subSequenceNumber()); assertThat( "Preparing a checkpoing at the sequence number at the end of a shard should be the same as preparing a checkpoint at SHARD_END", - checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), equalTo(ExtendedSequenceNumber.SHARD_END)); + 
checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), + equalTo(ExtendedSequenceNumber.SHARD_END)); } private enum CheckpointAction { - NONE, NO_SEQUENCE_NUMBER, WITH_SEQUENCE_NUMBER; + NONE, + NO_SEQUENCE_NUMBER, + WITH_SEQUENCE_NUMBER; } private enum CheckpointerType { - CHECKPOINTER, PREPARED_CHECKPOINTER, PREPARE_THEN_CHECKPOINTER; + CHECKPOINTER, + PREPARED_CHECKPOINTER, + PREPARE_THEN_CHECKPOINTER; } /** @@ -603,7 +639,8 @@ public class ShardShardRecordProcessorCheckpointerTest { @Test public final void testMixedCheckpointCalls() throws Exception { for (LinkedHashMap testPlan : getMixedCallsTestPlan()) { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); testMixedCheckpointCalls(processingCheckpointer, testPlan, CheckpointerType.CHECKPOINTER); } } @@ -618,7 +655,8 @@ public class ShardShardRecordProcessorCheckpointerTest { @Test public final void testMixedTwoPhaseCheckpointCalls() throws Exception { for (LinkedHashMap testPlan : getMixedCallsTestPlan()) { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); testMixedCheckpointCalls(processingCheckpointer, testPlan, CheckpointerType.PREPARED_CHECKPOINTER); } } @@ -635,13 +673,15 @@ public class ShardShardRecordProcessorCheckpointerTest { @Test public final void testMixedTwoPhaseCheckpointCalls2() throws Exception { for (LinkedHashMap testPlan : getMixedCallsTestPlan()) { - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); 
testMixedCheckpointCalls(processingCheckpointer, testPlan, CheckpointerType.PREPARE_THEN_CHECKPOINTER); } } private List> getMixedCallsTestPlan() { - List> testPlans = new ArrayList>(); + List> testPlans = + new ArrayList>(); /* * Simulate a scenario where the checkpointer is created at "latest". @@ -713,9 +753,11 @@ public class ShardShardRecordProcessorCheckpointerTest { * A map describing which checkpoint value to set in the checkpointer, and what action to take * @throws Exception */ - private void testMixedCheckpointCalls(ShardRecordProcessorCheckpointer processingCheckpointer, + private void testMixedCheckpointCalls( + ShardRecordProcessorCheckpointer processingCheckpointer, LinkedHashMap checkpointValueAndAction, - CheckpointerType checkpointerType) throws Exception { + CheckpointerType checkpointerType) + throws Exception { for (Entry entry : checkpointValueAndAction.entrySet()) { PreparedCheckpointer preparedCheckpoint = null; @@ -723,61 +765,68 @@ public class ShardShardRecordProcessorCheckpointerTest { if (SentinelCheckpoint.SHARD_END.toString().equals(entry.getKey())) { // Before shard end, we will pretend to do what we expect the shutdown task to do - processingCheckpointer.sequenceNumberAtShardEnd(processingCheckpointer - .largestPermittedCheckpointValue()); + processingCheckpointer.sequenceNumberAtShardEnd( + processingCheckpointer.largestPermittedCheckpointValue()); } // Advance the largest checkpoint and check that it is updated. 
processingCheckpointer.largestPermittedCheckpointValue(new ExtendedSequenceNumber(entry.getKey())); - assertThat("Expected the largest checkpoint value to be updated after setting it", + assertThat( + "Expected the largest checkpoint value to be updated after setting it", processingCheckpointer.largestPermittedCheckpointValue(), equalTo(new ExtendedSequenceNumber(entry.getKey()))); switch (entry.getValue()) { - case NONE: - // We were told to not checkpoint, so lets just make sure the last checkpoint value is the same as - // when this block started then continue to the next instruction - assertThat("Expected the last checkpoint value to stay the same if we didn't checkpoint", - processingCheckpointer.lastCheckpointValue(), equalTo(lastCheckpointValue)); - continue; - case NO_SEQUENCE_NUMBER: - switch (checkpointerType) { - case CHECKPOINTER: - processingCheckpointer.checkpoint(); - break; - case PREPARED_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); - preparedCheckpoint.checkpoint(); - case PREPARE_THEN_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); - processingCheckpointer.checkpoint( - preparedCheckpoint.pendingCheckpoint().sequenceNumber(), - preparedCheckpoint.pendingCheckpoint().subSequenceNumber()); - } - break; - case WITH_SEQUENCE_NUMBER: - switch (checkpointerType) { - case CHECKPOINTER: - processingCheckpointer.checkpoint(entry.getKey()); - break; - case PREPARED_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey()); - preparedCheckpoint.checkpoint(); - case PREPARE_THEN_CHECKPOINTER: - preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey()); - processingCheckpointer.checkpoint( - preparedCheckpoint.pendingCheckpoint().sequenceNumber(), - preparedCheckpoint.pendingCheckpoint().subSequenceNumber()); - } - break; + case NONE: + // We were told to not checkpoint, so lets just make sure the last checkpoint value is the same as + 
// when this block started then continue to the next instruction + assertThat( + "Expected the last checkpoint value to stay the same if we didn't checkpoint", + processingCheckpointer.lastCheckpointValue(), + equalTo(lastCheckpointValue)); + continue; + case NO_SEQUENCE_NUMBER: + switch (checkpointerType) { + case CHECKPOINTER: + processingCheckpointer.checkpoint(); + break; + case PREPARED_CHECKPOINTER: + preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); + preparedCheckpoint.checkpoint(); + case PREPARE_THEN_CHECKPOINTER: + preparedCheckpoint = processingCheckpointer.prepareCheckpoint(); + processingCheckpointer.checkpoint( + preparedCheckpoint.pendingCheckpoint().sequenceNumber(), + preparedCheckpoint.pendingCheckpoint().subSequenceNumber()); + } + break; + case WITH_SEQUENCE_NUMBER: + switch (checkpointerType) { + case CHECKPOINTER: + processingCheckpointer.checkpoint(entry.getKey()); + break; + case PREPARED_CHECKPOINTER: + preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey()); + preparedCheckpoint.checkpoint(); + case PREPARE_THEN_CHECKPOINTER: + preparedCheckpoint = processingCheckpointer.prepareCheckpoint(entry.getKey()); + processingCheckpointer.checkpoint( + preparedCheckpoint.pendingCheckpoint().sequenceNumber(), + preparedCheckpoint.pendingCheckpoint().subSequenceNumber()); + } + break; } // We must have checkpointed to get here, so let's make sure our last checkpoint value is up to date - assertThat("Expected the last checkpoint value to change after checkpointing", - processingCheckpointer.lastCheckpointValue(), equalTo(new ExtendedSequenceNumber(entry.getKey()))); - assertThat("Expected the largest checkpoint value to remain the same since the last set", + assertThat( + "Expected the last checkpoint value to change after checkpointing", + processingCheckpointer.lastCheckpointValue(), + equalTo(new ExtendedSequenceNumber(entry.getKey()))); + assertThat( + "Expected the largest checkpoint value to remain the same 
since the last set", processingCheckpointer.largestPermittedCheckpointValue(), equalTo(new ExtendedSequenceNumber(entry.getKey()))); assertThat(checkpoint.getCheckpoint(shardId), equalTo(new ExtendedSequenceNumber(entry.getKey()))); - assertThat(checkpoint.getCheckpointObject(shardId).checkpoint(), + assertThat( + checkpoint.getCheckpointObject(shardId).checkpoint(), equalTo(new ExtendedSequenceNumber(entry.getKey()))); assertThat(checkpoint.getCheckpointObject(shardId).pendingCheckpoint(), nullValue()); } @@ -797,7 +846,8 @@ public class ShardShardRecordProcessorCheckpointerTest { @Test public final void testSetMetricsScopeDuringCheckpointing() throws Exception { // First call to checkpoint - ShardRecordProcessorCheckpointer processingCheckpointer = new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); + ShardRecordProcessorCheckpointer processingCheckpointer = + new ShardRecordProcessorCheckpointer(shardInfo, checkpoint); ExtendedSequenceNumber sequenceNumber = new ExtendedSequenceNumber("5019"); processingCheckpointer.largestPermittedCheckpointValue(sequenceNumber); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/ConfigsBuilderTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/ConfigsBuilderTest.java index d84b90f7..1343285e 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/ConfigsBuilderTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/ConfigsBuilderTest.java @@ -18,10 +18,6 @@ package software.amazon.kinesis.common; import java.util.Arrays; import java.util.Optional; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.mockito.Mockito.mock; - import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -36,6 +32,10 @@ import software.amazon.kinesis.processor.ShardRecordProcessorFactory; import software.amazon.kinesis.processor.SingleStreamTracker; import 
software.amazon.kinesis.processor.StreamTracker; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.mockito.Mockito.mock; + @RunWith(MockitoJUnitRunner.class) public class ConfigsBuilderTest { @@ -66,7 +66,13 @@ public class ConfigsBuilderTest { createConfig(new SingleStreamTracker(streamArn)))) { assertEquals(Optional.empty(), cb.appStreamTracker().left()); assertEquals(streamName, cb.appStreamTracker().right().get()); - assertEquals(streamName, cb.streamTracker().streamConfigList().get(0).streamIdentifier().streamName()); + assertEquals( + streamName, + cb.streamTracker() + .streamConfigList() + .get(0) + .streamIdentifier() + .streamName()); assertFalse(cb.streamTracker().isMultiStream()); } } @@ -76,25 +82,45 @@ public class ConfigsBuilderTest { final StreamTracker mockMultiStreamTracker = mock(MultiStreamTracker.class); final ConfigsBuilder configByMultiTracker = createConfig(mockMultiStreamTracker); assertEquals(Optional.empty(), configByMultiTracker.appStreamTracker().right()); - assertEquals(mockMultiStreamTracker, configByMultiTracker.appStreamTracker().left().get()); + assertEquals( + mockMultiStreamTracker, + configByMultiTracker.appStreamTracker().left().get()); assertEquals(mockMultiStreamTracker, configByMultiTracker.streamTracker()); } private ConfigsBuilder createConfig(String streamName) { // intentional invocation of constructor where streamName is a String - return new ConfigsBuilder(streamName, APPLICATION_NAME, mockKinesisClient, mockDynamoClient, - mockCloudWatchClient, WORKER_IDENTIFIER, mockShardProcessorFactory); + return new ConfigsBuilder( + streamName, + APPLICATION_NAME, + mockKinesisClient, + mockDynamoClient, + mockCloudWatchClient, + WORKER_IDENTIFIER, + mockShardProcessorFactory); } private ConfigsBuilder createConfig(Arn streamArn) { // intentional invocation of constructor where streamArn is an Arn - return new ConfigsBuilder(streamArn, APPLICATION_NAME, mockKinesisClient, 
mockDynamoClient, - mockCloudWatchClient, WORKER_IDENTIFIER, mockShardProcessorFactory); + return new ConfigsBuilder( + streamArn, + APPLICATION_NAME, + mockKinesisClient, + mockDynamoClient, + mockCloudWatchClient, + WORKER_IDENTIFIER, + mockShardProcessorFactory); } private ConfigsBuilder createConfig(StreamTracker streamTracker) { - return new ConfigsBuilder(streamTracker, APPLICATION_NAME, mockKinesisClient, mockDynamoClient, - mockCloudWatchClient, WORKER_IDENTIFIER, mockShardProcessorFactory); + return new ConfigsBuilder( + streamTracker, + APPLICATION_NAME, + mockKinesisClient, + mockDynamoClient, + mockCloudWatchClient, + WORKER_IDENTIFIER, + mockShardProcessorFactory); } private static Arn createArn(String streamName) { @@ -106,5 +132,4 @@ public class ConfigsBuilderTest { .resource("stream/" + streamName) .build(); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/DeprecationUtilsTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/DeprecationUtilsTest.java index 39991b78..8ed46d0b 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/DeprecationUtilsTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/DeprecationUtilsTest.java @@ -17,15 +17,15 @@ package software.amazon.kinesis.common; import java.util.function.Function; -import static org.junit.Assert.assertEquals; -import static org.mockito.Mockito.mock; - import org.junit.Test; import software.amazon.awssdk.utils.Either; import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.SingleStreamTracker; import software.amazon.kinesis.processor.StreamTracker; +import static org.junit.Assert.assertEquals; +import static org.mockito.Mockito.mock; + public class DeprecationUtilsTest { @Test @@ -41,5 +41,4 @@ public class DeprecationUtilsTest { public void testUnsupportedStreamTrackerConversion() { DeprecationUtils.convert(mock(StreamTracker.class), 
Function.identity()); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/FutureUtilsTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/FutureUtilsTest.java index 558687cd..1911d537 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/FutureUtilsTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/FutureUtilsTest.java @@ -14,6 +14,10 @@ */ package software.amazon.kinesis.common; +import java.time.Duration; +import java.util.concurrent.Future; +import java.util.concurrent.TimeoutException; + import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -21,10 +25,6 @@ import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; -import java.time.Duration; -import java.util.concurrent.Future; -import java.util.concurrent.TimeoutException; - import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.eq; @@ -52,4 +52,4 @@ public class FutureUtilsTest { verify(future).cancel(eq(true)); } } -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamConfigTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamConfigTest.java index 9ba3267d..ff004304 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamConfigTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamConfigTest.java @@ -1,14 +1,13 @@ package software.amazon.kinesis.common; -import static software.amazon.kinesis.common.InitialPositionInStream.TRIM_HORIZON; - import org.junit.Test; +import static software.amazon.kinesis.common.InitialPositionInStream.TRIM_HORIZON; + public class StreamConfigTest { @Test(expected = NullPointerException.class) public void testNullStreamIdentifier() 
{ new StreamConfig(null, InitialPositionInStreamExtended.newInitialPosition(TRIM_HORIZON)); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamIdentifierTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamIdentifierTest.java index ae9e134f..d2779c3c 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamIdentifierTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/common/StreamIdentifierTest.java @@ -1,13 +1,13 @@ package software.amazon.kinesis.common; +import java.util.Arrays; +import java.util.Optional; + import org.junit.Assert; import org.junit.Test; import software.amazon.awssdk.arns.Arn; import software.amazon.awssdk.regions.Region; -import java.util.Arrays; -import java.util.Optional; - import static org.junit.Assert.assertEquals; public class StreamIdentifierTest { @@ -37,7 +37,7 @@ public class StreamIdentifierTest { public void testMultiStreamDeserializationFail() { for (final String pattern : Arrays.asList( ":stream-name:123", // missing account id -// "123456789:stream-name:123", // account id not 12 digits + // "123456789:stream-name:123", // account id not 12 digits "123456789abc:stream-name:123", // 12char alphanumeric account id "123456789012::123", // missing stream name "123456789012:stream-name", // missing delimiter and creation epoch @@ -46,7 +46,7 @@ public class StreamIdentifierTest { "123456789012:stream-name:abc", // non-numeric creation epoch "", "::" // missing account id, stream name, and epoch - )) { + )) { try { StreamIdentifier.multiStreamInstance(pattern); Assert.fail("Serialization " + pattern + " should not have created a StreamIdentifier"); @@ -62,15 +62,18 @@ public class StreamIdentifierTest { @Test public void testMultiStreamByArnWithInvalidStreamArnFail() { for (final Arn invalidStreamArn : Arrays.asList( - createArn("abc", SERVICE, KINESIS_REGION, TEST_ACCOUNT_ID, 
RESOURCE), // invalid partition - createArn(PARTITION, "dynamodb", KINESIS_REGION, TEST_ACCOUNT_ID, RESOURCE), // incorrect service - createArn(PARTITION, SERVICE, null, TEST_ACCOUNT_ID, RESOURCE), // missing region - createArn(PARTITION, SERVICE, KINESIS_REGION, null, RESOURCE), // missing account id - createArn(PARTITION, SERVICE, KINESIS_REGION, "123456789", RESOURCE), // account id not 12 digits - createArn(PARTITION, SERVICE, KINESIS_REGION, "123456789abc", RESOURCE), // 12char alphanumeric account id - createArn(PARTITION, SERVICE, KINESIS_REGION, TEST_ACCOUNT_ID, "table/name"), // incorrect resource type - Arn.fromString("arn:aws:dynamodb:us-east-2:123456789012:table/myDynamoDBTable") // valid ARN for incorrect resource - )) { + createArn("abc", SERVICE, KINESIS_REGION, TEST_ACCOUNT_ID, RESOURCE), // invalid partition + createArn(PARTITION, "dynamodb", KINESIS_REGION, TEST_ACCOUNT_ID, RESOURCE), // incorrect service + createArn(PARTITION, SERVICE, null, TEST_ACCOUNT_ID, RESOURCE), // missing region + createArn(PARTITION, SERVICE, KINESIS_REGION, null, RESOURCE), // missing account id + createArn(PARTITION, SERVICE, KINESIS_REGION, "123456789", RESOURCE), // account id not 12 digits + createArn( + PARTITION, SERVICE, KINESIS_REGION, "123456789abc", RESOURCE), // 12char alphanumeric account id + createArn(PARTITION, SERVICE, KINESIS_REGION, TEST_ACCOUNT_ID, "table/name"), // incorrect resource type + Arn.fromString( + "arn:aws:dynamodb:us-east-2:123456789012:table/myDynamoDBTable") // valid ARN for incorrect + // resource + )) { try { StreamIdentifier.multiStreamInstance(invalidStreamArn, EPOCH); Assert.fail("Arn " + invalidStreamArn + " should not have created a StreamIdentifier"); @@ -150,5 +153,4 @@ public class StreamIdentifierTest { .resource(resource) .build(); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/KCLAppConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/KCLAppConfig.java index 
3168d13b..0de2ae8e 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/KCLAppConfig.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/KCLAppConfig.java @@ -1,22 +1,19 @@ package software.amazon.kinesis.config; +import java.io.IOException; +import java.net.Inet4Address; +import java.net.URISyntaxException; +import java.net.UnknownHostException; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import lombok.Builder; import lombok.Value; +import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.arns.Arn; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.kinesis.common.FutureUtils; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.common.StreamConfig; -import software.amazon.kinesis.common.StreamIdentifier; -import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy; -import software.amazon.kinesis.processor.MultiStreamTracker; -import software.amazon.kinesis.processor.SingleStreamTracker; -import software.amazon.kinesis.retrieval.RetrievalConfig; -import software.amazon.kinesis.retrieval.fanout.FanOutConfig; -import software.amazon.kinesis.retrieval.polling.PollingConfig; -import software.amazon.kinesis.utils.RecordValidatorQueue; -import software.amazon.kinesis.utils.ReshardOptions; -import software.amazon.kinesis.application.TestRecordProcessorFactory; -import lombok.Builder; import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider; import software.amazon.awssdk.http.Protocol; @@ -33,20 +30,22 @@ import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryReques import software.amazon.awssdk.services.kinesis.model.DescribeStreamSummaryResponse; import software.amazon.awssdk.services.sts.StsAsyncClient; import 
software.amazon.awssdk.utils.AttributeMap; +import software.amazon.kinesis.application.TestRecordProcessorFactory; import software.amazon.kinesis.common.ConfigsBuilder; +import software.amazon.kinesis.common.FutureUtils; import software.amazon.kinesis.common.InitialPositionInStream; +import software.amazon.kinesis.common.InitialPositionInStreamExtended; +import software.amazon.kinesis.common.StreamConfig; +import software.amazon.kinesis.common.StreamIdentifier; +import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy; +import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.ShardRecordProcessorFactory; - -import java.io.IOException; -import java.net.Inet4Address; -import java.net.URISyntaxException; -import java.net.UnknownHostException; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import lombok.extern.slf4j.Slf4j; +import software.amazon.kinesis.processor.SingleStreamTracker; +import software.amazon.kinesis.retrieval.RetrievalConfig; +import software.amazon.kinesis.retrieval.fanout.FanOutConfig; +import software.amazon.kinesis.retrieval.polling.PollingConfig; +import software.amazon.kinesis.utils.RecordValidatorQueue; +import software.amazon.kinesis.utils.ReshardOptions; /** * Default configuration for a producer or consumer used in integration tests. 
@@ -79,9 +78,10 @@ public abstract class KCLAppConfig { public List getStreamNames() { if (this.streamNames == null) { - return getStreamArns().stream().map(streamArn -> - streamArn.toString().substring(streamArn.toString().indexOf("/") + 1)) - .collect(Collectors.toList()); + return getStreamArns().stream() + .map(streamArn -> + streamArn.toString().substring(streamArn.toString().indexOf("/") + 1)) + .collect(Collectors.toList()); } else { return this.streamNames; } @@ -93,9 +93,13 @@ public abstract class KCLAppConfig { return INTEGRATION_TEST_RESOURCE_PREFIX + getTestName(); } - public int getShardCount() { return 4; } + public int getShardCount() { + return 4; + } - public Region getRegion() { return Region.US_WEST_2; } + public Region getRegion() { + return Region.US_WEST_2; + } /** * Gets credentials for passed in profile with "-DawsProfile" which should match "~/.aws/config". Otherwise, @@ -103,8 +107,9 @@ public abstract class KCLAppConfig { */ private AwsCredentialsProvider getCredentialsProvider() { final String awsProfile = System.getProperty(AWS_ACCOUNT_PROFILE_PROPERTY); - return (awsProfile != null) ? - ProfileCredentialsProvider.builder().profileName(awsProfile).build() : DefaultCredentialsProvider.create(); + return (awsProfile != null) + ? 
ProfileCredentialsProvider.builder().profileName(awsProfile).build() + : DefaultCredentialsProvider.create(); } public boolean isCrossAccount() { @@ -138,9 +143,9 @@ public abstract class KCLAppConfig { if (this.accountIdForConsumer == null) { try { this.accountIdForConsumer = FutureUtils.resolveOrCancelFuture( - buildStsAsyncClientForConsumer().getCallerIdentity(), Duration.ofSeconds(30)).account(); - } - catch (Exception e) { + buildStsAsyncClientForConsumer().getCallerIdentity(), Duration.ofSeconds(30)) + .account(); + } catch (Exception e) { log.error("Error when getting account ID through STS for consumer", e); } } @@ -151,9 +156,9 @@ public abstract class KCLAppConfig { if (this.accountIdForStreamOwner == null) { try { this.accountIdForStreamOwner = FutureUtils.resolveOrCancelFuture( - buildStsAsyncClientForStreamOwner().getCallerIdentity(), Duration.ofSeconds(30)).account(); - } - catch (Exception e) { + buildStsAsyncClientForStreamOwner().getCallerIdentity(), Duration.ofSeconds(30)) + .account(); + } catch (Exception e) { log.error("Error when getting account ID through STS for consumer", e); } } @@ -187,8 +192,8 @@ public abstract class KCLAppConfig { return this.kinesisAsyncClientForStreamOwner; } - - private KinesisAsyncClient buildAsyncKinesisClient(AwsCredentialsProvider creds) throws URISyntaxException, IOException { + private KinesisAsyncClient buildAsyncKinesisClient(AwsCredentialsProvider creds) + throws URISyntaxException, IOException { // Setup H2 client config. 
final NettyNioAsyncHttpClient.Builder builder = NettyNioAsyncHttpClient.builder() .maxConcurrency(Integer.MAX_VALUE) @@ -198,7 +203,8 @@ public abstract class KCLAppConfig { builder.buildWithDefaults(AttributeMap.builder().build()); // Setup client builder by default values - final KinesisAsyncClientBuilder kinesisAsyncClientBuilder = KinesisAsyncClient.builder().region(getRegion()); + final KinesisAsyncClientBuilder kinesisAsyncClientBuilder = + KinesisAsyncClient.builder().region(getRegion()); kinesisAsyncClientBuilder.httpClient(sdkAsyncHttpClient); kinesisAsyncClientBuilder.credentialsProvider(creds); @@ -208,9 +214,9 @@ public abstract class KCLAppConfig { private StsAsyncClient buildStsAsyncClientForConsumer() { if (this.stsAsyncClientForConsumer == null) { this.stsAsyncClientForConsumer = StsAsyncClient.builder() - .credentialsProvider(getCredentialsProvider()) - .region(getRegion()) - .build(); + .credentialsProvider(getCredentialsProvider()) + .region(getRegion()) + .build(); } return this.stsAsyncClientForConsumer; } @@ -220,8 +226,7 @@ public abstract class KCLAppConfig { final StsAsyncClient client; if (isCrossAccount()) { client = buildStsAsyncClient(getCrossAccountCredentialsProvider()); - } - else { + } else { client = buildStsAsyncClient(getCredentialsProvider()); } this.stsAsyncClientForStreamOwner = client; @@ -238,7 +243,8 @@ public abstract class KCLAppConfig { public final DynamoDbAsyncClient buildAsyncDynamoDbClient() throws IOException { if (this.dynamoDbAsyncClient == null) { - final DynamoDbAsyncClientBuilder builder = DynamoDbAsyncClient.builder().region(getRegion()); + final DynamoDbAsyncClientBuilder builder = + DynamoDbAsyncClient.builder().region(getRegion()); builder.credentialsProvider(getCredentialsProvider()); this.dynamoDbAsyncClient = builder.build(); } @@ -247,7 +253,8 @@ public abstract class KCLAppConfig { public final CloudWatchAsyncClient buildAsyncCloudWatchClient() throws IOException { if (this.cloudWatchAsyncClient == 
null) { - final CloudWatchAsyncClientBuilder builder = CloudWatchAsyncClient.builder().region(getRegion()); + final CloudWatchAsyncClientBuilder builder = + CloudWatchAsyncClient.builder().region(getRegion()); builder.credentialsProvider(getCredentialsProvider()); this.cloudWatchAsyncClient = builder.build(); } @@ -276,8 +283,13 @@ public abstract class KCLAppConfig { final SingleStreamTracker singleStreamTracker = new SingleStreamTracker( StreamIdentifier.singleStreamInstance(getStreamArns().get(0)), buildStreamConfigList(streamToConsumerArnsMap).get(0)); - return new ConfigsBuilder(singleStreamTracker, getApplicationName(), - buildAsyncKinesisClientForConsumer(), buildAsyncDynamoDbClient(), buildAsyncCloudWatchClient(), workerId, + return new ConfigsBuilder( + singleStreamTracker, + getApplicationName(), + buildAsyncKinesisClientForConsumer(), + buildAsyncDynamoDbClient(), + buildAsyncCloudWatchClient(), + workerId, getShardRecordProcessorFactory()); } else { final MultiStreamTracker multiStreamTracker = new MultiStreamTracker() { @@ -285,34 +297,46 @@ public abstract class KCLAppConfig { public List streamConfigList() { return buildStreamConfigList(streamToConsumerArnsMap); } + @Override public FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy() { return new FormerStreamsLeasesDeletionStrategy.NoLeaseDeletionStrategy(); } }; - return new ConfigsBuilder(multiStreamTracker, getApplicationName(), - buildAsyncKinesisClientForConsumer(), buildAsyncDynamoDbClient(), buildAsyncCloudWatchClient(), workerId, + return new ConfigsBuilder( + multiStreamTracker, + getApplicationName(), + buildAsyncKinesisClientForConsumer(), + buildAsyncDynamoDbClient(), + buildAsyncCloudWatchClient(), + workerId, getShardRecordProcessorFactory()); } } private List buildStreamConfigList(Map streamToConsumerArnsMap) { - return getStreamArns().stream().map(streamArn-> { - final StreamIdentifier streamIdentifier; - if (getStreamArns().size() == 1) { - streamIdentifier = 
StreamIdentifier.singleStreamInstance(streamArn); - } else { //is multi-stream - streamIdentifier = StreamIdentifier.multiStreamInstance(streamArn, getCreationEpoch(streamArn)); - } + return getStreamArns().stream() + .map(streamArn -> { + final StreamIdentifier streamIdentifier; + if (getStreamArns().size() == 1) { + streamIdentifier = StreamIdentifier.singleStreamInstance(streamArn); + } else { // is multi-stream + streamIdentifier = StreamIdentifier.multiStreamInstance(streamArn, getCreationEpoch(streamArn)); + } - if (streamToConsumerArnsMap != null) { - final StreamConfig streamConfig = new StreamConfig(streamIdentifier, - InitialPositionInStreamExtended.newInitialPosition(getInitialPosition())); - return streamConfig.consumerArn(streamToConsumerArnsMap.get(streamArn).toString()); - } else { - return new StreamConfig(streamIdentifier, InitialPositionInStreamExtended.newInitialPosition(getInitialPosition())); - } - }).collect(Collectors.toList()); + if (streamToConsumerArnsMap != null) { + final StreamConfig streamConfig = new StreamConfig( + streamIdentifier, + InitialPositionInStreamExtended.newInitialPosition(getInitialPosition())); + return streamConfig.consumerArn( + streamToConsumerArnsMap.get(streamArn).toString()); + } else { + return new StreamConfig( + streamIdentifier, + InitialPositionInStreamExtended.newInitialPosition(getInitialPosition())); + } + }) + .collect(Collectors.toList()); } private long getCreationEpoch(Arn streamArn) { @@ -320,7 +344,7 @@ public abstract class KCLAppConfig { .streamARN(streamArn.toString()) .build(); - DescribeStreamSummaryResponse response = null; + DescribeStreamSummaryResponse response = null; try { response = FutureUtils.resolveOrCancelFuture( buildAsyncKinesisClientForStreamOwner().describeStreamSummary(request), Duration.ofSeconds(60)); @@ -330,17 +354,18 @@ public abstract class KCLAppConfig { return response.streamDescriptionSummary().streamCreationTimestamp().toEpochMilli(); } - public abstract RetrievalMode 
getRetrievalMode(); public RetrievalConfig getRetrievalConfig(ConfigsBuilder configsBuilder, Map streamToConsumerArnsMap) { final RetrievalConfig config = configsBuilder.retrievalConfig(); if (getRetrievalMode() == RetrievalMode.POLLING) { - config.retrievalSpecificConfig(new PollingConfig(config.kinesisClient())); + config.retrievalSpecificConfig(new PollingConfig(config.kinesisClient())); } else { if (getStreamArns().size() == 1) { - final Arn consumerArn = streamToConsumerArnsMap.get(getStreamArns().get(0)); - config.retrievalSpecificConfig(new FanOutConfig(config.kinesisClient()).consumerArn(consumerArn.toString())); + final Arn consumerArn = + streamToConsumerArnsMap.get(getStreamArns().get(0)); + config.retrievalSpecificConfig( + new FanOutConfig(config.kinesisClient()).consumerArn(consumerArn.toString())); } // For CAA multi-stream EFO, consumerArn is specified in StreamConfig } @@ -349,8 +374,9 @@ public abstract class KCLAppConfig { public Arn buildStreamArn(String streamName) { final String partition = getRegion().metadata().partition().id(); - return Arn.fromString(String.join(":", "arn", partition, "kinesis", getRegion().id(), - getAccountIdForStreamOwner(), "stream") + "/" + INTEGRATION_TEST_RESOURCE_PREFIX + streamName); + return Arn.fromString( + String.join(":", "arn", partition, "kinesis", getRegion().id(), getAccountIdForStreamOwner(), "stream") + + "/" + INTEGRATION_TEST_RESOURCE_PREFIX + streamName); } /** @@ -364,5 +390,4 @@ public abstract class KCLAppConfig { private int recordSizeKB; private long callPeriodMills; } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH1TestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH1TestConfig.java index d4bb1c49..a8440176 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH1TestConfig.java +++ 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH1TestConfig.java @@ -1,12 +1,12 @@ package software.amazon.kinesis.config; -import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.http.Protocol; - import java.util.Collections; import java.util.List; import java.util.UUID; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; + /** * Config for a polling consumer with HTTP protocol of HTTP1 */ diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH2TestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH2TestConfig.java index 5af83843..989f0fc6 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH2TestConfig.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryPollingH2TestConfig.java @@ -1,12 +1,12 @@ package software.amazon.kinesis.config; -import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.http.Protocol; - import java.util.Collections; import java.util.List; import java.util.UUID; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; + /** * Config for a polling consumer with HTTP protocol of HTTP2 */ diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingReshardingTestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingReshardingTestConfig.java index 7984131f..24a038de 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingReshardingTestConfig.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingReshardingTestConfig.java @@ -1,14 +1,14 @@ package software.amazon.kinesis.config; -import software.amazon.awssdk.arns.Arn; -import 
software.amazon.awssdk.http.Protocol; -import software.amazon.kinesis.utils.ReshardOptions; - import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.UUID; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; +import software.amazon.kinesis.utils.ReshardOptions; + import static software.amazon.kinesis.utils.ReshardOptions.MERGE; import static software.amazon.kinesis.utils.ReshardOptions.SPLIT; @@ -17,7 +17,7 @@ public class ReleaseCanaryStreamingReshardingTestConfig extends KCLAppConfig { private final UUID uniqueId = UUID.randomUUID(); private final String applicationName = "StreamingReshardingTest"; - private final String streamName ="2XStreamingReshardingTestStream_" + uniqueId; + private final String streamName = "2XStreamingReshardingTestStream_" + uniqueId; @Override public String getTestName() { @@ -30,7 +30,9 @@ public class ReleaseCanaryStreamingReshardingTestConfig extends KCLAppConfig { } @Override - public Protocol getKinesisClientProtocol() { return Protocol.HTTP2; } + public Protocol getKinesisClientProtocol() { + return Protocol.HTTP2; + } @Override public RetrievalMode getRetrievalMode() { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingTestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingTestConfig.java index 832fb905..c6a04ce2 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingTestConfig.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/ReleaseCanaryStreamingTestConfig.java @@ -1,12 +1,12 @@ package software.amazon.kinesis.config; -import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.http.Protocol; - import java.util.Collections; import java.util.List; import java.util.UUID; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; + /** * Config 
for a streaming consumer with HTTP protocol of HTTP2 */ @@ -14,7 +14,7 @@ public class ReleaseCanaryStreamingTestConfig extends KCLAppConfig { private final UUID uniqueId = UUID.randomUUID(); private final String applicationName = "StreamingTest"; - private final String streamName ="2XStreamingTestStream_" + uniqueId; + private final String streamName = "2XStreamingTestStream_" + uniqueId; @Override public String getTestName() { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/KCLCrossAccountAppConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/KCLCrossAccountAppConfig.java index c20dd782..ba334661 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/KCLCrossAccountAppConfig.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/KCLCrossAccountAppConfig.java @@ -17,7 +17,10 @@ public abstract class KCLCrossAccountAppConfig extends KCLAppConfig { @Override public AwsCredentialsProvider getCrossAccountCredentialsProvider() { final String awsCrossAccountProfile = System.getProperty(KCLAppConfig.CROSS_ACCOUNT_PROFILE_PROPERTY); - return (awsCrossAccountProfile != null) ? - ProfileCredentialsProvider.builder().profileName(awsCrossAccountProfile).build() : null; + return (awsCrossAccountProfile != null) + ? 
ProfileCredentialsProvider.builder() + .profileName(awsCrossAccountProfile) + .build() + : null; } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamPollingH2TestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamPollingH2TestConfig.java index da48ca97..68f71799 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamPollingH2TestConfig.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamPollingH2TestConfig.java @@ -5,7 +5,6 @@ import java.util.List; import java.util.UUID; import lombok.extern.slf4j.Slf4j; - import software.amazon.awssdk.arns.Arn; import software.amazon.awssdk.http.Protocol; import software.amazon.kinesis.config.RetrievalMode; diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamStreamingTestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamStreamingTestConfig.java index 30c99bee..19e2aa3f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamStreamingTestConfig.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountMultiStreamStreamingTestConfig.java @@ -5,7 +5,6 @@ import java.util.List; import java.util.UUID; import lombok.extern.slf4j.Slf4j; - import software.amazon.awssdk.arns.Arn; import software.amazon.awssdk.http.Protocol; import software.amazon.kinesis.config.RetrievalMode; diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountPollingH2TestConfig.java 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountPollingH2TestConfig.java index a0754d91..7f44408e 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountPollingH2TestConfig.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountPollingH2TestConfig.java @@ -1,14 +1,13 @@ package software.amazon.kinesis.config.crossaccount; -import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.http.Protocol; -import software.amazon.kinesis.config.RetrievalMode; - import java.util.Collections; import java.util.List; import java.util.UUID; import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; +import software.amazon.kinesis.config.RetrievalMode; /** * Config for a cross account polling consumer with HTTP protocol of HTTP2 diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountStreamingTestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountStreamingTestConfig.java index 90bd637e..594347aa 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountStreamingTestConfig.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/crossaccount/ReleaseCanaryCrossAccountStreamingTestConfig.java @@ -1,14 +1,13 @@ package software.amazon.kinesis.config.crossaccount; -import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.http.Protocol; -import software.amazon.kinesis.config.RetrievalMode; - import java.util.Collections; import java.util.List; import java.util.UUID; import lombok.extern.slf4j.Slf4j; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; +import 
software.amazon.kinesis.config.RetrievalMode; /** * Config for a streaming consumer with HTTP protocol of HTTP2 diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/multistream/ReleaseCanaryMultiStreamPollingH2TestConfig.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/multistream/ReleaseCanaryMultiStreamPollingH2TestConfig.java index 4f926025..ae49ec56 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/multistream/ReleaseCanaryMultiStreamPollingH2TestConfig.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/config/multistream/ReleaseCanaryMultiStreamPollingH2TestConfig.java @@ -1,12 +1,12 @@ package software.amazon.kinesis.config; -import software.amazon.awssdk.arns.Arn; -import software.amazon.awssdk.http.Protocol; - import java.util.ArrayList; import java.util.List; import java.util.UUID; +import software.amazon.awssdk.arns.Arn; +import software.amazon.awssdk.http.Protocol; + /** * Config for a polling consumer with HTTP protocol of HTTP2 */ diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDeciderTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDeciderTest.java index 9508903b..bbed04d3 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDeciderTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DeterministicShuffleShardSyncLeaderDeciderTest.java @@ -24,6 +24,7 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; + import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -33,6 +34,7 @@ import software.amazon.kinesis.leases.Lease; import 
software.amazon.kinesis.leases.LeaseRefresher; import software.amazon.kinesis.leases.exceptions.DependencyException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; @@ -63,10 +65,8 @@ public class DeterministicShuffleShardSyncLeaderDeciderTest { @Before public void setup() { numShardSyncWorkers = 1; - leaderDecider = new DeterministicShuffleShardSyncLeaderDecider(leaseRefresher, - scheduledExecutorService, - numShardSyncWorkers, - readWriteLock); + leaderDecider = new DeterministicShuffleShardSyncLeaderDecider( + leaseRefresher, scheduledExecutorService, numShardSyncWorkers, readWriteLock); when(readWriteLock.readLock()).thenReturn(mock(ReentrantReadWriteLock.ReadLock.class)); when(readWriteLock.writeLock()).thenReturn(mock(ReentrantReadWriteLock.WriteLock.class)); @@ -103,9 +103,9 @@ public class DeterministicShuffleShardSyncLeaderDeciderTest { } @Test - public void testElectedLeadersAsPerExpectedShufflingOrder() - throws Exception { - List leases = getLeases(5, false /*emptyLeaseOwner */, false /* duplicateLeaseOwner */, true /* activeLeases */); + public void testElectedLeadersAsPerExpectedShufflingOrder() throws Exception { + List leases = + getLeases(5, false /*emptyLeaseOwner */, false /* duplicateLeaseOwner */, true /* activeLeases */); when(leaseRefresher.listLeases()).thenReturn(leases); Set expectedLeaders = getExpectedLeaders(leases); for (String leader : expectedLeaders) { @@ -121,11 +121,10 @@ public class DeterministicShuffleShardSyncLeaderDeciderTest { @Test public void testElectedLeadersAsPerExpectedShufflingOrderWhenUniqueWorkersLessThanMaxLeaders() { this.numShardSyncWorkers = 5; // More than number of unique lease owners - leaderDecider = new DeterministicShuffleShardSyncLeaderDecider(leaseRefresher, - scheduledExecutorService, - numShardSyncWorkers, - readWriteLock); - List leases = getLeases(3, 
false /*emptyLeaseOwner */, false /* duplicateLeaseOwner */, true /* activeLeases */); + leaderDecider = new DeterministicShuffleShardSyncLeaderDecider( + leaseRefresher, scheduledExecutorService, numShardSyncWorkers, readWriteLock); + List leases = + getLeases(3, false /*emptyLeaseOwner */, false /* duplicateLeaseOwner */, true /* activeLeases */); Set expectedLeaders = getExpectedLeaders(leases); // All lease owners should be present in expected leaders set, and they should all be leaders. for (Lease lease : leases) { @@ -134,7 +133,8 @@ public class DeterministicShuffleShardSyncLeaderDeciderTest { } } - private List getLeases(int count, boolean emptyLeaseOwner, boolean duplicateLeaseOwner, boolean activeLeases) { + private List getLeases( + int count, boolean emptyLeaseOwner, boolean duplicateLeaseOwner, boolean activeLeases) { List leases = new ArrayList<>(); for (int i = 0; i < count; i++) { Lease lease = new Lease(); @@ -150,8 +150,12 @@ public class DeterministicShuffleShardSyncLeaderDeciderTest { } private Set getExpectedLeaders(List leases) { - List uniqueHosts = leases.stream().filter(lease -> lease.leaseOwner() != null) - .map(Lease::leaseOwner).distinct().sorted().collect(Collectors.toList()); + List uniqueHosts = leases.stream() + .filter(lease -> lease.leaseOwner() != null) + .map(Lease::leaseOwner) + .distinct() + .sorted() + .collect(Collectors.toList()); Collections.shuffle(uniqueHosts, new Random(DETERMINISTIC_SHUFFLE_SEED)); int numWorkers = Math.min(uniqueHosts.size(), this.numShardSyncWorkers); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DiagnosticEventsTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DiagnosticEventsTest.java index 08ed8abb..62751f01 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DiagnosticEventsTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/DiagnosticEventsTest.java @@ 
-15,6 +15,11 @@ package software.amazon.kinesis.coordinator; +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; + import lombok.extern.slf4j.Slf4j; import org.junit.Before; import org.junit.Test; @@ -25,11 +30,6 @@ import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseBuilder; import software.amazon.kinesis.leases.LeaseCoordinator; -import java.util.Collection; -import java.util.Collections; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.times; @@ -41,8 +41,10 @@ import static org.mockito.Mockito.when; public class DiagnosticEventsTest { @Mock private ThreadPoolExecutor executor; + @Mock private LeaseCoordinator leaseCoordinator; + @Mock private DiagnosticEventHandler defaultHandler; @@ -138,8 +140,8 @@ public class DiagnosticEventsTest { assertEquals(executorStateEvent.getLeasesOwned(), leaseAssignments.size()); assertEquals(0, executorStateEvent.getCurrentQueueSize()); - RejectedTaskEvent rejectedTaskEvent = factory.rejectedTaskEvent(executorStateEvent, - new TestRejectedTaskException()); + RejectedTaskEvent rejectedTaskEvent = + factory.rejectedTaskEvent(executorStateEvent, new TestRejectedTaskException()); assertEquals(rejectedTaskEvent.getExecutorStateEvent().getActiveThreads(), activeThreadCount); assertEquals(rejectedTaskEvent.getExecutorStateEvent().getCoreThreads(), corePoolSize); assertEquals(rejectedTaskEvent.getExecutorStateEvent().getLargestPoolSize(), largestPoolSize); @@ -150,7 +152,9 @@ public class DiagnosticEventsTest { } private class TestRejectedTaskException extends Exception { - private TestRejectedTaskException() { super(); } + private TestRejectedTaskException() { + super(); + } } private class CustomHandler implements 
DiagnosticEventHandler { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinatorTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinatorTest.java index 34e6aede..20b22226 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinatorTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/GracefulShutdownCoordinatorTest.java @@ -14,16 +14,6 @@ */ package software.amazon.kinesis.coordinator; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; @@ -37,19 +27,34 @@ import org.mockito.verification.VerificationMode; import software.amazon.kinesis.leases.ShardInfo; import software.amazon.kinesis.lifecycle.ShardConsumer; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class GracefulShutdownCoordinatorTest { @Mock private CountDownLatch shutdownCompleteLatch; + @Mock private CountDownLatch notificationCompleteLatch; + @Mock private CountDownLatch finalShutdownLatch; + @Mock private Scheduler scheduler; + @Mock private Callable contextCallable; 
+ @Mock private ConcurrentMap shardInfoConsumerMap; @@ -231,9 +236,11 @@ public class GracefulShutdownCoordinatorTest { when(notificationCompleteLatch.await(anyLong(), any(TimeUnit.class))).thenReturn(true); doAnswer(invocation -> { - Thread.currentThread().interrupt(); - return true; - }).when(scheduler).shutdown(); + Thread.currentThread().interrupt(); + return true; + }) + .when(scheduler) + .shutdown(); assertThat(requestedShutdownCallable.call(), equalTo(false)); verifyLatchAwait(notificationCompleteLatch); @@ -287,7 +294,8 @@ public class GracefulShutdownCoordinatorTest { @Test(expected = IllegalStateException.class) public void testWorkerShutdownCallableThrows() throws Exception { - Callable requestedShutdownCallable = new GracefulShutdownCoordinator().createGracefulShutdownCallable(contextCallable); + Callable requestedShutdownCallable = + new GracefulShutdownCoordinator().createGracefulShutdownCallable(contextCallable); when(contextCallable.call()).thenThrow(new IllegalStateException("Bad Shutdown")); requestedShutdownCallable.call(); @@ -379,6 +387,4 @@ public class GracefulShutdownCoordinatorTest { when(shardInfoConsumerMap.size()).thenReturn(initialItemCount, additionalItemCounts); when(shardInfoConsumerMap.isEmpty()).thenReturn(initialItemCount == 0, additionalEmptyStates); } - - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManagerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManagerTest.java index 355e2c96..1e6be18f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManagerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/PeriodicShardSyncManagerTest.java @@ -15,6 +15,16 @@ package software.amazon.kinesis.coordinator; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Collections; +import 
java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.junit.Assert; @@ -37,16 +47,6 @@ import software.amazon.kinesis.leases.ShardSyncTaskManager; import software.amazon.kinesis.metrics.NullMetricsFactory; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -56,264 +56,359 @@ import static software.amazon.kinesis.coordinator.PeriodicShardSyncManager.MIN_H import static software.amazon.kinesis.leases.LeaseManagementConfig.DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY; @RunWith(MockitoJUnitRunner.class) - public class PeriodicShardSyncManagerTest { private static final int MAX_DEPTH_WITH_IN_PROGRESS_PARENTS = 1; private StreamIdentifier streamIdentifier; private PeriodicShardSyncManager periodicShardSyncManager; + @Mock private LeaderDecider leaderDecider; + @Mock private LeaseRefresher leaseRefresher; + @Mock Map currentStreamConfigMap; + @Mock Function shardSyncTaskManagerProvider; + @Mock Map streamToShardSyncTaskManagerMap; @Before public void setup() { streamIdentifier = StreamIdentifier.multiStreamInstance("123456789012:stream:456"); - periodicShardSyncManager = new PeriodicShardSyncManager("worker", leaderDecider, leaseRefresher, currentStreamConfigMap, - shardSyncTaskManagerProvider, streamToShardSyncTaskManagerMap, true, new NullMetricsFactory(), 2 * 60 * 1000, 3, + 
periodicShardSyncManager = new PeriodicShardSyncManager( + "worker", + leaderDecider, + leaseRefresher, + currentStreamConfigMap, + shardSyncTaskManagerProvider, + streamToShardSyncTaskManagerMap, + true, + new NullMetricsFactory(), + 2 * 60 * 1000, + 3, new AtomicBoolean(true)); } @Test public void testForFailureWhenHashRangesAreIncomplete() { - List hashRanges = new ArrayList() {{ - add(deserialize("0", "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); - add(deserialize("25", MAX_HASH_KEY.toString())); // Missing interval here - }}.stream().map(hashKeyRangeForLease -> { - Lease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - Assert.assertTrue(PeriodicShardSyncManager - .checkForHoleInHashKeyRanges(streamIdentifier, hashRanges).isPresent()); + List hashRanges = new ArrayList() { + { + add(deserialize("0", "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); + add(deserialize("25", MAX_HASH_KEY.toString())); // Missing interval here + } + }.stream() + .map(hashKeyRangeForLease -> { + Lease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + Assert.assertTrue(PeriodicShardSyncManager.checkForHoleInHashKeyRanges(streamIdentifier, hashRanges) + .isPresent()); } @Test public void testForSuccessWhenHashRangesAreComplete() { - List hashRanges = new ArrayList() {{ - add(deserialize("0", "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - Lease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - 
lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - Assert.assertFalse(PeriodicShardSyncManager - .checkForHoleInHashKeyRanges(streamIdentifier, hashRanges).isPresent()); + List hashRanges = new ArrayList() { + { + add(deserialize("0", "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + Lease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + Assert.assertFalse(PeriodicShardSyncManager.checkForHoleInHashKeyRanges(streamIdentifier, hashRanges) + .isPresent()); } @Test public void testForSuccessWhenUnSortedHashRangesAreComplete() { - List hashRanges = new ArrayList() {{ - add(deserialize("4", "23")); - add(deserialize("2", "3")); - add(deserialize("0", "1")); - add(deserialize("24", MAX_HASH_KEY.toString())); - add(deserialize("6", "23")); - - }}.stream().map(hashKeyRangeForLease -> { - Lease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - Assert.assertFalse(PeriodicShardSyncManager - .checkForHoleInHashKeyRanges(streamIdentifier, hashRanges).isPresent()); + List hashRanges = new ArrayList() { + { + add(deserialize("4", "23")); + add(deserialize("2", "3")); + add(deserialize("0", "1")); + add(deserialize("24", MAX_HASH_KEY.toString())); + add(deserialize("6", "23")); + } + }.stream() + .map(hashKeyRangeForLease -> { + Lease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + 
Assert.assertFalse(PeriodicShardSyncManager.checkForHoleInHashKeyRanges(streamIdentifier, hashRanges) + .isPresent()); } @Test public void testForSuccessWhenHashRangesAreCompleteForOverlappingLeasesAtEnd() { - List hashRanges = new ArrayList() {{ - add(deserialize("0", "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - add(deserialize("24", "45")); - }}.stream().map(hashKeyRangeForLease -> { - Lease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - Assert.assertFalse(PeriodicShardSyncManager - .checkForHoleInHashKeyRanges(streamIdentifier, hashRanges).isPresent()); + List hashRanges = new ArrayList() { + { + add(deserialize("0", "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + add(deserialize("24", "45")); + } + }.stream() + .map(hashKeyRangeForLease -> { + Lease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + Assert.assertFalse(PeriodicShardSyncManager.checkForHoleInHashKeyRanges(streamIdentifier, hashRanges) + .isPresent()); } @Test public void testIfShardSyncIsInitiatedWhenNoLeasesArePassed() { - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, null).shouldDoShardSync()); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, null) + .shouldDoShardSync()); } @Test public void testIfShardSyncIsInitiatedWhenEmptyLeasesArePassed() { - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, new ArrayList<>()).shouldDoShardSync()); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, 
new ArrayList<>()) + .shouldDoShardSync()); } @Test public void testIfShardSyncIsNotInitiatedWhenConfidenceFactorIsNotReached() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); // Hole between 23 and 25 - add(deserialize("25", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); + List multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); // Hole between 23 and 25 + add(deserialize("25", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); } @Test public void testIfShardSyncIsInitiatedWhenConfidenceFactorIsReached() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); // Hole between 23 and 25 - add(deserialize("25", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - 
MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + List multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); // Hole between 23 and 25 + add(deserialize("25", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); } @Test public void testIfShardSyncIsInitiatedWhenHoleIsDueToShardEnd() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); // introducing hole here through SHARD_END checkpoint - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - if (lease.hashKeyRangeForLease().startingHashKey().toString().equals("4")) { - 
lease.checkpoint(ExtendedSequenceNumber.SHARD_END); - } else { - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + List multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); // introducing hole here through SHARD_END checkpoint + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); } - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + if (lease.hashKeyRangeForLease() + .startingHashKey() + .toString() + .equals("4")) { + lease.checkpoint(ExtendedSequenceNumber.SHARD_END); + } else { + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + } + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); } @Test public void testIfShardSyncIsInitiatedWhenNoLeasesAreUsedDueToShardEnd() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - 
lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.SHARD_END); - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + List multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.SHARD_END); + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); } @Test public void testIfShardSyncIsNotInitiatedWhenHoleShifts() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); // Hole between 23 and 25 - add(deserialize("25", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i 
-> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - List multiStreamLeases2 = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); // Hole between 3 and 5 - add(deserialize("5", "23")); - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); + List multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); // Hole between 23 and 25 + add(deserialize("25", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + List multiStreamLeases2 = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); // Hole between 3 and 5 + add(deserialize("5", "23")); + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); // Resetting the holes - IntStream.range(1, 
DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases2).shouldDoShardSync())); - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases2).shouldDoShardSync()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases2) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases2) + .shouldDoShardSync()); } @Test public void testIfShardSyncIsNotInitiatedWhenHoleShiftsMoreThanOnce() { - List multiStreamLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "23")); - add(deserialize("6", "23")); // Hole between 23 and 25 - add(deserialize("25", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - List multiStreamLeases2 = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); // Hole between 3 and 5 - add(deserialize("5", "23")); - add(deserialize("6", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.hashKeyRange(hashKeyRangeForLease); - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); + List 
multiStreamLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "23")); + add(deserialize("6", "23")); // Hole between 23 and 25 + add(deserialize("25", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + List multiStreamLeases2 = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); // Hole between 3 and 5 + add(deserialize("5", "23")); + add(deserialize("6", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.hashKeyRange(hashKeyRangeForLease); + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); // Resetting the holes - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases2).shouldDoShardSync())); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases2) + .shouldDoShardSync())); // Resetting the holes - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - 
Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); } @Test @@ -323,40 +418,53 @@ public class PeriodicShardSyncManagerTest { when(shardSyncTaskManagerProvider.apply(any())).thenReturn(shardSyncTaskManager); when(shardSyncTaskManager.shardDetector()).thenReturn(shardDetector); - final int[] shardCounter = { 0 }; - List hashKeyRangeForLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("4", "20")); - add(deserialize("21", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}; + final int[] shardCounter = {0}; + List hashKeyRangeForLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("4", "20")); + add(deserialize("21", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }; List kinesisShards = hashKeyRangeForLeases.stream() - .map(hashKeyRangeForLease -> Shard.builder().shardId("shard-" + (++shardCounter[0])).hashKeyRange( - HashKeyRange.builder().startingHashKey(hashKeyRangeForLease.serializedStartingHashKey()) - .endingHashKey(hashKeyRangeForLease.serializedEndingHashKey()).build()).build()) + .map(hashKeyRangeForLease -> Shard.builder() + .shardId("shard-" + (++shardCounter[0])) + .hashKeyRange(HashKeyRange.builder() + .startingHashKey(hashKeyRangeForLease.serializedStartingHashKey()) + .endingHashKey(hashKeyRangeForLease.serializedEndingHashKey()) + .build()) + .build()) .collect(Collectors.toList()); when(shardDetector.listShards()).thenReturn(kinesisShards); - final int[] 
leaseCounter = { 0 }; - List multiStreamLeases = hashKeyRangeForLeases.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.leaseKey(MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), "shard-"+(++leaseCounter[0]))); - lease.shardId("shard-"+(leaseCounter[0])); - // Setting the hashrange only for last two leases - if (leaseCounter[0] >= 3) { - lease.hashKeyRange(hashKeyRangeForLease); - } - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); + final int[] leaseCounter = {0}; + List multiStreamLeases = hashKeyRangeForLeases.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.leaseKey( + MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), "shard-" + (++leaseCounter[0]))); + lease.shardId("shard-" + (leaseCounter[0])); + // Setting the hashrange only for last two leases + if (leaseCounter[0] >= 3) { + lease.hashKeyRange(hashKeyRangeForLease); + } + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); // Assert that shard sync should never trigger - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - Assert.assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); // Assert that all the leases now has hashRanges set. 
for (Lease lease : multiStreamLeases) { @@ -371,40 +479,53 @@ public class PeriodicShardSyncManagerTest { when(shardSyncTaskManagerProvider.apply(any())).thenReturn(shardSyncTaskManager); when(shardSyncTaskManager.shardDetector()).thenReturn(shardDetector); - final int[] shardCounter = { 0 }; - List hashKeyRangeForLeases = new ArrayList() {{ - add(deserialize(MIN_HASH_KEY.toString(), "1")); - add(deserialize("2", "3")); - add(deserialize("5", "20")); // Hole between 3 and 5 - add(deserialize("21", "23")); - add(deserialize("24", MAX_HASH_KEY.toString())); - }}; + final int[] shardCounter = {0}; + List hashKeyRangeForLeases = new ArrayList() { + { + add(deserialize(MIN_HASH_KEY.toString(), "1")); + add(deserialize("2", "3")); + add(deserialize("5", "20")); // Hole between 3 and 5 + add(deserialize("21", "23")); + add(deserialize("24", MAX_HASH_KEY.toString())); + } + }; List kinesisShards = hashKeyRangeForLeases.stream() - .map(hashKeyRangeForLease -> Shard.builder().shardId("shard-" + (++shardCounter[0])).hashKeyRange( - HashKeyRange.builder().startingHashKey(hashKeyRangeForLease.serializedStartingHashKey()) - .endingHashKey(hashKeyRangeForLease.serializedEndingHashKey()).build()).build()) + .map(hashKeyRangeForLease -> Shard.builder() + .shardId("shard-" + (++shardCounter[0])) + .hashKeyRange(HashKeyRange.builder() + .startingHashKey(hashKeyRangeForLease.serializedStartingHashKey()) + .endingHashKey(hashKeyRangeForLease.serializedEndingHashKey()) + .build()) + .build()) .collect(Collectors.toList()); when(shardDetector.listShards()).thenReturn(kinesisShards); - final int[] leaseCounter = { 0 }; - List multiStreamLeases = hashKeyRangeForLeases.stream().map(hashKeyRangeForLease -> { - MultiStreamLease lease = new MultiStreamLease(); - lease.leaseKey(MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), "shard-"+(++leaseCounter[0]))); - lease.shardId("shard-"+(leaseCounter[0])); - // Setting the hashrange only for last two leases - if (leaseCounter[0] >= 3) { - 
lease.hashKeyRange(hashKeyRangeForLease); - } - lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - return lease; - }).collect(Collectors.toList()); + final int[] leaseCounter = {0}; + List multiStreamLeases = hashKeyRangeForLeases.stream() + .map(hashKeyRangeForLease -> { + MultiStreamLease lease = new MultiStreamLease(); + lease.leaseKey( + MultiStreamLease.getLeaseKey(streamIdentifier.serialize(), "shard-" + (++leaseCounter[0]))); + lease.shardId("shard-" + (leaseCounter[0])); + // Setting the hashrange only for last two leases + if (leaseCounter[0] >= 3) { + lease.hashKeyRange(hashKeyRangeForLease); + } + lease.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + return lease; + }) + .collect(Collectors.toList()); // Assert that shard sync should never trigger - IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY).forEach(i -> Assert - .assertFalse(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync())); - Assert.assertTrue(periodicShardSyncManager.checkForShardSync(streamIdentifier, multiStreamLeases).shouldDoShardSync()); + IntStream.range(1, DEFAULT_CONSECUTIVE_HOLES_FOR_TRIGGERING_LEASE_RECOVERY) + .forEach(i -> Assert.assertFalse(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync())); + Assert.assertTrue(periodicShardSyncManager + .checkForShardSync(streamIdentifier, multiStreamLeases) + .shouldDoShardSync()); // Assert that all the leases now has hashRanges set. 
for (Lease lease : multiStreamLeases) { @@ -414,14 +535,17 @@ public class PeriodicShardSyncManagerTest { @Test public void testFor1000DifferentValidSplitHierarchyTreeTheHashRangesAreAlwaysComplete() { - for (int i=0; i < 1000; i++) { + for (int i = 0; i < 1000; i++) { int maxInitialLeaseCount = 100; List leases = generateInitialLeases(maxInitialLeaseCount); reshard(leases, 5, ReshardType.SPLIT, maxInitialLeaseCount, false); Collections.shuffle(leases); -// System.out.println( -// leases.stream().map(l -> l.checkpoint().sequenceNumber() + ":" + l.hashKeyRangeForLease()).collect(Collectors.toList())); - Assert.assertFalse(periodicShardSyncManager.hasHoleInLeases(streamIdentifier, leases).isPresent()); + // System.out.println( + // leases.stream().map(l -> l.checkpoint().sequenceNumber() + ":" + + // l.hashKeyRangeForLease()).collect(Collectors.toList())); + Assert.assertFalse(periodicShardSyncManager + .hasHoleInLeases(streamIdentifier, leases) + .isPresent()); } } @@ -432,7 +556,9 @@ public class PeriodicShardSyncManagerTest { List leases = generateInitialLeases(maxInitialLeaseCount); reshard(leases, 5, ReshardType.MERGE, maxInitialLeaseCount, false); Collections.shuffle(leases); - Assert.assertFalse(periodicShardSyncManager.hasHoleInLeases(streamIdentifier, leases).isPresent()); + Assert.assertFalse(periodicShardSyncManager + .hasHoleInLeases(streamIdentifier, leases) + .isPresent()); } } @@ -443,7 +569,9 @@ public class PeriodicShardSyncManagerTest { List leases = generateInitialLeases(maxInitialLeaseCount); reshard(leases, 5, ReshardType.ANY, maxInitialLeaseCount, false); Collections.shuffle(leases); - Assert.assertFalse(periodicShardSyncManager.hasHoleInLeases(streamIdentifier, leases).isPresent()); + Assert.assertFalse(periodicShardSyncManager + .hasHoleInLeases(streamIdentifier, leases) + .isPresent()); } } @@ -454,7 +582,9 @@ public class PeriodicShardSyncManagerTest { List leases = generateInitialLeases(maxInitialLeaseCount); reshard(leases, 
MAX_DEPTH_WITH_IN_PROGRESS_PARENTS, ReshardType.MERGE, maxInitialLeaseCount, true); Collections.shuffle(leases); - Assert.assertFalse(periodicShardSyncManager.hasHoleInLeases(streamIdentifier, leases).isPresent()); + Assert.assertFalse(periodicShardSyncManager + .hasHoleInLeases(streamIdentifier, leases) + .isPresent()); } } @@ -465,12 +595,12 @@ public class PeriodicShardSyncManagerTest { List leases = generateInitialLeases(maxInitialLeaseCount); reshard(leases, MAX_DEPTH_WITH_IN_PROGRESS_PARENTS, ReshardType.ANY, maxInitialLeaseCount, true); Collections.shuffle(leases); - Assert.assertFalse(periodicShardSyncManager.hasHoleInLeases(streamIdentifier, leases).isPresent()); + Assert.assertFalse(periodicShardSyncManager + .hasHoleInLeases(streamIdentifier, leases) + .isPresent()); } } - - private List generateInitialLeases(int initialShardCount) { long hashRangeInternalMax = 10000000; List initialLeases = new ArrayList<>(); @@ -493,7 +623,11 @@ public class PeriodicShardSyncManagerTest { return initialLeases; } - private void reshard(List initialLeases, int depth, ReshardType reshardType, int leaseCounter, + private void reshard( + List initialLeases, + int depth, + ReshardType reshardType, + int leaseCounter, boolean shouldKeepSomeParentsInProgress) { for (int i = 0; i < depth; i++) { if (reshardType == ReshardType.SPLIT) { @@ -511,25 +645,29 @@ public class PeriodicShardSyncManagerTest { } private int merge(List initialLeases, int leaseCounter, boolean shouldKeepSomeParentsInProgress) { - List leasesEligibleForMerge = initialLeases.stream().filter(l -> CollectionUtils.isNullOrEmpty(l.childShardIds())) + List leasesEligibleForMerge = initialLeases.stream() + .filter(l -> CollectionUtils.isNullOrEmpty(l.childShardIds())) .collect(Collectors.toList()); -// System.out.println("Leases to merge : " + leasesEligibleForMerge); + // System.out.println("Leases to merge : " + leasesEligibleForMerge); int leasesToMerge = (int) ((leasesEligibleForMerge.size() - 1) / 2.0 * 
Math.random()); for (int i = 0; i < leasesToMerge; i += 2) { Lease parent1 = leasesEligibleForMerge.get(i); Lease parent2 = leasesEligibleForMerge.get(i + 1); - if (parent2.hashKeyRangeForLease().startingHashKey().subtract(parent1.hashKeyRangeForLease().endingHashKey()).equals(BigInteger.ONE)) - { + if (parent2.hashKeyRangeForLease() + .startingHashKey() + .subtract(parent1.hashKeyRangeForLease().endingHashKey()) + .equals(BigInteger.ONE)) { parent1.checkpoint(ExtendedSequenceNumber.SHARD_END); if (!shouldKeepSomeParentsInProgress || (shouldKeepSomeParentsInProgress && isOneFromDiceRoll())) { -// System.out.println("Deciding to keep parent in progress : " + parent2); + // System.out.println("Deciding to keep parent in progress : " + parent2); parent2.checkpoint(ExtendedSequenceNumber.SHARD_END); } Lease child = new Lease(); child.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); child.leaseKey("shard-" + (++leaseCounter)); -// System.out.println("Parent " + parent1 + " and " + parent2 + " merges into " + child); - child.hashKeyRange(new HashKeyRangeForLease(parent1.hashKeyRangeForLease().startingHashKey(), + // System.out.println("Parent " + parent1 + " and " + parent2 + " merges into " + child); + child.hashKeyRange(new HashKeyRangeForLease( + parent1.hashKeyRangeForLease().startingHashKey(), parent2.hashKeyRangeForLease().endingHashKey())); parent1.childShardIds(Collections.singletonList(child.leaseKey())); parent2.childShardIds(Collections.singletonList(child.leaseKey())); @@ -542,24 +680,31 @@ public class PeriodicShardSyncManagerTest { } private int split(List initialLeases, int leaseCounter) { - List leasesEligibleForSplit = initialLeases.stream().filter(l -> CollectionUtils.isNullOrEmpty(l.childShardIds())) + List leasesEligibleForSplit = initialLeases.stream() + .filter(l -> CollectionUtils.isNullOrEmpty(l.childShardIds())) .collect(Collectors.toList()); -// System.out.println("Leases to split : " + leasesEligibleForSplit); + // System.out.println("Leases to 
split : " + leasesEligibleForSplit); int leasesToSplit = (int) (leasesEligibleForSplit.size() * Math.random()); for (int i = 0; i < leasesToSplit; i++) { Lease parent = leasesEligibleForSplit.get(i); parent.checkpoint(ExtendedSequenceNumber.SHARD_END); Lease child1 = new Lease(); child1.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); - child1.hashKeyRange(new HashKeyRangeForLease(parent.hashKeyRangeForLease().startingHashKey(), - parent.hashKeyRangeForLease().startingHashKey().add(parent.hashKeyRangeForLease().endingHashKey()) + child1.hashKeyRange(new HashKeyRangeForLease( + parent.hashKeyRangeForLease().startingHashKey(), + parent.hashKeyRangeForLease() + .startingHashKey() + .add(parent.hashKeyRangeForLease().endingHashKey()) .divide(new BigInteger("2")))); child1.leaseKey("shard-" + (++leaseCounter)); Lease child2 = new Lease(); child2.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); child2.hashKeyRange(new HashKeyRangeForLease( - parent.hashKeyRangeForLease().startingHashKey().add(parent.hashKeyRangeForLease().endingHashKey()) - .divide(new BigInteger("2")).add(new BigInteger("1")), + parent.hashKeyRangeForLease() + .startingHashKey() + .add(parent.hashKeyRangeForLease().endingHashKey()) + .divide(new BigInteger("2")) + .add(new BigInteger("1")), parent.hashKeyRangeForLease().endingHashKey())); child2.leaseKey("shard-" + (++leaseCounter)); @@ -567,7 +712,7 @@ public class PeriodicShardSyncManagerTest { child2.parentShardIds(Sets.newHashSet(parent.leaseKey())); parent.childShardIds(Lists.newArrayList(child1.leaseKey(), child2.leaseKey())); -// System.out.println("Parent " + parent + " splits into " + child1 + " and " + child2); + // System.out.println("Parent " + parent + " splits into " + child1 + " and " + child2); initialLeases.add(child1); initialLeases.add(child2); @@ -583,13 +728,9 @@ public class PeriodicShardSyncManagerTest { return Math.random() <= 0.16; } - private enum ReshardType { SPLIT, MERGE, ANY } - - - } diff --git 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/SchedulerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/SchedulerTest.java index f29c2341..f5e81d4f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/SchedulerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/SchedulerTest.java @@ -15,28 +15,6 @@ package software.amazon.kinesis.coordinator; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.eq; -import static org.mockito.Matchers.same; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.doCallRealMethod; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static org.mockito.internal.verification.VerificationModeFactory.atMost; - import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; @@ -68,7 +46,6 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.Spy; import org.mockito.runners.MockitoJUnitRunner; - import org.mockito.stubbing.OngoingStubbing; import software.amazon.awssdk.arns.Arn; import software.amazon.awssdk.regions.Region; @@ -86,8 +63,8 @@ import software.amazon.kinesis.common.StreamConfig; import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.exceptions.KinesisClientLibException; 
import software.amazon.kinesis.exceptions.KinesisClientLibNonRetryableException; -import software.amazon.kinesis.leases.LeaseCleanupManager; import software.amazon.kinesis.leases.HierarchicalShardSyncer; +import software.amazon.kinesis.leases.LeaseCleanupManager; import software.amazon.kinesis.leases.LeaseCoordinator; import software.amazon.kinesis.leases.LeaseManagementConfig; import software.amazon.kinesis.leases.LeaseManagementFactory; @@ -108,8 +85,8 @@ import software.amazon.kinesis.lifecycle.events.LeaseLostInput; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import software.amazon.kinesis.lifecycle.events.ShardEndedInput; import software.amazon.kinesis.lifecycle.events.ShutdownRequestedInput; -import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.metrics.MetricsConfig; +import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.processor.Checkpointer; import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy; import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.AutoDetectionAndDeferredDeletionStrategy; @@ -117,13 +94,35 @@ import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.NoL import software.amazon.kinesis.processor.FormerStreamsLeasesDeletionStrategy.ProvidedStreamsDeferredDeletionStrategy; import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.ProcessorConfig; -import software.amazon.kinesis.processor.ShardRecordProcessorFactory; import software.amazon.kinesis.processor.ShardRecordProcessor; +import software.amazon.kinesis.processor.ShardRecordProcessorFactory; import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.retrieval.RetrievalConfig; import software.amazon.kinesis.retrieval.RetrievalFactory; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.junit.Assert.assertEquals; +import static 
org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.doCallRealMethod; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static org.mockito.internal.verification.VerificationModeFactory.atMost; + /** * */ @@ -157,28 +156,40 @@ public class SchedulerTest { @Mock private KinesisAsyncClient kinesisClient; + @Mock private DynamoDbAsyncClient dynamoDBClient; + @Mock private CloudWatchAsyncClient cloudWatchClient; + @Mock private RetrievalFactory retrievalFactory; + @Mock private RecordsPublisher recordsPublisher; + @Mock private LeaseCoordinator leaseCoordinator; + @Mock private ShardSyncTaskManager shardSyncTaskManager; + @Mock private DynamoDBLeaseRefresher dynamoDBLeaseRefresher; + @Mock private ShardDetector shardDetector; + @Mock private Checkpointer checkpoint; + @Mock private WorkerStateChangeListener workerStateChangeListener; + @Spy private TestMultiStreamTracker multiStreamTracker; + @Mock private LeaseCleanupManager leaseCleanupManager; @@ -192,27 +203,38 @@ public class SchedulerTest { shardRecordProcessorFactory = new TestShardRecordProcessorFactory(); checkpointConfig = new CheckpointConfig().checkpointFactory(new TestKinesisCheckpointFactory()); - coordinatorConfig = new CoordinatorConfig(applicationName).parentShardPollIntervalMillis(100L) + coordinatorConfig = 
new CoordinatorConfig(applicationName) + .parentShardPollIntervalMillis(100L) .workerStateChangeListener(workerStateChangeListener); - leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, streamName, - workerIdentifier).leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, false)); + leaseManagementConfig = new LeaseManagementConfig( + tableName, dynamoDBClient, kinesisClient, streamName, workerIdentifier) + .leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, false)); lifecycleConfig = new LifecycleConfig(); metricsConfig = new MetricsConfig(cloudWatchClient, namespace); processorConfig = new ProcessorConfig(shardRecordProcessorFactory); - retrievalConfig = new RetrievalConfig(kinesisClient, streamName, applicationName) - .retrievalFactory(retrievalFactory); + retrievalConfig = + new RetrievalConfig(kinesisClient, streamName, applicationName).retrievalFactory(retrievalFactory); when(leaseCoordinator.leaseRefresher()).thenReturn(dynamoDBLeaseRefresher); when(shardSyncTaskManager.shardDetector()).thenReturn(shardDetector); when(shardSyncTaskManager.hierarchicalShardSyncer()).thenReturn(new HierarchicalShardSyncer()); when(shardSyncTaskManager.callShardSyncTask()).thenReturn(new TaskResult(null)); - when(retrievalFactory.createGetRecordsCache(any(ShardInfo.class), any(StreamConfig.class), - any(MetricsFactory.class))).thenReturn(recordsPublisher); + when(retrievalFactory.createGetRecordsCache( + any(ShardInfo.class), any(StreamConfig.class), any(MetricsFactory.class))) + .thenReturn(recordsPublisher); when(shardDetector.streamIdentifier()).thenReturn(mock(StreamIdentifier.class)); when(kinesisClient.serviceClientConfiguration()) - .thenReturn(KinesisServiceClientConfiguration.builder().region(TEST_REGION).build()); + .thenReturn(KinesisServiceClientConfiguration.builder() + .region(TEST_REGION) + .build()); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, 
lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); } /** @@ -222,8 +244,14 @@ public class SchedulerTest { public void testGetStageName() { final String stageName = "testStageName"; coordinatorConfig = new CoordinatorConfig(stageName); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); assertEquals(stageName, scheduler.applicationName()); } @@ -232,17 +260,20 @@ public class SchedulerTest { final String shardId = "shardId-000000000000"; final String concurrencyToken = "concurrencyToken"; final ShardInfo shardInfo = new ShardInfo(shardId, concurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - final ShardConsumer shardConsumer1 = scheduler.createOrGetShardConsumer(shardInfo, shardRecordProcessorFactory, leaseCleanupManager); + final ShardConsumer shardConsumer1 = + scheduler.createOrGetShardConsumer(shardInfo, shardRecordProcessorFactory, leaseCleanupManager); assertNotNull(shardConsumer1); - final ShardConsumer shardConsumer2 = scheduler.createOrGetShardConsumer(shardInfo, shardRecordProcessorFactory, leaseCleanupManager); + final ShardConsumer shardConsumer2 = + scheduler.createOrGetShardConsumer(shardInfo, shardRecordProcessorFactory, leaseCleanupManager); assertNotNull(shardConsumer2); assertSame(shardConsumer1, shardConsumer2); final String anotherConcurrencyToken = "anotherConcurrencyToken"; - final ShardInfo shardInfo2 = new ShardInfo(shardId, anotherConcurrencyToken, null, - ExtendedSequenceNumber.TRIM_HORIZON); - final ShardConsumer shardConsumer3 = 
scheduler.createOrGetShardConsumer(shardInfo2, shardRecordProcessorFactory, leaseCleanupManager); + final ShardInfo shardInfo2 = + new ShardInfo(shardId, anotherConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); + final ShardConsumer shardConsumer3 = + scheduler.createOrGetShardConsumer(shardInfo2, shardRecordProcessorFactory, leaseCleanupManager); assertNotNull(shardConsumer3); assertNotSame(shardConsumer1, shardConsumer3); @@ -257,12 +288,12 @@ public class SchedulerTest { final ExtendedSequenceNumber secondSequenceNumber = new ExtendedSequenceNumber("1000"); final ExtendedSequenceNumber finalSequenceNumber = new ExtendedSequenceNumber("2000"); - final List initialShardInfo = Collections.singletonList( - new ShardInfo(shardId, concurrencyToken, null, firstSequenceNumber)); - final List firstShardInfo = Collections.singletonList( - new ShardInfo(shardId, concurrencyToken, null, secondSequenceNumber)); - final List secondShardInfo = Collections.singletonList( - new ShardInfo(shardId, concurrencyToken, null, finalSequenceNumber)); + final List initialShardInfo = + Collections.singletonList(new ShardInfo(shardId, concurrencyToken, null, firstSequenceNumber)); + final List firstShardInfo = + Collections.singletonList(new ShardInfo(shardId, concurrencyToken, null, secondSequenceNumber)); + final List secondShardInfo = + Collections.singletonList(new ShardInfo(shardId, concurrencyToken, null, finalSequenceNumber)); final Checkpoint firstCheckpoint = new Checkpoint(firstSequenceNumber, null, null); @@ -274,9 +305,12 @@ public class SchedulerTest { schedulerSpy.runProcessLoop(); schedulerSpy.runProcessLoop(); - verify(schedulerSpy).buildConsumer(same(initialShardInfo.get(0)), eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); - verify(schedulerSpy, never()).buildConsumer(same(firstShardInfo.get(0)), eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); - verify(schedulerSpy, never()).buildConsumer(same(secondShardInfo.get(0)), 
eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); + verify(schedulerSpy) + .buildConsumer(same(initialShardInfo.get(0)), eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); + verify(schedulerSpy, never()) + .buildConsumer(same(firstShardInfo.get(0)), eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); + verify(schedulerSpy, never()) + .buildConsumer(same(secondShardInfo.get(0)), eq(shardRecordProcessorFactory), eq(leaseCleanupManager)); verify(checkpoint).getCheckpointObject(eq(shardId)); } @@ -288,14 +322,16 @@ public class SchedulerTest { final String anotherConcurrencyToken = "anotherConcurrencyToken"; final ShardInfo shardInfo0 = new ShardInfo(shard0, concurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - final ShardInfo shardInfo0WithAnotherConcurrencyToken = new ShardInfo(shard0, anotherConcurrencyToken, null, - ExtendedSequenceNumber.TRIM_HORIZON); + final ShardInfo shardInfo0WithAnotherConcurrencyToken = + new ShardInfo(shard0, anotherConcurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); final ShardInfo shardInfo1 = new ShardInfo(shard1, concurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - final ShardConsumer shardConsumer0 = scheduler.createOrGetShardConsumer(shardInfo0, shardRecordProcessorFactory, leaseCleanupManager); - final ShardConsumer shardConsumer0WithAnotherConcurrencyToken = - scheduler.createOrGetShardConsumer(shardInfo0WithAnotherConcurrencyToken, shardRecordProcessorFactory, leaseCleanupManager); - final ShardConsumer shardConsumer1 = scheduler.createOrGetShardConsumer(shardInfo1, shardRecordProcessorFactory, leaseCleanupManager); + final ShardConsumer shardConsumer0 = + scheduler.createOrGetShardConsumer(shardInfo0, shardRecordProcessorFactory, leaseCleanupManager); + final ShardConsumer shardConsumer0WithAnotherConcurrencyToken = scheduler.createOrGetShardConsumer( + shardInfo0WithAnotherConcurrencyToken, shardRecordProcessorFactory, leaseCleanupManager); + final ShardConsumer 
shardConsumer1 = + scheduler.createOrGetShardConsumer(shardInfo1, shardRecordProcessorFactory, leaseCleanupManager); Set shards = new HashSet<>(); shards.add(shardInfo0); @@ -313,23 +349,38 @@ public class SchedulerTest { public final void testInitializationFailureWithRetries() throws Exception { doNothing().when(leaseCoordinator).initialize(); when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenThrow(new RuntimeException()); - leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, streamName, - workerIdentifier).leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, true)); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + leaseManagementConfig = new LeaseManagementConfig( + tableName, dynamoDBClient, kinesisClient, streamName, workerIdentifier) + .leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, true)); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); scheduler.run(); - verify(dynamoDBLeaseRefresher, times(coordinatorConfig.maxInitializationAttempts())).isLeaseTableEmpty(); + verify(dynamoDBLeaseRefresher, times(coordinatorConfig.maxInitializationAttempts())) + .isLeaseTableEmpty(); } @Test public final void testInitializationFailureWithRetriesWithConfiguredMaxInitializationAttempts() throws Exception { final int maxInitializationAttempts = 5; coordinatorConfig.maxInitializationAttempts(maxInitializationAttempts); - leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, streamName, - workerIdentifier).leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, true)); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + 
leaseManagementConfig = new LeaseManagementConfig( + tableName, dynamoDBClient, kinesisClient, streamName, workerIdentifier) + .leaseManagementFactory(new TestKinesisLeaseManagementFactory(false, true)); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); doNothing().when(leaseCoordinator).initialize(); when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenThrow(new RuntimeException()); @@ -344,25 +395,37 @@ public class SchedulerTest { public final void testMultiStreamInitialization() { retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, - workerIdentifier).leaseManagementFactory(new TestKinesisLeaseManagementFactory(true, true)); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, workerIdentifier) + .leaseManagementFactory(new TestKinesisLeaseManagementFactory(true, true)); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); scheduler.initialize(); - shardDetectorMap.values() - .forEach(shardDetector -> verify(shardDetector, times(1)).listShards()); - shardSyncTaskManagerMap.values() - .forEach(shardSyncTM -> verify(shardSyncTM, times(1)).hierarchicalShardSyncer()); + shardDetectorMap.values().forEach(shardDetector -> verify(shardDetector, times(1)) + .listShards()); + shardSyncTaskManagerMap.values().forEach(shardSyncTM -> verify(shardSyncTM, times(1)) + .hierarchicalShardSyncer()); } @Test public final void testMultiStreamInitializationWithFailures() { retrievalConfig 
= new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, - workerIdentifier).leaseManagementFactory(new TestKinesisLeaseManagementFactory(true, true)); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + leaseManagementConfig = new LeaseManagementConfig(tableName, dynamoDBClient, kinesisClient, workerIdentifier) + .leaseManagementFactory(new TestKinesisLeaseManagementFactory(true, true)); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); scheduler.initialize(); // Note : As of today we retry for all streams in the next attempt. Hence the retry for each stream will vary. // At the least we expect 2 retries for each stream. 
Since there are 4 streams, we expect at most @@ -386,14 +449,29 @@ public class SchedulerTest { final ExtendedSequenceNumber finalSequenceNumber = new ExtendedSequenceNumber("2000"); final List initialShardInfo = multiStreamTracker.streamConfigList().stream() - .map(sc -> new ShardInfo(shardId, concurrencyToken, null, firstSequenceNumber, - sc.streamIdentifier().serialize())).collect(Collectors.toList()); + .map(sc -> new ShardInfo( + shardId, + concurrencyToken, + null, + firstSequenceNumber, + sc.streamIdentifier().serialize())) + .collect(Collectors.toList()); final List firstShardInfo = multiStreamTracker.streamConfigList().stream() - .map(sc -> new ShardInfo(shardId, concurrencyToken, null, secondSequenceNumber, - sc.streamIdentifier().serialize())).collect(Collectors.toList()); + .map(sc -> new ShardInfo( + shardId, + concurrencyToken, + null, + secondSequenceNumber, + sc.streamIdentifier().serialize())) + .collect(Collectors.toList()); final List secondShardInfo = multiStreamTracker.streamConfigList().stream() - .map(sc -> new ShardInfo(shardId, concurrencyToken, null, finalSequenceNumber, - sc.streamIdentifier().serialize())).collect(Collectors.toList()); + .map(sc -> new ShardInfo( + shardId, + concurrencyToken, + null, + finalSequenceNumber, + sc.streamIdentifier().serialize())) + .collect(Collectors.toList()); final Checkpoint firstCheckpoint = new Checkpoint(firstSequenceNumber, null, null); @@ -401,101 +479,138 @@ public class SchedulerTest { when(checkpoint.getCheckpointObject(anyString())).thenReturn(firstCheckpoint); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + 
retrievalConfig); Scheduler schedulerSpy = spy(scheduler); schedulerSpy.runProcessLoop(); schedulerSpy.runProcessLoop(); schedulerSpy.runProcessLoop(); - initialShardInfo.forEach( - shardInfo -> verify(schedulerSpy).buildConsumer(same(shardInfo), eq(shardRecordProcessorFactory), same(leaseCleanupManager))); - firstShardInfo.forEach( - shardInfo -> verify(schedulerSpy, never()).buildConsumer(same(shardInfo), eq(shardRecordProcessorFactory), eq(leaseCleanupManager))); - secondShardInfo.forEach( - shardInfo -> verify(schedulerSpy, never()).buildConsumer(same(shardInfo), eq(shardRecordProcessorFactory), eq(leaseCleanupManager))); + initialShardInfo.forEach(shardInfo -> verify(schedulerSpy) + .buildConsumer(same(shardInfo), eq(shardRecordProcessorFactory), same(leaseCleanupManager))); + firstShardInfo.forEach(shardInfo -> verify(schedulerSpy, never()) + .buildConsumer(same(shardInfo), eq(shardRecordProcessorFactory), eq(leaseCleanupManager))); + secondShardInfo.forEach(shardInfo -> verify(schedulerSpy, never()) + .buildConsumer(same(shardInfo), eq(shardRecordProcessorFactory), eq(leaseCleanupManager))); } @Test public final void testMultiStreamNoStreamsAreSyncedWhenStreamsAreNotRefreshed() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - List streamConfigList1 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList1 = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); - List streamConfigList2 = 
IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList2 = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); Set syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases(); Assert.assertTrue("SyncedStreams should be empty", syncedStreams.isEmpty()); - assertEquals(new HashSet<>(streamConfigList1), new HashSet<>(scheduler.currentStreamConfigMap().values())); + assertEquals( + new HashSet<>(streamConfigList1), + new HashSet<>(scheduler.currentStreamConfigMap().values())); } @Test public final void testMultiStreamOnlyNewStreamsAreSynced() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - List streamConfigList1 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, 
streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList1 = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); - List streamConfigList2 = IntStream.range(1, 7).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList2 = IntStream.range(1, 7) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); Set syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases(); - Set expectedSyncedStreams = IntStream.range(5, 7).mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, 
"multiStreamTest-" + streamId, streamId * 12345))).collect( - Collectors.toCollection(HashSet::new)); + Set expectedSyncedStreams = IntStream.range(5, 7) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( + Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) + .collect(Collectors.toCollection(HashSet::new)); Assert.assertEquals(expectedSyncedStreams, syncedStreams); - Assert.assertEquals(Sets.newHashSet(streamConfigList2), + Assert.assertEquals( + Sets.newHashSet(streamConfigList2), Sets.newHashSet(scheduler.currentStreamConfigMap().values())); } @Test public final void testMultiStreamSyncFromTableDefaultInitPos() { // Streams in lease table but not tracked by multiStreamTracker - List leasesInTable = IntStream.range(1, 3).mapToObj(streamId -> new MultiStreamLease() - .streamIdentifier( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)) - .shardId("some_random_shard_id")) + List leasesInTable = IntStream.range(1, 3) + .mapToObj(streamId -> new MultiStreamLease() + .streamIdentifier(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)) + .shardId("some_random_shard_id")) .collect(Collectors.toCollection(LinkedList::new)); - // Include a stream that is already tracked by multiStreamTracker, just to make sure we will not touch this stream config later - leasesInTable.add(new MultiStreamLease().streamIdentifier("123456789012:stream1:1").shardId("some_random_shard_id")); + // Include a stream that is already tracked by multiStreamTracker, just to make sure we will not touch this + // stream config later + leasesInTable.add(new MultiStreamLease() + .streamIdentifier("123456789012:stream1:1") + .shardId("some_random_shard_id")); // Expected StreamConfig after running syncStreamsFromLeaseTableOnAppInit // By default, Stream not present in multiStreamTracker will have initial position of LATEST - List expectedConfig = 
IntStream.range(1, 3).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List expectedConfig = IntStream.range(1, 3) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); // Include default configs expectedConfig.addAll(multiStreamTracker.streamConfigList()); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); scheduler.syncStreamsFromLeaseTableOnAppInit(leasesInTable); - Map expectedConfigMap = expectedConfig.stream().collect(Collectors.toMap( - StreamConfig::streamIdentifier, Function.identity())); + Map expectedConfigMap = + expectedConfig.stream().collect(Collectors.toMap(StreamConfig::streamIdentifier, Function.identity())); Assert.assertEquals(expectedConfigMap, scheduler.currentStreamConfigMap()); } @@ -504,45 +619,59 @@ public class SchedulerTest { Date testTimeStamp = new Date(); // Streams in lease table but not tracked by multiStreamTracker - List leasesInTable = IntStream.range(1, 3).mapToObj(streamId -> new MultiStreamLease() - .streamIdentifier( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)) - .shardId("some_random_shard_id")) + List 
leasesInTable = IntStream.range(1, 3) + .mapToObj(streamId -> new MultiStreamLease() + .streamIdentifier(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)) + .shardId("some_random_shard_id")) .collect(Collectors.toCollection(LinkedList::new)); - // Include a stream that is already tracked by multiStreamTracker, just to make sure we will not touch this stream config later - leasesInTable.add(new MultiStreamLease().streamIdentifier("123456789012:stream1:1").shardId("some_random_shard_id")); + // Include a stream that is already tracked by multiStreamTracker, just to make sure we will not touch this + // stream config later + leasesInTable.add(new MultiStreamLease() + .streamIdentifier("123456789012:stream1:1") + .shardId("some_random_shard_id")); // Expected StreamConfig after running syncStreamsFromLeaseTableOnAppInit - // Stream not present in multiStreamTracker will have initial position specified by orphanedStreamInitialPositionInStream - List expectedConfig = IntStream.range(1, 3).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPositionAtTimestamp(testTimeStamp))) + // Stream not present in multiStreamTracker will have initial position specified by + // orphanedStreamInitialPositionInStream + List expectedConfig = IntStream.range(1, 3) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPositionAtTimestamp(testTimeStamp))) .collect(Collectors.toCollection(LinkedList::new)); // Include default configs expectedConfig.addAll(multiStreamTracker.streamConfigList()); // Mock a specific orphanedStreamInitialPositionInStream specified in multiStreamTracker - 
when(multiStreamTracker.orphanedStreamInitialPositionInStream()).thenReturn( - InitialPositionInStreamExtended.newInitialPositionAtTimestamp(testTimeStamp)); + when(multiStreamTracker.orphanedStreamInitialPositionInStream()) + .thenReturn(InitialPositionInStreamExtended.newInitialPositionAtTimestamp(testTimeStamp)); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); scheduler.syncStreamsFromLeaseTableOnAppInit(leasesInTable); - Map expectedConfigMap = expectedConfig.stream().collect(Collectors.toMap( - sc -> sc.streamIdentifier(), sc -> sc)); + Map expectedConfigMap = + expectedConfig.stream().collect(Collectors.toMap(sc -> sc.streamIdentifier(), sc -> sc)); Assert.assertEquals(expectedConfigMap, scheduler.currentStreamConfigMap()); } @Test public final void testMultiStreamStaleStreamsAreNotDeletedImmediatelyAutoDeletionStrategy() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ofHours(1); - } - }); + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofHours(1); + } + }); testMultiStreamStaleStreamsAreNotDeletedImmediately(true, false); } @@ -556,93 +685,121 @@ public class SchedulerTest { @Test public final void 
testMultiStreamStaleStreamsAreNotDeletedImmediatelyProvidedListStrategy() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return null; - } + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return null; + } - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ofHours(1); - } - }); + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofHours(1); + } + }); testMultiStreamStaleStreamsAreNotDeletedImmediately(false, false); } @Test public final void testMultiStreamStaleStreamsAreNotDeletedImmediatelyProvidedListStrategy2() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return IntStream.range(1, 3).mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))).collect( - Collectors.toCollection(ArrayList::new)); - } + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return IntStream.range(1, 3) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join( + streamId * TEST_ACCOUNT, + "multiStreamTest-" + streamId, + streamId * 12345))) + .collect(Collectors.toCollection(ArrayList::new)); + } - @Override public Duration waitPeriodToDeleteFormerStreams() { - return 
Duration.ofHours(1); - } - }); + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofHours(1); + } + }); testMultiStreamStaleStreamsAreNotDeletedImmediately(true, false); } - private void testMultiStreamStaleStreamsAreNotDeletedImmediately(boolean expectPendingStreamsForDeletion, - boolean onlyStreamsDeletionNotLeases) + private void testMultiStreamStaleStreamsAreNotDeletedImmediately( + boolean expectPendingStreamsForDeletion, boolean onlyStreamsDeletionNotLeases) throws DependencyException, ProvisionedThroughputException, InvalidStateException { - List streamConfigList1 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList1 = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); - List streamConfigList2 = IntStream.range(3, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList2 = IntStream.range(3, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); retrievalConfig = new 
RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); mockListLeases(streamConfigList1); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); Set syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases(); - Set expectedPendingStreams = IntStream.range(1, 3).mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))).collect( - Collectors.toCollection(HashSet::new)); - Set expectedSyncedStreams = onlyStreamsDeletionNotLeases ? expectedPendingStreams : Sets.newHashSet(); + Set expectedPendingStreams = IntStream.range(1, 3) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( + Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) + .collect(Collectors.toCollection(HashSet::new)); + Set expectedSyncedStreams = + onlyStreamsDeletionNotLeases ? expectedPendingStreams : Sets.newHashSet(); Assert.assertEquals(expectedSyncedStreams, syncedStreams); - Assert.assertEquals(Sets.newHashSet(onlyStreamsDeletionNotLeases ? streamConfigList2 : streamConfigList1), + Assert.assertEquals( + Sets.newHashSet(onlyStreamsDeletionNotLeases ? streamConfigList2 : streamConfigList1), Sets.newHashSet(scheduler.currentStreamConfigMap().values())); - Assert.assertEquals(expectPendingStreamsForDeletion ? expectedPendingStreams : Sets.newHashSet(), + Assert.assertEquals( + expectPendingStreamsForDeletion ? 
expectedPendingStreams : Sets.newHashSet(), scheduler.staleStreamDeletionMap().keySet()); } @Test public final void testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriodWithAutoDetectionStrategy() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ZERO; - } - }); + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ZERO; + } + }); testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriod(true, null); } @Test public final void testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriodWithProvidedListStrategy() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return null; - } + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return null; + } - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ZERO; - } - }); - HashSet currentStreamConfigMapOverride = IntStream.range(1, 5).mapToObj( - streamId -> new StreamConfig(StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ZERO; + } + }); + HashSet currentStreamConfigMapOverride = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + 
StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(HashSet::new)); testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriod(false, currentStreamConfigMapOverride); @@ -651,60 +808,82 @@ public class SchedulerTest { @Test public final void testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriodWithProvidedListStrategy2() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return IntStream.range(1, 3).mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))).collect( - Collectors.toCollection(ArrayList::new)); - } + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return IntStream.range(1, 3) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join( + streamId * TEST_ACCOUNT, + "multiStreamTest-" + streamId, + streamId * 12345))) + .collect(Collectors.toCollection(ArrayList::new)); + } - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ZERO; - } - }); + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ZERO; + } + }); testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriod(true, null); } - private void testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriod(boolean expectSyncedStreams, - Set currentStreamConfigMapOverride) + private void testMultiStreamStaleStreamsAreDeletedAfterDefermentPeriod( + boolean 
expectSyncedStreams, Set currentStreamConfigMapOverride) throws DependencyException, ProvisionedThroughputException, InvalidStateException { - List streamConfigList1 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList1 = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); - List streamConfigList2 = IntStream.range(3, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList2 = IntStream.range(3, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + 
lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); mockListLeases(streamConfigList1); Set syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases(); - Set expectedSyncedStreams = IntStream.range(1, 3).mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))).collect( - Collectors.toCollection(HashSet::new)); + Set expectedSyncedStreams = IntStream.range(1, 3) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( + Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) + .collect(Collectors.toCollection(HashSet::new)); Assert.assertEquals(expectSyncedStreams ? expectedSyncedStreams : Sets.newHashSet(), syncedStreams); - Assert.assertEquals(currentStreamConfigMapOverride == null ? Sets.newHashSet(streamConfigList2) : currentStreamConfigMapOverride, + Assert.assertEquals( + currentStreamConfigMapOverride == null + ? 
Sets.newHashSet(streamConfigList2) + : currentStreamConfigMapOverride, Sets.newHashSet(scheduler.currentStreamConfigMap().values())); - Assert.assertEquals(Sets.newHashSet(), - scheduler.staleStreamDeletionMap().keySet()); + Assert.assertEquals( + Sets.newHashSet(), scheduler.staleStreamDeletionMap().keySet()); } @Test - public final void testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithAutoDetectionStrategy() - throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ofHours(1); - } - }); + public final void + testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithAutoDetectionStrategy() + throws DependencyException, ProvisionedThroughputException, InvalidStateException { + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofHours(1); + } + }); testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediately(true, false); } @@ -716,49 +895,65 @@ public class SchedulerTest { } @Test - public final void testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithProvidedListStrategy() - throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return null; - } + public final void + testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithProvidedListStrategy() + throws DependencyException, ProvisionedThroughputException, InvalidStateException { + 
when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return null; + } - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ofHours(1); - } - }); + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofHours(1); + } + }); testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediately(false, false); } @Test - public final void testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithProvidedListStrategy2() - throws DependencyException, ProvisionedThroughputException, InvalidStateException { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { - @Override public List streamIdentifiersForLeaseCleanup() { - return IntStream.range(1, 3) - .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) - .collect(Collectors.toCollection(ArrayList::new)); - } + public final void + testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediatelyWithProvidedListStrategy2() + throws DependencyException, ProvisionedThroughputException, InvalidStateException { + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new ProvidedStreamsDeferredDeletionStrategy() { + @Override + public List streamIdentifiersForLeaseCleanup() { + return IntStream.range(1, 3) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join( + streamId * TEST_ACCOUNT, + "multiStreamTest-" + streamId, + streamId * 12345))) + .collect(Collectors.toCollection(ArrayList::new)); + } - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ofHours(1); - } - }); + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return 
Duration.ofHours(1); + } + }); testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediately(true, false); } private void testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreNotDeletedImmediately( - boolean expectPendingStreamsForDeletion, - boolean onlyStreamsNoLeasesDeletion) + boolean expectPendingStreamsForDeletion, boolean onlyStreamsNoLeasesDeletion) throws DependencyException, ProvisionedThroughputException, InvalidStateException { List streamConfigList1 = createDummyStreamConfigList(1, 5); List streamConfigList2 = createDummyStreamConfigList(3, 7); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); // Mock listLeases to exercise the delete path so scheduler doesn't remove stale streams due to not presenting // in lease table @@ -772,57 +967,66 @@ public class SchedulerTest { if (onlyStreamsNoLeasesDeletion) { expectedSyncedStreams = IntStream.concat(IntStream.range(1, 3), IntStream.range(5, 7)) - .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) .collect(Collectors.toCollection(HashSet::new)); } else { expectedSyncedStreams = IntStream.range(5, 7) - .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( - 
Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) + .mapToObj(streamId -> StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) .collect(Collectors.toCollection(HashSet::new)); } Assert.assertEquals(expectedSyncedStreams, syncedStreams); List expectedCurrentStreamConfigs; if (onlyStreamsNoLeasesDeletion) { - expectedCurrentStreamConfigs = IntStream.range(3, 7).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + expectedCurrentStreamConfigs = IntStream.range(3, 7) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); } else { - expectedCurrentStreamConfigs = IntStream.range(1, 7).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + expectedCurrentStreamConfigs = IntStream.range(1, 7) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); } - Assert.assertEquals(Sets.newHashSet(expectedCurrentStreamConfigs), + Assert.assertEquals( + Sets.newHashSet(expectedCurrentStreamConfigs), 
Sets.newHashSet(scheduler.currentStreamConfigMap().values())); - Assert.assertEquals(expectPendingStreamsForDeletion ? expectedPendingStreams: Sets.newHashSet(), + Assert.assertEquals( + expectPendingStreamsForDeletion ? expectedPendingStreams : Sets.newHashSet(), scheduler.staleStreamDeletionMap().keySet()); } - @Test - public void testKinesisStaleDeletedStreamCleanup() throws ProvisionedThroughputException, InvalidStateException, DependencyException { + public void testKinesisStaleDeletedStreamCleanup() + throws ProvisionedThroughputException, InvalidStateException, DependencyException { List streamConfigList1 = createDummyStreamConfigList(1, 6); List streamConfigList2 = createDummyStreamConfigList(1, 4); prepareForStaleDeletedStreamCleanupTests(streamConfigList1, streamConfigList2); // when KCL starts it starts with tracking 5 stream - assertEquals(Sets.newHashSet(streamConfigList1), Sets.newHashSet(scheduler.currentStreamConfigMap().values())); + assertEquals( + Sets.newHashSet(streamConfigList1), + Sets.newHashSet(scheduler.currentStreamConfigMap().values())); assertEquals(0, scheduler.staleStreamDeletionMap().size()); mockListLeases(streamConfigList1); // 2 Streams are no longer needed to be consumed Set syncedStreams1 = scheduler.checkAndSyncStreamShardsAndLeases(); - assertEquals(Sets.newHashSet(streamConfigList1), Sets.newHashSet(scheduler.currentStreamConfigMap().values())); - assertEquals(createDummyStreamConfigList(4, 6).stream() - .map(StreamConfig::streamIdentifier) - .collect(Collectors.toSet()), scheduler.staleStreamDeletionMap() - .keySet()); + assertEquals( + Sets.newHashSet(streamConfigList1), + Sets.newHashSet(scheduler.currentStreamConfigMap().values())); + assertEquals( + createDummyStreamConfigList(4, 6).stream() + .map(StreamConfig::streamIdentifier) + .collect(Collectors.toSet()), + scheduler.staleStreamDeletionMap().keySet()); assertEquals(0, syncedStreams1.size()); StreamConfig deletedStreamConfig = createDummyStreamConfig(5); @@ -834,15 
+1038,18 @@ public class SchedulerTest { Set expectedCurrentStreamConfigs = Sets.newHashSet(streamConfigList1); expectedCurrentStreamConfigs.remove(deletedStreamConfig); - //assert kinesis deleted stream is cleaned up from KCL in memory state. - assertEquals(expectedCurrentStreamConfigs, Sets.newHashSet(scheduler.currentStreamConfigMap().values())); - assertEquals(Sets.newHashSet(createDummyStreamConfig(4).streamIdentifier()), + // assert kinesis deleted stream is cleaned up from KCL in memory state. + assertEquals( + expectedCurrentStreamConfigs, + Sets.newHashSet(scheduler.currentStreamConfigMap().values())); + assertEquals( + Sets.newHashSet(createDummyStreamConfig(4).streamIdentifier()), Sets.newHashSet(scheduler.staleStreamDeletionMap().keySet())); assertEquals(1, syncedStreams2.size()); - assertEquals(0, scheduler.deletedStreamListProvider().purgeAllDeletedStream().size()); + assertEquals( + 0, scheduler.deletedStreamListProvider().purgeAllDeletedStream().size()); verify(multiStreamTracker, times(3)).streamConfigList(); - } // Tests validate that no cleanup of stream is done if its still tracked in multiStreamTracker @@ -858,15 +1065,19 @@ public class SchedulerTest { assertEquals(0, syncedStreams.size()); assertEquals(0, scheduler.staleStreamDeletionMap().size()); - assertEquals(Sets.newHashSet(streamConfigList1), Sets.newHashSet(scheduler.currentStreamConfigMap().values())); + assertEquals( + Sets.newHashSet(streamConfigList1), + Sets.newHashSet(scheduler.currentStreamConfigMap().values())); } - //Creates list of upperBound-lowerBound no of dummy StreamConfig + // Creates list of upperBound-lowerBound no of dummy StreamConfig private List createDummyStreamConfigList(int lowerBound, int upperBound) { - return IntStream.range(lowerBound, upperBound).mapToObj(this::createDummyStreamConfig) - .collect(Collectors.toCollection(LinkedList::new)); + return IntStream.range(lowerBound, upperBound) + .mapToObj(this::createDummyStreamConfig) + 
.collect(Collectors.toCollection(LinkedList::new)); } - private StreamConfig createDummyStreamConfig(int streamId){ + + private StreamConfig createDummyStreamConfig(int streamId) { return new StreamConfig( StreamIdentifier.multiStreamInstance( Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), @@ -876,37 +1087,48 @@ public class SchedulerTest { @Test public final void testMultiStreamNewStreamsAreSyncedAndStaleStreamsAreDeletedAfterDefermentPeriod() throws DependencyException, ProvisionedThroughputException, InvalidStateException { - List streamConfigList1 = IntStream.range(1, 5).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList1 = IntStream.range(1, 5) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); - List streamConfigList2 = IntStream.range(3, 7).mapToObj(streamId -> new StreamConfig( - StreamIdentifier.multiStreamInstance( - Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) + List streamConfigList2 = IntStream.range(3, 7) + .mapToObj(streamId -> new StreamConfig( + StreamIdentifier.multiStreamInstance(Joiner.on(":") + .join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345)), + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST))) .collect(Collectors.toCollection(LinkedList::new)); retrievalConfig = new RetrievalConfig(kinesisClient, 
multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList1, streamConfigList2); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { - @Override public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ZERO; - } - }); + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ZERO; + } + }); Set syncedStreams = scheduler.checkAndSyncStreamShardsAndLeases(); Set expectedSyncedStreams = IntStream.concat(IntStream.range(1, 3), IntStream.range(5, 7)) .mapToObj(streamId -> StreamIdentifier.multiStreamInstance( Joiner.on(":").join(streamId * TEST_ACCOUNT, "multiStreamTest-" + streamId, streamId * 12345))) .collect(Collectors.toCollection(HashSet::new)); Assert.assertEquals(expectedSyncedStreams, syncedStreams); - Assert.assertEquals(Sets.newHashSet(streamConfigList2), + Assert.assertEquals( + Sets.newHashSet(streamConfigList2), Sets.newHashSet(scheduler.currentStreamConfigMap().values())); - Assert.assertEquals(Sets.newHashSet(), - scheduler.staleStreamDeletionMap().keySet()); + Assert.assertEquals( + Sets.newHashSet(), scheduler.staleStreamDeletionMap().keySet()); } @Test @@ -914,8 +1136,14 @@ public class SchedulerTest { final int maxInitializationAttempts = 1; coordinatorConfig.maxInitializationAttempts(maxInitializationAttempts); 
coordinatorConfig.skipShardSyncAtWorkerInitializationIfLeasesExist(false); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); doNothing().when(leaseCoordinator).initialize(); when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true); @@ -925,7 +1153,8 @@ public class SchedulerTest { long endTime = System.currentTimeMillis(); assertTrue(endTime - startTime > MIN_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS); - assertTrue(endTime - startTime < (MAX_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS + LEASE_TABLE_CHECK_FREQUENCY_MILLIS)); + assertTrue(endTime - startTime + < (MAX_WAIT_TIME_FOR_LEASE_TABLE_CHECK_MILLIS + LEASE_TABLE_CHECK_FREQUENCY_MILLIS)); } @Test @@ -933,8 +1162,14 @@ public class SchedulerTest { final int maxInitializationAttempts = 1; coordinatorConfig.maxInitializationAttempts(maxInitializationAttempts); coordinatorConfig.skipShardSyncAtWorkerInitializationIfLeasesExist(false); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); doNothing().when(leaseCoordinator).initialize(); when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(false); @@ -949,9 +1184,11 @@ public class SchedulerTest { @Test public final void testSchedulerShutdown() { scheduler.shutdown(); - verify(workerStateChangeListener, times(1)).onWorkerStateChange(WorkerStateChangeListener.WorkerState.SHUT_DOWN_STARTED); + verify(workerStateChangeListener, times(1)) + .onWorkerStateChange(WorkerStateChangeListener.WorkerState.SHUT_DOWN_STARTED); 
verify(leaseCoordinator, times(1)).stop(); - verify(workerStateChangeListener, times(1)).onWorkerStateChange(WorkerStateChangeListener.WorkerState.SHUT_DOWN); + verify(workerStateChangeListener, times(1)) + .onWorkerStateChange(WorkerStateChangeListener.WorkerState.SHUT_DOWN); } @Test @@ -963,8 +1200,15 @@ public class SchedulerTest { when(eventFactory.rejectedTaskEvent(any(), any())).thenReturn(rejectedTaskEvent); when(eventFactory.executorStateEvent(any(), any())).thenReturn(executorStateEvent); - Scheduler testScheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, - lifecycleConfig, metricsConfig, processorConfig, retrievalConfig, eventFactory); + Scheduler testScheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig, + eventFactory); Scheduler schedulerSpy = spy(testScheduler); @@ -973,9 +1217,11 @@ public class SchedulerTest { .doCallRealMethod() .doAnswer(invocation -> { // trigger rejected task in RxJava layer - RxJavaPlugins.onError(new RejectedExecutionException("Test exception.")); - return null; - }).when(schedulerSpy).runProcessLoop(); + RxJavaPlugins.onError(new RejectedExecutionException("Test exception.")); + return null; + }) + .when(schedulerSpy) + .runProcessLoop(); // Scheduler sets error handler in initialize method schedulerSpy.initialize(); @@ -1000,14 +1246,16 @@ public class SchedulerTest { when(multiStreamTracker.streamConfigList()).thenReturn(Collections.emptyList()); prepareMultiStreamScheduler(streamConfigList); // Populate currentStreamConfigMap to simulate that the leader has the latest streams. 
- multiStreamTracker.streamConfigList().forEach(s -> scheduler.currentStreamConfigMap().put(s.streamIdentifier(), s)); + multiStreamTracker + .streamConfigList() + .forEach(s -> scheduler.currentStreamConfigMap().put(s.streamIdentifier(), s)); scheduler.runProcessLoop(); verify(scheduler).syncStreamsFromLeaseTableOnAppInit(any()); assertTrue(scheduler.currentStreamConfigMap().size() != 0); } @Test - public void testNotRefreshForNewStreamAfterLeaderFlippedTheShouldInitialize(){ + public void testNotRefreshForNewStreamAfterLeaderFlippedTheShouldInitialize() { prepareMultiStreamScheduler(createDummyStreamConfigList(1, 6)); // flip the shouldInitialize flag scheduler.runProcessLoop(); @@ -1020,8 +1268,11 @@ public class SchedulerTest { // Since the sync path has been executed once before the DDB sync flags should be flipped // to prevent doing DDB lookups in the subsequent runs. verify(scheduler, times(1)).syncStreamsFromLeaseTableOnAppInit(any()); - assertEquals(0, streamConfigList.stream() - .filter(s -> !scheduler.currentStreamConfigMap().containsKey(s.streamIdentifier())).count()); + assertEquals( + 0, + streamConfigList.stream() + .filter(s -> !scheduler.currentStreamConfigMap().containsKey(s.streamIdentifier())) + .count()); } @Test @@ -1041,10 +1292,13 @@ public class SchedulerTest { final List streamConfigList = createDummyStreamConfigList(1, 6); mockListLeases(streamConfigList); streamConfigList.forEach(s -> scheduler.currentStreamConfigMap().put(s.streamIdentifier(), s)); - final Set initialSet = new HashSet<>(scheduler.currentStreamConfigMap().keySet()); + final Set initialSet = + new HashSet<>(scheduler.currentStreamConfigMap().keySet()); scheduler.checkAndSyncStreamShardsAndLeases(); assertEquals(initialSet, scheduler.currentStreamConfigMap().keySet()); - assertEquals(streamConfigList.size(), scheduler.currentStreamConfigMap().keySet().size()); + assertEquals( + streamConfigList.size(), + scheduler.currentStreamConfigMap().keySet().size()); } @Test @@ 
-1053,30 +1307,40 @@ public class SchedulerTest { when(multiStreamTracker.streamConfigList()).thenReturn(streamConfigList); prepareMultiStreamScheduler(); streamConfigList.forEach(s -> scheduler.currentStreamConfigMap().put(s.streamIdentifier(), s)); - final Set initialSet = new HashSet<>(scheduler.currentStreamConfigMap().keySet()); + final Set initialSet = + new HashSet<>(scheduler.currentStreamConfigMap().keySet()); scheduler.checkAndSyncStreamShardsAndLeases(); assertEquals(initialSet, scheduler.currentStreamConfigMap().keySet()); - assertEquals(streamConfigList.size(), scheduler.currentStreamConfigMap().keySet().size()); + assertEquals( + streamConfigList.size(), + scheduler.currentStreamConfigMap().keySet().size()); } @SafeVarargs private final void prepareMultiStreamScheduler(List... streamConfigs) { retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - scheduler = spy(new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig)); + scheduler = spy(new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); stubMultiStreamTracker(streamConfigs); when(scheduler.shouldSyncStreamsNow()).thenReturn(true); } @SafeVarargs private final void prepareForStaleDeletedStreamCleanupTests(List... 
streamConfigs) { - when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()).thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { - @Override - public Duration waitPeriodToDeleteFormerStreams() { - return Duration.ofDays(1); - } - }); + when(multiStreamTracker.formerStreamsLeasesDeletionStrategy()) + .thenReturn(new AutoDetectionAndDeferredDeletionStrategy() { + @Override + public Duration waitPeriodToDeleteFormerStreams() { + return Duration.ofDays(1); + } + }); stubMultiStreamTracker(streamConfigs); prepareMultiStreamScheduler(); } @@ -1091,34 +1355,48 @@ public class SchedulerTest { } } - private void mockListLeases(List configs) throws ProvisionedThroughputException, InvalidStateException, DependencyException { - when(dynamoDBLeaseRefresher.listLeases()).thenReturn(configs.stream() - .map(s -> new MultiStreamLease().streamIdentifier(s.streamIdentifier().toString()) - .shardId("some_random_shard_id")).collect(Collectors.toList())); + private void mockListLeases(List configs) + throws ProvisionedThroughputException, InvalidStateException, DependencyException { + when(dynamoDBLeaseRefresher.listLeases()) + .thenReturn(configs.stream() + .map(s -> new MultiStreamLease() + .streamIdentifier(s.streamIdentifier().toString()) + .shardId("some_random_shard_id")) + .collect(Collectors.toList())); } @Test public void testStreamConfigsArePopulatedWithStreamArnsInMultiStreamMode() { final String streamArnStr = constructStreamArnStr(TEST_REGION, 111122223333L, "some-stream-name"); - when(multiStreamTracker.streamConfigList()).thenReturn(Stream.of( - // Each of scheduler's currentStreamConfigMap entries should have a streamARN in - // multi-stream mode, regardless of whether the streamTracker-provided streamIdentifiers - // were created using serialization or stream ARN. 
- StreamIdentifier.multiStreamInstance(constructStreamIdentifierSer(TEST_ACCOUNT, streamName)), - StreamIdentifier.multiStreamInstance(Arn.fromString(streamArnStr), TEST_EPOCH) - ) - .map(streamIdentifier -> new StreamConfig(streamIdentifier, TEST_INITIAL_POSITION)) - .collect(Collectors.toList())); + when(multiStreamTracker.streamConfigList()) + .thenReturn(Stream.of( + // Each of scheduler's currentStreamConfigMap entries should have a streamARN in + // multi-stream mode, regardless of whether the streamTracker-provided streamIdentifiers + // were created using serialization or stream ARN. + StreamIdentifier.multiStreamInstance( + constructStreamIdentifierSer(TEST_ACCOUNT, streamName)), + StreamIdentifier.multiStreamInstance(Arn.fromString(streamArnStr), TEST_EPOCH)) + .map(streamIdentifier -> new StreamConfig(streamIdentifier, TEST_INITIAL_POSITION)) + .collect(Collectors.toList())); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); final Set expectedStreamArns = Sets.newHashSet(constructStreamArnStr(TEST_REGION, TEST_ACCOUNT, streamName), streamArnStr); final Set actualStreamArns = scheduler.currentStreamConfigMap().values().stream() - .map(sc -> sc.streamIdentifier().streamArnOptional().orElseThrow(IllegalStateException::new).toString()) + .map(sc -> sc.streamIdentifier() + .streamArnOptional() + .orElseThrow(IllegalStateException::new) + .toString()) .collect(Collectors.toSet()); assertEquals(expectedStreamArns, actualStreamArns); @@ -1132,41 +1410,62 @@ public class SchedulerTest { .collect(Collectors.toSet()) .contains(streamIdentifierSerializationForOrphan)); 
- when(leaseCoordinator.getCurrentAssignments()).thenReturn(Collections.singletonList( - new ShardInfo(TEST_SHARD_ID, null, null, null, streamIdentifierSerializationForOrphan))); + when(leaseCoordinator.getCurrentAssignments()) + .thenReturn(Collections.singletonList( + new ShardInfo(TEST_SHARD_ID, null, null, null, streamIdentifierSerializationForOrphan))); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - scheduler = new Scheduler(checkpointConfig, coordinatorConfig, leaseManagementConfig, lifecycleConfig, - metricsConfig, processorConfig, retrievalConfig); + scheduler = new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig); scheduler.runProcessLoop(); - verify(multiStreamTracker).createStreamConfig( - StreamIdentifier.multiStreamInstance(streamIdentifierSerializationForOrphan)); + verify(multiStreamTracker) + .createStreamConfig(StreamIdentifier.multiStreamInstance(streamIdentifierSerializationForOrphan)); final ArgumentCaptor streamConfigArgumentCaptor = ArgumentCaptor.forClass(StreamConfig.class); verify(retrievalFactory).createGetRecordsCache(any(), streamConfigArgumentCaptor.capture(), any()); final StreamConfig actualStreamConfigForOrphan = streamConfigArgumentCaptor.getValue(); - final Optional streamArnForOrphan = actualStreamConfigForOrphan.streamIdentifier().streamArnOptional(); + final Optional streamArnForOrphan = + actualStreamConfigForOrphan.streamIdentifier().streamArnOptional(); assertTrue(streamArnForOrphan.isPresent()); - assertEquals(constructStreamArnStr(TEST_REGION, TEST_ACCOUNT, streamName), streamArnForOrphan.get().toString()); + assertEquals( + constructStreamArnStr(TEST_REGION, TEST_ACCOUNT, streamName), + streamArnForOrphan.get().toString()); } @Test public void testMismatchingArnRegionAndKinesisClientRegionThrowsException() { final Region streamArnRegion = 
Region.US_WEST_1; - Assert.assertNotEquals(streamArnRegion, kinesisClient.serviceClientConfiguration().region()); + Assert.assertNotEquals( + streamArnRegion, kinesisClient.serviceClientConfiguration().region()); - when(multiStreamTracker.streamConfigList()).thenReturn(Collections.singletonList(new StreamConfig( - StreamIdentifier.multiStreamInstance( - Arn.fromString(constructStreamArnStr(streamArnRegion, TEST_ACCOUNT, streamName)), TEST_EPOCH), - TEST_INITIAL_POSITION))); + when(multiStreamTracker.streamConfigList()) + .thenReturn(Collections.singletonList(new StreamConfig( + StreamIdentifier.multiStreamInstance( + Arn.fromString(constructStreamArnStr(streamArnRegion, TEST_ACCOUNT, streamName)), + TEST_EPOCH), + TEST_INITIAL_POSITION))); retrievalConfig = new RetrievalConfig(kinesisClient, multiStreamTracker, applicationName) .retrievalFactory(retrievalFactory); - assertThrows(IllegalArgumentException.class, () -> new Scheduler(checkpointConfig, coordinatorConfig, - leaseManagementConfig, lifecycleConfig, metricsConfig, processorConfig, retrievalConfig)); + assertThrows( + IllegalArgumentException.class, + () -> new Scheduler( + checkpointConfig, + coordinatorConfig, + leaseManagementConfig, + lifecycleConfig, + metricsConfig, + processorConfig, + retrievalConfig)); } private static String constructStreamIdentifierSer(long accountId, String streamName) { @@ -1350,9 +1649,7 @@ public class SchedulerTest { } @Override - public void leaseLost(LeaseLostInput leaseLostInput) { - - } + public void leaseLost(LeaseLostInput leaseLostInput) {} @Override public void shardEnded(ShardEndedInput shardEndedInput) { @@ -1364,9 +1661,7 @@ public class SchedulerTest { } @Override - public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) { - - } + public void shutdownRequested(ShutdownRequestedInput shutdownRequestedInput) {} }; } @@ -1374,7 +1669,6 @@ public class SchedulerTest { public ShardRecordProcessor shardRecordProcessor(StreamIdentifier 
streamIdentifier) { return shardRecordProcessor(); } - } @RequiredArgsConstructor @@ -1394,8 +1688,10 @@ public class SchedulerTest { } @Override - public ShardSyncTaskManager createShardSyncTaskManager(MetricsFactory metricsFactory, - StreamConfig streamConfig, DeletedStreamListProvider deletedStreamListProvider) { + public ShardSyncTaskManager createShardSyncTaskManager( + MetricsFactory metricsFactory, + StreamConfig streamConfig, + DeletedStreamListProvider deletedStreamListProvider) { if (shouldReturnDefaultShardSyncTaskmanager) { return shardSyncTaskManager; } @@ -1439,8 +1735,8 @@ public class SchedulerTest { private class TestKinesisCheckpointFactory implements CheckpointFactory { @Override - public Checkpointer createCheckpointer(final LeaseCoordinator leaseCoordinator, - final LeaseRefresher leaseRefresher) { + public Checkpointer createCheckpointer( + final LeaseCoordinator leaseCoordinator, final LeaseRefresher leaseRefresher) { return checkpoint; } } @@ -1462,7 +1758,7 @@ public class SchedulerTest { } @Override - public FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy(){ + public FormerStreamsLeasesDeletionStrategy formerStreamsLeasesDeletionStrategy() { return new AutoDetectionAndDeferredDeletionStrategy() { @Override public Duration waitPeriodToDeleteFormerStreams() { @@ -1471,5 +1767,4 @@ public class SchedulerTest { }; } } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/WorkerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/WorkerTest.java index ec076e8d..a6ffcf0c 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/WorkerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/coordinator/WorkerTest.java @@ -116,9 +116,11 @@ public class WorkerTest { private static final IRecordProcessorFactory SAMPLE_RECORD_PROCESSOR_FACTORY_V2 = SAMPLE_RECORD_PROCESSOR_FACTORY; - *//* + */ + /* * Test method for 
{@link Worker#getApplicationName()}. - *//* + */ + /* @Test public final void testGetStageName() { final String stageName = "testStageName"; @@ -343,10 +345,12 @@ public class WorkerTest { Assert.assertTrue(count > 0); } - *//* + */ + /* * Runs worker with threadPoolSize == numShards * Test method for {@link Worker#run()}. - *//* + */ + /* @Test public final void testRunWithThreadPoolSizeEqualToNumShards() throws Exception { final int numShards = 1; @@ -354,10 +358,12 @@ public class WorkerTest { runAndTestWorker(numShards, threadPoolSize); } - *//* + */ + /* * Runs worker with threadPoolSize < numShards * Test method for {@link Worker#run()}. - *//* + */ + /* @Test public final void testRunWithThreadPoolSizeLessThanNumShards() throws Exception { final int numShards = 3; @@ -365,10 +371,12 @@ public class WorkerTest { runAndTestWorker(numShards, threadPoolSize); } - *//* + */ + /* * Runs worker with threadPoolSize > numShards * Test method for {@link Worker#run()}. - *//* + */ + /* @Test public final void testRunWithThreadPoolSizeMoreThanNumShards() throws Exception { final int numShards = 3; @@ -376,10 +384,12 @@ public class WorkerTest { runAndTestWorker(numShards, threadPoolSize); } - *//* + */ + /* * Runs worker with threadPoolSize < numShards * Test method for {@link Worker#run()}. - *//* + */ + /* @Test public final void testOneSplitShard2Threads() throws Exception { final int threadPoolSize = 2; @@ -392,10 +402,12 @@ public class WorkerTest { runAndTestWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList, numberOfRecordsPerShard, config); } - *//* + */ + /* * Runs worker with threadPoolSize < numShards * Test method for {@link Worker#run()}. 
- *//* + */ + /* @Test public final void testOneSplitShard2ThreadsWithCallsForEmptyRecords() throws Exception { final int threadPoolSize = 2; @@ -554,13 +566,15 @@ public class WorkerTest { verify(v2RecordProcessor, times(1)).shutdown(any(ShutdownInput.class)); } - *//* + */ + /* * This test is testing the {@link Worker}'s shutdown behavior and by extension the behavior of * {@link ThreadPoolExecutor#shutdownNow()}. It depends on the thread pool sending an interrupt to the pool threads. * This behavior makes the test a bit racy, since we need to ensure a specific order of events. * * @throws Exception - *//* + */ + /* @Test public final void testWorkerForcefulShutdown() throws Exception { final List shardList = createShardListWithOneShard(); @@ -1732,12 +1746,14 @@ public class WorkerTest { } } - *//* + */ + /* * Returns executor service that will be owned by the worker. This is useful to test the scenario * where worker shuts down the executor service also during shutdown flow. * * @return Executor service that will be owned by the worker. - *//* + */ + /* private WorkerThreadPoolExecutor getWorkerThreadPoolExecutor() { ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("ShardRecordProcessor-%04d").build(); return new WorkerThreadPoolExecutor(threadFactory); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ExceptionThrowingLeaseRefresher.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ExceptionThrowingLeaseRefresher.java index 7f93216d..62272bbe 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ExceptionThrowingLeaseRefresher.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ExceptionThrowingLeaseRefresher.java @@ -27,7 +27,7 @@ import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; /** * Mock LeaseRefresher by randomly throwing Leasing Exceptions. 
- * + * */ @RequiredArgsConstructor @Slf4j @@ -70,12 +70,13 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { } // Define which method should throw exception and when it should throw exception. - private ExceptionThrowingLeaseRefresherMethods methodThrowingException = ExceptionThrowingLeaseRefresherMethods.NONE; + private ExceptionThrowingLeaseRefresherMethods methodThrowingException = + ExceptionThrowingLeaseRefresherMethods.NONE; private int timeThrowingException = Integer.MAX_VALUE; /** * Set parameters used for throwing exception. - * + * * @param method which would throw exception * @param throwingTime defines what time to throw exception */ @@ -97,7 +98,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { // 1). method equals to methodThrowingException // 2). method calling count equals to what we want private void throwExceptions(String methodName, ExceptionThrowingLeaseRefresherMethods method) - throws DependencyException { + throws DependencyException { // Increase calling count for this method leaseRefresherMethodCallingCount[method.index()]++; if (method.equals(methodThrowingException) @@ -111,17 +112,16 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean createLeaseTableIfNotExists(Long readCapacity, Long writeCapacity) throws ProvisionedThroughputException, DependencyException { - throwExceptions("createLeaseTableIfNotExists", - ExceptionThrowingLeaseRefresherMethods.CREATELEASETABLEIFNOTEXISTS); + throwExceptions( + "createLeaseTableIfNotExists", ExceptionThrowingLeaseRefresherMethods.CREATELEASETABLEIFNOTEXISTS); return leaseRefresher.createLeaseTableIfNotExists(readCapacity, writeCapacity); } @Override - public boolean createLeaseTableIfNotExists() - throws ProvisionedThroughputException, DependencyException { - throwExceptions("createLeaseTableIfNotExists", - ExceptionThrowingLeaseRefresherMethods.CREATELEASETABLEIFNOTEXISTS); + public boolean 
createLeaseTableIfNotExists() throws ProvisionedThroughputException, DependencyException { + throwExceptions( + "createLeaseTableIfNotExists", ExceptionThrowingLeaseRefresherMethods.CREATELEASETABLEIFNOTEXISTS); return leaseRefresher.createLeaseTableIfNotExists(); } @@ -149,8 +149,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { } @Override - public List listLeases() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + public List listLeases() throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("listLeases", ExceptionThrowingLeaseRefresherMethods.LISTLEASES); return leaseRefresher.listLeases(); @@ -158,7 +157,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean createLeaseIfNotExists(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("createLeaseIfNotExists", ExceptionThrowingLeaseRefresherMethods.CREATELEASEIFNOTEXISTS); return leaseRefresher.createLeaseIfNotExists(lease); @@ -166,7 +165,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean renewLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("renewLease", ExceptionThrowingLeaseRefresherMethods.RENEWLEASE); return leaseRefresher.renewLease(lease); @@ -174,7 +173,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean takeLease(Lease lease, String owner) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("takeLease", 
ExceptionThrowingLeaseRefresherMethods.TAKELEASE); return leaseRefresher.takeLease(lease, owner); @@ -182,7 +181,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean evictLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("evictLease", ExceptionThrowingLeaseRefresherMethods.EVICTLEASE); return leaseRefresher.evictLease(lease); @@ -190,7 +189,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public void deleteLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("deleteLease", ExceptionThrowingLeaseRefresherMethods.DELETELEASE); leaseRefresher.deleteLease(lease); @@ -198,7 +197,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public boolean updateLease(Lease lease) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("updateLease", ExceptionThrowingLeaseRefresherMethods.UPDATELEASE); return leaseRefresher.updateLease(lease); @@ -206,7 +205,7 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { @Override public Lease getLease(String leaseKey) - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { throwExceptions("getLease", ExceptionThrowingLeaseRefresherMethods.GETLEASE); return leaseRefresher.getLease(leaseKey); @@ -220,8 +219,8 @@ public class ExceptionThrowingLeaseRefresher implements LeaseRefresher { } @Override - public boolean isLeaseTableEmpty() throws 
DependencyException, - InvalidStateException, ProvisionedThroughputException { + public boolean isLeaseTableEmpty() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { return false; } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java index 6df34633..e22a9126 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/HierarchicalShardSyncerTest.java @@ -18,19 +18,6 @@ package software.amazon.kinesis.leases; // TODO: Fix the lack of DynamoDB Loca // -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.math.BigInteger; import java.util.ArrayList; import java.util.Arrays; @@ -56,7 +43,6 @@ import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.model.HashKeyRange; import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; import software.amazon.awssdk.services.kinesis.model.SequenceNumberRange; @@ -77,24 +63,37 @@ import software.amazon.kinesis.metrics.MetricsScope; import software.amazon.kinesis.metrics.NullMetricsScope; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static 
org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import static software.amazon.kinesis.leases.HierarchicalShardSyncer.MemoizationContext; import static software.amazon.kinesis.leases.HierarchicalShardSyncer.determineNewLeasesToCreate; @RunWith(MockitoJUnitRunner.class) public class HierarchicalShardSyncerTest { - private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.LATEST); - private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP = InitialPositionInStreamExtended - .newInitialPositionAtTimestamp(new Date(1000L)); + private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); + private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP = + InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(1000L)); private static final int EXPONENT = 128; private static final String LEASE_OWNER = "TestOwner"; private static final MetricsScope SCOPE = new NullMetricsScope(); private static final 
boolean MULTISTREAM_MODE_ON = true; private static final String STREAM_IDENTIFIER = "123456789012:stream:1"; - private static final HierarchicalShardSyncer.MultiStreamArgs MULTI_STREAM_ARGS = new HierarchicalShardSyncer.MultiStreamArgs( - MULTISTREAM_MODE_ON, StreamIdentifier.multiStreamInstance(STREAM_IDENTIFIER)); + private static final HierarchicalShardSyncer.MultiStreamArgs MULTI_STREAM_ARGS = + new HierarchicalShardSyncer.MultiStreamArgs( + MULTISTREAM_MODE_ON, StreamIdentifier.multiStreamInstance(STREAM_IDENTIFIER)); /** *

    @@ -141,10 +140,12 @@ public class HierarchicalShardSyncerTest {
         /**
          * Old/Obsolete max value of a sequence number (2^128 -1).
          */
    -    public static final BigInteger MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE);
    +    public static final BigInteger MAX_SEQUENCE_NUMBER =
    +            new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE);
     
         @Mock
         private ShardDetector shardDetector;
    +
         @Mock
         private DynamoDBLeaseRefresher dynamoDBLeaseRefresher;
     
    @@ -166,7 +167,8 @@ public class HierarchicalShardSyncerTest {
         public void testDetermineNewLeasesToCreateNoShards() {
             final List shards = Collections.emptyList();
             final List leases = Collections.emptyList();
    -        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer = new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
    +        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer =
    +                new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
             assertTrue(determineNewLeasesToCreate(emptyLeaseTableSynchronizer, shards, leases, INITIAL_POSITION_LATEST)
                     .isEmpty());
         }
    @@ -174,13 +176,21 @@ public class HierarchicalShardSyncerTest {
         /**
          * Test determineNewLeasesToCreate() where there are no shards for MultiStream
          */
    -    @Test public void testDetermineNewLeasesToCreateNoShardsForMultiStream() {
    +    @Test
    +    public void testDetermineNewLeasesToCreateNoShardsForMultiStream() {
             final List shards = Collections.emptyList();
             final List leases = Collections.emptyList();
    -        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer = new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
    +        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer =
    +                new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
     
    -        assertTrue(determineNewLeasesToCreate(emptyLeaseTableSynchronizer, shards, leases, INITIAL_POSITION_LATEST,
    -                Collections.emptySet(), MULTI_STREAM_ARGS).isEmpty());
    +        assertTrue(determineNewLeasesToCreate(
    +                        emptyLeaseTableSynchronizer,
    +                        shards,
    +                        leases,
    +                        INITIAL_POSITION_LATEST,
    +                        Collections.emptySet(),
    +                        MULTI_STREAM_ARGS)
    +                .isEmpty());
         }
     
         /**
    @@ -192,13 +202,15 @@ public class HierarchicalShardSyncerTest {
             final String shardId1 = "shardId-1";
             final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);
     
    -        final List shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
    +        final List shards = Arrays.asList(
    +                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
                     ShardObjectHelper.newShard(shardId1, null, null, sequenceRange));
             final List currentLeases = Collections.emptyList();
    -        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer = new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
    +        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer =
    +                new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
     
    -        final List newLeases = determineNewLeasesToCreate(emptyLeaseTableSynchronizer,
    -                shards, currentLeases, INITIAL_POSITION_LATEST);
    +        final List newLeases =
    +                determineNewLeasesToCreate(emptyLeaseTableSynchronizer, shards, currentLeases, INITIAL_POSITION_LATEST);
             validateLeases(newLeases, shardId0, shardId1);
         }
     
    @@ -211,13 +223,20 @@ public class HierarchicalShardSyncerTest {
             final String shardId1 = "shardId-1";
             final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);
     
    -        final List<Shard> shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
    +        final List<Shard> shards = Arrays.asList(
    +                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
                     ShardObjectHelper.newShard(shardId1, null, null, sequenceRange));
             final List<Lease> currentLeases = Collections.emptyList();
    -        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer = new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
    +        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer =
    +                new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
     
    -        final List<Lease> newLeases = determineNewLeasesToCreate(emptyLeaseTableSynchronizer,
    -                shards, currentLeases, INITIAL_POSITION_LATEST, new HashSet<>(), MULTI_STREAM_ARGS);
    +        final List<Lease> newLeases = determineNewLeasesToCreate(
    +                emptyLeaseTableSynchronizer,
    +                shards,
    +                currentLeases,
    +                INITIAL_POSITION_LATEST,
    +                new HashSet<>(),
    +                MULTI_STREAM_ARGS);
             validateLeases(newLeases, toMultiStreamLeases(shardId0, shardId1));
         }
     
    @@ -233,22 +252,29 @@ public class HierarchicalShardSyncerTest {
             final String shardId3 = "shardId-3";
             final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);
     
    -        final List<Shard> shardsWithLeases = Arrays.asList(ShardObjectHelper.newShard(shardId3, null, null, sequenceRange));
    -        final List<Shard> shardsWithoutLeases = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
    +        final List<Shard> shardsWithLeases =
    +                Arrays.asList(ShardObjectHelper.newShard(shardId3, null, null, sequenceRange));
    +        final List<Shard> shardsWithoutLeases = Arrays.asList(
    +                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
                     ShardObjectHelper.newShard(shardId1, null, null, sequenceRange),
                     ShardObjectHelper.newShard(shardId2, shardId1, null, sequenceRange));
     
    -        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases).flatMap(x -> x.stream()).collect(Collectors.toList());
    -        final List<Lease> currentLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo");
    +        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases)
    +                .flatMap(x -> x.stream())
    +                .collect(Collectors.toList());
    +        final List<Lease> currentLeases =
    +                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo");
             final Set<String> inconsistentShardIds = new HashSet<>(Collections.singletonList(shardId2));
     
             Map<String, Shard> shardIdToShardMap = HierarchicalShardSyncer.constructShardIdToShardMap(shards);
    -        Map<String, Set<String>> shardIdToChildShardIdsMap = HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
    +        Map<String, Set<String>> shardIdToChildShardIdsMap =
    +                HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
             final HierarchicalShardSyncer.LeaseSynchronizer leaseSynchronizer =
    -                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
    +                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(
    +                        shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
     
    -        final List<Lease> newLeases = determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases,
    -                INITIAL_POSITION_LATEST, inconsistentShardIds);
    +        final List<Lease> newLeases = determineNewLeasesToCreate(
    +                leaseSynchronizer, shards, currentLeases, INITIAL_POSITION_LATEST, inconsistentShardIds);
             validateLeases(newLeases, shardId0, shardId1);
         }
     
    @@ -264,31 +290,44 @@ public class HierarchicalShardSyncerTest {
             final String shardId3 = "shardId-3";
             final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);
     
    -        final List<Shard> shardsWithLeases = Arrays.asList(ShardObjectHelper.newShard(shardId3, null, null, sequenceRange));
    -        final List<Shard> shardsWithoutLeases = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
    +        final List<Shard> shardsWithLeases =
    +                Arrays.asList(ShardObjectHelper.newShard(shardId3, null, null, sequenceRange));
    +        final List<Shard> shardsWithoutLeases = Arrays.asList(
    +                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
                     ShardObjectHelper.newShard(shardId1, null, null, sequenceRange),
                     ShardObjectHelper.newShard(shardId2, shardId1, null, sequenceRange));
     
    -        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases).flatMap(x -> x.stream()).collect(Collectors.toList());
    -        final List<Lease> currentLeases = new ArrayList<>(createMultiStreamLeasesFromShards(shardsWithLeases,
    -                ExtendedSequenceNumber.LATEST, "foo"));
    +        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases)
    +                .flatMap(x -> x.stream())
    +                .collect(Collectors.toList());
    +        final List<Lease> currentLeases = new ArrayList<>(
    +                createMultiStreamLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo"));
             final Set<String> inconsistentShardIds = new HashSet<>(Collections.singletonList(shardId2));
     
             Map<String, Shard> shardIdToShardMap = HierarchicalShardSyncer.constructShardIdToShardMap(shards);
    -        Map<String, Set<String>> shardIdToChildShardIdsMap = HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
    +        Map<String, Set<String>> shardIdToChildShardIdsMap =
    +                HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
             final HierarchicalShardSyncer.LeaseSynchronizer leaseSynchronizer =
    -                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
    +                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(
    +                        shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
     
    -        final List<Lease> newLeases = determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases,
    -                INITIAL_POSITION_LATEST, inconsistentShardIds, MULTI_STREAM_ARGS);
    +        final List<Lease> newLeases = determineNewLeasesToCreate(
    +                leaseSynchronizer,
    +                shards,
    +                currentLeases,
    +                INITIAL_POSITION_LATEST,
    +                inconsistentShardIds,
    +                MULTI_STREAM_ARGS);
             validateLeases(newLeases, toMultiStreamLeases(shardId0, shardId1));
         }
     
         private static void validateHashRangeInLease(List<Lease> leases) {
             final Consumer<Lease> leaseValidation = lease -> {
                 Validate.notNull(lease.hashKeyRangeForLease());
    -            Validate.isTrue(lease.hashKeyRangeForLease().startingHashKey()
    -                    .compareTo(lease.hashKeyRangeForLease().endingHashKey()) < 0);
    +            Validate.isTrue(lease.hashKeyRangeForLease()
    +                            .startingHashKey()
    +                            .compareTo(lease.hashKeyRangeForLease().endingHashKey())
    +                    < 0);
             };
             leases.forEach(leaseValidation);
         }
    @@ -321,23 +360,27 @@ public class HierarchicalShardSyncerTest {
         }
     
         private void testLeaseCreation(
    -            final List<Shard> shards,
    -            final boolean ignoreUnexpectedChildShards,
    -            final String... expectedLeaseKeys)
    +            final List<Shard> shards, final boolean ignoreUnexpectedChildShards, final String... expectedLeaseKeys)
                 throws Exception {
             final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
     
    -        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException()).thenReturn(shards);
    +        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
    +                .thenReturn(shards);
             when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList());
    -        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
    +        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture()))
    +                .thenReturn(true);
     
    -        hierarchicalShardSyncer
    -                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
    -                        SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                shardDetector,
    +                dynamoDBLeaseRefresher,
    +                INITIAL_POSITION_LATEST,
    +                SCOPE,
    +                ignoreUnexpectedChildShards,
    +                dynamoDBLeaseRefresher.isLeaseTableEmpty());
     
             final List<Lease> requestLeases = leaseCaptor.getAllValues();
    -        final Set<ExtendedSequenceNumber> extendedSequenceNumbers = requestLeases.stream().map(Lease::checkpoint)
    -                .collect(Collectors.toSet());
    +        final Set<ExtendedSequenceNumber> extendedSequenceNumbers =
    +                requestLeases.stream().map(Lease::checkpoint).collect(Collectors.toSet());
     
             validateLeases(requestLeases, expectedLeaseKeys);
             assertEquals(1, extendedSequenceNumbers.size());
    @@ -355,15 +398,14 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testCheckAndCreateLeasesForShardsIfMissingAtLatest() throws Exception {
    -        testLeaseCreation(SHARD_GRAPH_A, false,
    -                "shardId-4", "shardId-8", "shardId-9", "shardId-10");
    +        testLeaseCreation(SHARD_GRAPH_A, false, "shardId-4", "shardId-8", "shardId-9", "shardId-10");
         }
     
         @Test
         public void testCheckAndCreateLeasesForShardsIfMissingAtLatestMultiStream() throws Exception {
             setupMultiStream();
    -        testLeaseCreation(SHARD_GRAPH_A, false,
    -                toMultiStreamLeases("shardId-4", "shardId-8", "shardId-9", "shardId-10"));
    +        testLeaseCreation(
    +                SHARD_GRAPH_A, false, toMultiStreamLeases("shardId-4", "shardId-8", "shardId-9", "shardId-10"));
         }
     
         /**
    @@ -383,16 +425,21 @@ public class HierarchicalShardSyncerTest {
     
         private void testCheckAndCreateLeasesForShardsWithShardList(final String... expectedLeaseKeys) throws Exception {
             final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
    -        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
    +        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture()))
    +                .thenReturn(true);
     
    -        hierarchicalShardSyncer
    -                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
    -                        SHARD_GRAPH_A, false, SCOPE,
    -                        dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                shardDetector,
    +                dynamoDBLeaseRefresher,
    +                INITIAL_POSITION_LATEST,
    +                SHARD_GRAPH_A,
    +                false,
    +                SCOPE,
    +                dynamoDBLeaseRefresher.isLeaseTableEmpty());
     
             final List<Lease> requestLeases = leaseCaptor.getAllValues();
    -        final Set<ExtendedSequenceNumber> extendedSequenceNumbers = requestLeases.stream().map(Lease::checkpoint)
    -                                                                                 .collect(Collectors.toSet());
    +        final Set<ExtendedSequenceNumber> extendedSequenceNumbers =
    +                requestLeases.stream().map(Lease::checkpoint).collect(Collectors.toSet());
     
             validateLeases(requestLeases, expectedLeaseKeys);
             assertEquals(1, extendedSequenceNumbers.size());
    @@ -434,16 +481,21 @@ public class HierarchicalShardSyncerTest {
             final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
             when(shardDetector.listShards()).thenReturn(SHARD_GRAPH_A);
             when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList());
    -        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
    +        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture()))
    +                .thenReturn(true);
     
    -        hierarchicalShardSyncer
    -                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST,
    -                        new ArrayList<>(), false, SCOPE,
    -                        dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                shardDetector,
    +                dynamoDBLeaseRefresher,
    +                INITIAL_POSITION_LATEST,
    +                new ArrayList<>(),
    +                false,
    +                SCOPE,
    +                dynamoDBLeaseRefresher.isLeaseTableEmpty());
     
             final List<Lease> requestLeases = leaseCaptor.getAllValues();
    -        final Set<ExtendedSequenceNumber> extendedSequenceNumbers = requestLeases.stream().map(Lease::checkpoint)
    -                                                                                 .collect(Collectors.toSet());
    +        final Set<ExtendedSequenceNumber> extendedSequenceNumbers =
    +                requestLeases.stream().map(Lease::checkpoint).collect(Collectors.toSet());
             validateLeases(requestLeases);
             assertEquals(0, extendedSequenceNumbers.size());
     
    @@ -467,10 +519,10 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonWithEmptyLeaseTable() throws Exception {
    -        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-0", "shardId-1", "shardId-2",
    -                "shardId-3", "shardId-4", "shardId-5"));
    -        testCheckAndCreateLeaseForShardsIfMissing(SHARD_GRAPH_A, INITIAL_POSITION_TRIM_HORIZON,
    -                expectedLeaseKeysToCreate);
    +        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(
    +                Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5"));
    +        testCheckAndCreateLeaseForShardsIfMissing(
    +                SHARD_GRAPH_A, INITIAL_POSITION_TRIM_HORIZON, expectedLeaseKeysToCreate);
         }
     
         /**
    @@ -487,10 +539,10 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testCheckAndCreateLeasesForNewShardsAtTimestampWithEmptyLeaseTable1() throws Exception {
    -        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-8", "shardId-4", "shardId-9",
    -                "shardId-10"));
    -        testCheckAndCreateLeaseForShardsIfMissing(SHARD_GRAPH_A, INITIAL_POSITION_AT_TIMESTAMP,
    -                expectedLeaseKeysToCreate);
    +        final Set<String> expectedLeaseKeysToCreate =
    +                new HashSet<>(Arrays.asList("shardId-8", "shardId-4", "shardId-9", "shardId-10"));
    +        testCheckAndCreateLeaseForShardsIfMissing(
    +                SHARD_GRAPH_A, INITIAL_POSITION_AT_TIMESTAMP, expectedLeaseKeysToCreate);
         }
     
         /**
    @@ -507,10 +559,10 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testCheckAndCreateLeasesForNewShardsAtTimestampWithEmptyLeaseTable2() throws Exception {
    -        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-6", "shardId-7", "shardId-4",
    -                "shardId-5"));
    -        final InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended
    -                .newInitialPositionAtTimestamp(new Date(200L));
    +        final Set<String> expectedLeaseKeysToCreate =
    +                new HashSet<>(Arrays.asList("shardId-6", "shardId-7", "shardId-4", "shardId-5"));
    +        final InitialPositionInStreamExtended initialPosition =
    +                InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(200L));
             testCheckAndCreateLeaseForShardsIfMissing(SHARD_GRAPH_A, initialPosition, expectedLeaseKeysToCreate);
         }
     
    @@ -528,8 +580,8 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testCheckAndCreateLeasesForNewShardsAtLatestWithEmptyLeaseTable() throws Exception {
    -        final Set<String> expectedLeaseKeysToCreate = new HashSet<>(Arrays.asList("shardId-8", "shardId-4", "shardId-9",
    -                "shardId-10"));
    +        final Set<String> expectedLeaseKeysToCreate =
    +                new HashSet<>(Arrays.asList("shardId-8", "shardId-4", "shardId-9", "shardId-10"));
             testCheckAndCreateLeaseForShardsIfMissing(SHARD_GRAPH_A, INITIAL_POSITION_LATEST, expectedLeaseKeysToCreate);
         }
     
    @@ -553,11 +605,14 @@ public class HierarchicalShardSyncerTest {
             // lease for shard-0 when reading from TRIM_HORIZON.
             final Set<String> missingLeaseKeys = new HashSet<>(Arrays.asList("shardId-0", "shardId-6", "shardId-8"));
             final List<Shard> shardsWithLeases = shards.stream()
    -                .filter(s -> !missingLeaseKeys.contains(s.shardId())).collect(Collectors.toList());
    -        final List<Lease> existingLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.TRIM_HORIZON, LEASE_OWNER);
    +                .filter(s -> !missingLeaseKeys.contains(s.shardId()))
    +                .collect(Collectors.toList());
    +        final List<Lease> existingLeases =
    +                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.TRIM_HORIZON, LEASE_OWNER);
     
             final Set<String> expectedLeaseKeysToCreate = Collections.singleton("shardId-0");
    -        testCheckAndCreateLeaseForShardsIfMissing(shards, INITIAL_POSITION_TRIM_HORIZON, expectedLeaseKeysToCreate, existingLeases);
    +        testCheckAndCreateLeaseForShardsIfMissing(
    +                shards, INITIAL_POSITION_TRIM_HORIZON, expectedLeaseKeysToCreate, existingLeases);
         }
     
         /**
    @@ -580,11 +635,14 @@ public class HierarchicalShardSyncerTest {
             // lease for shard-0 when reading from AT_TIMESTAMP.
             final Set<String> missingLeaseKeys = new HashSet<>(Arrays.asList("shardId-0", "shardId-6", "shardId-8"));
             final List<Shard> shardsWithLeases = shards.stream()
    -                .filter(s -> !missingLeaseKeys.contains(s.shardId())).collect(Collectors.toList());
    -        final List<Lease> existingLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.AT_TIMESTAMP, LEASE_OWNER);
    +                .filter(s -> !missingLeaseKeys.contains(s.shardId()))
    +                .collect(Collectors.toList());
    +        final List<Lease> existingLeases =
    +                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.AT_TIMESTAMP, LEASE_OWNER);
     
             final Set<String> expectedLeaseKeysToCreate = Collections.singleton("shardId-0");
    -        testCheckAndCreateLeaseForShardsIfMissing(shards, INITIAL_POSITION_AT_TIMESTAMP, expectedLeaseKeysToCreate, existingLeases);
    +        testCheckAndCreateLeaseForShardsIfMissing(
    +                shards, INITIAL_POSITION_AT_TIMESTAMP, expectedLeaseKeysToCreate, existingLeases);
         }
     
         /**
    @@ -603,14 +661,16 @@ public class HierarchicalShardSyncerTest {
         @Test
         public void testCheckAndCreateLeasesForNewShardsAtTimestampWithPartialLeaseTable2() throws Exception {
             final List<Shard> shards = SHARD_GRAPH_A;
    -        final InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended
    -                .newInitialPositionAtTimestamp(new Date(200L));
    +        final InitialPositionInStreamExtended initialPosition =
    +                InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(200L));
             // Leases for shard-0 and its descendants (shard-6, and shard-8) are missing. Expect lease sync to recover the
             // lease for shard-0 when reading from AT_TIMESTAMP.
             final Set<String> missingLeaseKeys = new HashSet<>(Arrays.asList("shardId-0", "shardId-6", "shardId-8"));
             final List<Shard> shardsWithLeases = shards.stream()
    -                .filter(s -> !missingLeaseKeys.contains(s.shardId())).collect(Collectors.toList());
    -        final List<Lease> existingLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.AT_TIMESTAMP, LEASE_OWNER);
    +                .filter(s -> !missingLeaseKeys.contains(s.shardId()))
    +                .collect(Collectors.toList());
    +        final List<Lease> existingLeases =
    +                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.AT_TIMESTAMP, LEASE_OWNER);
     
             final Set<String> expectedLeaseKeysToCreate = Collections.singleton("shardId-0");
             testCheckAndCreateLeaseForShardsIfMissing(shards, initialPosition, expectedLeaseKeysToCreate, existingLeases);
    @@ -636,27 +696,37 @@ public class HierarchicalShardSyncerTest {
             // lease for shard-0 when reading from LATEST.
             final Set<String> missingLeaseKeys = new HashSet<>(Arrays.asList("shardId-0", "shardId-6", "shardId-8"));
             final List<Shard> shardsWithLeases = shards.stream()
    -                .filter(s -> !missingLeaseKeys.contains(s.shardId())).collect(Collectors.toList());
    -        final List<Lease> existingLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, LEASE_OWNER);
    +                .filter(s -> !missingLeaseKeys.contains(s.shardId()))
    +                .collect(Collectors.toList());
    +        final List<Lease> existingLeases =
    +                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, LEASE_OWNER);
     
             final Set<String> expectedLeaseKeysToCreate = Collections.singleton("shardId-0");
    -        testCheckAndCreateLeaseForShardsIfMissing(shards, INITIAL_POSITION_LATEST, expectedLeaseKeysToCreate, existingLeases);
    +        testCheckAndCreateLeaseForShardsIfMissing(
    +                shards, INITIAL_POSITION_LATEST, expectedLeaseKeysToCreate, existingLeases);
         }
     
         @Test(expected = KinesisClientLibIOException.class)
         public void testCheckAndCreateLeasesForNewShardsWhenParentIsOpen() throws Exception {
             final List<Shard> shards = new ArrayList<>(SHARD_GRAPH_A);
    -        final SequenceNumberRange range = shards.get(0).sequenceNumberRange().toBuilder().endingSequenceNumber(null)
    +        final SequenceNumberRange range = shards.get(0).sequenceNumberRange().toBuilder()
    +                .endingSequenceNumber(null)
                     .build();
             final Shard shard = shards.get(3).toBuilder().sequenceNumberRange(range).build();
             shards.remove(3);
             shards.add(3, shard);
     
    -        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException()).thenReturn(shards);
    +        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
    +                .thenReturn(shards);
     
             try {
    -            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher,
    -                    INITIAL_POSITION_TRIM_HORIZON, SCOPE, false, dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                    shardDetector,
    +                    dynamoDBLeaseRefresher,
    +                    INITIAL_POSITION_TRIM_HORIZON,
    +                    SCOPE,
    +                    false,
    +                    dynamoDBLeaseRefresher.isLeaseTableEmpty());
             } finally {
                 verify(shardDetector).listShardsWithoutConsumingResourceNotFoundException();
                 verify(dynamoDBLeaseRefresher, never()).listLeases();
    @@ -666,17 +736,23 @@ public class HierarchicalShardSyncerTest {
         @Test(expected = KinesisClientLibIOException.class)
         public void testCheckAndCreateLeasesForNewShardsWhenParentIsOpenForMultiStream() throws Exception {
             final List<Shard> shards = new ArrayList<>(SHARD_GRAPH_A);
    -        final SequenceNumberRange range = shards.get(0).sequenceNumberRange().toBuilder().endingSequenceNumber(null)
    +        final SequenceNumberRange range = shards.get(0).sequenceNumberRange().toBuilder()
    +                .endingSequenceNumber(null)
                     .build();
             final Shard shard = shards.get(3).toBuilder().sequenceNumberRange(range).build();
             shards.remove(3);
             shards.add(3, shard);
     
    -        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException()).thenReturn(shards);
    +        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
    +                .thenReturn(shards);
             setupMultiStream();
             try {
    -            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher,
    -                    INITIAL_POSITION_TRIM_HORIZON, SCOPE, false,
    +            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                    shardDetector,
    +                    dynamoDBLeaseRefresher,
    +                    INITIAL_POSITION_TRIM_HORIZON,
    +                    SCOPE,
    +                    false,
                         dynamoDBLeaseRefresher.isLeaseTableEmpty());
             } finally {
                 verify(shardDetector).listShardsWithoutConsumingResourceNotFoundException();
    @@ -696,9 +772,12 @@ public class HierarchicalShardSyncerTest {
             // is not closed, those children should be ignored when syncing shards, no leases
             // should be obtained for them, and we should obtain a lease on the still-open
             // parent.
    -        shards.add(5,
    +        shards.add(
    +                5,
                     shard.toBuilder()
    -                        .sequenceNumberRange(shard.sequenceNumberRange().toBuilder().endingSequenceNumber(null).build())
    +                        .sequenceNumberRange(shard.sequenceNumberRange().toBuilder()
    +                                .endingSequenceNumber(null)
    +                                .build())
                             .build());
     
             testLeaseCreation(shards, true, expectedLeaseKeys);
    @@ -714,7 +793,8 @@ public class HierarchicalShardSyncerTest {
         }
     
         @Test
    -    public void testCheckAndCreateLeasesForNewShardsWhenParentIsOpenAndIgnoringInconsistentChildrenMultiStream() throws Exception {
    +    public void testCheckAndCreateLeasesForNewShardsWhenParentIsOpenAndIgnoringInconsistentChildrenMultiStream()
    +            throws Exception {
             setupMultiStream();
             testCheckAndCreateLeasesForNewShardsWhenParentIsOpenAndIgnoringInconsistentChildren(
                     toMultiStreamLeases("shardId-4", "shardId-5", "shardId-8"));
    @@ -722,41 +802,55 @@ public class HierarchicalShardSyncerTest {
     
         @Test
         public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShard() throws Exception {
    -        testCheckAndCreateLeasesForNewShardsAndClosedShard(ExtendedSequenceNumber.TRIM_HORIZON,
    -                INITIAL_POSITION_TRIM_HORIZON);
    +        testCheckAndCreateLeasesForNewShardsAndClosedShard(
    +                ExtendedSequenceNumber.TRIM_HORIZON, INITIAL_POSITION_TRIM_HORIZON);
         }
     
         @Test
         public void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShard() throws Exception {
    -        testCheckAndCreateLeasesForNewShardsAndClosedShard(ExtendedSequenceNumber.AT_TIMESTAMP,
    -                INITIAL_POSITION_AT_TIMESTAMP);
    +        testCheckAndCreateLeasesForNewShardsAndClosedShard(
    +                ExtendedSequenceNumber.AT_TIMESTAMP, INITIAL_POSITION_AT_TIMESTAMP);
         }
     
    -    private void testCheckAndCreateLeasesForNewShardsAndClosedShard(final ExtendedSequenceNumber sequenceNumber,
    -            final InitialPositionInStreamExtended position) throws Exception {
    +    private void testCheckAndCreateLeasesForNewShardsAndClosedShard(
    +            final ExtendedSequenceNumber sequenceNumber, final InitialPositionInStreamExtended position)
    +            throws Exception {
             final String shardIdPrefix = "shardId-%d";
             final List<Shard> shards = SHARD_GRAPH_A;
             final List<Lease> leases = createLeasesFromShards(shards, sequenceNumber, LEASE_OWNER);
     
             // Marking shardId-0 as ShardEnd.
    -        leases.stream().filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey())).findFirst()
    +        leases.stream()
    +                .filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey()))
    +                .findFirst()
                     .ifPresent(lease -> lease.checkpoint(ExtendedSequenceNumber.SHARD_END));
     
             // Marking child of shardId-0 to be processed and not at TRIM_HORIZON.
    -        leases.stream().filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey())).findFirst()
    +        leases.stream()
    +                .filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey()))
    +                .findFirst()
                     .ifPresent(lease -> lease.checkpoint(new ExtendedSequenceNumber("1")));
     
             final ArgumentCaptor<Lease> leaseCreateCaptor = ArgumentCaptor.forClass(Lease.class);
             final ArgumentCaptor<Lease> leaseDeleteCaptor = ArgumentCaptor.forClass(Lease.class);
     
    -        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException()).thenReturn(shards);
    -        when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList()).thenReturn(leases);
    -        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture())).thenReturn(true);
    +        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
    +                .thenReturn(shards);
    +        when(dynamoDBLeaseRefresher.listLeases())
    +                .thenReturn(Collections.emptyList())
    +                .thenReturn(leases);
    +        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture()))
    +                .thenReturn(true);
             doNothing().when(dynamoDBLeaseRefresher).deleteLease(leaseDeleteCaptor.capture());
     
             // Initial call: No leases present, create leases.
    -        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
    -                SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                shardDetector,
    +                dynamoDBLeaseRefresher,
    +                position,
    +                SCOPE,
    +                ignoreUnexpectedChildShards,
    +                dynamoDBLeaseRefresher.isLeaseTableEmpty());
     
             final Set<Lease> createLeases = new HashSet<>(leaseCreateCaptor.getAllValues());
     
    @@ -770,8 +864,13 @@ public class HierarchicalShardSyncerTest {
             verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
     
             // Second call: Leases present, no leases should be deleted.
    -        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
    -                SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                shardDetector,
    +                dynamoDBLeaseRefresher,
    +                position,
    +                SCOPE,
    +                ignoreUnexpectedChildShards,
    +                dynamoDBLeaseRefresher.isLeaseTableEmpty());
             final List<Lease> deleteLeases = leaseDeleteCaptor.getAllValues();
     
             assertTrue(deleteLeases.isEmpty());
    @@ -784,15 +883,15 @@ public class HierarchicalShardSyncerTest {
         @Test(expected = DependencyException.class)
         public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithListLeasesExceptions()
                 throws Exception {
    -        testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(ExtendedSequenceNumber.TRIM_HORIZON,
    -                INITIAL_POSITION_TRIM_HORIZON);
    +        testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(
    +                ExtendedSequenceNumber.TRIM_HORIZON, INITIAL_POSITION_TRIM_HORIZON);
         }
     
         @Test(expected = DependencyException.class)
         public void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShardWithListLeasesExceptions()
                 throws Exception {
    -        testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(ExtendedSequenceNumber.AT_TIMESTAMP,
    -                INITIAL_POSITION_AT_TIMESTAMP);
    +        testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(
    +                ExtendedSequenceNumber.AT_TIMESTAMP, INITIAL_POSITION_AT_TIMESTAMP);
         }
     
         private void testCheckAndCreateLeasesForNewShardsAndClosedShardWithListLeasesExceptions(
    @@ -803,26 +902,37 @@ public class HierarchicalShardSyncerTest {
             final List<Lease> leases = createLeasesFromShards(shards, sequenceNumber, LEASE_OWNER);
     
             // Marking shardId-0 as ShardEnd.
    -        leases.stream().filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey())).findFirst()
    +        leases.stream()
    +                .filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey()))
    +                .findFirst()
                     .ifPresent(lease -> lease.checkpoint(ExtendedSequenceNumber.SHARD_END));
     
             // Marking child of shardId-0 to be processed and not at TRIM_HORIZON.
    -        leases.stream().filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey())).findFirst()
    +        leases.stream()
    +                .filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey()))
    +                .findFirst()
                     .ifPresent(lease -> lease.checkpoint(new ExtendedSequenceNumber("1")));
     
             final ArgumentCaptor<Lease> leaseCreateCaptor = ArgumentCaptor.forClass(Lease.class);
     
    -        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException()).thenReturn(shards);
    +        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
    +                .thenReturn(shards);
             when(dynamoDBLeaseRefresher.listLeases())
                     .thenThrow(new DependencyException(new Throwable("Throw for ListLeases")))
    -                .thenReturn(Collections.emptyList()).thenReturn(leases);
    -        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture())).thenReturn(true);
    +                .thenReturn(Collections.emptyList())
    +                .thenReturn(leases);
    +        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture()))
    +                .thenReturn(true);
     
             try {
                 // Initial call: Call to create leases. Fails on ListLeases
    -            hierarchicalShardSyncer
    -                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
    -                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                    shardDetector,
    +                    dynamoDBLeaseRefresher,
    +                    position,
    +                    SCOPE,
    +                    ignoreUnexpectedChildShards,
    +                    dynamoDBLeaseRefresher.isLeaseTableEmpty());
             } finally {
                 verify(shardDetector, times(1)).listShardsWithoutConsumingResourceNotFoundException();
                 verify(dynamoDBLeaseRefresher, times(1)).listLeases();
    @@ -830,9 +940,13 @@ public class HierarchicalShardSyncerTest {
                 verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
     
                 // Second call: Leases not present, leases will be created.
    -            hierarchicalShardSyncer
    -                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
    -                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                    shardDetector,
    +                    dynamoDBLeaseRefresher,
    +                    position,
    +                    SCOPE,
    +                    ignoreUnexpectedChildShards,
    +                    dynamoDBLeaseRefresher.isLeaseTableEmpty());
     
                 final Set<Lease> createLeases = new HashSet<>(leaseCreateCaptor.getAllValues());
                 final Set<Lease> expectedCreateLeases = getExpectedLeasesForGraphA(shards, sequenceNumber, position);
    @@ -845,9 +959,13 @@ public class HierarchicalShardSyncerTest {
                 verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
     
                 // Final call: Leases present, belongs to TestOwner, shardId-0 is at ShardEnd should be cleaned up.
    -            hierarchicalShardSyncer
    -                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
    -                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                    shardDetector,
    +                    dynamoDBLeaseRefresher,
    +                    position,
    +                    SCOPE,
    +                    ignoreUnexpectedChildShards,
    +                    dynamoDBLeaseRefresher.isLeaseTableEmpty());
     
                 verify(shardDetector, times(3)).listShardsWithoutConsumingResourceNotFoundException();
                 verify(dynamoDBLeaseRefresher, times(expectedCreateLeases.size())).createLeaseIfNotExists(any(Lease.class));
    @@ -860,14 +978,17 @@ public class HierarchicalShardSyncerTest {
         public void testDeletedStreamListProviderUpdateOnResourceNotFound()
                 throws ProvisionedThroughputException, InvalidStateException, DependencyException, InterruptedException {
             DeletedStreamListProvider dummyDeletedStreamListProvider = new DeletedStreamListProvider();
    -        hierarchicalShardSyncer = new HierarchicalShardSyncer(MULTISTREAM_MODE_ON, STREAM_IDENTIFIER,
    -                dummyDeletedStreamListProvider);
    +        hierarchicalShardSyncer =
    +                new HierarchicalShardSyncer(MULTISTREAM_MODE_ON, STREAM_IDENTIFIER, dummyDeletedStreamListProvider);
             when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(false);
    -        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException()).thenThrow(
    -                ResourceNotFoundException.builder()
    -                                         .build());
    -        boolean response = hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher,
    -                INITIAL_POSITION_TRIM_HORIZON, SCOPE, ignoreUnexpectedChildShards,
    +        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
    +                .thenThrow(ResourceNotFoundException.builder().build());
    +        boolean response = hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                shardDetector,
    +                dynamoDBLeaseRefresher,
    +                INITIAL_POSITION_TRIM_HORIZON,
    +                SCOPE,
    +                ignoreUnexpectedChildShards,
                     dynamoDBLeaseRefresher.isLeaseTableEmpty());
             Set<StreamIdentifier> deletedStreamSet = dummyDeletedStreamListProvider.purgeAllDeletedStream();
     
    @@ -882,15 +1003,15 @@ public class HierarchicalShardSyncerTest {
         @Test(expected = DependencyException.class)
         public void testCheckAndCreateLeasesForNewShardsAtTrimHorizonAndClosedShardWithCreateLeaseExceptions()
                 throws Exception {
    -        testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(ExtendedSequenceNumber.TRIM_HORIZON,
    -                INITIAL_POSITION_TRIM_HORIZON);
    +        testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(
    +                ExtendedSequenceNumber.TRIM_HORIZON, INITIAL_POSITION_TRIM_HORIZON);
         }
     
         @Test(expected = DependencyException.class)
         public void testCheckAndCreateLeasesForNewShardsAtTimestampAndClosedShardWithCreateLeaseExceptions()
                 throws Exception {
    -        testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(ExtendedSequenceNumber.AT_TIMESTAMP,
    -                INITIAL_POSITION_AT_TIMESTAMP);
    +        testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(
    +                ExtendedSequenceNumber.AT_TIMESTAMP, INITIAL_POSITION_AT_TIMESTAMP);
         }
     
         private void testCheckAndCreateLeasesForNewShardsAndClosedShardWithCreateLeaseExceptions(
    @@ -901,35 +1022,51 @@ public class HierarchicalShardSyncerTest {
             final List<Lease> leases = createLeasesFromShards(shards, sequenceNumber, LEASE_OWNER);
     
             // Marking shardId-0 as ShardEnd.
    -        leases.stream().filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey())).findFirst()
    +        leases.stream()
    +                .filter(lease -> String.format(shardIdPrefix, 0).equals(lease.leaseKey()))
    +                .findFirst()
                     .ifPresent(lease -> lease.checkpoint(ExtendedSequenceNumber.SHARD_END));
     
             // Marking child of shardId-0 to be processed and not at TRIM_HORIZON.
    -        leases.stream().filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey())).findFirst()
    +        leases.stream()
    +                .filter(lease -> String.format(shardIdPrefix, 6).equals(lease.leaseKey()))
    +                .findFirst()
                     .ifPresent(lease -> lease.checkpoint(new ExtendedSequenceNumber("1")));
     
             final ArgumentCaptor<Lease> leaseCreateCaptor = ArgumentCaptor.forClass(Lease.class);
     
    -        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException()).thenReturn(shards);
    -        when(dynamoDBLeaseRefresher.listLeases()).thenReturn(Collections.emptyList())
    -                .thenReturn(Collections.emptyList()).thenReturn(leases);
    +        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
    +                .thenReturn(shards);
    +        when(dynamoDBLeaseRefresher.listLeases())
    +                .thenReturn(Collections.emptyList())
    +                .thenReturn(Collections.emptyList())
    +                .thenReturn(leases);
             when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCreateCaptor.capture()))
    -                .thenThrow(new DependencyException(new Throwable("Throw for CreateLease"))).thenReturn(true);
    +                .thenThrow(new DependencyException(new Throwable("Throw for CreateLease")))
    +                .thenReturn(true);
     
             try {
                 // Initial call: No leases present, create leases. Create lease Fails
    -            hierarchicalShardSyncer
    -                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
    -                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                    shardDetector,
    +                    dynamoDBLeaseRefresher,
    +                    position,
    +                    SCOPE,
    +                    ignoreUnexpectedChildShards,
    +                    dynamoDBLeaseRefresher.isLeaseTableEmpty());
             } finally {
                 verify(shardDetector, times(1)).listShardsWithoutConsumingResourceNotFoundException();
                 verify(dynamoDBLeaseRefresher, times(1)).listLeases();
                 verify(dynamoDBLeaseRefresher, times(1)).createLeaseIfNotExists(any(Lease.class));
                 verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
     
    -            hierarchicalShardSyncer
    -                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
    -                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                    shardDetector,
    +                    dynamoDBLeaseRefresher,
    +                    position,
    +                    SCOPE,
    +                    ignoreUnexpectedChildShards,
    +                    dynamoDBLeaseRefresher.isLeaseTableEmpty());
     
                 final Set<Lease> createLeases = new HashSet<>(leaseCreateCaptor.getAllValues());
     
    @@ -943,9 +1080,13 @@ public class HierarchicalShardSyncerTest {
                 verify(dynamoDBLeaseRefresher, never()).deleteLease(any(Lease.class));
     
                 // Final call: Leases are present, shardId-0 is at ShardEnd needs to be cleaned up.
    -            hierarchicalShardSyncer
    -                    .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, position,
    -                            SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +            hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                    shardDetector,
    +                    dynamoDBLeaseRefresher,
    +                    position,
    +                    SCOPE,
    +                    ignoreUnexpectedChildShards,
    +                    dynamoDBLeaseRefresher.isLeaseTableEmpty());
     
                 verify(shardDetector, times(3)).listShardsWithoutConsumingResourceNotFoundException();
                 verify(dynamoDBLeaseRefresher, times(1 + expectedCreateLeases.size()))
    @@ -955,87 +1096,116 @@ public class HierarchicalShardSyncerTest {
             }
         }
     
    -    private List<Lease> createLeasesFromShards(final List<Shard> shards, final ExtendedSequenceNumber checkpoint,
    -            final String leaseOwner) {
    -        return shards.stream().map(shard -> {
    -            final Set<String> parentShardIds = new HashSet<>();
    -            if (StringUtils.isNotEmpty(shard.parentShardId())) {
    -                parentShardIds.add(shard.parentShardId());
    -            }
    -            if (StringUtils.isNotEmpty(shard.adjacentParentShardId())) {
    -                parentShardIds.add(shard.adjacentParentShardId());
    -            }
    -            return new Lease(shard.shardId(), leaseOwner, 0L, UUID.randomUUID(), 0L, checkpoint, null, 0L,
    -                    parentShardIds, new HashSet<>(), null, HashKeyRangeForLease.fromHashKeyRange(shard.hashKeyRange()));
    -        }).collect(Collectors.toList());
    +    private List<Lease> createLeasesFromShards(
    +            final List<Shard> shards, final ExtendedSequenceNumber checkpoint, final String leaseOwner) {
    +        return shards.stream()
    +                .map(shard -> {
    +                    final Set<String> parentShardIds = new HashSet<>();
    +                    if (StringUtils.isNotEmpty(shard.parentShardId())) {
    +                        parentShardIds.add(shard.parentShardId());
    +                    }
    +                    if (StringUtils.isNotEmpty(shard.adjacentParentShardId())) {
    +                        parentShardIds.add(shard.adjacentParentShardId());
    +                    }
    +                    return new Lease(
    +                            shard.shardId(),
    +                            leaseOwner,
    +                            0L,
    +                            UUID.randomUUID(),
    +                            0L,
    +                            checkpoint,
    +                            null,
    +                            0L,
    +                            parentShardIds,
    +                            new HashSet<>(),
    +                            null,
    +                            HashKeyRangeForLease.fromHashKeyRange(shard.hashKeyRange()));
    +                })
    +                .collect(Collectors.toList());
         }
     
    -    private List<MultiStreamLease> createMultiStreamLeasesFromShards(final List<Shard> shards, final ExtendedSequenceNumber checkpoint,
    -            final String leaseOwner) {
    -        return shards.stream().map(shard -> {
    -            final Set<String> parentShardIds = new HashSet<>();
    -            if (StringUtils.isNotEmpty(shard.parentShardId())) {
    -                parentShardIds.add(shard.parentShardId());
    -            }
    -            if (StringUtils.isNotEmpty(shard.adjacentParentShardId())) {
    -                parentShardIds.add(shard.adjacentParentShardId());
    -            }
    -            final MultiStreamLease msLease = new MultiStreamLease();
    -            msLease.shardId(shard.shardId());
    -            msLease.leaseOwner(leaseOwner);
    -            msLease.leaseCounter(0L);
    -            msLease.concurrencyToken(UUID.randomUUID());
    -            msLease.lastCounterIncrementNanos(0L);
    -            msLease.checkpoint(checkpoint);
    -            msLease.parentShardIds(parentShardIds);
    -            msLease.streamIdentifier(STREAM_IDENTIFIER);
    -            return msLease;
    -        }).collect(Collectors.toList());
    +    private List<MultiStreamLease> createMultiStreamLeasesFromShards(
    +            final List<Shard> shards, final ExtendedSequenceNumber checkpoint, final String leaseOwner) {
    +        return shards.stream()
    +                .map(shard -> {
    +                    final Set<String> parentShardIds = new HashSet<>();
    +                    if (StringUtils.isNotEmpty(shard.parentShardId())) {
    +                        parentShardIds.add(shard.parentShardId());
    +                    }
    +                    if (StringUtils.isNotEmpty(shard.adjacentParentShardId())) {
    +                        parentShardIds.add(shard.adjacentParentShardId());
    +                    }
    +                    final MultiStreamLease msLease = new MultiStreamLease();
    +                    msLease.shardId(shard.shardId());
    +                    msLease.leaseOwner(leaseOwner);
    +                    msLease.leaseCounter(0L);
    +                    msLease.concurrencyToken(UUID.randomUUID());
    +                    msLease.lastCounterIncrementNanos(0L);
    +                    msLease.checkpoint(checkpoint);
    +                    msLease.parentShardIds(parentShardIds);
    +                    msLease.streamIdentifier(STREAM_IDENTIFIER);
    +                    return msLease;
    +                })
    +                .collect(Collectors.toList());
         }
     
         private void testCheckAndCreateLeasesForShardsIfMissing(InitialPositionInStreamExtended initialPosition)
                 throws Exception {
             final String shardId0 = "shardId-0";
             final String shardId1 = "shardId-1";
    -        final HashKeyRange range1 = ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, BigInteger.ONE.toString());
    -        final HashKeyRange range2 = ShardObjectHelper.newHashKeyRange(new BigInteger("2").toString(), ShardObjectHelper.MAX_HASH_KEY);
    +        final HashKeyRange range1 =
    +                ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, BigInteger.ONE.toString());
    +        final HashKeyRange range2 =
    +                ShardObjectHelper.newHashKeyRange(new BigInteger("2").toString(), ShardObjectHelper.MAX_HASH_KEY);
             final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("11", null);
    -        final List<Shard> shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange, range1),
    +        final List<Shard> shards = Arrays.asList(
    +                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange, range1),
                     ShardObjectHelper.newShard(shardId1, null, null, sequenceRange, range2));
             final Set<String> expectedLeaseKeys = new HashSet<>(Arrays.asList(shardId0, shardId1));
     
             testCheckAndCreateLeaseForShardsIfMissing(shards, initialPosition, expectedLeaseKeys);
         }
     
    -    private void testCheckAndCreateLeaseForShardsIfMissing(final List<Shard> shards,
    -                                                           final InitialPositionInStreamExtended initialPosition,
    -                                                           final Set<String> expectedLeaseKeys) throws Exception {
    +    private void testCheckAndCreateLeaseForShardsIfMissing(
    +            final List<Shard> shards,
    +            final InitialPositionInStreamExtended initialPosition,
    +            final Set<String> expectedLeaseKeys)
    +            throws Exception {
             testCheckAndCreateLeaseForShardsIfMissing(shards, initialPosition, expectedLeaseKeys, Collections.emptyList());
         }
     
    -    private void testCheckAndCreateLeaseForShardsIfMissing(final List<Shard> shards,
    -                                                           final InitialPositionInStreamExtended initialPosition,
    -                                                           final Set<String> expectedLeaseKeys,
    -                                                           final List<Lease> existingLeases) throws Exception {
    +    private void testCheckAndCreateLeaseForShardsIfMissing(
    +            final List<Shard> shards,
    +            final InitialPositionInStreamExtended initialPosition,
    +            final Set<String> expectedLeaseKeys,
    +            final List<Lease> existingLeases)
    +            throws Exception {
             final ArgumentCaptor<Lease> leaseCaptor = ArgumentCaptor.forClass(Lease.class);
     
    -        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException()).thenReturn(shards);
    +        when(shardDetector.listShardsWithoutConsumingResourceNotFoundException())
    +                .thenReturn(shards);
             when(shardDetector.listShardsWithFilter(any())).thenReturn(getFilteredShards(shards, initialPosition));
             when(dynamoDBLeaseRefresher.listLeases()).thenReturn(existingLeases);
             when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(existingLeases.isEmpty());
    -        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture())).thenReturn(true);
    +        when(dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseCaptor.capture()))
    +                .thenReturn(true);
     
    -        hierarchicalShardSyncer
    -                .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, initialPosition,
    -                        SCOPE, false, dynamoDBLeaseRefresher.isLeaseTableEmpty());
    +        hierarchicalShardSyncer.checkAndCreateLeaseForNewShards(
    +                shardDetector,
    +                dynamoDBLeaseRefresher,
    +                initialPosition,
    +                SCOPE,
    +                false,
    +                dynamoDBLeaseRefresher.isLeaseTableEmpty());
     
             final List<Lease> leases = leaseCaptor.getAllValues();
             final Set<String> leaseKeys = leases.stream().map(Lease::leaseKey).collect(Collectors.toSet());
    -        final Set<ExtendedSequenceNumber> leaseSequenceNumbers = leases.stream().map(Lease::checkpoint)
    -                .collect(Collectors.toSet());
    +        final Set<ExtendedSequenceNumber> leaseSequenceNumbers =
    +                leases.stream().map(Lease::checkpoint).collect(Collectors.toSet());
     
    -        final Set<ExtendedSequenceNumber> expectedSequenceNumbers = new HashSet<>(Collections
    -                .singletonList(new ExtendedSequenceNumber(initialPosition.getInitialPositionInStream().name())));
    +        final Set<ExtendedSequenceNumber> expectedSequenceNumbers =
    +                new HashSet<>(Collections.singletonList(new ExtendedSequenceNumber(
    +                        initialPosition.getInitialPositionInStream().name())));
     
             assertEquals(expectedLeaseKeys.size(), leases.size());
             assertEquals(expectedLeaseKeys, leaseKeys);
    @@ -1050,26 +1220,30 @@ public class HierarchicalShardSyncerTest {
             final String shardId0 = "shardId-0";
             final String shardId1 = "shardId-1";
             final List<Lease> currentLeases = new ArrayList<>();
    -        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer = new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
    +        final HierarchicalShardSyncer.LeaseSynchronizer emptyLeaseTableSynchronizer =
    +                new HierarchicalShardSyncer.EmptyLeaseTableSynchronizer();
             final SequenceNumberRange sequenceRange = ShardObjectHelper.newSequenceNumberRange("342980", null);
     
    -        final List<Shard> shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
    +        final List<Shard> shards = Arrays.asList(
    +                ShardObjectHelper.newShard(shardId0, null, null, sequenceRange),
                     ShardObjectHelper.newShard(shardId1, null, null, sequenceRange));
     
    -        final Set<InitialPositionInStreamExtended> initialPositions = new HashSet<>(
    -                Arrays.asList(INITIAL_POSITION_LATEST, INITIAL_POSITION_TRIM_HORIZON));
    +        final Set<InitialPositionInStreamExtended> initialPositions =
    +                new HashSet<>(Arrays.asList(INITIAL_POSITION_LATEST, INITIAL_POSITION_TRIM_HORIZON));
     
             final Set<String> expectedLeaseShardIds = new HashSet<>(Arrays.asList(shardId0, shardId1));
     
             for (InitialPositionInStreamExtended initialPosition : initialPositions) {
    -            final List<Lease> newLeases = determineNewLeasesToCreate(emptyLeaseTableSynchronizer, shards, currentLeases,
    -                    initialPosition);
    +            final List<Lease> newLeases =
    +                    determineNewLeasesToCreate(emptyLeaseTableSynchronizer, shards, currentLeases, initialPosition);
                 assertEquals(2, newLeases.size());
     
                 for (Lease lease : newLeases) {
                     assertTrue(expectedLeaseShardIds.contains(lease.leaseKey()));
    -                assertThat(lease.checkpoint(),
    -                        equalTo(new ExtendedSequenceNumber(initialPosition.getInitialPositionInStream().toString())));
    +                assertThat(
    +                        lease.checkpoint(),
    +                        equalTo(new ExtendedSequenceNumber(
    +                                initialPosition.getInitialPositionInStream().toString())));
                 }
             }
         }
    @@ -1082,25 +1256,30 @@ public class HierarchicalShardSyncerTest {
             final String lastShardId = "shardId-1";
     
             final List<Shard> shardsWithoutLeases = Arrays.asList(
    -                ShardObjectHelper.newShard("shardId-0", null, null,
    -                        ShardObjectHelper.newSequenceNumberRange("303", "404")),
    -                ShardObjectHelper.newShard(lastShardId, null, null,
    -                        ShardObjectHelper.newSequenceNumberRange("405", null)));
    +                ShardObjectHelper.newShard(
    +                        "shardId-0", null, null, ShardObjectHelper.newSequenceNumberRange("303", "404")),
    +                ShardObjectHelper.newShard(
    +                        lastShardId, null, null, ShardObjectHelper.newSequenceNumberRange("405", null)));
     
    -        final List<Shard> shardsWithLeases = Arrays.asList(ShardObjectHelper.newShard("shardId-2", null,
    -                null, ShardObjectHelper.newSequenceNumberRange("202", "302")));
    +        final List<Shard> shardsWithLeases = Arrays.asList(ShardObjectHelper.newShard(
    +                "shardId-2", null, null, ShardObjectHelper.newSequenceNumberRange("202", "302")));
     
    -        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases).flatMap(x -> x.stream()).collect(Collectors.toList());
    -        final List<Lease> currentLeases = createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo");
    +        final List<Shard> shards = Stream.of(shardsWithLeases, shardsWithoutLeases)
    +                .flatMap(x -> x.stream())
    +                .collect(Collectors.toList());
    +        final List<Lease> currentLeases =
    +                createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo");
     
             Map<String, Shard> shardIdToShardMap = HierarchicalShardSyncer.constructShardIdToShardMap(shards);
    -        Map<String, Set<String>> shardIdToChildShardIdsMap = HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
    +        Map<String, Set<String>> shardIdToChildShardIdsMap =
    +                HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
     
             final HierarchicalShardSyncer.LeaseSynchronizer leaseSynchronizer =
    -                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
    +                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(
    +                        shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
     
    -        final List<Lease> newLeases = determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases,
    -                INITIAL_POSITION_LATEST);
    +        final List<Lease> newLeases =
    +                determineNewLeasesToCreate(leaseSynchronizer, shards, currentLeases, INITIAL_POSITION_LATEST);
     
             assertThat(newLeases.size(), equalTo(1));
             assertThat(newLeases.get(0).leaseKey(), equalTo(lastShardId));
    @@ -1126,8 +1305,8 @@ public class HierarchicalShardSyncerTest {
             final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
             expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.LATEST);
             expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1149,8 +1328,8 @@ public class HierarchicalShardSyncerTest {
             final List<String> shardIdsOfCurrentLeases = Arrays.asList("shardId-4", "shardId-5", "shardId-7");
             final Map<String, ExtendedSequenceNumber> expectedShardIdCheckpointMap = new HashMap<>();
             expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1175,8 +1354,8 @@ public class HierarchicalShardSyncerTest {
             expectedShardIdCheckpointMap.put("shardId-4", ExtendedSequenceNumber.LATEST);
             expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.LATEST);
             expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.LATEST);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1198,8 +1377,8 @@ public class HierarchicalShardSyncerTest {
             final List shardIdsOfCurrentLeases = Arrays.asList("shardId-4", "shardId-9", "shardId-10");
             final Map expectedShardIdCheckpointMap = new HashMap<>();
             expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.LATEST);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1225,8 +1404,8 @@ public class HierarchicalShardSyncerTest {
             expectedShardIdCheckpointMap.put("shardId-6", ExtendedSequenceNumber.LATEST);
             expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.LATEST);
             expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.LATEST);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_C, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_C, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1265,8 +1444,8 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testDetermineNewLeasesToCreateSplitMergeLatestA_CompleteHashRangeWithoutGC() {
    -        final List shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
    -                "shardId-4", "shardId-5", "shardId-6", "shardId-7");
    +        final List shardIdsOfCurrentLeases = Arrays.asList(
    +                "shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5", "shardId-6", "shardId-7");
             assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST);
         }
     
    @@ -1291,8 +1470,8 @@ public class HierarchicalShardSyncerTest {
             expectedShardIdCheckpointMap.put("shardId-8", ExtendedSequenceNumber.LATEST);
             expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.LATEST);
             expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.LATEST);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, Collections.emptyList(), INITIAL_POSITION_LATEST,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, Collections.emptyList(), INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1311,8 +1490,8 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testDetermineNewLeasesToCreateSplitMergeLatestA_CompleteHashRangeAcrossDifferentEpochs() {
    -        final List shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7",
    -                "shardId-9", "shardId-10");
    +        final List shardIdsOfCurrentLeases =
    +                Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7", "shardId-9", "shardId-10");
             assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST);
         }
     
    @@ -1335,8 +1514,8 @@ public class HierarchicalShardSyncerTest {
             final List shardIdsOfCurrentLeases = Collections.singletonList("shardId-6");
             final Map expectedShardIdCheckpointMap = new HashMap<>();
             expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.LATEST);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1375,8 +1554,8 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testDetermineNewLeasesToCreateSplitMergeLatestB_CompleteHashRangeWithoutGC() {
    -        final List shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
    -                "shardId-4", "shardId-5");
    +        final List shardIdsOfCurrentLeases =
    +                Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5");
             assertExpectedLeasesAreCreated(SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_LATEST);
         }
     
    @@ -1399,8 +1578,8 @@ public class HierarchicalShardSyncerTest {
             final Map expectedShardIdCheckpointMap = new HashMap<>();
             expectedShardIdCheckpointMap.put("shardId-9", ExtendedSequenceNumber.LATEST);
             expectedShardIdCheckpointMap.put("shardId-10", ExtendedSequenceNumber.LATEST);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_B, Collections.emptyList(), INITIAL_POSITION_LATEST,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_B, Collections.emptyList(), INITIAL_POSITION_LATEST, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1424,8 +1603,8 @@ public class HierarchicalShardSyncerTest {
             expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON);
             expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON);
             expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.TRIM_HORIZON);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1448,8 +1627,8 @@ public class HierarchicalShardSyncerTest {
             final Map expectedShardIdCheckpointMap = new HashMap<>();
             expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON);
             expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1473,8 +1652,8 @@ public class HierarchicalShardSyncerTest {
             expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.TRIM_HORIZON);
             expectedShardIdCheckpointMap.put("shardId-4", ExtendedSequenceNumber.TRIM_HORIZON);
             expectedShardIdCheckpointMap.put("shardId-5", ExtendedSequenceNumber.TRIM_HORIZON);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1499,8 +1678,8 @@ public class HierarchicalShardSyncerTest {
             expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON);
             expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.TRIM_HORIZON);
             expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.TRIM_HORIZON);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1539,8 +1718,8 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testDetermineNewLeasesToCreateSplitMergeHorizonA_CompleteHashRangeWithoutGC() {
    -        final List shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
    -                "shardId-4", "shardId-5", "shardId-6", "shardId-7");
    +        final List shardIdsOfCurrentLeases = Arrays.asList(
    +                "shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5", "shardId-6", "shardId-7");
             assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON);
         }
     
    @@ -1567,8 +1746,8 @@ public class HierarchicalShardSyncerTest {
             expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.TRIM_HORIZON);
             expectedShardIdCheckpointMap.put("shardId-4", ExtendedSequenceNumber.TRIM_HORIZON);
             expectedShardIdCheckpointMap.put("shardId-5", ExtendedSequenceNumber.TRIM_HORIZON);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, Collections.emptyList(), INITIAL_POSITION_TRIM_HORIZON,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, Collections.emptyList(), INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1587,8 +1766,8 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testDetermineNewLeasesToCreateSplitMergeHorizonA_CompleteHashRangeAcrossDifferentEpochs() {
    -        final List shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7",
    -                "shardId-9", "shardId-10");
    +        final List shardIdsOfCurrentLeases =
    +                Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7", "shardId-9", "shardId-10");
             assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON);
         }
     
    @@ -1606,15 +1785,16 @@ public class HierarchicalShardSyncerTest {
          * Expected leases: (7)
          * 
    */ -// TODO: Account for out-of-order lease creation in TRIM_HORIZON and AT_TIMESTAMP cases -// @Test -// public void testDetermineNewLeasesToCreateSplitMergeHorizonB_PartialHashRange() { -// final List shards = constructShardListForGraphB(); -// final List shardIdsOfCurrentLeases = Arrays.asList("shardId-6"); -// final Map expectedShardIdCheckpointMap = new HashMap<>(); -// expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.TRIM_HORIZON); -// assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap); -// } + // TODO: Account for out-of-order lease creation in TRIM_HORIZON and AT_TIMESTAMP cases + // @Test + // public void testDetermineNewLeasesToCreateSplitMergeHorizonB_PartialHashRange() { + // final List shards = constructShardListForGraphB(); + // final List shardIdsOfCurrentLeases = Arrays.asList("shardId-6"); + // final Map expectedShardIdCheckpointMap = new HashMap<>(); + // expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.TRIM_HORIZON); + // assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON, + // expectedShardIdCheckpointMap); + // } /** *
    @@ -1652,8 +1832,8 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testDetermineNewLeasesToCreateSplitMergeHorizonB_CompleteHashRangeWithoutGC() {
    -        final List shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
    -                "shardId-4", "shardId-5");
    +        final List shardIdsOfCurrentLeases =
    +                Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5");
             assertExpectedLeasesAreCreated(SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_TRIM_HORIZON);
         }
     
    @@ -1676,8 +1856,8 @@ public class HierarchicalShardSyncerTest {
             final Map expectedShardIdCheckpointMap = new HashMap<>();
             expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.TRIM_HORIZON);
             expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.TRIM_HORIZON);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_B, Collections.emptyList(), INITIAL_POSITION_TRIM_HORIZON,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_B, Collections.emptyList(), INITIAL_POSITION_TRIM_HORIZON, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1701,8 +1881,8 @@ public class HierarchicalShardSyncerTest {
             expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.AT_TIMESTAMP);
             expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.AT_TIMESTAMP);
             expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.AT_TIMESTAMP);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1725,8 +1905,8 @@ public class HierarchicalShardSyncerTest {
             final Map expectedShardIdCheckpointMap = new HashMap<>();
             expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.AT_TIMESTAMP);
             expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.AT_TIMESTAMP);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1750,8 +1930,8 @@ public class HierarchicalShardSyncerTest {
             expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.AT_TIMESTAMP);
             expectedShardIdCheckpointMap.put("shardId-4", ExtendedSequenceNumber.AT_TIMESTAMP);
             expectedShardIdCheckpointMap.put("shardId-5", ExtendedSequenceNumber.AT_TIMESTAMP);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1776,8 +1956,8 @@ public class HierarchicalShardSyncerTest {
             expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.AT_TIMESTAMP);
             expectedShardIdCheckpointMap.put("shardId-2", ExtendedSequenceNumber.AT_TIMESTAMP);
             expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.AT_TIMESTAMP);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1816,8 +1996,8 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testDetermineNewLeasesToCreateSplitMergeAtTimestampA_CompleteHashRangeWithoutGC() {
    -        final List shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
    -                "shardId-4", "shardId-5", "shardId-6", "shardId-7");
    +        final List shardIdsOfCurrentLeases = Arrays.asList(
    +                "shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5", "shardId-6", "shardId-7");
             assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP);
         }
     
    @@ -1844,8 +2024,8 @@ public class HierarchicalShardSyncerTest {
             expectedShardIdCheckpointMap.put("shardId-3", ExtendedSequenceNumber.AT_TIMESTAMP);
             expectedShardIdCheckpointMap.put("shardId-4", ExtendedSequenceNumber.AT_TIMESTAMP);
             expectedShardIdCheckpointMap.put("shardId-5", ExtendedSequenceNumber.AT_TIMESTAMP);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_A, Collections.emptyList(), INITIAL_POSITION_AT_TIMESTAMP,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_A, Collections.emptyList(), INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
         }
     
         /**
    @@ -1864,8 +2044,8 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testDetermineNewLeasesToCreateSplitMergeAtTimestampA_CompleteHashRangeAcrossDifferentEpochs() {
    -        final List shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7",
    -                "shardId-9", "shardId-10");
    +        final List shardIdsOfCurrentLeases =
    +                Arrays.asList("shardId-0", "shardId-1", "shardId-4", "shardId-7", "shardId-9", "shardId-10");
             assertExpectedLeasesAreCreated(SHARD_GRAPH_A, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP);
         }
     
    @@ -1883,15 +2063,16 @@ public class HierarchicalShardSyncerTest {
          * Expected leases: (7)
          * 
    */ -// TODO: Account for out-of-order lease creation in TRIM_HORIZON and AT_TIMESTAMP cases -// @Test -// public void testDetermineNewLeasesToCreateSplitMergeAtTimestampB_PartialHashRange() { -// final List shards = constructShardListForGraphB(); -// final List shardIdsOfCurrentLeases = Arrays.asList("shardId-6"); -// final Map expectedShardIdCheckpointMap = new HashMap<>(); -// expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.AT_TIMESTAMP); -// assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap); -// } + // TODO: Account for out-of-order lease creation in TRIM_HORIZON and AT_TIMESTAMP cases + // @Test + // public void testDetermineNewLeasesToCreateSplitMergeAtTimestampB_PartialHashRange() { + // final List shards = constructShardListForGraphB(); + // final List shardIdsOfCurrentLeases = Arrays.asList("shardId-6"); + // final Map expectedShardIdCheckpointMap = new HashMap<>(); + // expectedShardIdCheckpointMap.put("shardId-7", ExtendedSequenceNumber.AT_TIMESTAMP); + // assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP, + // expectedShardIdCheckpointMap); + // } /** *
    @@ -1929,8 +2110,8 @@ public class HierarchicalShardSyncerTest {
          */
         @Test
         public void testDetermineNewLeasesToCreateSplitMergeAtTimestampB_CompleteHashRangeWithoutGC() {
    -        final List shardIdsOfCurrentLeases = Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3",
    -                "shardId-4", "shardId-5");
    +        final List shardIdsOfCurrentLeases =
    +                Arrays.asList("shardId-0", "shardId-1", "shardId-2", "shardId-3", "shardId-4", "shardId-5");
             assertExpectedLeasesAreCreated(SHARD_GRAPH_B, shardIdsOfCurrentLeases, INITIAL_POSITION_AT_TIMESTAMP);
         }
     
    @@ -1953,8 +2134,8 @@ public class HierarchicalShardSyncerTest {
             final Map expectedShardIdCheckpointMap = new HashMap<>();
             expectedShardIdCheckpointMap.put("shardId-0", ExtendedSequenceNumber.AT_TIMESTAMP);
             expectedShardIdCheckpointMap.put("shardId-1", ExtendedSequenceNumber.AT_TIMESTAMP);
    -        assertExpectedLeasesAreCreated(SHARD_GRAPH_B, Collections.emptyList(), INITIAL_POSITION_AT_TIMESTAMP,
    -                expectedShardIdCheckpointMap);
    +        assertExpectedLeasesAreCreated(
    +                SHARD_GRAPH_B, Collections.emptyList(), INITIAL_POSITION_AT_TIMESTAMP, expectedShardIdCheckpointMap);
         }
     
         private void assertExpectedLeasesAreCreated(
    @@ -1964,27 +2145,32 @@ public class HierarchicalShardSyncerTest {
             assertExpectedLeasesAreCreated(shards, shardIdsOfCurrentLeases, initialPosition, Collections.emptyMap());
         }
     
    -    private void assertExpectedLeasesAreCreated(List shards,
    -                                                List shardIdsOfCurrentLeases,
    -                                                InitialPositionInStreamExtended initialPosition,
    -                                                Map expectedShardIdCheckpointMap) {
    +    private void assertExpectedLeasesAreCreated(
    +            List shards,
    +            List shardIdsOfCurrentLeases,
    +            InitialPositionInStreamExtended initialPosition,
    +            Map expectedShardIdCheckpointMap) {
     
             final List currentLeases = shardIdsOfCurrentLeases.stream()
    -                .map(shardId -> newLease(shardId)).collect(Collectors.toList());
    +                .map(shardId -> newLease(shardId))
    +                .collect(Collectors.toList());
     
             final Map shardIdToShardMap = HierarchicalShardSyncer.constructShardIdToShardMap(shards);
    -        final Map> shardIdToChildShardIdsMap = HierarchicalShardSyncer
    -                .constructShardIdToChildShardIdsMap(shardIdToShardMap);
    +        final Map> shardIdToChildShardIdsMap =
    +                HierarchicalShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap);
     
             final HierarchicalShardSyncer.LeaseSynchronizer nonEmptyLeaseTableSynchronizer =
    -                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
    +                new HierarchicalShardSyncer.NonEmptyLeaseTableSynchronizer(
    +                        shardDetector, shardIdToShardMap, shardIdToChildShardIdsMap);
     
    -        final List newLeases = determineNewLeasesToCreate(nonEmptyLeaseTableSynchronizer,
    -                shards, currentLeases, initialPosition);
    +        final List newLeases =
    +                determineNewLeasesToCreate(nonEmptyLeaseTableSynchronizer, shards, currentLeases, initialPosition);
     
             assertThat(newLeases.size(), equalTo(expectedShardIdCheckpointMap.size()));
             for (Lease lease : newLeases) {
    -            assertThat("Unexpected lease: " + lease, expectedShardIdCheckpointMap.containsKey(lease.leaseKey()),
    +            assertThat(
    +                    "Unexpected lease: " + lease,
    +                    expectedShardIdCheckpointMap.containsKey(lease.leaseKey()),
                         equalTo(true));
                 assertThat(lease.checkpoint(), equalTo(expectedShardIdCheckpointMap.get(lease.leaseKey())));
             }
    @@ -2009,27 +2195,35 @@ public class HierarchicalShardSyncerTest {
             final SequenceNumberRange range4 = ShardObjectHelper.newSequenceNumberRange("206", null);
     
             return Arrays.asList(
    -                ShardObjectHelper.newShard("shardId-0", null, null, range0,
    -                        ShardObjectHelper.newHashKeyRange("0", "99")),
    -                ShardObjectHelper.newShard("shardId-1", null, null, range0,
    -                        ShardObjectHelper.newHashKeyRange("100", "199")),
    -                ShardObjectHelper.newShard("shardId-2", null, null, range0,
    -                        ShardObjectHelper.newHashKeyRange("200", "299")),
    -                ShardObjectHelper.newShard("shardId-3", null, null, range0,
    -                        ShardObjectHelper.newHashKeyRange("300", "399")),
    -                ShardObjectHelper.newShard("shardId-4", null, null, range1,
    -                        ShardObjectHelper.newHashKeyRange("400", "499")),
    -                ShardObjectHelper.newShard("shardId-5", null, null, range2,
    +                ShardObjectHelper.newShard(
    +                        "shardId-0", null, null, range0, ShardObjectHelper.newHashKeyRange("0", "99")),
    +                ShardObjectHelper.newShard(
    +                        "shardId-1", null, null, range0, ShardObjectHelper.newHashKeyRange("100", "199")),
    +                ShardObjectHelper.newShard(
    +                        "shardId-2", null, null, range0, ShardObjectHelper.newHashKeyRange("200", "299")),
    +                ShardObjectHelper.newShard(
    +                        "shardId-3", null, null, range0, ShardObjectHelper.newHashKeyRange("300", "399")),
    +                ShardObjectHelper.newShard(
    +                        "shardId-4", null, null, range1, ShardObjectHelper.newHashKeyRange("400", "499")),
    +                ShardObjectHelper.newShard(
    +                        "shardId-5",
    +                        null,
    +                        null,
    +                        range2,
                             ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY)),
    -                ShardObjectHelper.newShard("shardId-6", "shardId-0", "shardId-1", range3,
    -                        ShardObjectHelper.newHashKeyRange("0", "199")),
    -                ShardObjectHelper.newShard("shardId-7", "shardId-2", "shardId-3", range3,
    -                        ShardObjectHelper.newHashKeyRange("200", "399")),
    -                ShardObjectHelper.newShard("shardId-8", "shardId-6", "shardId-7", range4,
    -                        ShardObjectHelper.newHashKeyRange("0", "399")),
    -                ShardObjectHelper.newShard("shardId-9", "shardId-5", null, range4,
    -                        ShardObjectHelper.newHashKeyRange("500", "799")),
    -                ShardObjectHelper.newShard("shardId-10", null, "shardId-5", range4,
    +                ShardObjectHelper.newShard(
    +                        "shardId-6", "shardId-0", "shardId-1", range3, ShardObjectHelper.newHashKeyRange("0", "199")),
    +                ShardObjectHelper.newShard(
    +                        "shardId-7", "shardId-2", "shardId-3", range3, ShardObjectHelper.newHashKeyRange("200", "399")),
    +                ShardObjectHelper.newShard(
    +                        "shardId-8", "shardId-6", "shardId-7", range4, ShardObjectHelper.newHashKeyRange("0", "399")),
    +                ShardObjectHelper.newShard(
    +                        "shardId-9", "shardId-5", null, range4, ShardObjectHelper.newHashKeyRange("500", "799")),
    +                ShardObjectHelper.newShard(
    +                        "shardId-10",
    +                        null,
    +                        "shardId-5",
    +                        range4,
                             ShardObjectHelper.newHashKeyRange("800", ShardObjectHelper.MAX_HASH_KEY)));
         }
     
    @@ -2044,19 +2238,25 @@ public class HierarchicalShardSyncerTest {
                             .collect(Collectors.toList());
                 case TRIM_HORIZON:
                     String minSeqNum = shards.stream()
    -                        .min(Comparator.comparingLong(s -> Long.parseLong(s.sequenceNumberRange().startingSequenceNumber())))
    +                        .min(Comparator.comparingLong(
    +                                s -> Long.parseLong(s.sequenceNumberRange().startingSequenceNumber())))
                             .map(s -> s.sequenceNumberRange().startingSequenceNumber())
                             .orElseThrow(RuntimeException::new);
                     return shards.stream()
    -                        .filter(s -> s.sequenceNumberRange().startingSequenceNumber().equals(minSeqNum))
    +                        .filter(s ->
    +                                s.sequenceNumberRange().startingSequenceNumber().equals(minSeqNum))
                             .collect(Collectors.toList());
                 case AT_TIMESTAMP:
                     return shards.stream()
    -                        .filter(s -> new Date(Long.parseLong(s.sequenceNumberRange().startingSequenceNumber()))
    -                                .compareTo(initialPosition.getTimestamp()) <= 0)
    -                        .filter(s -> s.sequenceNumberRange().endingSequenceNumber() == null ||
    -                                new Date(Long.parseLong(s.sequenceNumberRange().endingSequenceNumber()))
    -                                        .compareTo(initialPosition.getTimestamp()) > 0)
    +                        .filter(s ->
    +                                new Date(Long.parseLong(s.sequenceNumberRange().startingSequenceNumber()))
    +                                                .compareTo(initialPosition.getTimestamp())
    +                                        <= 0)
    +                        .filter(s -> s.sequenceNumberRange().endingSequenceNumber() == null
    +                                || new Date(Long.parseLong(
    +                                                        s.sequenceNumberRange().endingSequenceNumber()))
    +                                                .compareTo(initialPosition.getTimestamp())
    +                                        > 0)
                             .collect(Collectors.toList());
             }
             throw new RuntimeException("Unsupported initial position " + initialPosition);
    @@ -2073,9 +2273,10 @@ public class HierarchicalShardSyncerTest {
          * shards from epoch 206 (open - no ending sequenceNumber)
          * 
    */ - private Set getExpectedLeasesForGraphA(List shards, - ExtendedSequenceNumber sequenceNumber, - InitialPositionInStreamExtended initialPosition) { + private Set getExpectedLeasesForGraphA( + List shards, + ExtendedSequenceNumber sequenceNumber, + InitialPositionInStreamExtended initialPosition) { final List filteredShards; if (initialPosition.getInitialPositionInStream().equals(InitialPositionInStream.AT_TIMESTAMP)) { // Lease creation for AT_TIMESTAMP should work the same as for TRIM_HORIZON - ignore shard filters @@ -2110,7 +2311,8 @@ public class HierarchicalShardSyncerTest { final HashKeyRange hashRange1 = ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY); final HashKeyRange hashRange2 = ShardObjectHelper.newHashKeyRange("0", ShardObjectHelper.MAX_HASH_KEY); - return Arrays.asList(ShardObjectHelper.newShard("shardId-0", null, null, range0, hashRange0), + return Arrays.asList( + ShardObjectHelper.newShard("shardId-0", null, null, range0, hashRange0), ShardObjectHelper.newShard("shardId-1", null, null, range0, hashRange1), ShardObjectHelper.newShard("shardId-2", "shardId-0", "shardId-1", range1, hashRange2), ShardObjectHelper.newShard("shardId-3", "shardId-2", null, range2, hashRange0), @@ -2142,28 +2344,36 @@ public class HierarchicalShardSyncerTest { final SequenceNumberRange range4 = ShardObjectHelper.newSequenceNumberRange("206", null); return Arrays.asList( - ShardObjectHelper.newShard("shardId-0", null, null, range0, - ShardObjectHelper.newHashKeyRange("0", "399")), - ShardObjectHelper.newShard("shardId-1", null, null, range1, - ShardObjectHelper.newHashKeyRange("400", "499")), - ShardObjectHelper.newShard("shardId-2", null, null, range0, - ShardObjectHelper.newHashKeyRange("500", "599")), - ShardObjectHelper.newShard("shardId-3", null, null, range0, + ShardObjectHelper.newShard( + "shardId-0", null, null, range0, ShardObjectHelper.newHashKeyRange("0", "399")), + ShardObjectHelper.newShard( + "shardId-1", null, null, range1, 
ShardObjectHelper.newHashKeyRange("400", "499")), + ShardObjectHelper.newShard( + "shardId-2", null, null, range0, ShardObjectHelper.newHashKeyRange("500", "599")), + ShardObjectHelper.newShard( + "shardId-3", + null, + null, + range0, ShardObjectHelper.newHashKeyRange("600", ShardObjectHelper.MAX_HASH_KEY)), - ShardObjectHelper.newShard("shardId-4", "shardId-0", null, range3, - ShardObjectHelper.newHashKeyRange("0", "199")), - ShardObjectHelper.newShard("shardId-5", "shardId-0", null, range3, - ShardObjectHelper.newHashKeyRange("200", "399")), - ShardObjectHelper.newShard("shardId-6", "shardId-2", "shardId-3", range2, + ShardObjectHelper.newShard( + "shardId-4", "shardId-0", null, range3, ShardObjectHelper.newHashKeyRange("0", "199")), + ShardObjectHelper.newShard( + "shardId-5", "shardId-0", null, range3, ShardObjectHelper.newHashKeyRange("200", "399")), + ShardObjectHelper.newShard( + "shardId-6", + "shardId-2", + "shardId-3", + range2, ShardObjectHelper.newHashKeyRange("500", ShardObjectHelper.MAX_HASH_KEY)), - ShardObjectHelper.newShard("shardId-7", "shardId-4", null, range4, - ShardObjectHelper.newHashKeyRange("0", "99")), - ShardObjectHelper.newShard("shardId-8", "shardId-4", null, range4, - ShardObjectHelper.newHashKeyRange("100", "199")), - ShardObjectHelper.newShard("shardId-9", "shardId-5", null, range4, - ShardObjectHelper.newHashKeyRange("200", "299")), - ShardObjectHelper.newShard("shardId-10", "shardId-5", null, range4, - ShardObjectHelper.newHashKeyRange("300", "399"))); + ShardObjectHelper.newShard( + "shardId-7", "shardId-4", null, range4, ShardObjectHelper.newHashKeyRange("0", "99")), + ShardObjectHelper.newShard( + "shardId-8", "shardId-4", null, range4, ShardObjectHelper.newHashKeyRange("100", "199")), + ShardObjectHelper.newShard( + "shardId-9", "shardId-5", null, range4, ShardObjectHelper.newHashKeyRange("200", "299")), + ShardObjectHelper.newShard( + "shardId-10", "shardId-5", null, range4, ShardObjectHelper.newHashKeyRange("300", "399"))); 
} /** @@ -2173,9 +2383,8 @@ public class HierarchicalShardSyncerTest { public void testCheckIfDescendantAndAddNewLeasesForAncestorsNullShardId() { final MemoizationContext memoizationContext = new MemoizationContext(); - assertFalse(HierarchicalShardSyncer - .checkIfDescendantAndAddNewLeasesForAncestors(null, INITIAL_POSITION_LATEST, null, null, - null, memoizationContext)); + assertFalse(HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors( + null, INITIAL_POSITION_LATEST, null, null, null, memoizationContext)); } /** @@ -2186,9 +2395,8 @@ public class HierarchicalShardSyncerTest { final String shardId = "shardId-trimmed"; final MemoizationContext memoizationContext = new MemoizationContext(); - assertFalse(HierarchicalShardSyncer - .checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, null, - new HashMap<>(), null, memoizationContext)); + assertFalse(HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors( + shardId, INITIAL_POSITION_LATEST, null, new HashMap<>(), null, memoizationContext)); } /** @@ -2203,9 +2411,13 @@ public class HierarchicalShardSyncerTest { final Map kinesisShards = new HashMap<>(); kinesisShards.put(shardId, ShardObjectHelper.newShard(shardId, null, null, null)); - assertTrue( - HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, - shardIdsOfCurrentLeases, kinesisShards, newLeaseMap, memoizationContext)); + assertTrue(HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors( + shardId, + INITIAL_POSITION_LATEST, + shardIdsOfCurrentLeases, + kinesisShards, + newLeaseMap, + memoizationContext)); } /** @@ -2225,9 +2437,13 @@ public class HierarchicalShardSyncerTest { kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null)); kinesisShards.put(shardId, ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null)); - assertFalse( - 
HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, - shardIdsOfCurrentLeases, kinesisShards, newLeaseMap, memoizationContext)); + assertFalse(HierarchicalShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors( + shardId, + INITIAL_POSITION_LATEST, + shardIdsOfCurrentLeases, + kinesisShards, + newLeaseMap, + memoizationContext)); } /** @@ -2235,7 +2451,8 @@ public class HierarchicalShardSyncerTest { */ @Test public void testEmptyLeaseTableBootstrapUsesShardFilterWithAtLatest() throws Exception { - ShardFilter shardFilter = ShardFilter.builder().type(ShardFilterType.AT_LATEST).build(); + ShardFilter shardFilter = + ShardFilter.builder().type(ShardFilterType.AT_LATEST).build(); testEmptyLeaseTableBootstrapUsesListShardsWithFilter(INITIAL_POSITION_LATEST, shardFilter); } @@ -2244,7 +2461,8 @@ public class HierarchicalShardSyncerTest { */ @Test public void testEmptyLeaseTableBootstrapUsesShardFilterWithAtTrimHorizon() throws Exception { - ShardFilter shardFilter = ShardFilter.builder().type(ShardFilterType.AT_TRIM_HORIZON).build(); + ShardFilter shardFilter = + ShardFilter.builder().type(ShardFilterType.AT_TRIM_HORIZON).build(); testEmptyLeaseTableBootstrapUsesListShardsWithFilter(INITIAL_POSITION_TRIM_HORIZON, shardFilter); } @@ -2253,23 +2471,33 @@ public class HierarchicalShardSyncerTest { */ @Test public void testEmptyLeaseTableBootstrapUsesShardFilterWithAtTimestamp() throws Exception { - ShardFilter shardFilter = ShardFilter.builder().type(ShardFilterType.AT_TIMESTAMP).timestamp(new Date(1000L).toInstant()).build(); + ShardFilter shardFilter = ShardFilter.builder() + .type(ShardFilterType.AT_TIMESTAMP) + .timestamp(new Date(1000L).toInstant()) + .build(); testEmptyLeaseTableBootstrapUsesListShardsWithFilter(INITIAL_POSITION_AT_TIMESTAMP, shardFilter); } - public void testEmptyLeaseTableBootstrapUsesListShardsWithFilter(InitialPositionInStreamExtended initialPosition, - ShardFilter shardFilter) throws Exception 
{ + public void testEmptyLeaseTableBootstrapUsesListShardsWithFilter( + InitialPositionInStreamExtended initialPosition, ShardFilter shardFilter) throws Exception { final String shardId0 = "shardId-0"; - final List shards = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, + final List shards = Arrays.asList(ShardObjectHelper.newShard( + shardId0, + null, + null, ShardObjectHelper.newSequenceNumberRange("1", null), ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, ShardObjectHelper.MAX_HASH_KEY))); when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true); when(shardDetector.listShardsWithFilter(shardFilter)).thenReturn(shards); - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, initialPosition, - SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty()); + hierarchicalShardSyncer.checkAndCreateLeaseForNewShards( + shardDetector, + dynamoDBLeaseRefresher, + initialPosition, + SCOPE, + ignoreUnexpectedChildShards, + dynamoDBLeaseRefresher.isLeaseTableEmpty()); verify(shardDetector, atLeast(1)).listShardsWithFilter(shardFilter); verify(shardDetector, never()).listShards(); @@ -2280,20 +2508,26 @@ public class HierarchicalShardSyncerTest { final String shardId0 = "shardId-0"; final String shardId1 = "shardId-1"; - final List shardsWithLeases = Arrays.asList(ShardObjectHelper.newShard(shardId0, null, null, - ShardObjectHelper.newSequenceNumberRange("1", "2"))); - final List shardsWithoutLeases = Arrays.asList(ShardObjectHelper.newShard(shardId1, null, null, - ShardObjectHelper.newSequenceNumberRange("3", "4"))); + final List shardsWithLeases = Arrays.asList( + ShardObjectHelper.newShard(shardId0, null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"))); + final List shardsWithoutLeases = Arrays.asList( + ShardObjectHelper.newShard(shardId1, null, null, ShardObjectHelper.newSequenceNumberRange("3", "4"))); - final List currentLeases = 
createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo"); + final List currentLeases = + createLeasesFromShards(shardsWithLeases, ExtendedSequenceNumber.LATEST, "foo"); when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(false); - when(shardDetector.listShardsWithoutConsumingResourceNotFoundException()).thenReturn(shardsWithoutLeases); + when(shardDetector.listShardsWithoutConsumingResourceNotFoundException()) + .thenReturn(shardsWithoutLeases); when(dynamoDBLeaseRefresher.listLeases()).thenReturn(currentLeases); - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST, - SCOPE, ignoreUnexpectedChildShards, dynamoDBLeaseRefresher.isLeaseTableEmpty()); + hierarchicalShardSyncer.checkAndCreateLeaseForNewShards( + shardDetector, + dynamoDBLeaseRefresher, + INITIAL_POSITION_LATEST, + SCOPE, + ignoreUnexpectedChildShards, + dynamoDBLeaseRefresher.isLeaseTableEmpty()); verify(shardDetector, atLeast(1)).listShardsWithoutConsumingResourceNotFoundException(); } @@ -2305,20 +2539,30 @@ public class HierarchicalShardSyncerTest { @Test(expected = KinesisClientLibIOException.class) public void testEmptyLeaseTableThrowsExceptionWhenHashRangeIsStillIncompleteAfterRetries() throws Exception { final List shardsWithIncompleteHashRange = Arrays.asList( - ShardObjectHelper.newShard("shardId-0", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), + ShardObjectHelper.newShard( + "shardId-0", + null, + null, + ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange("0", "1")), - ShardObjectHelper.newShard("shardId-1", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), - ShardObjectHelper.newHashKeyRange("2", "3")) - ); + ShardObjectHelper.newShard( + "shardId-1", + null, + null, + ShardObjectHelper.newSequenceNumberRange("1", "2"), + ShardObjectHelper.newHashKeyRange("2", "3"))); 
when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true); when(shardDetector.listShardsWithFilter(any(ShardFilter.class))).thenReturn(shardsWithIncompleteHashRange); try { - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST, - SCOPE, ignoreUnexpectedChildShards, - dynamoDBLeaseRefresher.isLeaseTableEmpty()); + hierarchicalShardSyncer.checkAndCreateLeaseForNewShards( + shardDetector, + dynamoDBLeaseRefresher, + INITIAL_POSITION_LATEST, + SCOPE, + ignoreUnexpectedChildShards, + dynamoDBLeaseRefresher.isLeaseTableEmpty()); } finally { verify(shardDetector, times(3)).listShardsWithFilter(any(ShardFilter.class)); // Verify retries. } @@ -2331,26 +2575,45 @@ public class HierarchicalShardSyncerTest { @Test public void testEmptyLeaseTablePopulatesLeasesWithCompleteHashRangeAfterTwoRetries() throws Exception { final List shardsWithIncompleteHashRange = Arrays.asList( - ShardObjectHelper.newShard("shardId-0", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), + ShardObjectHelper.newShard( + "shardId-0", + null, + null, + ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "69")), - ShardObjectHelper.newShard("shardId-1", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), - ShardObjectHelper.newHashKeyRange("71", ShardObjectHelper.MAX_HASH_KEY)) - ); + ShardObjectHelper.newShard( + "shardId-1", + null, + null, + ShardObjectHelper.newSequenceNumberRange("1", "2"), + ShardObjectHelper.newHashKeyRange("71", ShardObjectHelper.MAX_HASH_KEY))); final List shardsWithCompleteHashRange = Arrays.asList( - ShardObjectHelper.newShard("shardId-2", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), + ShardObjectHelper.newShard( + "shardId-2", + null, + null, + ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "420")), - 
ShardObjectHelper.newShard("shardId-3", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), - ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY)) - ); + ShardObjectHelper.newShard( + "shardId-3", + null, + null, + ShardObjectHelper.newSequenceNumberRange("1", "2"), + ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY))); when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true); - when(shardDetector.listShardsWithFilter(any(ShardFilter.class))).thenReturn(shardsWithIncompleteHashRange) - .thenReturn(shardsWithIncompleteHashRange).thenReturn(shardsWithCompleteHashRange); + when(shardDetector.listShardsWithFilter(any(ShardFilter.class))) + .thenReturn(shardsWithIncompleteHashRange) + .thenReturn(shardsWithIncompleteHashRange) + .thenReturn(shardsWithCompleteHashRange); - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST, - SCOPE, ignoreUnexpectedChildShards, - dynamoDBLeaseRefresher.isLeaseTableEmpty()); + hierarchicalShardSyncer.checkAndCreateLeaseForNewShards( + shardDetector, + dynamoDBLeaseRefresher, + INITIAL_POSITION_LATEST, + SCOPE, + ignoreUnexpectedChildShards, + dynamoDBLeaseRefresher.isLeaseTableEmpty()); verify(shardDetector, times(3)).listShardsWithFilter(any(ShardFilter.class)); // Verify retries. 
verify(dynamoDBLeaseRefresher, times(2)).createLeaseIfNotExists(any(Lease.class)); @@ -2362,544 +2625,573 @@ public class HierarchicalShardSyncerTest { @Test public void testEmptyLeaseTablePopulatesLeasesWithCompleteHashRange() throws Exception { final List shardsWithCompleteHashRange = Arrays.asList( - ShardObjectHelper.newShard("shardId-2", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), + ShardObjectHelper.newShard( + "shardId-2", + null, + null, + ShardObjectHelper.newSequenceNumberRange("1", "2"), ShardObjectHelper.newHashKeyRange(ShardObjectHelper.MIN_HASH_KEY, "420")), - ShardObjectHelper.newShard("shardId-3", null, null, ShardObjectHelper.newSequenceNumberRange("1", "2"), - ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY)) - ); + ShardObjectHelper.newShard( + "shardId-3", + null, + null, + ShardObjectHelper.newSequenceNumberRange("1", "2"), + ShardObjectHelper.newHashKeyRange("421", ShardObjectHelper.MAX_HASH_KEY))); when(dynamoDBLeaseRefresher.isLeaseTableEmpty()).thenReturn(true); when(shardDetector.listShardsWithFilter(any(ShardFilter.class))).thenReturn(shardsWithCompleteHashRange); - hierarchicalShardSyncer - .checkAndCreateLeaseForNewShards(shardDetector, dynamoDBLeaseRefresher, INITIAL_POSITION_LATEST, - SCOPE, ignoreUnexpectedChildShards, - dynamoDBLeaseRefresher.isLeaseTableEmpty()); + hierarchicalShardSyncer.checkAndCreateLeaseForNewShards( + shardDetector, + dynamoDBLeaseRefresher, + INITIAL_POSITION_LATEST, + SCOPE, + ignoreUnexpectedChildShards, + dynamoDBLeaseRefresher.isLeaseTableEmpty()); verify(shardDetector, times(1)).listShardsWithFilter(any(ShardFilter.class)); // Verify retries. verify(dynamoDBLeaseRefresher, times(2)).createLeaseIfNotExists(any(Lease.class)); } -// /** -// * Test CheckIfDescendantAndAddNewLeasesForAncestors - two parents, there is a lease for one parent. 
-// */ -// @Test + // /** + // * Test CheckIfDescendantAndAddNewLeasesForAncestors - two parents, there is a lease for one parent. + // */ + // @Test // public void testCheckIfDescendantAndAddNewLeasesForAncestors2P2A1PDescendant() { -// Set shardIdsOfCurrentLeases = new HashSet(); -// Map newLeaseMap = new HashMap(); -// Map kinesisShards = new HashMap(); -// -// String parentShardId = "shardId-parent"; -// kinesisShards.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); -// shardIdsOfCurrentLeases.add(parentShardId); -// -// String adjacentParentShardId = "shardId-adjacentParent"; -// kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null)); -// -// String shardId = "shardId-9-1"; -// Shard shard = ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null); -// kinesisShards.put(shardId, shard); -// -// Map memoizationContext = new HashMap<>(); -// assertTrue(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, -// shardIdsOfCurrentLeases, -// kinesisShards, -// newLeaseMap, -// memoizationContext)); -// assertEquals(1, newLeaseMap.size()); -// assertTrue(newLeaseMap.containsKey(adjacentParentShardId)); -// Lease adjacentParentLease = newLeaseMap.get(adjacentParentShardId); -// assertEquals(ExtendedSequenceNumber.LATEST, adjacentParentLease.checkpoint()); -// } -// -// /** -// * Test parentShardIds() when the shard has no parents. 
-// */ -// @Test + // Set shardIdsOfCurrentLeases = new HashSet(); + // Map newLeaseMap = new HashMap(); + // Map kinesisShards = new HashMap(); + // + // String parentShardId = "shardId-parent"; + // kinesisShards.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); + // shardIdsOfCurrentLeases.add(parentShardId); + // + // String adjacentParentShardId = "shardId-adjacentParent"; + // kinesisShards.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, + // null)); + // + // String shardId = "shardId-9-1"; + // Shard shard = ShardObjectHelper.newShard(shardId, parentShardId, adjacentParentShardId, null); + // kinesisShards.put(shardId, shard); + // + // Map memoizationContext = new HashMap<>(); + // assertTrue(ShardSyncer.checkIfDescendantAndAddNewLeasesForAncestors(shardId, INITIAL_POSITION_LATEST, + // shardIdsOfCurrentLeases, + // kinesisShards, + // newLeaseMap, + // memoizationContext)); + // assertEquals(1, newLeaseMap.size()); + // assertTrue(newLeaseMap.containsKey(adjacentParentShardId)); + // Lease adjacentParentLease = newLeaseMap.get(adjacentParentShardId); + // assertEquals(ExtendedSequenceNumber.LATEST, adjacentParentLease.checkpoint()); + // } + // + // /** + // * Test parentShardIds() when the shard has no parents. + // */ + // @Test // public void testGetParentShardIdsNoParents() { -// Shard shard = new Shard(); -// assertTrue(ShardSyncer.getParentShardIds(shard, null).isEmpty()); -// } -// -// /** -// * Test parentShardIds() when the shard has no parents. -// */ -// @Test + // Shard shard = new Shard(); + // assertTrue(ShardSyncer.getParentShardIds(shard, null).isEmpty()); + // } + // + // /** + // * Test parentShardIds() when the shard has no parents. 
+ // */ + // @Test // public void testGetParentShardIdsTrimmedParents() { -// Map shardMap = new HashMap(); -// Shard shard = ShardObjectHelper.newShard("shardId-test", "foo", "bar", null); -// assertTrue(ShardSyncer.getParentShardIds(shard, shardMap).isEmpty()); -// } -// -// /** -// * Test parentShardIds() when the shard has a single parent. -// */ -// @Test + // Map shardMap = new HashMap(); + // Shard shard = ShardObjectHelper.newShard("shardId-test", "foo", "bar", null); + // assertTrue(ShardSyncer.getParentShardIds(shard, shardMap).isEmpty()); + // } + // + // /** + // * Test parentShardIds() when the shard has a single parent. + // */ + // @Test // public void testGetParentShardIdsSingleParent() { -// Map shardMap = new HashMap(); -// -// String parentShardId = "shardId-parent"; -// shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); -// -// Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, null, null); -// Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertEquals(1, parentShardIds.size()); -// assertTrue(parentShardIds.contains(parentShardId)); -// -// shard.setParentShardId(null); -// parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertTrue(parentShardIds.isEmpty()); -// -// shard.setAdjacentParentShardId(parentShardId); -// parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertEquals(1, parentShardIds.size()); -// assertTrue(parentShardIds.contains(parentShardId)); -// } -// -// /** -// * Test parentShardIds() when the shard has two parents, one is trimmed. 
-// */ -// @Test + // Map shardMap = new HashMap(); + // + // String parentShardId = "shardId-parent"; + // shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); + // + // Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, null, null); + // Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + // assertEquals(1, parentShardIds.size()); + // assertTrue(parentShardIds.contains(parentShardId)); + // + // shard.setParentShardId(null); + // parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + // assertTrue(parentShardIds.isEmpty()); + // + // shard.setAdjacentParentShardId(parentShardId); + // parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + // assertEquals(1, parentShardIds.size()); + // assertTrue(parentShardIds.contains(parentShardId)); + // } + // + // /** + // * Test parentShardIds() when the shard has two parents, one is trimmed. + // */ + // @Test // public void testGetParentShardIdsOneTrimmedParent() { -// Map shardMap = new HashMap(); -// -// String parentShardId = "shardId-parent"; -// Shard parent = ShardObjectHelper.newShard(parentShardId, null, null, null); -// -// String adjacentParentShardId = "shardId-adjacentParent"; -// Shard adjacentParent = ShardObjectHelper.newShard(adjacentParentShardId, null, null, null); -// -// Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null); -// -// shardMap.put(parentShardId, parent); -// Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertEquals(1, parentShardIds.size()); -// assertTrue(parentShardIds.contains(parentShardId)); -// -// shardMap.remove(parentShardId); -// parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertTrue(parentShardIds.isEmpty()); -// -// shardMap.put(adjacentParentShardId, adjacentParent); -// parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertEquals(1, 
parentShardIds.size()); -// assertTrue(parentShardIds.contains(adjacentParentShardId)); -// } -// -// /** -// * Test parentShardIds() when the shard has two parents. -// */ -// @Test + // Map shardMap = new HashMap(); + // + // String parentShardId = "shardId-parent"; + // Shard parent = ShardObjectHelper.newShard(parentShardId, null, null, null); + // + // String adjacentParentShardId = "shardId-adjacentParent"; + // Shard adjacentParent = ShardObjectHelper.newShard(adjacentParentShardId, null, null, null); + // + // Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null); + // + // shardMap.put(parentShardId, parent); + // Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + // assertEquals(1, parentShardIds.size()); + // assertTrue(parentShardIds.contains(parentShardId)); + // + // shardMap.remove(parentShardId); + // parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + // assertTrue(parentShardIds.isEmpty()); + // + // shardMap.put(adjacentParentShardId, adjacentParent); + // parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + // assertEquals(1, parentShardIds.size()); + // assertTrue(parentShardIds.contains(adjacentParentShardId)); + // } + // + // /** + // * Test parentShardIds() when the shard has two parents. 
+ // */ + // @Test // public void testGetParentShardIdsTwoParents() { -// Map shardMap = new HashMap(); -// -// String parentShardId = "shardId-parent"; -// shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); -// -// String adjacentParentShardId = "shardId-adjacentParent"; -// shardMap.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null)); -// -// Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null); -// -// Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); -// assertEquals(2, parentShardIds.size()); -// assertTrue(parentShardIds.contains(parentShardId)); -// assertTrue(parentShardIds.contains(adjacentParentShardId)); -// } -// -// /** -// */ -// @Test + // Map shardMap = new HashMap(); + // + // String parentShardId = "shardId-parent"; + // shardMap.put(parentShardId, ShardObjectHelper.newShard(parentShardId, null, null, null)); + // + // String adjacentParentShardId = "shardId-adjacentParent"; + // shardMap.put(adjacentParentShardId, ShardObjectHelper.newShard(adjacentParentShardId, null, null, null)); + // + // Shard shard = ShardObjectHelper.newShard("shardId-test", parentShardId, adjacentParentShardId, null); + // + // Set parentShardIds = ShardSyncer.getParentShardIds(shard, shardMap); + // assertEquals(2, parentShardIds.size()); + // assertTrue(parentShardIds.contains(parentShardId)); + // assertTrue(parentShardIds.contains(adjacentParentShardId)); + // } + // + // /** + // */ + // @Test // public void testNewLease() { -// Shard shard = new Shard(); -// String shardId = "shardId-95"; -// shard.setShardId(shardId); -// String parentShardId = "shardId-parent"; -// String adjacentParentShardId = "shardId-adjacentParent"; -// shard.setParentShardId(parentShardId); -// shard.setAdjacentParentShardId(adjacentParentShardId); -// -// Lease lease = ShardSyncer.newKCLLease(shard); -// assertEquals(shardId, 
lease.leaseKey()); -// assertNull(lease.checkpoint()); -// Set parentIds = lease.parentShardIds(); -// assertEquals(2, parentIds.size()); -// assertTrue(parentIds.contains(parentShardId)); -// assertTrue(parentIds.contains(adjacentParentShardId)); -// } -// -// /** -// * Test method for constructShardIdToShardMap. -// * -// * . -// */ -// @Test + // Shard shard = new Shard(); + // String shardId = "shardId-95"; + // shard.setShardId(shardId); + // String parentShardId = "shardId-parent"; + // String adjacentParentShardId = "shardId-adjacentParent"; + // shard.setParentShardId(parentShardId); + // shard.setAdjacentParentShardId(adjacentParentShardId); + // + // Lease lease = ShardSyncer.newKCLLease(shard); + // assertEquals(shardId, lease.leaseKey()); + // assertNull(lease.checkpoint()); + // Set parentIds = lease.parentShardIds(); + // assertEquals(2, parentIds.size()); + // assertTrue(parentIds.contains(parentShardId)); + // assertTrue(parentIds.contains(adjacentParentShardId)); + // } + // + // /** + // * Test method for constructShardIdToShardMap. + // * + // * . + // */ + // @Test // public void testConstructShardIdToShardMap() { -// List shards = new ArrayList(2); -// shards.add(ShardObjectHelper.newShard("shardId-0", null, null, null)); -// shards.add(ShardObjectHelper.newShard("shardId-1", null, null, null)); -// -// Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); -// assertEquals(shards.size(), shardIdToShardMap.size()); -// for (Shard shard : shards) { -// assertSame(shard, shardIdToShardMap.get(shard.getShardId())); -// } -// } -// -// /** -// * Test getOpenShards() - no shards are open. 
-// */ -// @Test + // List shards = new ArrayList(2); + // shards.add(ShardObjectHelper.newShard("shardId-0", null, null, null)); + // shards.add(ShardObjectHelper.newShard("shardId-1", null, null, null)); + // + // Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); + // assertEquals(shards.size(), shardIdToShardMap.size()); + // for (Shard shard : shards) { + // assertSame(shard, shardIdToShardMap.get(shard.getShardId())); + // } + // } + // + // /** + // * Test getOpenShards() - no shards are open. + // */ + // @Test // public void testGetOpenShardsNoneOpen() { -// List shards = new ArrayList(); -// shards.add(ShardObjectHelper.newShard("shardId-9384", -// null, -// null, -// ShardObjectHelper.newSequenceNumberRange("123", "345"))); -// assertTrue(ShardSyncer.getOpenShards(shards).isEmpty()); -// } -// -// /** -// * Test getOpenShards() - test null and max end sequence number. -// */ -// @Test + // List shards = new ArrayList(); + // shards.add(ShardObjectHelper.newShard("shardId-9384", + // null, + // null, + // ShardObjectHelper.newSequenceNumberRange("123", "345"))); + // assertTrue(ShardSyncer.getOpenShards(shards).isEmpty()); + // } + // + // /** + // * Test getOpenShards() - test null and max end sequence number. 
+ // */ + // @Test // public void testGetOpenShardsNullAndMaxEndSeqNum() { -// List shards = new ArrayList(); -// String shardId = "shardId-2738"; -// SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("123", null); -// shards.add(ShardObjectHelper.newShard(shardId, null, null, sequenceNumberRange)); -// -// // Verify shard is considered open when it has a null end sequence number -// List openShards = ShardSyncer.getOpenShards(shards); -// assertEquals(1, openShards.size()); -// assertEquals(shardId, openShards.get(0).getShardId()); -// -// // Close shard before testing for max sequence number -// sequenceNumberRange.setEndingSequenceNumber("1000"); -// openShards = ShardSyncer.getOpenShards(shards); -// assertTrue(openShards.isEmpty()); -// -// // Verify shard is considered closed when the end sequence number is set to max allowed sequence number -// sequenceNumberRange.setEndingSequenceNumber(MAX_SEQUENCE_NUMBER.toString()); -// openShards = ShardSyncer.getOpenShards(shards); -// assertEquals(0, openShards.size()); -// } -// -// /** -// * Test isCandidateForCleanup -// * -// * @throws KinesisClientLibIOException -// */ -// @Test + // List shards = new ArrayList(); + // String shardId = "shardId-2738"; + // SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("123", null); + // shards.add(ShardObjectHelper.newShard(shardId, null, null, sequenceNumberRange)); + // + // // Verify shard is considered open when it has a null end sequence number + // List openShards = ShardSyncer.getOpenShards(shards); + // assertEquals(1, openShards.size()); + // assertEquals(shardId, openShards.get(0).getShardId()); + // + // // Close shard before testing for max sequence number + // sequenceNumberRange.setEndingSequenceNumber("1000"); + // openShards = ShardSyncer.getOpenShards(shards); + // assertTrue(openShards.isEmpty()); + // + // // Verify shard is considered closed when the end sequence number is set to max allowed 
sequence number + // sequenceNumberRange.setEndingSequenceNumber(MAX_SEQUENCE_NUMBER.toString()); + // openShards = ShardSyncer.getOpenShards(shards); + // assertEquals(0, openShards.size()); + // } + // + // /** + // * Test isCandidateForCleanup + // * + // * @throws KinesisClientLibIOException + // */ + // @Test // public void testIsCandidateForCleanup() throws KinesisClientLibIOException { -// String parentShardId = "shardId-0000"; -// String adjacentParentShardId = "shardId-0001"; -// String shardId = "shardId-0002"; -// Lease lease = newLease(shardId); -// List parentShardIds = new ArrayList<>(); -// parentShardIds.add(parentShardId); -// parentShardIds.add(adjacentParentShardId); -// lease.parentShardIds(parentShardIds); -// Set currentKinesisShardIds = new HashSet<>(); -// -// currentKinesisShardIds.add(shardId); -// assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// -// currentKinesisShardIds.clear(); -// assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// -// currentKinesisShardIds.add(parentShardId); -// // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// -// currentKinesisShardIds.clear(); -// assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// -// currentKinesisShardIds.add(adjacentParentShardId); -// // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// currentKinesisShardIds.add(parentShardId); -// // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// currentKinesisShardIds.add(shardId); -// assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// } -// -// /** -// * Test isCandidateForCleanup -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = KinesisClientLibIOException.class) + // String parentShardId = "shardId-0000"; + // String adjacentParentShardId = "shardId-0001"; + // String shardId = "shardId-0002"; + // 
Lease lease = newLease(shardId); + // List parentShardIds = new ArrayList<>(); + // parentShardIds.add(parentShardId); + // parentShardIds.add(adjacentParentShardId); + // lease.parentShardIds(parentShardIds); + // Set currentKinesisShardIds = new HashSet<>(); + // + // currentKinesisShardIds.add(shardId); + // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + // + // currentKinesisShardIds.clear(); + // assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + // + // currentKinesisShardIds.add(parentShardId); + // // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + // + // currentKinesisShardIds.clear(); + // assertTrue(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + // + // currentKinesisShardIds.add(adjacentParentShardId); + // // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + // currentKinesisShardIds.add(parentShardId); + // // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + // currentKinesisShardIds.add(shardId); + // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + // } + // + // /** + // * Test isCandidateForCleanup + // * + // * @throws KinesisClientLibIOException + // */ + // @Test(expected = KinesisClientLibIOException.class) // public void testIsCandidateForCleanupParentExists() throws KinesisClientLibIOException { -// String parentShardId = "shardId-0000"; -// String adjacentParentShardId = "shardId-0001"; -// String shardId = "shardId-0002"; -// Lease lease = newLease(shardId); -// List parentShardIds = new ArrayList<>(); -// parentShardIds.add(parentShardId); -// parentShardIds.add(adjacentParentShardId); -// lease.parentShardIds(parentShardIds); -// Set currentKinesisShardIds = new HashSet<>(); -// -// currentKinesisShardIds.add(parentShardId); -// assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// } -// -// /** -// 
* Test isCandidateForCleanup -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = KinesisClientLibIOException.class) + // String parentShardId = "shardId-0000"; + // String adjacentParentShardId = "shardId-0001"; + // String shardId = "shardId-0002"; + // Lease lease = newLease(shardId); + // List parentShardIds = new ArrayList<>(); + // parentShardIds.add(parentShardId); + // parentShardIds.add(adjacentParentShardId); + // lease.parentShardIds(parentShardIds); + // Set currentKinesisShardIds = new HashSet<>(); + // + // currentKinesisShardIds.add(parentShardId); + // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + // } + // + // /** + // * Test isCandidateForCleanup + // * + // * @throws KinesisClientLibIOException + // */ + // @Test(expected = KinesisClientLibIOException.class) // public void testIsCandidateForCleanupAdjacentParentExists() throws KinesisClientLibIOException { -// String parentShardId = "shardId-0000"; -// String adjacentParentShardId = "shardId-0001"; -// String shardId = "shardId-0002"; -// Lease lease = newLease(shardId); -// List parentShardIds = new ArrayList<>(); -// parentShardIds.add(parentShardId); -// parentShardIds.add(adjacentParentShardId); -// lease.parentShardIds(parentShardIds); -// Set currentKinesisShardIds = new HashSet<>(); -// -// currentKinesisShardIds.add(adjacentParentShardId); -// assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); -// } -// -// /** -// * Test cleanup of lease for a shard that has been fully processed (and processing of child shards has begun). 
-// * -// * @throws DependencyException -// * @throws InvalidStateException -// * @throws ProvisionedThroughputException -// */ -// @Test + // String parentShardId = "shardId-0000"; + // String adjacentParentShardId = "shardId-0001"; + // String shardId = "shardId-0002"; + // Lease lease = newLease(shardId); + // List parentShardIds = new ArrayList<>(); + // parentShardIds.add(parentShardId); + // parentShardIds.add(adjacentParentShardId); + // lease.parentShardIds(parentShardIds); + // Set currentKinesisShardIds = new HashSet<>(); + // + // currentKinesisShardIds.add(adjacentParentShardId); + // assertFalse(ShardSyncer.isCandidateForCleanup(lease, currentKinesisShardIds)); + // } + // + // /** + // * Test cleanup of lease for a shard that has been fully processed (and processing of child shards has begun). + // * + // * @throws DependencyException + // * @throws InvalidStateException + // * @throws ProvisionedThroughputException + // */ + // @Test // public void testCleanupLeaseForClosedShard() -// throws DependencyException, InvalidStateException, ProvisionedThroughputException { -// String closedShardId = "shardId-2"; -// Lease leaseForClosedShard = newLease(closedShardId); -// leaseForClosedShard.checkpoint(new ExtendedSequenceNumber("1234")); -// dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseForClosedShard); -// -// Set childShardIds = new HashSet<>(); -// List trackedLeases = new ArrayList<>(); -// Set parentShardIds = new HashSet<>(); -// parentShardIds.add(closedShardId); -// String childShardId1 = "shardId-5"; -// Lease childLease1 = newLease(childShardId1); -// childLease1.parentShardIds(parentShardIds); -// childLease1.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); -// String childShardId2 = "shardId-7"; -// Lease childLease2 = newLease(childShardId2); -// childLease2.parentShardIds(parentShardIds); -// childLease2.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); -// Map trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); 
-// -// // empty list of leases -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // closed shard has not been fully processed yet (checkpoint != SHARD_END) -// trackedLeases.add(leaseForClosedShard); -// trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // closed shard has been fully processed yet (checkpoint == SHARD_END) -// leaseForClosedShard.checkpoint(ExtendedSequenceNumber.SHARD_END); -// dynamoDBLeaseRefresher.updateLease(leaseForClosedShard); -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // lease for only one child exists -// childShardIds.add(childShardId1); -// childShardIds.add(childShardId2); -// dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseForClosedShard); -// dynamoDBLeaseRefresher.createLeaseIfNotExists(childLease1); -// trackedLeases.add(childLease1); -// trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // leases for both children exists, but they are both at TRIM_HORIZON -// dynamoDBLeaseRefresher.createLeaseIfNotExists(childLease2); -// trackedLeases.add(childLease2); -// trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // 
leases for both children exists, one is at TRIM_HORIZON -// childLease1.checkpoint(new ExtendedSequenceNumber("34890")); -// dynamoDBLeaseRefresher.updateLease(childLease1); -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// -// // leases for both children exists, NONE of them are at TRIM_HORIZON -// childLease2.checkpoint(new ExtendedSequenceNumber("43789")); -// dynamoDBLeaseRefresher.updateLease(childLease2); -// ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, dynamoDBLeaseRefresher); -// assertNull(dynamoDBLeaseRefresher.getLease(closedShardId)); -// } -// -// /** -// * Test we can handle trimmed Kinesis shards (absent from the shard list), and valid closed shards. -// * -// * @throws KinesisClientLibIOException -// */ -// @Test + // throws DependencyException, InvalidStateException, ProvisionedThroughputException { + // String closedShardId = "shardId-2"; + // Lease leaseForClosedShard = newLease(closedShardId); + // leaseForClosedShard.checkpoint(new ExtendedSequenceNumber("1234")); + // dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseForClosedShard); + // + // Set childShardIds = new HashSet<>(); + // List trackedLeases = new ArrayList<>(); + // Set parentShardIds = new HashSet<>(); + // parentShardIds.add(closedShardId); + // String childShardId1 = "shardId-5"; + // Lease childLease1 = newLease(childShardId1); + // childLease1.parentShardIds(parentShardIds); + // childLease1.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + // String childShardId2 = "shardId-7"; + // Lease childLease2 = newLease(childShardId2); + // childLease2.parentShardIds(parentShardIds); + // childLease2.checkpoint(ExtendedSequenceNumber.TRIM_HORIZON); + // Map trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); + // + // // empty list of leases + // 
ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, + // dynamoDBLeaseRefresher); + // assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); + // + // // closed shard has not been fully processed yet (checkpoint != SHARD_END) + // trackedLeases.add(leaseForClosedShard); + // trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); + // ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, + // dynamoDBLeaseRefresher); + // assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); + // + // // closed shard has been fully processed yet (checkpoint == SHARD_END) + // leaseForClosedShard.checkpoint(ExtendedSequenceNumber.SHARD_END); + // dynamoDBLeaseRefresher.updateLease(leaseForClosedShard); + // ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, + // dynamoDBLeaseRefresher); + // assertNull(dynamoDBLeaseRefresher.getLease(closedShardId)); + // + // // lease for only one child exists + // childShardIds.add(childShardId1); + // childShardIds.add(childShardId2); + // dynamoDBLeaseRefresher.createLeaseIfNotExists(leaseForClosedShard); + // dynamoDBLeaseRefresher.createLeaseIfNotExists(childLease1); + // trackedLeases.add(childLease1); + // trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); + // ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, + // dynamoDBLeaseRefresher); + // assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); + // + // // leases for both children exists, but they are both at TRIM_HORIZON + // dynamoDBLeaseRefresher.createLeaseIfNotExists(childLease2); + // trackedLeases.add(childLease2); + // trackedLeaseMap = ShardSyncer.constructShardIdToKCLLeaseMap(trackedLeases); + // ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, + // dynamoDBLeaseRefresher); + // 
assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); + // + // // leases for both children exists, one is at TRIM_HORIZON + // childLease1.checkpoint(new ExtendedSequenceNumber("34890")); + // dynamoDBLeaseRefresher.updateLease(childLease1); + // ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, + // dynamoDBLeaseRefresher); + // assertNotNull(dynamoDBLeaseRefresher.getLease(closedShardId)); + // + // // leases for both children exists, NONE of them are at TRIM_HORIZON + // childLease2.checkpoint(new ExtendedSequenceNumber("43789")); + // dynamoDBLeaseRefresher.updateLease(childLease2); + // ShardSyncer.cleanupLeaseForClosedShard(closedShardId, childShardIds, trackedLeaseMap, + // dynamoDBLeaseRefresher); + // assertNull(dynamoDBLeaseRefresher.getLease(closedShardId)); + // } + // + // /** + // * Test we can handle trimmed Kinesis shards (absent from the shard list), and valid closed shards. + // * + // * @throws KinesisClientLibIOException + // */ + // @Test // public void testAssertShardCoveredOrAbsentTestAbsentAndValid() throws KinesisClientLibIOException { -// List shards = new ArrayList<>(); -// String expectedClosedShardId = "shardId-34098"; -// SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); -// HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); -// Shard closedShard = -// ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); -// SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300"); -// Shard child1 = -// ShardObjectHelper.newShard("shardId-54879", expectedClosedShardId, null, childSequenceNumberRange); -// Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); -// Map> shardIdToChildShardIdsMap = -// ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// Set closedShardIds = new HashSet<>(); -// 
closedShardIds.add(expectedClosedShardId); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// -// // test for case where shard has been trimmed (absent from list) -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// -// // Populate shards. -// shards.add(closedShard); -// shards.add(child1); -// shardIdToShardMap.put(expectedClosedShardId, closedShard); -// shardIdToShardMap.put(child1.getShardId(), child1); -// shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// -// // test degenerate split/merge -// child1.setHashKeyRange(hashKeyRange); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// -// // test merge -// child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("10", "2985")); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("3", "25")); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// -// // test split -// HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15"); -// HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25"); -// child1.setHashKeyRange(childHashKeyRange1); -// Shard child2 = ShardObjectHelper.newShard("shardId-43789", -// null, -// expectedClosedShardId, -// childSequenceNumberRange, -// childHashKeyRange2); -// shards.add(child2); -// shardIdToShardMap.put(child2.getShardId(), child2); -// shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// } -// -// /** -// * Test we throw an exception if the 
shard is open -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = KinesisClientLibIOException.class) + // List shards = new ArrayList<>(); + // String expectedClosedShardId = "shardId-34098"; + // SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); + // HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); + // Shard closedShard = + // ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); + // SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300"); + // Shard child1 = + // ShardObjectHelper.newShard("shardId-54879", expectedClosedShardId, null, + // childSequenceNumberRange); + // Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); + // Map> shardIdToChildShardIdsMap = + // ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + // Set closedShardIds = new HashSet<>(); + // closedShardIds.add(expectedClosedShardId); + // ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, + // closedShardIds); + // + // // test for case where shard has been trimmed (absent from list) + // ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, + // closedShardIds); + // + // // Populate shards. 
+ // shards.add(closedShard); + // shards.add(child1); + // shardIdToShardMap.put(expectedClosedShardId, closedShard); + // shardIdToShardMap.put(child1.getShardId(), child1); + // shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + // + // // test degenerate split/merge + // child1.setHashKeyRange(hashKeyRange); + // ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, + // closedShardIds); + // + // // test merge + // child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("10", "2985")); + // ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, + // closedShardIds); + // child1.setHashKeyRange(ShardObjectHelper.newHashKeyRange("3", "25")); + // ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, + // closedShardIds); + // + // // test split + // HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15"); + // HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25"); + // child1.setHashKeyRange(childHashKeyRange1); + // Shard child2 = ShardObjectHelper.newShard("shardId-43789", + // null, + // expectedClosedShardId, + // childSequenceNumberRange, + // childHashKeyRange2); + // shards.add(child2); + // shardIdToShardMap.put(child2.getShardId(), child2); + // shardIdToChildShardIdsMap = ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + // ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, + // closedShardIds); + // } + // + // /** + // * Test we throw an exception if the shard is open + // * + // * @throws KinesisClientLibIOException + // */ + // @Test(expected = KinesisClientLibIOException.class) // public void testAssertShardCoveredOrAbsentTestOpen() throws KinesisClientLibIOException { -// List shards = new ArrayList<>(); -// String expectedClosedShardId = "shardId-34098"; -// 
SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", null); -// HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); -// Shard openShard = -// ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); -// shards.add(openShard); -// Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); -// Map> shardIdToChildShardIdsMap = -// ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// Set closedShardIds = new HashSet<>(); -// closedShardIds.add(expectedClosedShardId); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// } -// -// /** -// * Test we throw an exception if there are no children -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = KinesisClientLibIOException.class) + // List shards = new ArrayList<>(); + // String expectedClosedShardId = "shardId-34098"; + // SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", null); + // HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); + // Shard openShard = + // ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); + // shards.add(openShard); + // Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); + // Map> shardIdToChildShardIdsMap = + // ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + // Set closedShardIds = new HashSet<>(); + // closedShardIds.add(expectedClosedShardId); + // ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, + // closedShardIds); + // } + // + // /** + // * Test we throw an exception if there are no children + // * + // * @throws KinesisClientLibIOException + // */ + // @Test(expected = KinesisClientLibIOException.class) // public void testAssertShardCoveredOrAbsentTestNoChildren() throws 
KinesisClientLibIOException { -// List shards = new ArrayList<>(); -// String expectedClosedShardId = "shardId-34098"; -// SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); -// HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); -// Shard closedShard = -// ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); -// shards.add(closedShard); -// Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); -// Map> shardIdToChildShardIdsMap = -// ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// Set closedShardIds = new HashSet<>(); -// closedShardIds.add(expectedClosedShardId); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// } -// -// /** -// * Test we throw an exception if children don't cover hash key range (min of children > min of parent) -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = KinesisClientLibIOException.class) + // List shards = new ArrayList<>(); + // String expectedClosedShardId = "shardId-34098"; + // SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); + // HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); + // Shard closedShard = + // ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, hashKeyRange); + // shards.add(closedShard); + // Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); + // Map> shardIdToChildShardIdsMap = + // ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + // Set closedShardIds = new HashSet<>(); + // closedShardIds.add(expectedClosedShardId); + // ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, + // closedShardIds); + // } + // + // /** + // * Test we throw an exception if children don't cover hash key 
range (min of children > min of parent) + // * + // * @throws KinesisClientLibIOException + // */ + // @Test(expected = KinesisClientLibIOException.class) // public void testAssertShardCoveredOrAbsentTestIncompleteSplitMin() throws KinesisClientLibIOException { -// HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); -// HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("12", "15"); -// HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25"); -// testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2); -// } -// -// /** -// * Test we throw an exception if children don't cover hash key range (max of children < max of parent) -// * -// * @throws KinesisClientLibIOException -// */ -// @Test(expected = KinesisClientLibIOException.class) + // HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); + // HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("12", "15"); + // HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "25"); + // testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2); + // } + // + // /** + // * Test we throw an exception if children don't cover hash key range (max of children < max of parent) + // * + // * @throws KinesisClientLibIOException + // */ + // @Test(expected = KinesisClientLibIOException.class) // public void testAssertShardCoveredOrAbsentTestIncompleteSplitMax() throws KinesisClientLibIOException { -// HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); -// HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15"); -// HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "23"); -// testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2); -// } -// -// private void 
testAssertShardCoveredOrAbsentTestIncompleteSplit(HashKeyRange parentHashKeyRange, -// HashKeyRange child1HashKeyRange, -// HashKeyRange child2HashKeyRange) -// throws KinesisClientLibIOException { -// List shards = new ArrayList<>(); -// String expectedClosedShardId = "shardId-34098"; -// SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); -// Shard closedShard = -// ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, parentHashKeyRange); -// shards.add(closedShard); -// -// SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300"); -// Shard child1 = ShardObjectHelper.newShard("shardId-43789", -// null, -// expectedClosedShardId, -// childSequenceNumberRange, -// child1HashKeyRange); -// shards.add(child1); -// Shard child2 = ShardObjectHelper.newShard("shardId-43789", -// null, -// expectedClosedShardId, -// childSequenceNumberRange, -// child2HashKeyRange); -// shards.add(child2); -// -// Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); -// Map> shardIdToChildShardIdsMap = -// ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); -// Set closedShardIds = new HashSet<>(); -// closedShardIds.add(expectedClosedShardId); -// ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, closedShardIds); -// } -// + // HashKeyRange hashKeyRange = ShardObjectHelper.newHashKeyRange("10", "25"); + // HashKeyRange childHashKeyRange1 = ShardObjectHelper.newHashKeyRange("10", "15"); + // HashKeyRange childHashKeyRange2 = ShardObjectHelper.newHashKeyRange("16", "23"); + // testAssertShardCoveredOrAbsentTestIncompleteSplit(hashKeyRange, childHashKeyRange1, childHashKeyRange2); + // } + // + // private void testAssertShardCoveredOrAbsentTestIncompleteSplit(HashKeyRange parentHashKeyRange, + // HashKeyRange child1HashKeyRange, + // HashKeyRange child2HashKeyRange) + // throws 
KinesisClientLibIOException { + // List shards = new ArrayList<>(); + // String expectedClosedShardId = "shardId-34098"; + // SequenceNumberRange sequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("103", "205"); + // Shard closedShard = + // ShardObjectHelper.newShard(expectedClosedShardId, null, null, sequenceNumberRange, + // parentHashKeyRange); + // shards.add(closedShard); + // + // SequenceNumberRange childSequenceNumberRange = ShardObjectHelper.newSequenceNumberRange("206", "300"); + // Shard child1 = ShardObjectHelper.newShard("shardId-43789", + // null, + // expectedClosedShardId, + // childSequenceNumberRange, + // child1HashKeyRange); + // shards.add(child1); + // Shard child2 = ShardObjectHelper.newShard("shardId-43789", + // null, + // expectedClosedShardId, + // childSequenceNumberRange, + // child2HashKeyRange); + // shards.add(child2); + // + // Map shardIdToShardMap = ShardSyncer.constructShardIdToShardMap(shards); + // Map> shardIdToChildShardIdsMap = + // ShardSyncer.constructShardIdToChildShardIdsMap(shardIdToShardMap); + // Set closedShardIds = new HashSet<>(); + // closedShardIds.add(expectedClosedShardId); + // ShardSyncer.assertClosedShardsAreCoveredOrAbsent(shardIdToShardMap, shardIdToChildShardIdsMap, + // closedShardIds); + // } + // /** * Helper method. 
*/ diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/KinesisShardDetectorTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/KinesisShardDetectorTest.java index d0870d51..9c855c4b 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/KinesisShardDetectorTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/KinesisShardDetectorTest.java @@ -15,6 +15,15 @@ package software.amazon.kinesis.leases; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -31,15 +40,6 @@ import software.amazon.awssdk.services.kinesis.model.ResourceInUseException; import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; import software.amazon.awssdk.services.kinesis.model.Shard; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.isA; import static org.hamcrest.CoreMatchers.nullValue; @@ -72,21 +72,29 @@ public class KinesisShardDetectorTest { @Mock private KinesisAsyncClient client; + @Mock private CompletableFuture mockFuture; @Before public void setup() { - shardDetector = new KinesisShardDetector(client, STREAM_NAME, LIST_SHARDS_BACKOFF_TIME_IN_MILLIS, - MAX_LIST_SHARDS_RETRY_ATTEMPTS, LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS, - MAX_CACHE_MISSES_BEFORE_RELOAD, CACHE_MISS_WARNING_MODULUS); + shardDetector = new KinesisShardDetector( 
+ client, + STREAM_NAME, + LIST_SHARDS_BACKOFF_TIME_IN_MILLIS, + MAX_LIST_SHARDS_RETRY_ATTEMPTS, + LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS, + MAX_CACHE_MISSES_BEFORE_RELOAD, + CACHE_MISS_WARNING_MODULUS); } @Test public void testListShardsSingleResponse() { final List expectedShards = new ArrayList<>(); - final ListShardsResponse listShardsResponse = ListShardsResponse.builder().nextToken(null) - .shards(expectedShards).build(); + final ListShardsResponse listShardsResponse = ListShardsResponse.builder() + .nextToken(null) + .shards(expectedShards) + .build(); final CompletableFuture future = CompletableFuture.completedFuture(listShardsResponse); when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); @@ -106,8 +114,7 @@ public class KinesisShardDetectorTest { try { shardDetector.listShards(); } finally { - verify(client, times(MAX_LIST_SHARDS_RETRY_ATTEMPTS)) - .listShards(any(ListShardsRequest.class)); + verify(client, times(MAX_LIST_SHARDS_RETRY_ATTEMPTS)).listShards(any(ListShardsRequest.class)); } } @@ -123,7 +130,6 @@ public class KinesisShardDetectorTest { assertThat(shards, nullValue()); verify(client).listShards(any(ListShardsRequest.class)); - } @Test(expected = LimitExceededException.class) @@ -137,8 +143,7 @@ public class KinesisShardDetectorTest { try { shardDetector.listShards(); } finally { - verify(client, times(MAX_LIST_SHARDS_RETRY_ATTEMPTS)) - .listShards(any(ListShardsRequest.class)); + verify(client, times(MAX_LIST_SHARDS_RETRY_ATTEMPTS)).listShards(any(ListShardsRequest.class)); } } @@ -165,7 +170,6 @@ public class KinesisShardDetectorTest { when(client.listShards(any(ListShardsRequest.class))).thenReturn(mockFuture); shardDetector.listShards(); - } @Test @@ -183,8 +187,8 @@ public class KinesisShardDetectorTest { @Test public void testGetShardEmptyCache() { final String shardId = String.format(SHARD_ID, 1); - final CompletableFuture future = CompletableFuture - 
.completedFuture(ListShardsResponse.builder().shards(createShardList()).build()); + final CompletableFuture future = CompletableFuture.completedFuture( + ListShardsResponse.builder().shards(createShardList()).build()); when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); @@ -213,36 +217,40 @@ public class KinesisShardDetectorTest { final List shards = new ArrayList<>(createShardList()); shards.add(Shard.builder().shardId(shardId).build()); - final CompletableFuture future = CompletableFuture - .completedFuture(ListShardsResponse.builder().shards(shards).build()); + final CompletableFuture future = CompletableFuture.completedFuture( + ListShardsResponse.builder().shards(shards).build()); shardDetector.cachedShardMap(createShardList()); when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); final List responses = IntStream.range(0, MAX_CACHE_MISSES_BEFORE_RELOAD + 1) - .mapToObj(x -> shardDetector.shard(shardId)).collect(Collectors.toList()); + .mapToObj(x -> shardDetector.shard(shardId)) + .collect(Collectors.toList()); IntStream.range(0, MAX_CACHE_MISSES_BEFORE_RELOAD).forEach(x -> { assertThat(responses.get(x), nullValue()); }); - assertThat(responses.get(MAX_CACHE_MISSES_BEFORE_RELOAD), equalTo(Shard.builder().shardId(shardId).build())); + assertThat( + responses.get(MAX_CACHE_MISSES_BEFORE_RELOAD), + equalTo(Shard.builder().shardId(shardId).build())); verify(client).listShards(any(ListShardsRequest.class)); } @Test public void testGetShardNonExistentShardForceRefresh() { final String shardId = String.format(SHARD_ID, 5); - final CompletableFuture future = CompletableFuture - .completedFuture(ListShardsResponse.builder().shards(createShardList()).build()); + final CompletableFuture future = CompletableFuture.completedFuture( + ListShardsResponse.builder().shards(createShardList()).build()); shardDetector.cachedShardMap(createShardList()); when(client.listShards(any(ListShardsRequest.class))).thenReturn(future); final List 
responses = IntStream.range(0, MAX_CACHE_MISSES_BEFORE_RELOAD + 1) - .mapToObj(x -> shardDetector.shard(shardId)).collect(Collectors.toList()); + .mapToObj(x -> shardDetector.shard(shardId)) + .collect(Collectors.toList()); responses.forEach(response -> assertThat(response, nullValue())); assertThat(shardDetector.cacheMisses().get(), equalTo(0)); @@ -250,7 +258,8 @@ public class KinesisShardDetectorTest { } private List createShardList() { - return Arrays.asList(Shard.builder().shardId(String.format(SHARD_ID, 0)).build(), + return Arrays.asList( + Shard.builder().shardId(String.format(SHARD_ID, 0)).build(), Shard.builder().shardId(String.format(SHARD_ID, 1)).build(), Shard.builder().shardId(String.format(SHARD_ID, 2)).build(), Shard.builder().shardId(String.format(SHARD_ID, 3)).build(), diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseBuilder.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseBuilder.java index 8f825875..1bfe1bac 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseBuilder.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseBuilder.java @@ -34,14 +34,24 @@ public class LeaseBuilder { private ExtendedSequenceNumber checkpoint; private ExtendedSequenceNumber pendingCheckpoint; private Long ownerSwitchesSinceCheckpoint = 0L; - private Set parentShardIds = new HashSet<>(); + private Set parentShardIds = new HashSet<>(); private Set childShardIds = new HashSet<>(); private byte[] pendingCheckpointState; private HashKeyRangeForLease hashKeyRangeForLease; public Lease build() { - return new Lease(leaseKey, leaseOwner, leaseCounter, concurrencyToken, lastCounterIncrementNanos, checkpoint, - pendingCheckpoint, ownerSwitchesSinceCheckpoint, parentShardIds, childShardIds, - pendingCheckpointState, hashKeyRangeForLease); + return new Lease( + leaseKey, + leaseOwner, + leaseCounter, + concurrencyToken, + lastCounterIncrementNanos, + 
checkpoint, + pendingCheckpoint, + ownerSwitchesSinceCheckpoint, + parentShardIds, + childShardIds, + pendingCheckpointState, + hashKeyRangeForLease); } -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCleanupManagerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCleanupManagerTest.java index 9a731f80..9d51351c 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCleanupManagerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCleanupManagerTest.java @@ -15,6 +15,14 @@ package software.amazon.kinesis.leases; +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ScheduledExecutorService; +import java.util.stream.Collectors; + import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -29,14 +37,6 @@ import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.metrics.NullMetricsFactory; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import java.time.Duration; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ScheduledExecutorService; -import java.util.stream.Collectors; - import static org.mockito.Matchers.any; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -45,13 +45,14 @@ import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class LeaseCleanupManagerTest { - private static final ShardInfo SHARD_INFO = new ShardInfo("shardId", "concurrencyToken", - Collections.emptySet(), ExtendedSequenceNumber.LATEST); + private static final ShardInfo SHARD_INFO = + new ShardInfo("shardId", "concurrencyToken", Collections.emptySet(), ExtendedSequenceNumber.LATEST); private static final 
StreamIdentifier STREAM_IDENTIFIER = StreamIdentifier.singleStreamInstance("streamName"); private final long leaseCleanupIntervalMillis = Duration.ofSeconds(1).toMillis(); - private final long completedLeaseCleanupIntervalMillis = Duration.ofSeconds(0).toMillis(); + private final long completedLeaseCleanupIntervalMillis = + Duration.ofSeconds(0).toMillis(); private final long garbageLeaseCleanupIntervalMillis = Duration.ofSeconds(0).toMillis(); private boolean cleanupLeasesOfCompletedShards = true; private LeaseCleanupManager leaseCleanupManager; @@ -59,21 +60,30 @@ public class LeaseCleanupManagerTest { @Mock private LeaseRefresher leaseRefresher; + @Mock private LeaseCoordinator leaseCoordinator; + @Mock private ShardDetector shardDetector; + @Mock private ScheduledExecutorService deletionThreadPool; @Before public void setUp() throws Exception { - leaseCleanupManager = new LeaseCleanupManager(leaseCoordinator, NULL_METRICS_FACTORY, deletionThreadPool, - cleanupLeasesOfCompletedShards, leaseCleanupIntervalMillis, completedLeaseCleanupIntervalMillis, + leaseCleanupManager = new LeaseCleanupManager( + leaseCoordinator, + NULL_METRICS_FACTORY, + deletionThreadPool, + cleanupLeasesOfCompletedShards, + leaseCleanupIntervalMillis, + completedLeaseCleanupIntervalMillis, garbageLeaseCleanupIntervalMillis); when(leaseCoordinator.leaseRefresher()).thenReturn(leaseRefresher); - when(leaseCoordinator.updateLease(any(Lease.class), any(UUID.class), any(String.class), any(String.class))).thenReturn(true); + when(leaseCoordinator.updateLease(any(Lease.class), any(UUID.class), any(String.class), any(String.class))) + .thenReturn(true); } /** @@ -104,8 +114,8 @@ public class LeaseCleanupManagerTest { */ @Test public final void testParentShardLeaseDeletedSplitCase() throws Exception { - verifyExpectedDeletedLeasesCompletedShardCase(SHARD_INFO, childShardsForSplit(), - ExtendedSequenceNumber.LATEST, 1); + verifyExpectedDeletedLeasesCompletedShardCase( + SHARD_INFO, 
childShardsForSplit(), ExtendedSequenceNumber.LATEST, 1); } /** @@ -114,8 +124,8 @@ public class LeaseCleanupManagerTest { */ @Test public final void testParentShardLeaseDeletedMergeCase() throws Exception { - verifyExpectedDeletedLeasesCompletedShardCase(SHARD_INFO, childShardsForMerge(), - ExtendedSequenceNumber.LATEST, 1); + verifyExpectedDeletedLeasesCompletedShardCase( + SHARD_INFO, childShardsForMerge(), ExtendedSequenceNumber.LATEST, 1); } /** @@ -126,12 +136,17 @@ public class LeaseCleanupManagerTest { public final void testNoLeasesDeletedWhenNotEnabled() throws Exception { cleanupLeasesOfCompletedShards = false; - leaseCleanupManager = new LeaseCleanupManager(leaseCoordinator, NULL_METRICS_FACTORY, deletionThreadPool, - cleanupLeasesOfCompletedShards, leaseCleanupIntervalMillis, completedLeaseCleanupIntervalMillis, + leaseCleanupManager = new LeaseCleanupManager( + leaseCoordinator, + NULL_METRICS_FACTORY, + deletionThreadPool, + cleanupLeasesOfCompletedShards, + leaseCleanupIntervalMillis, + completedLeaseCleanupIntervalMillis, garbageLeaseCleanupIntervalMillis); - verifyExpectedDeletedLeasesCompletedShardCase(SHARD_INFO, childShardsForSplit(), - ExtendedSequenceNumber.LATEST, 0); + verifyExpectedDeletedLeasesCompletedShardCase( + SHARD_INFO, childShardsForSplit(), ExtendedSequenceNumber.LATEST, 0); } /** @@ -142,8 +157,7 @@ public class LeaseCleanupManagerTest { public final void testNoCleanupWhenSomeChildShardLeasesAreNotPresent() throws Exception { List childShards = childShardsForSplit(); - verifyExpectedDeletedLeasesCompletedShardCase(SHARD_INFO, childShards, - ExtendedSequenceNumber.LATEST, false, 0); + verifyExpectedDeletedLeasesCompletedShardCase(SHARD_INFO, childShards, ExtendedSequenceNumber.LATEST, false, 0); } /** @@ -174,10 +188,11 @@ public class LeaseCleanupManagerTest { */ @Test public final void testLeaseNotDeletedWhenParentsStillPresent() throws Exception { - final ShardInfo shardInfo = new ShardInfo("shardId-0", "concurrencyToken", 
Collections.singleton("parent"), - ExtendedSequenceNumber.LATEST); + final ShardInfo shardInfo = new ShardInfo( + "shardId-0", "concurrencyToken", Collections.singleton("parent"), ExtendedSequenceNumber.LATEST); - verifyExpectedDeletedLeasesCompletedShardCase(shardInfo, childShardsForMerge(), ExtendedSequenceNumber.LATEST, 0); + verifyExpectedDeletedLeasesCompletedShardCase( + shardInfo, childShardsForMerge(), ExtendedSequenceNumber.LATEST, 0); } /** @@ -193,8 +208,8 @@ public class LeaseCleanupManagerTest { */ @Test public final void testLeaseDeletedWhenShardDoesNotExist() throws Exception { - final Lease heldLease = LeaseHelper.createLease(SHARD_INFO.shardId(), "leaseOwner", - Collections.singleton("parentShardId")); + final Lease heldLease = + LeaseHelper.createLease(SHARD_INFO.shardId(), "leaseOwner", Collections.singleton("parentShardId")); testLeaseDeletedWhenShardDoesNotExist(heldLease); } @@ -204,13 +219,18 @@ public class LeaseCleanupManagerTest { */ @Test public final void testLeaseDeletedWhenShardDoesNotExistAndCleanupCompletedLeaseDisabled() throws Exception { - final Lease heldLease = LeaseHelper.createLease(SHARD_INFO.shardId(), "leaseOwner", - Collections.singleton("parentShardId")); + final Lease heldLease = + LeaseHelper.createLease(SHARD_INFO.shardId(), "leaseOwner", Collections.singleton("parentShardId")); cleanupLeasesOfCompletedShards = false; - leaseCleanupManager = new LeaseCleanupManager(leaseCoordinator, NULL_METRICS_FACTORY, deletionThreadPool, - cleanupLeasesOfCompletedShards, leaseCleanupIntervalMillis, completedLeaseCleanupIntervalMillis, + leaseCleanupManager = new LeaseCleanupManager( + leaseCoordinator, + NULL_METRICS_FACTORY, + deletionThreadPool, + cleanupLeasesOfCompletedShards, + leaseCleanupIntervalMillis, + completedLeaseCleanupIntervalMillis, garbageLeaseCleanupIntervalMillis); testLeaseDeletedWhenShardDoesNotExist(heldLease); @@ -229,25 +249,45 @@ public class LeaseCleanupManagerTest { 
verify(leaseRefresher).deleteLease(heldLease); } - private void verifyExpectedDeletedLeasesCompletedShardCase(ShardInfo shardInfo, List childShards, - ExtendedSequenceNumber extendedSequenceNumber, - int expectedDeletedLeases) throws Exception { - verifyExpectedDeletedLeasesCompletedShardCase(shardInfo, childShards, extendedSequenceNumber, true, expectedDeletedLeases); + private void verifyExpectedDeletedLeasesCompletedShardCase( + ShardInfo shardInfo, + List childShards, + ExtendedSequenceNumber extendedSequenceNumber, + int expectedDeletedLeases) + throws Exception { + verifyExpectedDeletedLeasesCompletedShardCase( + shardInfo, childShards, extendedSequenceNumber, true, expectedDeletedLeases); } - private void verifyExpectedDeletedLeasesCompletedShardCase(ShardInfo shardInfo, List childShards, - ExtendedSequenceNumber extendedSequenceNumber, - boolean childShardLeasesPresent, - int expectedDeletedLeases) throws Exception { - final Lease lease = LeaseHelper.createLease(shardInfo.shardId(), "leaseOwner", shardInfo.parentShardIds(), + private void verifyExpectedDeletedLeasesCompletedShardCase( + ShardInfo shardInfo, + List childShards, + ExtendedSequenceNumber extendedSequenceNumber, + boolean childShardLeasesPresent, + int expectedDeletedLeases) + throws Exception { + final Lease lease = LeaseHelper.createLease( + shardInfo.shardId(), + "leaseOwner", + shardInfo.parentShardIds(), childShards.stream().map(ChildShard::shardId).collect(Collectors.toSet())); - final List childShardLeases = childShards.stream().map(c -> LeaseHelper.createLease( - ShardInfo.getLeaseKey(shardInfo, c.shardId()), "leaseOwner", Collections.singleton(shardInfo.shardId()), - Collections.emptyList(), extendedSequenceNumber)).collect(Collectors.toList()); + final List childShardLeases = childShards.stream() + .map(c -> LeaseHelper.createLease( + ShardInfo.getLeaseKey(shardInfo, c.shardId()), + "leaseOwner", + Collections.singleton(shardInfo.shardId()), + Collections.emptyList(), + 
extendedSequenceNumber)) + .collect(Collectors.toList()); - final List parentShardLeases = lease.parentShardIds().stream().map(p -> - LeaseHelper.createLease(ShardInfo.getLeaseKey(shardInfo, p), "leaseOwner", Collections.emptyList(), - Collections.singleton(shardInfo.shardId()), extendedSequenceNumber)).collect(Collectors.toList()); + final List parentShardLeases = lease.parentShardIds().stream() + .map(p -> LeaseHelper.createLease( + ShardInfo.getLeaseKey(shardInfo, p), + "leaseOwner", + Collections.emptyList(), + Collections.singleton(shardInfo.shardId()), + extendedSequenceNumber)) + .collect(Collectors.toList()); when(leaseRefresher.getLease(lease.leaseKey())).thenReturn(lease); for (Lease parentShardLease : parentShardLeases) { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCoordinatorExerciser.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCoordinatorExerciser.java index 72b48f16..0bc285a6 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCoordinatorExerciser.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseCoordinatorExerciser.java @@ -25,7 +25,6 @@ import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; - import javax.swing.BoxLayout; import javax.swing.JFrame; import javax.swing.JLabel; @@ -58,18 +57,23 @@ public class LeaseCoordinatorExerciser { private static final long INITIAL_LEASE_TABLE_READ_CAPACITY = 10L; private static final long INITIAL_LEASE_TABLE_WRITE_CAPACITY = 50L; - public static void main(String[] args) throws DependencyException, InvalidStateException, - ProvisionedThroughputException { + public static void main(String[] args) + throws DependencyException, InvalidStateException, ProvisionedThroughputException { int numCoordinators = 9; int numLeases = 73; int leaseDurationMillis = 10000; int epsilonMillis = 100; DynamoDbAsyncClient dynamoDBClient = 
DynamoDbAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); + .credentialsProvider(DefaultCredentialsProvider.create()) + .build(); - LeaseRefresher leaseRefresher = new DynamoDBLeaseRefresher("nagl_ShardProgress", dynamoDBClient, - new DynamoDBLeaseSerializer(), true, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); + LeaseRefresher leaseRefresher = new DynamoDBLeaseRefresher( + "nagl_ShardProgress", + dynamoDBClient, + new DynamoDBLeaseSerializer(), + true, + TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); if (leaseRefresher.createLeaseTableIfNotExists()) { log.info("Waiting for newly created lease table"); @@ -80,17 +84,31 @@ public class LeaseCoordinatorExerciser { } CloudWatchAsyncClient client = CloudWatchAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); - CloudWatchMetricsFactory metricsFactory = new CloudWatchMetricsFactory(client, "testNamespace", 30 * 1000, 1000, - METRICS_LEVEL, MetricsConfig.METRICS_DIMENSIONS_ALL, FLUSH_SIZE); + .credentialsProvider(DefaultCredentialsProvider.create()) + .build(); + CloudWatchMetricsFactory metricsFactory = new CloudWatchMetricsFactory( + client, + "testNamespace", + 30 * 1000, + 1000, + METRICS_LEVEL, + MetricsConfig.METRICS_DIMENSIONS_ALL, + FLUSH_SIZE); final List coordinators = new ArrayList<>(); for (int i = 0; i < numCoordinators; i++) { String workerIdentifier = "worker-" + Integer.toString(i); - LeaseCoordinator coord = new DynamoDBLeaseCoordinator(leaseRefresher, workerIdentifier, leaseDurationMillis, - epsilonMillis, MAX_LEASES_FOR_WORKER, MAX_LEASES_TO_STEAL_AT_ONE_TIME, - MAX_LEASE_RENEWER_THREAD_COUNT, INITIAL_LEASE_TABLE_READ_CAPACITY, - INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory); + LeaseCoordinator coord = new DynamoDBLeaseCoordinator( + leaseRefresher, + workerIdentifier, + leaseDurationMillis, + epsilonMillis, + MAX_LEASES_FOR_WORKER, + MAX_LEASES_TO_STEAL_AT_ONE_TIME, + MAX_LEASE_RENEWER_THREAD_COUNT, + 
INITIAL_LEASE_TABLE_READ_CAPACITY, + INITIAL_LEASE_TABLE_WRITE_CAPACITY, + metricsFactory); coordinators.add(coord); } @@ -133,7 +151,6 @@ public class LeaseCoordinatorExerciser { button.setLabel("Stop " + coord.workerIdentifier()); } } - }); coordPanel.add(button); @@ -168,12 +185,14 @@ public class LeaseCoordinatorExerciser { public int compare(final Lease arg0, final Lease arg1) { return arg0.leaseKey().compareTo(arg1.leaseKey()); } - }); StringBuilder builder = new StringBuilder(); builder.append(""); - builder.append(workerIdentifier).append(":").append(asgn.size()).append(" "); + builder.append(workerIdentifier) + .append(":") + .append(asgn.size()) + .append(" "); for (Lease lease : asgn) { String leaseKey = lease.leaseKey(); @@ -189,8 +208,10 @@ public class LeaseCoordinatorExerciser { greenNesses.put(leaseKey, greenNess); lastOwners.put(leaseKey, lease.leaseOwner()); - builder.append(String.format("%03d", - String.format("#00%02x00", greenNess), Integer.parseInt(leaseKey))).append(" "); + builder.append(String.format( + "%03d", + String.format("#00%02x00", greenNess), Integer.parseInt(leaseKey))) + .append(" "); } builder.append(""); @@ -211,7 +232,6 @@ public class LeaseCoordinatorExerciser { } } } - }.start(); frame.pack(); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseHelper.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseHelper.java index 0e10bc48..f17fc370 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseHelper.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseHelper.java @@ -15,23 +15,28 @@ package software.amazon.kinesis.leases; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - import java.util.Collection; import java.util.Collections; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + public class LeaseHelper { public static Lease createLease(String leaseKey, String 
leaseOwner, Collection parentShardIds) { return createLease(leaseKey, leaseOwner, parentShardIds, Collections.emptySet(), ExtendedSequenceNumber.LATEST); } - public static Lease createLease(String leaseKey, String leaseOwner, Collection parentShardIds, Collection childShardIds) { + public static Lease createLease( + String leaseKey, String leaseOwner, Collection parentShardIds, Collection childShardIds) { return createLease(leaseKey, leaseOwner, parentShardIds, childShardIds, ExtendedSequenceNumber.LATEST); } - public static Lease createLease(String leaseKey, String leaseOwner, Collection parentShardIds, - Collection childShardIds, ExtendedSequenceNumber extendedSequenceNumber) { + public static Lease createLease( + String leaseKey, + String leaseOwner, + Collection parentShardIds, + Collection childShardIds, + ExtendedSequenceNumber extendedSequenceNumber) { Lease lease = new Lease(); lease.leaseKey(leaseKey); lease.leaseOwner(leaseOwner); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationBillingModePayPerRequestTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationBillingModePayPerRequestTest.java index c9b79189..f35b4ed8 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationBillingModePayPerRequestTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationBillingModePayPerRequestTest.java @@ -22,7 +22,14 @@ import software.amazon.kinesis.leases.dynamodb.DynamoDBLeaseRefresher; public class LeaseIntegrationBillingModePayPerRequestTest extends LeaseIntegrationTest { @Override protected DynamoDBLeaseRefresher getLeaseRefresher() { - return new DynamoDBLeaseRefresher(tableName+"Per-Request", ddbClient, leaseSerializer, true, - tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, BillingMode.PAY_PER_REQUEST, false); + return new DynamoDBLeaseRefresher( + tableName + "Per-Request", + 
ddbClient, + leaseSerializer, + true, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PAY_PER_REQUEST, + false); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationTest.java index 66b221c8..6f312271 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseIntegrationTest.java @@ -14,12 +14,11 @@ */ package software.amazon.kinesis.leases; +import lombok.extern.slf4j.Slf4j; import org.junit.Rule; import org.junit.rules.TestWatcher; import org.junit.runner.Description; import org.mockito.Mock; - -import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.model.BillingMode; @@ -33,7 +32,8 @@ public class LeaseIntegrationTest { protected static DynamoDBLeaseRefresher leaseRefresher; protected static DynamoDbAsyncClient ddbClient = DynamoDbAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); + .credentialsProvider(DefaultCredentialsProvider.create()) + .build(); protected String tableName = "nagl_ShardProgress"; @@ -73,9 +73,14 @@ public class LeaseIntegrationTest { }; protected DynamoDBLeaseRefresher getLeaseRefresher() { - return new DynamoDBLeaseRefresher(tableName, ddbClient, leaseSerializer, true, - tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, BillingMode.PAY_PER_REQUEST, false); + return new DynamoDBLeaseRefresher( + tableName, + ddbClient, + leaseSerializer, + true, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PAY_PER_REQUEST, + false); } - } - diff --git 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseTest.java index ad3827a9..e0e338ba 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/LeaseTest.java @@ -1,16 +1,15 @@ package software.amazon.kinesis.leases; +import java.util.Collections; +import java.util.HashSet; +import java.util.concurrent.TimeUnit; + import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import java.util.Collections; -import java.util.HashSet; -import java.util.concurrent.TimeUnit; - @RunWith(MockitoJUnitRunner.class) public class LeaseTest { @@ -19,7 +18,7 @@ public class LeaseTest { private static final long LEASE_DURATION_NANOS = TimeUnit.MILLISECONDS.toNanos(LEASE_DURATION_MILLIS); - //Write a unit test for software.amazon.kinesis.leases.Lease to test leaseOwner as null and epired + // Write a unit test for software.amazon.kinesis.leases.Lease to test leaseOwner as null and epired @Test public void testLeaseOwnerNullAndExpired() { long expiredTime = MOCK_CURRENT_TIME - LEASE_DURATION_NANOS - 1; diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritizationUnitTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritizationUnitTest.java index 5147ba79..9caa9648 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritizationUnitTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ParentsFirstShardPrioritizationUnitTest.java @@ -14,9 +14,6 @@ */ package software.amazon.kinesis.leases; -import static org.junit.Assert.assertEquals; -import static 
org.junit.Assert.fail; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -24,9 +21,11 @@ import java.util.List; import java.util.Random; import org.junit.Test; - import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + public class ParentsFirstShardPrioritizationUnitTest { @Test(expected = IllegalArgumentException.class) @@ -169,8 +168,7 @@ public class ParentsFirstShardPrioritizationUnitTest { private List parentShardIds = Collections.emptyList(); private ExtendedSequenceNumber checkpoint = ExtendedSequenceNumber.LATEST; - ShardInfoBuilder() { - } + ShardInfoBuilder() {} ShardInfoBuilder withShardId(String shardId) { this.shardId = shardId; diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardInfoTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardInfoTest.java index 4ccafe52..d61194fc 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardInfoTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardInfoTest.java @@ -14,12 +14,6 @@ */ package software.amazon.kinesis.leases; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; - import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -28,9 +22,14 @@ import java.util.UUID; import org.junit.Before; import org.junit.Test; - import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; + public class ShardInfoTest { private static final 
String CONCURRENCY_TOKEN = UUID.randomUUID().toString(); private static final String SHARD_ID = "shardId-test"; @@ -48,7 +47,8 @@ public class ShardInfoTest { @Test public void testPacboyShardInfoEqualsWithSameArgs() { - ShardInfo equalShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); + ShardInfo equalShardInfo = + new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); assertTrue("Equal should return true for arguments all the same", testShardInfo.equals(equalShardInfo)); } @@ -59,9 +59,9 @@ public class ShardInfoTest { @Test public void testPacboyShardInfoEqualsForfToken() { - ShardInfo diffShardInfo = new ShardInfo(SHARD_ID, UUID.randomUUID().toString(), parentShardIds, ExtendedSequenceNumber.LATEST); - assertFalse("Equal should return false with different concurrency token", - diffShardInfo.equals(testShardInfo)); + ShardInfo diffShardInfo = + new ShardInfo(SHARD_ID, UUID.randomUUID().toString(), parentShardIds, ExtendedSequenceNumber.LATEST); + assertFalse("Equal should return false with different concurrency token", diffShardInfo.equals(testShardInfo)); diffShardInfo = new ShardInfo(SHARD_ID, null, parentShardIds, ExtendedSequenceNumber.LATEST); assertFalse("Equal should return false for null concurrency token", diffShardInfo.equals(testShardInfo)); } @@ -71,9 +71,10 @@ public class ShardInfoTest { List differentlyOrderedParentShardIds = new ArrayList<>(); differentlyOrderedParentShardIds.add("shard-2"); differentlyOrderedParentShardIds.add("shard-1"); - ShardInfo shardInfoWithDifferentlyOrderedParentShardIds = - new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, differentlyOrderedParentShardIds, ExtendedSequenceNumber.LATEST); - assertTrue("Equal should return true even with parent shard Ids reordered", + ShardInfo shardInfoWithDifferentlyOrderedParentShardIds = new ShardInfo( + SHARD_ID, CONCURRENCY_TOKEN, differentlyOrderedParentShardIds, ExtendedSequenceNumber.LATEST); + 
assertTrue( + "Equal should return true even with parent shard Ids reordered", shardInfoWithDifferentlyOrderedParentShardIds.equals(testShardInfo)); } @@ -82,34 +83,40 @@ public class ShardInfoTest { Set diffParentIds = new HashSet<>(); diffParentIds.add("shard-3"); diffParentIds.add("shard-4"); - ShardInfo diffShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, diffParentIds, ExtendedSequenceNumber.LATEST); - assertFalse("Equal should return false with different parent shard Ids", - diffShardInfo.equals(testShardInfo)); + ShardInfo diffShardInfo = + new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, diffParentIds, ExtendedSequenceNumber.LATEST); + assertFalse("Equal should return false with different parent shard Ids", diffShardInfo.equals(testShardInfo)); diffShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, null, ExtendedSequenceNumber.LATEST); assertFalse("Equal should return false with null parent shard Ids", diffShardInfo.equals(testShardInfo)); } @Test public void testShardInfoCheckpointEqualsHashCode() { - ShardInfo baseInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, - ExtendedSequenceNumber.TRIM_HORIZON); - ShardInfo differentCheckpoint = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, - new ExtendedSequenceNumber("1234")); + ShardInfo baseInfo = + new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); + ShardInfo differentCheckpoint = + new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, new ExtendedSequenceNumber("1234")); ShardInfo nullCheckpoint = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, null); assertThat("Checkpoint should not be included in equality.", baseInfo.equals(differentCheckpoint), is(true)); assertThat("Checkpoint should not be included in equality.", baseInfo.equals(nullCheckpoint), is(true)); - assertThat("Checkpoint should not be included in hash code.", baseInfo.hashCode(), + assertThat( + "Checkpoint should not be included in hash code.", + 
baseInfo.hashCode(), equalTo(differentCheckpoint.hashCode())); - assertThat("Checkpoint should not be included in hash code.", baseInfo.hashCode(), + assertThat( + "Checkpoint should not be included in hash code.", + baseInfo.hashCode(), equalTo(nullCheckpoint.hashCode())); } @Test public void testPacboyShardInfoSameHashCode() { - ShardInfo equalShardInfo = new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); - assertTrue("Shard info objects should have same hashCode for the same arguments", + ShardInfo equalShardInfo = + new ShardInfo(SHARD_ID, CONCURRENCY_TOKEN, parentShardIds, ExtendedSequenceNumber.LATEST); + assertTrue( + "Shard info objects should have same hashCode for the same arguments", equalShardInfo.hashCode() == testShardInfo.hashCode()); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardObjectHelper.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardObjectHelper.java index 4e2bae48..228e65df 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardObjectHelper.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardObjectHelper.java @@ -18,7 +18,6 @@ import java.math.BigInteger; import java.util.ArrayList; import java.util.List; - import software.amazon.awssdk.services.kinesis.model.HashKeyRange; import software.amazon.awssdk.services.kinesis.model.SequenceNumberRange; import software.amazon.awssdk.services.kinesis.model.Shard; @@ -29,12 +28,13 @@ import software.amazon.awssdk.services.kinesis.model.Shard; public class ShardObjectHelper { private static final int EXPONENT = 128; - + /** * Max value of a sequence number (2^128 -1). Useful for defining sequence number range for a shard. 
*/ - static final String MAX_SEQUENCE_NUMBER = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); - + static final String MAX_SEQUENCE_NUMBER = + new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); + /** * Min value of a sequence number (0). Useful for defining sequence number range for a shard. */ @@ -43,18 +43,18 @@ public class ShardObjectHelper { /** * Max value of a hash key (2^128 -1). Useful for defining hash key range for a shard. */ - public static final String MAX_HASH_KEY = new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); - + public static final String MAX_HASH_KEY = + new BigInteger("2").pow(EXPONENT).subtract(BigInteger.ONE).toString(); + /** * Min value of a hash key (0). Useful for defining sequence number range for a shard. */ public static final String MIN_HASH_KEY = BigInteger.ZERO.toString(); /** - * + * */ - private ShardObjectHelper() { - } + private ShardObjectHelper() {} /** Helper method to create a new shard object. 
* @param shardId @@ -63,11 +63,16 @@ public class ShardObjectHelper { * @param sequenceNumberRange * @return */ - static Shard newShard(String shardId, + static Shard newShard( + String shardId, String parentShardId, String adjacentParentShardId, SequenceNumberRange sequenceNumberRange) { - return newShard(shardId, parentShardId, adjacentParentShardId, sequenceNumberRange, + return newShard( + shardId, + parentShardId, + adjacentParentShardId, + sequenceNumberRange, HashKeyRange.builder().startingHashKey("1").endingHashKey("100").build()); } @@ -79,14 +84,19 @@ public class ShardObjectHelper { * @param hashKeyRange * @return */ - public static Shard newShard(String shardId, - String parentShardId, - String adjacentParentShardId, - SequenceNumberRange sequenceNumberRange, - HashKeyRange hashKeyRange) { - return Shard.builder().shardId(shardId).parentShardId(parentShardId) - .adjacentParentShardId(adjacentParentShardId).sequenceNumberRange(sequenceNumberRange) - .hashKeyRange(hashKeyRange).build(); + public static Shard newShard( + String shardId, + String parentShardId, + String adjacentParentShardId, + SequenceNumberRange sequenceNumberRange, + HashKeyRange hashKeyRange) { + return Shard.builder() + .shardId(shardId) + .parentShardId(parentShardId) + .adjacentParentShardId(adjacentParentShardId) + .sequenceNumberRange(sequenceNumberRange) + .hashKeyRange(hashKeyRange) + .build(); } /** Helper method. 
@@ -94,8 +104,12 @@ public class ShardObjectHelper { * @param endingSequenceNumber * @return */ - public static SequenceNumberRange newSequenceNumberRange(String startingSequenceNumber, String endingSequenceNumber) { - return SequenceNumberRange.builder().startingSequenceNumber(startingSequenceNumber).endingSequenceNumber(endingSequenceNumber).build(); + public static SequenceNumberRange newSequenceNumberRange( + String startingSequenceNumber, String endingSequenceNumber) { + return SequenceNumberRange.builder() + .startingSequenceNumber(startingSequenceNumber) + .endingSequenceNumber(endingSequenceNumber) + .build(); } /** Helper method. @@ -104,9 +118,12 @@ public class ShardObjectHelper { * @return */ public static HashKeyRange newHashKeyRange(String startingHashKey, String endingHashKey) { - return HashKeyRange.builder().startingHashKey(startingHashKey).endingHashKey(endingHashKey).build(); + return HashKeyRange.builder() + .startingHashKey(startingHashKey) + .endingHashKey(endingHashKey) + .build(); } - + static List getParentShardIds(Shard shard) { List parentShardIds = new ArrayList<>(2); if (shard.adjacentParentShardId() != null) { @@ -117,5 +134,4 @@ public class ShardObjectHelper { } return parentShardIds; } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSequenceVerifier.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSequenceVerifier.java index d7a94266..3e69e244 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSequenceVerifier.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSequenceVerifier.java @@ -21,7 +21,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentSkipListSet; - import junit.framework.Assert; import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.services.kinesis.model.Shard; @@ -46,14 +45,14 @@ public class ShardSequenceVerifier { 
shardIdToShards.put(shard.shardId(), shard); } } - + public void registerInitialization(String shardId) { List parentShardIds = ShardObjectHelper.getParentShardIds(shardIdToShards.get(shardId)); for (String parentShardId : parentShardIds) { if (initializedShards.contains(parentShardId)) { if (!shutdownShards.contains(parentShardId)) { - String message = "Parent shard " + parentShardId + " was not shutdown before shard " - + shardId + " was initialized."; + String message = "Parent shard " + parentShardId + " was not shutdown before shard " + shardId + + " was initialized."; log.error(message); validationFailures.add(message); } @@ -61,18 +60,17 @@ public class ShardSequenceVerifier { } initializedShards.add(shardId); } - + public void registerShutdown(String shardId, ShutdownReason reason) { if (reason.equals(ShutdownReason.SHARD_END)) { shutdownShards.add(shardId); } } - + public void verify() { for (String message : validationFailures) { log.error(message); } Assert.assertTrue(validationFailures.isEmpty()); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSyncTaskIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSyncTaskIntegrationTest.java index 4ce8eeb1..28915b16 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSyncTaskIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/ShardSyncTaskIntegrationTest.java @@ -14,7 +14,6 @@ */ package software.amazon.kinesis.leases; - import java.util.HashSet; import java.util.List; import java.util.Set; @@ -25,7 +24,6 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; - import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; @@ -64,43 +62,58 @@ public class ShardSyncTaskIntegrationTest { 
@BeforeClass public static void setUpBeforeClass() throws Exception { -// ClientAsyncHttpConfiguration configuration = ClientAsyncHttpConfiguration.builder().httpClientFactory( -// NettySdkHttpClientFactory.builder().trustAllCertificates(true).maxConnectionsPerEndpoint(10).build()) -// .build(); -// kinesisClient = KinesisAsyncClient.builder().asyncHttpConfiguration(configuration) -// .endpointOverride(new URI("https://aws-kinesis-alpha.corp.amazon.com")).region(Region.US_EAST_1) -// .build(); -// + // ClientAsyncHttpConfiguration configuration = ClientAsyncHttpConfiguration.builder().httpClientFactory( + // + // NettySdkHttpClientFactory.builder().trustAllCertificates(true).maxConnectionsPerEndpoint(10).build()) + // .build(); + // kinesisClient = KinesisAsyncClient.builder().asyncHttpConfiguration(configuration) + // .endpointOverride(new + // URI("https://aws-kinesis-alpha.corp.amazon.com")).region(Region.US_EAST_1) + // .build(); + // try { - CreateStreamRequest req = CreateStreamRequest.builder().streamName(STREAM_NAME).shardCount(1).build(); + CreateStreamRequest req = CreateStreamRequest.builder() + .streamName(STREAM_NAME) + .shardCount(1) + .build(); kinesisClient.createStream(req); } catch (KinesisException ase) { ase.printStackTrace(); } StreamStatus status; -// do { -// status = StreamStatus.fromValue(kinesisClient.describeStreamSummary( -// DescribeStreamSummaryRequest.builder().streamName(STREAM_NAME).build()).get() -// .streamDescriptionSummary().streamStatusString()); -// } while (status != StreamStatus.ACTIVE); -// + // do { + // status = StreamStatus.fromValue(kinesisClient.describeStreamSummary( + // DescribeStreamSummaryRequest.builder().streamName(STREAM_NAME).build()).get() + // .streamDescriptionSummary().streamStatusString()); + // } while (status != StreamStatus.ACTIVE); + // } @Before public void setup() { - DynamoDbAsyncClient client = DynamoDbAsyncClient.builder().region(Region.US_EAST_1).build(); - leaseRefresher = - new 
DynamoDBLeaseRefresher("ShardSyncTaskIntegrationTest", client, new DynamoDBLeaseSerializer(), - USE_CONSISTENT_READS, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); + DynamoDbAsyncClient client = + DynamoDbAsyncClient.builder().region(Region.US_EAST_1).build(); + leaseRefresher = new DynamoDBLeaseRefresher( + "ShardSyncTaskIntegrationTest", + client, + new DynamoDBLeaseSerializer(), + USE_CONSISTENT_READS, + TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); - shardDetector = new KinesisShardDetector(kinesisClient, STREAM_NAME, 500L, 50, - LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS, MAX_CACHE_MISSES_BEFORE_RELOAD, CACHE_MISS_WARNING_MODULUS); + shardDetector = new KinesisShardDetector( + kinesisClient, + STREAM_NAME, + 500L, + 50, + LIST_SHARDS_CACHE_ALLOWED_AGE_IN_SECONDS, + MAX_CACHE_MISSES_BEFORE_RELOAD, + CACHE_MISS_WARNING_MODULUS); hierarchicalShardSyncer = new HierarchicalShardSyncer(); } /** * Test method for call(). - * + * * @throws DependencyException * @throws InvalidStateException * @throws ProvisionedThroughputException @@ -113,11 +126,18 @@ public class ShardSyncTaskIntegrationTest { leaseRefresher.createLeaseTableIfNotExists(readCapacity, writeCapacity); } leaseRefresher.deleteAll(); - Set shardIds = shardDetector.listShards().stream().map(Shard::shardId).collect(Collectors.toSet()); - ShardSyncTask syncTask = new ShardSyncTask(shardDetector, leaseRefresher, + Set shardIds = + shardDetector.listShards().stream().map(Shard::shardId).collect(Collectors.toSet()); + ShardSyncTask syncTask = new ShardSyncTask( + shardDetector, + leaseRefresher, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST), - false, true, false, 0L, - hierarchicalShardSyncer, NULL_METRICS_FACTORY); + false, + true, + false, + 0L, + hierarchicalShardSyncer, + NULL_METRICS_FACTORY); syncTask.call(); List leases = leaseRefresher.listLeases(); Set leaseKeys = new HashSet<>(); @@ -130,5 +150,4 @@ public class ShardSyncTaskIntegrationTest { 
shardIds.removeAll(leaseKeys); Assert.assertTrue(shardIds.isEmpty()); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBCheckpointerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBCheckpointerTest.java index 63cb97e8..1523fe38 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBCheckpointerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBCheckpointerTest.java @@ -14,10 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.UUID; import org.junit.Before; @@ -25,7 +21,6 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.kinesis.checkpoint.dynamodb.DynamoDBCheckpointer; import software.amazon.kinesis.exceptions.KinesisClientLibException; import software.amazon.kinesis.exceptions.ShutdownException; @@ -37,6 +32,10 @@ import software.amazon.kinesis.leases.exceptions.InvalidStateException; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class DynamoDBCheckpointerTest { private static final String SHARD_ID = "shardId-test"; @@ -46,6 +45,7 @@ public class DynamoDBCheckpointerTest { @Mock private LeaseRefresher leaseRefresher; + @Mock private LeaseCoordinator leaseCoordinator; @@ -58,11 +58,13 @@ public class DynamoDBCheckpointerTest { } @Test(expected = ShutdownException.class) - public void testSetCheckpointWithUnownedShardId() throws KinesisClientLibException, 
DependencyException, - InvalidStateException, ProvisionedThroughputException { + public void testSetCheckpointWithUnownedShardId() + throws KinesisClientLibException, DependencyException, InvalidStateException, + ProvisionedThroughputException { final Lease lease = new Lease(); when(leaseCoordinator.getCurrentlyHeldLease(eq(SHARD_ID))).thenReturn(lease); - when(leaseCoordinator.updateLease(eq(lease), eq(TEST_UUID), eq(OPERATION), eq(SHARD_ID))).thenReturn(false); + when(leaseCoordinator.updateLease(eq(lease), eq(TEST_UUID), eq(OPERATION), eq(SHARD_ID))) + .thenReturn(false); try { dynamoDBCheckpointer.setCheckpoint(SHARD_ID, TEST_CHKPT, TEST_UUID.toString()); } finally { @@ -71,11 +73,11 @@ public class DynamoDBCheckpointerTest { } } -// @Test(expected = DependencyException.class) -// public void testWaitLeaseTableTimeout() -// throws DependencyException, ProvisionedThroughputException, IllegalStateException { -// Set mock lease manager to return false in waiting -// doReturn(false).when(leaseRefresher).waitUntilLeaseTableExists(anyLong(), anyLong()); -// leaseCoordinator.initialize(); -// } + // @Test(expected = DependencyException.class) + // public void testWaitLeaseTableTimeout() + // throws DependencyException, ProvisionedThroughputException, IllegalStateException { + // Set mock lease manager to return false in waiting + // doReturn(false).when(leaseRefresher).waitUntilLeaseTableExists(anyLong(), anyLong()); + // leaseCoordinator.initialize(); + // } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorIntegrationTest.java index 05d4ba74..f52b91e1 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorIntegrationTest.java +++ 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorIntegrationTest.java @@ -14,14 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.fail; - import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -32,7 +24,6 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.kinesis.checkpoint.dynamodb.DynamoDBCheckpointer; @@ -46,6 +37,14 @@ import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.metrics.NullMetricsFactory; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + @RunWith(MockitoJUnitRunner.class) public class DynamoDBLeaseCoordinatorIntegrationTest { private static final int ATTEMPTS = 20; @@ -73,9 +72,14 @@ public class DynamoDBLeaseCoordinatorIntegrationTest { final boolean useConsistentReads = true; if (leaseRefresher == null) { DynamoDbAsyncClient dynamoDBClient = DynamoDbAsyncClient.builder() - .credentialsProvider(DefaultCredentialsProvider.create()).build(); - leaseRefresher = new DynamoDBLeaseRefresher(TABLE_NAME, dynamoDBClient, new DynamoDBLeaseSerializer(), - 
useConsistentReads, TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); + .credentialsProvider(DefaultCredentialsProvider.create()) + .build(); + leaseRefresher = new DynamoDBLeaseRefresher( + TABLE_NAME, + dynamoDBClient, + new DynamoDBLeaseSerializer(), + useConsistentReads, + TableCreatorCallback.NOOP_TABLE_CREATOR_CALLBACK); } leaseRefresher.createLeaseTableIfNotExists(10L, 10L); @@ -96,9 +100,17 @@ public class DynamoDBLeaseCoordinatorIntegrationTest { } leaseRefresher.deleteAll(); - coordinator = new DynamoDBLeaseCoordinator(leaseRefresher, WORKER_ID, LEASE_DURATION_MILLIS, - EPSILON_MILLIS, MAX_LEASES_FOR_WORKER, MAX_LEASES_TO_STEAL_AT_ONE_TIME, MAX_LEASE_RENEWER_THREAD_COUNT, - INITIAL_LEASE_TABLE_READ_CAPACITY, INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory); + coordinator = new DynamoDBLeaseCoordinator( + leaseRefresher, + WORKER_ID, + LEASE_DURATION_MILLIS, + EPSILON_MILLIS, + MAX_LEASES_FOR_WORKER, + MAX_LEASES_TO_STEAL_AT_ONE_TIME, + MAX_LEASE_RENEWER_THREAD_COUNT, + INITIAL_LEASE_TABLE_READ_CAPACITY, + INITIAL_LEASE_TABLE_WRITE_CAPACITY, + metricsFactory); dynamoDBCheckpointer = new DynamoDBCheckpointer(coordinator, leaseRefresher); dynamoDBCheckpointer.operation(OPERATION); @@ -140,7 +152,8 @@ public class DynamoDBLeaseCoordinatorIntegrationTest { lease.leaseOwner(coordinator.workerIdentifier()); assertEquals(lease, leaseFromDDBAtInitialCheckpoint); - dynamoDBCheckpointer.prepareCheckpoint(lease.leaseKey(), pendingCheckpoint, lease.concurrencyToken().toString(), checkpointState); + dynamoDBCheckpointer.prepareCheckpoint( + lease.leaseKey(), pendingCheckpoint, lease.concurrencyToken().toString(), checkpointState); final Lease leaseFromDDBAtPendingCheckpoint = leaseRefresher.getLease(lease.leaseKey()); lease.leaseCounter(lease.leaseCounter() + 1); @@ -257,5 +270,4 @@ public class DynamoDBLeaseCoordinatorIntegrationTest { return leases; } } - } diff --git 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorTest.java index 7347b4cb..2b9ffbcd 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseCoordinatorTest.java @@ -1,5 +1,7 @@ package software.amazon.kinesis.leases.dynamodb; +import java.util.UUID; + import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -9,8 +11,6 @@ import software.amazon.kinesis.leases.LeaseRefresher; import software.amazon.kinesis.leases.exceptions.DependencyException; import software.amazon.kinesis.metrics.MetricsFactory; -import java.util.UUID; - import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -32,6 +32,7 @@ public class DynamoDBLeaseCoordinatorTest { @Mock private LeaseRefresher leaseRefresher; + @Mock private MetricsFactory metricsFactory; @@ -39,16 +40,25 @@ public class DynamoDBLeaseCoordinatorTest { @Before public void setup() { - this.leaseCoordinator = new DynamoDBLeaseCoordinator(leaseRefresher, WORKER_ID, LEASE_DURATION_MILLIS, - ENABLE_PRIORITY_LEASE_ASSIGNMENT, EPSILON_MILLIS, MAX_LEASES_FOR_WORKER, - MAX_LEASES_TO_STEAL_AT_ONE_TIME, MAX_LEASE_RENEWER_THREAD_COUNT, - INITIAL_LEASE_TABLE_READ_CAPACITY, INITIAL_LEASE_TABLE_WRITE_CAPACITY, metricsFactory); + this.leaseCoordinator = new DynamoDBLeaseCoordinator( + leaseRefresher, + WORKER_ID, + LEASE_DURATION_MILLIS, + ENABLE_PRIORITY_LEASE_ASSIGNMENT, + EPSILON_MILLIS, + MAX_LEASES_FOR_WORKER, + MAX_LEASES_TO_STEAL_AT_ONE_TIME, + MAX_LEASE_RENEWER_THREAD_COUNT, + INITIAL_LEASE_TABLE_READ_CAPACITY, + INITIAL_LEASE_TABLE_WRITE_CAPACITY, + metricsFactory); } @Test public void 
testInitialize_tableCreationSucceeds() throws Exception { when(leaseRefresher.createLeaseTableIfNotExists()).thenReturn(true); - when(leaseRefresher.waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS)).thenReturn(true); + when(leaseRefresher.waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS)) + .thenReturn(true); leaseCoordinator.initialize(); @@ -59,7 +69,8 @@ public class DynamoDBLeaseCoordinatorTest { @Test(expected = DependencyException.class) public void testInitialize_tableCreationFails() throws Exception { when(leaseRefresher.createLeaseTableIfNotExists()).thenReturn(false); - when(leaseRefresher.waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS)).thenReturn(false); + when(leaseRefresher.waitUntilLeaseTableExists(SECONDS_BETWEEN_POLLS, TIMEOUT_SECONDS)) + .thenReturn(false); try { leaseCoordinator.initialize(); @@ -81,5 +92,4 @@ public class DynamoDBLeaseCoordinatorTest { leaseCoordinator.stopLeaseTaker(); assertTrue(leaseCoordinator.getAssignments().isEmpty()); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherIntegrationTest.java index 51e12d2d..21a7a44f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherIntegrationTest.java @@ -19,6 +19,7 @@ import java.util.Collections; import java.util.List; import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; + import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -30,6 +31,7 @@ import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseIntegrationTest; import software.amazon.kinesis.leases.UpdateField; import 
software.amazon.kinesis.leases.exceptions.LeasingException; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -44,8 +46,12 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest @Before public void setup() { - doNothing().when(tableCreatorCallback).performAction( - eq(TableCreatorCallbackInput.builder().dynamoDbClient(ddbClient).tableName(tableName).build())); + doNothing() + .when(tableCreatorCallback) + .performAction(eq(TableCreatorCallbackInput.builder() + .dynamoDbClient(ddbClient) + .tableName(tableName) + .build())); } /** @@ -125,10 +131,8 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); Lease lease = builder.withLease("1").build().get("1"); final String leaseKey = lease.leaseKey(); - final HashKeyRangeForLease hashKeyRangeForLease = HashKeyRangeForLease.fromHashKeyRange(HashKeyRange.builder() - .startingHashKey("1") - .endingHashKey("2") - .build()); + final HashKeyRangeForLease hashKeyRangeForLease = HashKeyRangeForLease.fromHashKeyRange( + HashKeyRange.builder().startingHashKey("1").endingHashKey("2").build()); lease.hashKeyRange(hashKeyRangeForLease); leaseRefresher.updateLeaseWithMetaInfo(lease, UpdateField.HASH_KEY_RANGE); final Lease updatedLease = leaseRefresher.getLease(leaseKey); @@ -186,7 +190,8 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest private void testTakeLease(boolean owned) throws LeasingException { TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); - Lease lease = builder.withLease("1", owned ? "originalOwner" : null).build().get("1"); + Lease lease = + builder.withLease("1", owned ? 
"originalOwner" : null).build().get("1"); Long originalLeaseCounter = lease.leaseCounter(); String newOwner = "newOwner"; @@ -300,8 +305,12 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest @Test public void testWaitUntilLeaseTableExists() throws LeasingException { final UUID uniqueId = UUID.randomUUID(); - DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher("tableEventuallyExists_" + uniqueId, ddbClient, - new DynamoDBLeaseSerializer(), true, tableCreatorCallback); + DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher( + "tableEventuallyExists_" + uniqueId, + ddbClient, + new DynamoDBLeaseSerializer(), + true, + tableCreatorCallback); refresher.createLeaseTableIfNotExists(); assertTrue(refresher.waitUntilLeaseTableExists(1, 20)); @@ -313,16 +322,16 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest * Just using AtomicInteger for the indirection it provides. */ final AtomicInteger sleepCounter = new AtomicInteger(0); - DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher("nonexistentTable", ddbClient, - new DynamoDBLeaseSerializer(), true, tableCreatorCallback) { - @Override - long sleep(long timeToSleepMillis) { - assertEquals(1000L, timeToSleepMillis); - sleepCounter.incrementAndGet(); - return 1000L; - } - - }; + DynamoDBLeaseRefresher refresher = + new DynamoDBLeaseRefresher( + "nonexistentTable", ddbClient, new DynamoDBLeaseSerializer(), true, tableCreatorCallback) { + @Override + long sleep(long timeToSleepMillis) { + assertEquals(1000L, timeToSleepMillis); + sleepCounter.incrementAndGet(); + return 1000L; + } + }; assertFalse(refresher.waitUntilLeaseTableExists(2, 1)); assertEquals(1, sleepCounter.get()); @@ -330,12 +339,15 @@ public class DynamoDBLeaseRefresherIntegrationTest extends LeaseIntegrationTest @Test public void testTableCreatorCallback() throws Exception { - DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher(tableName, ddbClient, - new 
DynamoDBLeaseSerializer(), true, tableCreatorCallback); + DynamoDBLeaseRefresher refresher = new DynamoDBLeaseRefresher( + tableName, ddbClient, new DynamoDBLeaseSerializer(), true, tableCreatorCallback); refresher.performPostTableCreationAction(); - verify(tableCreatorCallback).performAction( - eq(TableCreatorCallbackInput.builder().dynamoDbClient(ddbClient).tableName(tableName).build())); + verify(tableCreatorCallback) + .performAction(eq(TableCreatorCallbackInput.builder() + .dynamoDbClient(ddbClient) + .tableName(tableName) + .build())); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherTest.java index 6daa14a3..e757c93b 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRefresherTest.java @@ -14,18 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -43,9 +31,9 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; import software.amazon.awssdk.services.dynamodb.model.AttributeValue; 
+import software.amazon.awssdk.services.dynamodb.model.BillingMode; import software.amazon.awssdk.services.dynamodb.model.CreateTableRequest; import software.amazon.awssdk.services.dynamodb.model.CreateTableResponse; import software.amazon.awssdk.services.dynamodb.model.DeleteItemRequest; @@ -65,17 +53,27 @@ import software.amazon.awssdk.services.dynamodb.model.ScanRequest; import software.amazon.awssdk.services.dynamodb.model.ScanResponse; import software.amazon.awssdk.services.dynamodb.model.TableDescription; import software.amazon.awssdk.services.dynamodb.model.TableStatus; +import software.amazon.awssdk.services.dynamodb.model.Tag; import software.amazon.awssdk.services.dynamodb.model.UpdateItemRequest; import software.amazon.awssdk.services.dynamodb.model.UpdateItemResponse; -import software.amazon.awssdk.services.dynamodb.model.Tag; -import software.amazon.awssdk.services.dynamodb.model.BillingMode; - import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseManagementConfig; import software.amazon.kinesis.leases.LeaseSerializer; import software.amazon.kinesis.leases.exceptions.DependencyException; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.anyString; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class DynamoDBLeaseRefresherTest { @@ -85,24 +83,34 @@ public class DynamoDBLeaseRefresherTest { @Mock private DynamoDbAsyncClient dynamoDbClient; + @Mock private LeaseSerializer leaseSerializer; + @Mock private TableCreatorCallback 
tableCreatorCallback; + @Mock private CompletableFuture mockScanFuture; + @Mock private CompletableFuture mockPutItemFuture; + @Mock private CompletableFuture mockGetItemFuture; + @Mock private CompletableFuture mockUpdateFuture; + @Mock private CompletableFuture mockDeleteFuture; + @Mock private CompletableFuture mockDescribeTableFuture; + @Mock private CompletableFuture mockCreateTableFuture; + @Mock private Lease lease; @@ -118,11 +126,12 @@ public class DynamoDBLeaseRefresherTest { @Before public void setup() throws Exception { - leaseRefresher = new DynamoDBLeaseRefresher(TABLE_NAME, dynamoDbClient, leaseSerializer, CONSISTENT_READS, - tableCreatorCallback); + leaseRefresher = new DynamoDBLeaseRefresher( + TABLE_NAME, dynamoDbClient, leaseSerializer, CONSISTENT_READS, tableCreatorCallback); serializedLease = new HashMap<>(); - describeTableRequest = DescribeTableRequest.builder().tableName(TABLE_NAME).build(); + describeTableRequest = + DescribeTableRequest.builder().tableName(TABLE_NAME).build(); createTableRequest = CreateTableRequest.builder() .tableName(TABLE_NAME) .keySchema(leaseSerializer.getKeySchema()) @@ -152,7 +161,9 @@ public class DynamoDBLeaseRefresherTest { lastEvaluatedKey.put("Test", AttributeValue.builder().s("test").build()); when(mockScanFuture.get(anyLong(), any(TimeUnit.class))) - .thenReturn(ScanResponse.builder().lastEvaluatedKey(lastEvaluatedKey).build()) + .thenReturn(ScanResponse.builder() + .lastEvaluatedKey(lastEvaluatedKey) + .build()) .thenThrow(te); verifyCancel(mockScanFuture, () -> leaseRefresher.listLeases()); @@ -179,7 +190,9 @@ public class DynamoDBLeaseRefresherTest { when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); when(mockDescribeTableFuture.get(anyLong(), any())) .thenReturn(DescribeTableResponse.builder() - .table(TableDescription.builder().tableStatus(TableStatus.UPDATING).build()) + .table(TableDescription.builder() + .tableStatus(TableStatus.UPDATING) + 
.build()) .build()); assertTrue(leaseRefresher.waitUntilLeaseTableExists(0, 0)); } @@ -189,7 +202,9 @@ public class DynamoDBLeaseRefresherTest { when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); when(mockDescribeTableFuture.get(anyLong(), any())) .thenReturn(DescribeTableResponse.builder() - .table(TableDescription.builder().tableStatus(TableStatus.ACTIVE).build()) + .table(TableDescription.builder() + .tableStatus(TableStatus.ACTIVE) + .build()) .build()); assertTrue(leaseRefresher.waitUntilLeaseTableExists(0, 0)); } @@ -199,7 +214,9 @@ public class DynamoDBLeaseRefresherTest { when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); when(mockDescribeTableFuture.get(anyLong(), any())) .thenReturn(DescribeTableResponse.builder() - .table(TableDescription.builder().tableStatus(TableStatus.CREATING).build()) + .table(TableDescription.builder() + .tableStatus(TableStatus.CREATING) + .build()) .build()); assertFalse(leaseRefresher.waitUntilLeaseTableExists(0, 0)); } @@ -209,7 +226,9 @@ public class DynamoDBLeaseRefresherTest { when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); when(mockDescribeTableFuture.get(anyLong(), any())) .thenReturn(DescribeTableResponse.builder() - .table(TableDescription.builder().tableStatus(TableStatus.DELETING).build()) + .table(TableDescription.builder() + .tableStatus(TableStatus.DELETING) + .build()) .build()); assertFalse(leaseRefresher.waitUntilLeaseTableExists(0, 0)); } @@ -229,7 +248,7 @@ public class DynamoDBLeaseRefresherTest { @Test public void testRenewLeaseTimesOut() throws Exception { setupUpdateItemTest(); - verifyCancel(mockUpdateFuture, () ->leaseRefresher.renewLease(lease)); + verifyCancel(mockUpdateFuture, () -> leaseRefresher.renewLease(lease)); } @Test @@ -254,7 +273,8 @@ public class DynamoDBLeaseRefresherTest { public void testDeleteAllLeasesTimesOut() throws 
Exception { TimeoutException te = setRuleForDependencyTimeout(); when(dynamoDbClient.scan(any(ScanRequest.class))).thenReturn(mockScanFuture); - when(mockScanFuture.get(anyLong(), any())).thenReturn(ScanResponse.builder().items(Collections.emptyMap()).build()); + when(mockScanFuture.get(anyLong(), any())) + .thenReturn(ScanResponse.builder().items(Collections.emptyMap()).build()); when(leaseSerializer.fromDynamoRecord(any())).thenReturn(lease); when(leaseSerializer.getDynamoHashKey(any(Lease.class))).thenReturn(Collections.emptyMap()); @@ -287,15 +307,27 @@ public class DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableProvisionedBillingModeIfNotExists() throws Exception { - leaseRefresher = new DynamoDBLeaseRefresher(TABLE_NAME, dynamoDbClient, leaseSerializer, CONSISTENT_READS, - tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, BillingMode.PROVISIONED, DELETION_PROTECTION_ENABLED); + leaseRefresher = new DynamoDBLeaseRefresher( + TABLE_NAME, + dynamoDbClient, + leaseSerializer, + CONSISTENT_READS, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PROVISIONED, + DELETION_PROTECTION_ENABLED); when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); - final ProvisionedThroughput throughput = ProvisionedThroughput.builder().readCapacityUnits(10L) - .writeCapacityUnits(10L).build(); + final ProvisionedThroughput throughput = ProvisionedThroughput.builder() + .readCapacityUnits(10L) + .writeCapacityUnits(10L) + .build(); final 
CreateTableRequest createTableRequest = CreateTableRequest.builder() .tableName(TABLE_NAME) .keySchema(leaseSerializer.getKeySchema()) @@ -304,7 +336,8 @@ public class DynamoDBLeaseRefresherTest { .deletionProtectionEnabled(DELETION_PROTECTION_ENABLED) .build(); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenReturn(null); final boolean result = leaseRefresher.createLeaseTableIfNotExists(10L, 10L); @@ -321,15 +354,28 @@ public class DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableWithTagsIfNotExists() throws Exception { tags = Collections.singletonList(Tag.builder().key("foo").value("bar").build()); - leaseRefresher = new DynamoDBLeaseRefresher(TABLE_NAME, dynamoDbClient, leaseSerializer, CONSISTENT_READS, - tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, BillingMode.PROVISIONED, DELETION_PROTECTION_ENABLED, tags); + leaseRefresher = new DynamoDBLeaseRefresher( + TABLE_NAME, + dynamoDbClient, + leaseSerializer, + CONSISTENT_READS, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PROVISIONED, + DELETION_PROTECTION_ENABLED, + tags); when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS)) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS)) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); - final ProvisionedThroughput throughput = 
ProvisionedThroughput.builder().readCapacityUnits(10L) - .writeCapacityUnits(10L).build(); + final ProvisionedThroughput throughput = ProvisionedThroughput.builder() + .readCapacityUnits(10L) + .writeCapacityUnits(10L) + .build(); final CreateTableRequest createTableRequest = CreateTableRequest.builder() .tableName(TABLE_NAME) .keySchema(leaseSerializer.getKeySchema()) @@ -356,10 +402,14 @@ public class DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableIfNotExists() throws Exception { when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenReturn(null); final boolean result = leaseRefresher.createLeaseTableIfNotExists(); @@ -375,24 +425,37 @@ public class DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableProvisionedWithDeletionProtectionIfNotExists() throws Exception { - leaseRefresher = new DynamoDBLeaseRefresher(TABLE_NAME, dynamoDbClient, leaseSerializer, CONSISTENT_READS, - tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, BillingMode.PROVISIONED, true); + leaseRefresher = new DynamoDBLeaseRefresher( + TABLE_NAME, + dynamoDbClient, + leaseSerializer, + CONSISTENT_READS, + tableCreatorCallback, + 
LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PROVISIONED, + true); when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); - final ProvisionedThroughput throughput = ProvisionedThroughput.builder().readCapacityUnits(10L) - .writeCapacityUnits(10L).build(); + final ProvisionedThroughput throughput = ProvisionedThroughput.builder() + .readCapacityUnits(10L) + .writeCapacityUnits(10L) + .build(); final CreateTableRequest createTableRequest = CreateTableRequest.builder() - .tableName(TABLE_NAME) - .keySchema(leaseSerializer.getKeySchema()) - .attributeDefinitions(leaseSerializer.getAttributeDefinitions()) - .provisionedThroughput(throughput) - .deletionProtectionEnabled(true) - .build(); + .tableName(TABLE_NAME) + .keySchema(leaseSerializer.getKeySchema()) + .attributeDefinitions(leaseSerializer.getAttributeDefinitions()) + .provisionedThroughput(throughput) + .deletionProtectionEnabled(true) + .build(); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenReturn(null); final boolean result = leaseRefresher.createLeaseTableIfNotExists(10L, 10L); @@ -409,11 +472,15 @@ public class DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableIfNotExists_throwsDependencyException() throws Exception 
{ when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenThrow(new InterruptedException()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceInUseException.builder().message("Table already exists").build()); + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceInUseException.builder() + .message("Table already exists") + .build()); Assert.assertFalse(leaseRefresher.createLeaseTableIfNotExists()); verify(dynamoDbClient, times(1)).describeTable(describeTableRequest); @@ -425,13 +492,20 @@ public class DynamoDBLeaseRefresherTest { } @Test - public void testCreateLeaseTableIfNotExists_tableAlreadyExists_throwsResourceInUseException_expectFalse() throws Exception { + public void testCreateLeaseTableIfNotExists_tableAlreadyExists_throwsResourceInUseException_expectFalse() + throws Exception { when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - 
when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceInUseException.builder().message("Table already exists").build()); + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceInUseException.builder() + .message("Table already exists") + .build()); Assert.assertFalse(leaseRefresher.createLeaseTableIfNotExists()); verify(dynamoDbClient, times(1)).describeTable(describeTableRequest); @@ -443,12 +517,17 @@ public class DynamoDBLeaseRefresherTest { } @Test - public void testCreateLeaseTableIfNotExists_throwsLimitExceededException_expectProvisionedThroughputException() throws Exception { + public void testCreateLeaseTableIfNotExists_throwsLimitExceededException_expectProvisionedThroughputException() + throws Exception { when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenThrow(LimitExceededException.builder().build()); Assert.assertThrows(ProvisionedThroughputException.class, () -> leaseRefresher.createLeaseTableIfNotExists()); @@ -463,10 +542,14 @@ public class 
DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableIfNotExists_throwsDynamoDbException_expectDependencyException() throws Exception { when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenThrow(DynamoDbException.builder().build()); Assert.assertThrows(DependencyException.class, () -> leaseRefresher.createLeaseTableIfNotExists()); @@ -481,10 +564,14 @@ public class DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableIfNotExists_throwsTimeoutException_expectDependencyException() throws Exception { when(dynamoDbClient.describeTable(describeTableRequest)).thenReturn(mockDescribeTableFuture); - when(mockDescribeTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + when(mockDescribeTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(createTableRequest)).thenReturn(mockCreateTableFuture); - 
when(mockCreateTableFuture.get(eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) + when(mockCreateTableFuture.get( + eq(LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT.toMillis()), eq(TimeUnit.MILLISECONDS))) .thenThrow(new TimeoutException()); Assert.assertThrows(DependencyException.class, () -> leaseRefresher.createLeaseTableIfNotExists()); @@ -498,13 +585,22 @@ public class DynamoDBLeaseRefresherTest { @Test public void testCreateLeaseTableProvisionedBillingModeTimesOut() throws Exception { - leaseRefresher = new DynamoDBLeaseRefresher(TABLE_NAME, dynamoDbClient, leaseSerializer, CONSISTENT_READS, - tableCreatorCallback, LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, BillingMode.PROVISIONED, false); + leaseRefresher = new DynamoDBLeaseRefresher( + TABLE_NAME, + dynamoDbClient, + leaseSerializer, + CONSISTENT_READS, + tableCreatorCallback, + LeaseManagementConfig.DEFAULT_REQUEST_TIMEOUT, + BillingMode.PROVISIONED, + false); TimeoutException te = setRuleForDependencyTimeout(); when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); when(mockDescribeTableFuture.get(anyLong(), any())) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); when(dynamoDbClient.createTable(any(CreateTableRequest.class))).thenReturn(mockCreateTableFuture); when(mockCreateTableFuture.get(anyLong(), any())).thenThrow(te); @@ -518,7 +614,9 @@ public class DynamoDBLeaseRefresherTest { when(dynamoDbClient.describeTable(any(DescribeTableRequest.class))).thenReturn(mockDescribeTableFuture); when(mockDescribeTableFuture.get(anyLong(), any())) - .thenThrow(ResourceNotFoundException.builder().message("Table doesn't exist").build()); + .thenThrow(ResourceNotFoundException.builder() + .message("Table doesn't exist") + .build()); 
when(dynamoDbClient.createTable(any(CreateTableRequest.class))).thenReturn(mockCreateTableFuture); when(mockCreateTableFuture.get(anyLong(), any())).thenThrow(te); @@ -558,5 +656,4 @@ public class DynamoDBLeaseRefresherTest { return te; } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest.java index 3f692da5..dd6a17a2 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest.java @@ -14,6 +14,10 @@ */ package software.amazon.kinesis.leases.dynamodb; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.Executors; + import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -25,18 +29,14 @@ import software.amazon.kinesis.leases.exceptions.LeasingException; import software.amazon.kinesis.metrics.NullMetricsFactory; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import java.util.Collections; -import java.util.Map; -import java.util.concurrent.Executors; - import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; import static org.junit.Assert.assertThat; @RunWith(MockitoJUnitRunner.class) -public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends - LeaseIntegrationBillingModePayPerRequestTest { +public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest + extends LeaseIntegrationBillingModePayPerRequestTest { private static final String TEST_METRIC = 
"TestOperation"; // This test case's leases last 2 seconds @@ -46,8 +46,12 @@ public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends @Before public void setup() { - renewer = new DynamoDBLeaseRenewer(leaseRefresher, "foo", LEASE_DURATION_MILLIS, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + renewer = new DynamoDBLeaseRenewer( + leaseRefresher, + "foo", + LEASE_DURATION_MILLIS, + Executors.newCachedThreadPool(), + new NullMetricsFactory()); } @Test @@ -111,7 +115,7 @@ public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends builder.withLease("1", "foo").withLease("2", "foo").build(); builder.addLeasesToRenew(renewer, "1", "2"); - Lease lease2 = builder.renewMutateAssert(renewer, "1", "2").get("2"); + Lease lease2 = builder.renewMutateAssert(renewer, "1", "2").get("2"); // This should be a copy that doesn't get updated Map heldLeases = renewer.getCurrentlyHeldLeases(); @@ -141,7 +145,8 @@ public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends Lease expected = renewer.getCurrentlyHeldLease("1"); expected.checkpoint(new ExtendedSequenceNumber("new checkpoint")); - assertThat(renewer.updateLease(expected, expected.concurrencyToken(), TEST_METRIC, expected.leaseKey()), + assertThat( + renewer.updateLease(expected, expected.concurrencyToken(), TEST_METRIC, expected.leaseKey()), equalTo(true)); // Assert that the counter and data have changed immediately after the update... 
@@ -256,8 +261,8 @@ public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); builder.withLease(shardId, owner); Map leases = builder.build(); - DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer(leaseRefresher, owner, 30000L, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer( + leaseRefresher, owner, 30000L, Executors.newCachedThreadPool(), new NullMetricsFactory()); renewer.initialize(); Map heldLeases = renewer.getCurrentlyHeldLeases(); assertThat(heldLeases.size(), equalTo(leases.size())); @@ -271,8 +276,8 @@ public class DynamoDBLeaseRenewerBillingModePayPerRequestIntegrationTest extends TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); builder.withLease(shardId, owner); Map leases = builder.build(); - DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer(leaseRefresher, owner, 30000L, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer( + leaseRefresher, owner, 30000L, Executors.newCachedThreadPool(), new NullMetricsFactory()); renewer.initialize(); Map heldLeases = renewer.getCurrentlyHeldLeases(); assertThat(heldLeases.size(), equalTo(leases.size())); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerIntegrationTest.java index f179a073..5abd3a4b 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerIntegrationTest.java @@ -14,18 +14,12 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.hamcrest.CoreMatchers.equalTo; -import static 
org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertThat; - import java.util.Collections; import java.util.Map; import java.util.concurrent.Executors; import org.junit.Before; import org.junit.Test; - import org.junit.runner.RunWith; import org.mockito.runners.MockitoJUnitRunner; import software.amazon.kinesis.leases.Lease; @@ -34,6 +28,12 @@ import software.amazon.kinesis.leases.LeaseRenewer; import software.amazon.kinesis.leases.exceptions.LeasingException; import software.amazon.kinesis.metrics.NullMetricsFactory; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.junit.Assert.assertThat; + @RunWith(MockitoJUnitRunner.class) public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { private static final String TEST_METRIC = "TestOperation"; @@ -45,8 +45,12 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { @Before public void setup() { - renewer = new DynamoDBLeaseRenewer(leaseRefresher, "foo", LEASE_DURATION_MILLIS, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + renewer = new DynamoDBLeaseRenewer( + leaseRefresher, + "foo", + LEASE_DURATION_MILLIS, + Executors.newCachedThreadPool(), + new NullMetricsFactory()); } @Test @@ -110,7 +114,7 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { builder.withLease("1", "foo").withLease("2", "foo").build(); builder.addLeasesToRenew(renewer, "1", "2"); - Lease lease2 = builder.renewMutateAssert(renewer, "1", "2").get("2"); + Lease lease2 = builder.renewMutateAssert(renewer, "1", "2").get("2"); // This should be a copy that doesn't get updated Map heldLeases = renewer.getCurrentlyHeldLeases(); @@ -140,7 +144,8 @@ public class 
DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { Lease expected = renewer.getCurrentlyHeldLease("1"); expected.checkpoint(new ExtendedSequenceNumber("new checkpoint")); - assertThat(renewer.updateLease(expected, expected.concurrencyToken(), TEST_METRIC, expected.leaseKey()), + assertThat( + renewer.updateLease(expected, expected.concurrencyToken(), TEST_METRIC, expected.leaseKey()), equalTo(true)); // Assert that the counter and data have changed immediately after the update... @@ -255,8 +260,8 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); builder.withLease(shardId, owner); Map leases = builder.build(); - DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer(leaseRefresher, owner, 30000L, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer( + leaseRefresher, owner, 30000L, Executors.newCachedThreadPool(), new NullMetricsFactory()); renewer.initialize(); Map heldLeases = renewer.getCurrentlyHeldLeases(); assertThat(heldLeases.size(), equalTo(leases.size())); @@ -270,8 +275,8 @@ public class DynamoDBLeaseRenewerIntegrationTest extends LeaseIntegrationTest { TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); builder.withLease(shardId, owner); Map leases = builder.build(); - DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer(leaseRefresher, owner, 30000L, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + DynamoDBLeaseRenewer renewer = new DynamoDBLeaseRenewer( + leaseRefresher, owner, 30000L, Executors.newCachedThreadPool(), new NullMetricsFactory()); renewer.initialize(); Map heldLeases = renewer.getCurrentlyHeldLeases(); assertThat(heldLeases.size(), equalTo(leases.size())); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerTest.java 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerTest.java index 72379e88..8a700c19 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseRenewerTest.java @@ -14,15 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -36,7 +27,6 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.kinesis.common.HashKeyRangeForLease; import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseRefresher; @@ -45,6 +35,15 @@ import software.amazon.kinesis.leases.exceptions.InvalidStateException; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; import software.amazon.kinesis.metrics.NullMetricsFactory; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + @RunWith(MockitoJUnitRunner.class) public class DynamoDBLeaseRenewerTest { private final String workerIdentifier = "WorkerId"; @@ -56,15 +55,30 @@ public class DynamoDBLeaseRenewerTest { private LeaseRefresher leaseRefresher; private static 
Lease newLease(String leaseKey) { - return new Lease(leaseKey, "LeaseOwner", 0L, UUID.randomUUID(), System.nanoTime(), null, null, null, - new HashSet<>(), new HashSet<>(), null, HashKeyRangeForLease.deserialize("1", "2")); + return new Lease( + leaseKey, + "LeaseOwner", + 0L, + UUID.randomUUID(), + System.nanoTime(), + null, + null, + null, + new HashSet<>(), + new HashSet<>(), + null, + HashKeyRangeForLease.deserialize("1", "2")); } @Before public void before() { leasesToRenew = null; - renewer = new DynamoDBLeaseRenewer(leaseRefresher, workerIdentifier, leaseDurationMillis, - Executors.newCachedThreadPool(), new NullMetricsFactory()); + renewer = new DynamoDBLeaseRenewer( + leaseRefresher, + workerIdentifier, + leaseDurationMillis, + Executors.newCachedThreadPool(), + new NullMetricsFactory()); } @After @@ -79,7 +93,7 @@ public class DynamoDBLeaseRenewerTest { @Test public void testLeaseRenewerHoldsGoodLeases() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { /* * Prepare leases to be renewed * 2 Good @@ -98,7 +112,8 @@ public class DynamoDBLeaseRenewerTest { } @Test - public void testLeaseRenewerDoesNotRenewExpiredLease() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + public void testLeaseRenewerDoesNotRenewExpiredLease() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { String leaseKey = "expiredLease"; long initialCounterIncrementNanos = 5L; // "expired" time. 
Lease lease1 = newLease(leaseKey); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerIntegrationTest.java index a6dac253..6b86a5e7 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerIntegrationTest.java @@ -16,6 +16,7 @@ package software.amazon.kinesis.leases.dynamodb; import java.util.Collection; import java.util.Map; + import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -25,6 +26,7 @@ import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseIntegrationTest; import software.amazon.kinesis.leases.exceptions.LeasingException; import software.amazon.kinesis.metrics.NullMetricsFactory; + import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; @@ -159,10 +161,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { @Test public void testSlowGetAllLeases() throws LeasingException { long leaseDurationMillis = 0; - taker = new DynamoDBLeaseTaker(leaseRefresher, - "foo", - leaseDurationMillis, - new NullMetricsFactory()); + taker = new DynamoDBLeaseTaker(leaseRefresher, "foo", leaseDurationMillis, new NullMetricsFactory()); TestHarnessBuilder builder = new TestHarnessBuilder(leaseRefresher); Map addedLeases = builder.withLease("1", "bar") @@ -201,7 +200,7 @@ public class DynamoDBLeaseTakerIntegrationTest extends LeaseIntegrationTest { /** * Verify that one activity is stolen from the highest loaded server when a server needs more than one lease and no * expired leases are available. Setup: 4 leases, server foo holds 0, bar holds 1, baz holds 5. 
- * + * * Foo should steal from baz. */ @Test diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerTest.java index 1700460f..4e927f31 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/DynamoDBLeaseTakerTest.java @@ -14,8 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import com.google.common.collect.ImmutableList; - import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -28,6 +26,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; +import com.google.common.collect.ImmutableList; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -56,14 +55,17 @@ public class DynamoDBLeaseTakerTest { @Mock private LeaseRefresher leaseRefresher; + @Mock private MetricsFactory metricsFactory; + @Mock private Callable timeProvider; @Before public void setup() { - this.dynamoDBLeaseTaker = new DynamoDBLeaseTaker(leaseRefresher, WORKER_IDENTIFIER, LEASE_DURATION_MILLIS, metricsFactory); + this.dynamoDBLeaseTaker = + new DynamoDBLeaseTaker(leaseRefresher, WORKER_IDENTIFIER, LEASE_DURATION_MILLIS, metricsFactory); } /** @@ -72,10 +74,10 @@ public class DynamoDBLeaseTakerTest { @Test public final void testStringJoin() { List strings = new ArrayList<>(); - + strings.add("foo"); Assert.assertEquals("foo", DynamoDBLeaseTaker.stringJoin(strings, ", ")); - + strings.add("bar"); Assert.assertEquals("foo, bar", DynamoDBLeaseTaker.stringJoin(strings, ", ")); } @@ -128,11 +130,11 @@ public class DynamoDBLeaseTakerTest { @Test public void test_veryOldLeaseDurationNanosMultiplierGetsCorrectLeases() throws Exception { - long veryOldThreshold = MOCK_CURRENT_TIME - - 
(TimeUnit.MILLISECONDS.toNanos(LEASE_DURATION_MILLIS) * VERY_OLD_LEASE_DURATION_MULTIPLIER); - DynamoDBLeaseTaker dynamoDBLeaseTakerWithCustomMultiplier = - new DynamoDBLeaseTaker(leaseRefresher, WORKER_IDENTIFIER, LEASE_DURATION_MILLIS, metricsFactory) - .withVeryOldLeaseDurationNanosMultiplier(VERY_OLD_LEASE_DURATION_MULTIPLIER); + long veryOldThreshold = MOCK_CURRENT_TIME + - (TimeUnit.MILLISECONDS.toNanos(LEASE_DURATION_MILLIS) * VERY_OLD_LEASE_DURATION_MULTIPLIER); + DynamoDBLeaseTaker dynamoDBLeaseTakerWithCustomMultiplier = new DynamoDBLeaseTaker( + leaseRefresher, WORKER_IDENTIFIER, LEASE_DURATION_MILLIS, metricsFactory) + .withVeryOldLeaseDurationNanosMultiplier(VERY_OLD_LEASE_DURATION_MULTIPLIER); final List allLeases = new ImmutableList.Builder() .add(createLease("foo", "2", MOCK_CURRENT_TIME)) .add(createLease("bar", "3", veryOldThreshold - 1)) @@ -154,11 +156,11 @@ public class DynamoDBLeaseTakerTest { @Test public void test_disableEnablePriorityLeaseAssignmentGetsCorrectLeases() throws Exception { - long veryOldThreshold = MOCK_CURRENT_TIME - - (TimeUnit.MILLISECONDS.toNanos(LEASE_DURATION_MILLIS) * DEFAULT_VERY_OLD_LEASE_DURATION_MULTIPLIER); - DynamoDBLeaseTaker dynamoDBLeaseTakerWithDisabledPriorityLeaseAssignment = - new DynamoDBLeaseTaker(leaseRefresher, WORKER_IDENTIFIER, LEASE_DURATION_MILLIS, metricsFactory) - .withEnablePriorityLeaseAssignment(false); + long veryOldThreshold = MOCK_CURRENT_TIME + - (TimeUnit.MILLISECONDS.toNanos(LEASE_DURATION_MILLIS) * DEFAULT_VERY_OLD_LEASE_DURATION_MULTIPLIER); + DynamoDBLeaseTaker dynamoDBLeaseTakerWithDisabledPriorityLeaseAssignment = new DynamoDBLeaseTaker( + leaseRefresher, WORKER_IDENTIFIER, LEASE_DURATION_MILLIS, metricsFactory) + .withEnablePriorityLeaseAssignment(false); final List allLeases = new ArrayList<>(); allLeases.add(createLease("bar", "2", MOCK_CURRENT_TIME)); allLeases.add(createLease("bar", "3", MOCK_CURRENT_TIME)); @@ -174,7 +176,8 @@ public class DynamoDBLeaseTakerTest { 
when(metricsFactory.createMetrics()).thenReturn(new NullMetricsScope()); when(timeProvider.call()).thenReturn(MOCK_CURRENT_TIME); - Set output = dynamoDBLeaseTakerWithDisabledPriorityLeaseAssignment.computeLeasesToTake(expiredLeases, timeProvider); + Set output = + dynamoDBLeaseTakerWithDisabledPriorityLeaseAssignment.computeLeasesToTake(expiredLeases, timeProvider); final Set expectedOutput = new HashSet<>(); expectedOutput.add(createLease("baz", "5", veryOldThreshold - 1)); expectedOutput.add(createLease("baz", "6", veryOldThreshold + 1)); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/TestHarnessBuilder.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/TestHarnessBuilder.java index 00db6a51..38e4f50c 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/TestHarnessBuilder.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/leases/dynamodb/TestHarnessBuilder.java @@ -14,9 +14,6 @@ */ package software.amazon.kinesis.leases.dynamodb; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -32,6 +29,9 @@ import software.amazon.kinesis.leases.exceptions.InvalidStateException; import software.amazon.kinesis.leases.exceptions.LeasingException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + public class TestHarnessBuilder { private long currentTimeNanos; @@ -46,7 +46,6 @@ public class TestHarnessBuilder { public Long call() throws Exception { return currentTimeNanos; } - }; public TestHarnessBuilder(final DynamoDBLeaseRefresher leaseRefresher) { @@ -96,8 +95,7 @@ public class TestHarnessBuilder { currentTimeNanos += millis * 1000000; } - public Map takeMutateAssert(DynamoDBLeaseTaker taker, int numToTake) 
- throws LeasingException { + public Map takeMutateAssert(DynamoDBLeaseTaker taker, int numToTake) throws LeasingException { Map result = taker.takeLeases(timeProvider); assertEquals(numToTake, result.size()); @@ -111,8 +109,7 @@ public class TestHarnessBuilder { return result; } - public Map stealMutateAssert(DynamoDBLeaseTaker taker, int numToTake) - throws LeasingException { + public Map stealMutateAssert(DynamoDBLeaseTaker taker, int numToTake) throws LeasingException { Map result = taker.takeLeases(timeProvider); assertEquals(numToTake, result.size()); @@ -120,8 +117,7 @@ public class TestHarnessBuilder { Lease original = leases.get(actual.leaseKey()); assertNotNull(original); - original.isMarkedForLeaseSteal(true) - .lastCounterIncrementNanos(actual.lastCounterIncrementNanos()); + original.isMarkedForLeaseSteal(true).lastCounterIncrementNanos(actual.lastCounterIncrementNanos()); mutateAssert(taker.getWorkerIdentifier(), original, actual); } @@ -129,7 +125,7 @@ public class TestHarnessBuilder { } public Map takeMutateAssert(DynamoDBLeaseTaker taker, String... takenShardIds) - throws LeasingException { + throws LeasingException { Map result = taker.takeLeases(timeProvider); assertEquals(takenShardIds.length, result.size()); @@ -157,7 +153,7 @@ public class TestHarnessBuilder { } public void addLeasesToRenew(LeaseRenewer renewer, String... shardIds) - throws DependencyException, InvalidStateException { + throws DependencyException, InvalidStateException { List leasesToRenew = new ArrayList(); for (String shardId : shardIds) { @@ -170,7 +166,7 @@ public class TestHarnessBuilder { } public Map renewMutateAssert(LeaseRenewer renewer, String... 
renewedShardIds) - throws DependencyException, InvalidStateException { + throws DependencyException, InvalidStateException { renewer.renewLeases(); Map heldLeases = renewer.getCurrentlyHeldLeases(); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BasicStreamConsumerIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BasicStreamConsumerIntegrationTest.java index d03254c2..f6e7ba7e 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BasicStreamConsumerIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BasicStreamConsumerIntegrationTest.java @@ -1,11 +1,11 @@ package software.amazon.kinesis.lifecycle; import org.junit.Test; -import software.amazon.kinesis.config.KCLAppConfig; -import software.amazon.kinesis.config.ReleaseCanaryPollingH2TestConfig; -import software.amazon.kinesis.config.ReleaseCanaryPollingH1TestConfig; -import software.amazon.kinesis.config.ReleaseCanaryStreamingTestConfig; import software.amazon.kinesis.application.TestConsumer; +import software.amazon.kinesis.config.KCLAppConfig; +import software.amazon.kinesis.config.ReleaseCanaryPollingH1TestConfig; +import software.amazon.kinesis.config.ReleaseCanaryPollingH2TestConfig; +import software.amazon.kinesis.config.ReleaseCanaryStreamingTestConfig; public class BasicStreamConsumerIntegrationTest { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTaskTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTaskTest.java index 61473833..3d82acc3 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTaskTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/BlockOnParentShardTaskTest.java @@ -14,18 +14,11 @@ */ package software.amazon.kinesis.lifecycle; -import static 
org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - import java.util.ArrayList; import java.util.List; import org.junit.Before; import org.junit.Test; - import software.amazon.kinesis.leases.Lease; import software.amazon.kinesis.leases.LeaseRefresher; import software.amazon.kinesis.leases.ShardInfo; @@ -34,6 +27,12 @@ import software.amazon.kinesis.leases.exceptions.InvalidStateException; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + /** * */ @@ -58,7 +57,7 @@ public class BlockOnParentShardTaskTest { */ @Test public final void testCallNoParents() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { LeaseRefresher leaseRefresher = mock(LeaseRefresher.class); when(leaseRefresher.getLease(shardId)).thenReturn(null); @@ -75,7 +74,7 @@ public class BlockOnParentShardTaskTest { */ @Test public final void testCallShouldNotThrowBlockedOnParentWhenParentsHaveFinished() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { ShardInfo shardInfo = null; BlockOnParentShardTask task = null; String parent1ShardId = "shardId-1"; @@ -136,15 +135,16 @@ public class BlockOnParentShardTaskTest { // test single parent parentShardIds.add(parent1ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, - streamId); + shardInfo 
= + new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); result = task.call(); assertNull(result.getException()); // test two parents parentShardIds.add(parent2ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); + shardInfo = + new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); result = task.call(); assertNull(result.getException()); @@ -222,14 +222,16 @@ public class BlockOnParentShardTaskTest { // test single parent parentShardIds.add(parent1ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); + shardInfo = + new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); result = task.call(); assertNotNull(result.getException()); // test two parents parentShardIds.add(parent2ShardId); - shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); + shardInfo = + new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON, streamId); task = new BlockOnParentShardTask(shardInfo, leaseRefresher, backoffTimeInMillis); result = task.call(); assertNotNull(result.getException()); @@ -243,13 +245,14 @@ public class BlockOnParentShardTaskTest { */ @Test public final void testCallBeforeAndAfterAParentFinishes() - throws DependencyException, InvalidStateException, ProvisionedThroughputException { + throws DependencyException, InvalidStateException, ProvisionedThroughputException { BlockOnParentShardTask task = null; String parentShardId = 
"shardId-1"; List parentShardIds = new ArrayList<>(); parentShardIds.add(parentShardId); - ShardInfo shardInfo = new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); + ShardInfo shardInfo = + new ShardInfo(shardId, concurrencyToken, parentShardIds, ExtendedSequenceNumber.TRIM_HORIZON); TaskResult result = null; Lease parentLease = new Lease(); LeaseRefresher leaseRefresher = mock(LeaseRefresher.class); @@ -276,5 +279,4 @@ public class BlockOnParentShardTaskTest { BlockOnParentShardTask task = new BlockOnParentShardTask(shardInfo, null, backoffTimeInMillis); assertEquals(TaskType.BLOCK_ON_PARENT_SHARDS, task.taskType()); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ConsumerStatesTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ConsumerStatesTest.java index 6551f949..9c9f1930 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ConsumerStatesTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ConsumerStatesTest.java @@ -14,13 +14,6 @@ */ package software.amazon.kinesis.lifecycle; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; -import static software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState; - import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; @@ -36,18 +29,17 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.model.ChildShard; import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; import software.amazon.kinesis.common.InitialPositionInStream; import 
software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.StreamIdentifier; +import software.amazon.kinesis.leases.HierarchicalShardSyncer; import software.amazon.kinesis.leases.LeaseCleanupManager; import software.amazon.kinesis.leases.LeaseCoordinator; import software.amazon.kinesis.leases.LeaseRefresher; import software.amazon.kinesis.leases.ShardDetector; import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.leases.HierarchicalShardSyncer; import software.amazon.kinesis.leases.ShardObjectHelper; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; import software.amazon.kinesis.metrics.MetricsFactory; @@ -58,43 +50,64 @@ import software.amazon.kinesis.retrieval.AggregatorUtil; import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.schemaregistry.SchemaRegistryDecoder; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; +import static software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState; + @RunWith(MockitoJUnitRunner.class) public class ConsumerStatesTest { private static final String STREAM_NAME = "TestStream"; - private static final InitialPositionInStreamExtended INITIAL_POSITION_IN_STREAM = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + private static final InitialPositionInStreamExtended INITIAL_POSITION_IN_STREAM = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); private ShardConsumer consumer; private ShardConsumerArgument argument; @Mock private ShardRecordProcessor shardRecordProcessor; + @Mock private ShardRecordProcessorCheckpointer recordProcessorCheckpointer; + @Mock private ExecutorService executorService; + @Mock private ShardInfo shardInfo; 
+ @Mock private LeaseCoordinator leaseCoordinator; + @Mock private LeaseRefresher leaseRefresher; + @Mock private Checkpointer checkpointer; + @Mock private ShutdownNotification shutdownNotification; + @Mock private RecordsPublisher recordsPublisher; + @Mock private ShardDetector shardDetector; + @Mock private HierarchicalShardSyncer hierarchicalShardSyncer; + @Mock private MetricsFactory metricsFactory; + @Mock private ProcessRecordsInput processRecordsInput; + @Mock private TaskExecutionListener taskExecutionListener; + @Mock private LeaseCleanupManager leaseCleanupManager; @@ -113,17 +126,43 @@ public class ConsumerStatesTest { @Before public void setup() { - argument = new ShardConsumerArgument(shardInfo, StreamIdentifier.singleStreamInstance(STREAM_NAME), - leaseCoordinator, executorService, recordsPublisher, - shardRecordProcessor, checkpointer, recordProcessorCheckpointer, parentShardPollIntervalMillis, - taskBackoffTimeMillis, skipShardSyncAtWorkerInitializationIfLeasesExist, listShardsBackoffTimeInMillis, - maxListShardsRetryAttempts, shouldCallProcessRecordsEvenForEmptyRecordList, idleTimeInMillis, - INITIAL_POSITION_IN_STREAM, cleanupLeasesOfCompletedShards, ignoreUnexpectedChildShards, shardDetector, - new AggregatorUtil(), hierarchicalShardSyncer, metricsFactory, leaseCleanupManager, schemaRegistryDecoder); + argument = new ShardConsumerArgument( + shardInfo, + StreamIdentifier.singleStreamInstance(STREAM_NAME), + leaseCoordinator, + executorService, + recordsPublisher, + shardRecordProcessor, + checkpointer, + recordProcessorCheckpointer, + parentShardPollIntervalMillis, + taskBackoffTimeMillis, + skipShardSyncAtWorkerInitializationIfLeasesExist, + listShardsBackoffTimeInMillis, + maxListShardsRetryAttempts, + shouldCallProcessRecordsEvenForEmptyRecordList, + idleTimeInMillis, + INITIAL_POSITION_IN_STREAM, + cleanupLeasesOfCompletedShards, + ignoreUnexpectedChildShards, + shardDetector, + new AggregatorUtil(), + hierarchicalShardSyncer, + 
metricsFactory, + leaseCleanupManager, + schemaRegistryDecoder); when(shardInfo.shardId()).thenReturn("shardId-000000000000"); - when(shardInfo.streamIdentifierSerOpt()).thenReturn(Optional.of(StreamIdentifier.singleStreamInstance(STREAM_NAME).serialize())); - consumer = spy(new ShardConsumer(recordsPublisher, executorService, shardInfo, logWarningForTaskAfterMillis, - argument, taskExecutionListener, 0)); + when(shardInfo.streamIdentifierSerOpt()) + .thenReturn(Optional.of( + StreamIdentifier.singleStreamInstance(STREAM_NAME).serialize())); + consumer = spy(new ShardConsumer( + recordsPublisher, + executorService, + shardInfo, + logWarningForTaskAfterMillis, + argument, + taskExecutionListener, + 0)); when(recordProcessorCheckpointer.checkpointer()).thenReturn(checkpointer); } @@ -137,20 +176,30 @@ public class ConsumerStatesTest { ConsumerTask task = state.createTask(argument, consumer, null); assertThat(task, taskWith(BlockOnParentShardTask.class, ShardInfo.class, "shardInfo", equalTo(shardInfo))); - assertThat(task, taskWith(BlockOnParentShardTask.class, LEASE_REFRESHER_CLASS, "leaseRefresher", - equalTo(leaseRefresher))); - assertThat(task, taskWith(BlockOnParentShardTask.class, Long.class, "parentShardPollIntervalMillis", - equalTo(parentShardPollIntervalMillis))); + assertThat( + task, + taskWith( + BlockOnParentShardTask.class, + LEASE_REFRESHER_CLASS, + "leaseRefresher", + equalTo(leaseRefresher))); + assertThat( + task, + taskWith( + BlockOnParentShardTask.class, + Long.class, + "parentShardPollIntervalMillis", + equalTo(parentShardPollIntervalMillis))); assertThat(state.successTransition(), equalTo(ShardConsumerState.INITIALIZING.consumerState())); for (ShutdownReason shutdownReason : ShutdownReason.values()) { - assertThat(state.shutdownTransition(shutdownReason), + assertThat( + state.shutdownTransition(shutdownReason), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); } assertThat(state.state(), 
equalTo(ShardConsumerState.WAITING_ON_PARENT_SHARDS)); assertThat(state.taskType(), equalTo(TaskType.BLOCK_ON_PARENT_SHARDS)); - } @Test @@ -161,17 +210,24 @@ public class ConsumerStatesTest { assertThat(task, initTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); assertThat(task, initTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); assertThat(task, initTask(Checkpointer.class, "checkpoint", equalTo(checkpointer))); - assertThat(task, initTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); + assertThat( + task, + initTask( + ShardRecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); assertThat(task, initTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + assertThat( + state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.INITIALIZING)); @@ -186,22 +242,28 @@ public class ConsumerStatesTest { assertThat(task, procTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); assertThat(task, procTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, procTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); + 
assertThat( + task, + procTask( + ShardRecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); assertThat(task, procTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + assertThat( + state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.PROCESSING)); assertThat(state.taskType(), equalTo(TaskType.PROCESS)); - } @Test @@ -212,22 +274,28 @@ public class ConsumerStatesTest { assertThat(task, procTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); assertThat(task, procTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, procTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); + assertThat( + task, + procTask( + ShardRecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); assertThat(task, procTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - 
assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + assertThat( + state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.PROCESSING)); assertThat(state.taskType(), equalTo(TaskType.PROCESS)); - } @Test @@ -238,17 +306,24 @@ public class ConsumerStatesTest { assertThat(task, procTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); assertThat(task, procTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, procTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); + assertThat( + task, + procTask( + ShardRecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); assertThat(task, procTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); assertThat(state.successTransition(), equalTo(ShardConsumerState.PROCESSING.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + assertThat( + state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.PROCESSING)); @@ -262,24 +337,32 @@ public class ConsumerStatesTest { 
consumer.gracefulShutdown(shutdownNotification); ConsumerTask task = state.createTask(argument, consumer, null); - assertThat(task, + assertThat( + task, shutdownReqTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, shutdownReqTask(RecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); - assertThat(task, + assertThat( + task, + shutdownReqTask( + RecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); + assertThat( + task, shutdownReqTask(ShutdownNotification.class, "shutdownNotification", equalTo(shutdownNotification))); assertThat(state.successTransition(), equalTo(ConsumerStates.SHUTDOWN_REQUEST_COMPLETION_STATE)); - assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), + assertThat( + state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(ConsumerStates.SHUTDOWN_REQUEST_COMPLETION_STATE)); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED)); assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN_NOTIFICATION)); - } @Test @@ -291,14 +374,15 @@ public class ConsumerStatesTest { assertThat(state.successTransition(), equalTo(state)); assertThat(state.shutdownTransition(ShutdownReason.REQUESTED), equalTo(state)); - assertThat(state.shutdownTransition(ShutdownReason.LEASE_LOST), + assertThat( + state.shutdownTransition(ShutdownReason.LEASE_LOST), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); - assertThat(state.shutdownTransition(ShutdownReason.SHARD_END), + assertThat( + 
state.shutdownTransition(ShutdownReason.SHARD_END), equalTo(ShardConsumerState.SHUTTING_DOWN.consumerState())); assertThat(state.state(), equalTo(ShardConsumerState.SHUTDOWN_REQUESTED)); assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN_NOTIFICATION)); - } @Test @@ -309,28 +393,33 @@ public class ConsumerStatesTest { List parentShards = new ArrayList<>(); parentShards.add("shardId-000000000000"); ChildShard leftChild = ChildShard.builder() - .shardId("shardId-000000000001") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) - .build(); + .shardId("shardId-000000000001") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) + .build(); ChildShard rightChild = ChildShard.builder() - .shardId("shardId-000000000002") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) - .build(); + .shardId("shardId-000000000002") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) + .build(); childShards.add(leftChild); childShards.add(rightChild); when(processRecordsInput.childShards()).thenReturn(childShards); ConsumerTask task = state.createTask(argument, consumer, processRecordsInput); assertThat(task, shutdownTask(ShardInfo.class, "shardInfo", equalTo(shardInfo))); - assertThat(task, - shutdownTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); - assertThat(task, shutdownTask(ShardRecordProcessorCheckpointer.class, "recordProcessorCheckpointer", - equalTo(recordProcessorCheckpointer))); + assertThat( + task, shutdownTask(ShardRecordProcessor.class, "shardRecordProcessor", equalTo(shardRecordProcessor))); + assertThat( + task, + shutdownTask( + ShardRecordProcessorCheckpointer.class, + "recordProcessorCheckpointer", + equalTo(recordProcessorCheckpointer))); assertThat(task, shutdownTask(ShutdownReason.class, "reason", equalTo(reason))); assertThat(task, 
shutdownTask(LeaseCoordinator.class, "leaseCoordinator", equalTo(leaseCoordinator))); - assertThat(task, + assertThat( + task, shutdownTask(Boolean.class, "cleanupLeasesOfCompletedShards", equalTo(cleanupLeasesOfCompletedShards))); assertThat(task, shutdownTask(Long.class, "backoffTimeMillis", equalTo(taskBackoffTimeMillis))); @@ -342,7 +431,6 @@ public class ConsumerStatesTest { assertThat(state.state(), equalTo(ShardConsumerState.SHUTTING_DOWN)); assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN)); - } @Test @@ -363,9 +451,8 @@ public class ConsumerStatesTest { assertThat(state.taskType(), equalTo(TaskType.SHUTDOWN_COMPLETE)); } - - static ReflectionPropertyMatcher shutdownTask(Class valueTypeClass, - String propertyName, Matcher matcher) { + static ReflectionPropertyMatcher shutdownTask( + Class valueTypeClass, String propertyName, Matcher matcher) { return taskWith(ShutdownTask.class, valueTypeClass, propertyName, matcher); } @@ -374,18 +461,21 @@ public class ConsumerStatesTest { return taskWith(ShutdownNotificationTask.class, valueTypeClass, propertyName, matcher); } - static ReflectionPropertyMatcher procTask(Class valueTypeClass, - String propertyName, Matcher matcher) { + static ReflectionPropertyMatcher procTask( + Class valueTypeClass, String propertyName, Matcher matcher) { return taskWith(ProcessTask.class, valueTypeClass, propertyName, matcher); } - static ReflectionPropertyMatcher initTask(Class valueTypeClass, - String propertyName, Matcher matcher) { + static ReflectionPropertyMatcher initTask( + Class valueTypeClass, String propertyName, Matcher matcher) { return taskWith(InitializeTask.class, valueTypeClass, propertyName, matcher); } - static ReflectionPropertyMatcher taskWith(Class taskTypeClass, - Class valueTypeClass, String propertyName, Matcher matcher) { + static ReflectionPropertyMatcher taskWith( + Class taskTypeClass, + Class valueTypeClass, + String propertyName, + Matcher matcher) { return new 
ReflectionPropertyMatcher<>(taskTypeClass, valueTypeClass, matcher, propertyName); } @@ -398,8 +488,11 @@ public class ConsumerStatesTest { private final String propertyName; private final Field matchingField; - private ReflectionPropertyMatcher(Class taskTypeClass, Class valueTypeClass, - Matcher matcher, String propertyName) { + private ReflectionPropertyMatcher( + Class taskTypeClass, + Class valueTypeClass, + Matcher matcher, + String propertyName) { this.taskTypeClass = taskTypeClass; this.valueTypeClazz = valueTypeClass; this.matcher = matcher; @@ -418,44 +511,54 @@ public class ConsumerStatesTest { @Override protected boolean matchesSafely(ConsumerTask item, Description mismatchDescription) { - return Condition.matched(item, mismatchDescription).and(new Condition.Step() { - @Override - public Condition apply(ConsumerTask value, Description mismatch) { - if (taskTypeClass.equals(value.getClass())) { - return Condition.matched(taskTypeClass.cast(value), mismatch); - } - mismatch.appendText("Expected task type of ").appendText(taskTypeClass.getName()) - .appendText(" but was ").appendText(value.getClass().getName()); - return Condition.notMatched(); - } - }).and(new Condition.Step() { - @Override - public Condition apply(TaskType value, Description mismatch) { - if (matchingField == null) { - mismatch.appendText("Field ").appendText(propertyName).appendText(" not present in ") - .appendText(taskTypeClass.getName()); - return Condition.notMatched(); - } + return Condition.matched(item, mismatchDescription) + .and(new Condition.Step() { + @Override + public Condition apply(ConsumerTask value, Description mismatch) { + if (taskTypeClass.equals(value.getClass())) { + return Condition.matched(taskTypeClass.cast(value), mismatch); + } + mismatch.appendText("Expected task type of ") + .appendText(taskTypeClass.getName()) + .appendText(" but was ") + .appendText(value.getClass().getName()); + return Condition.notMatched(); + } + }) + .and(new Condition.Step() { + 
@Override + public Condition apply(TaskType value, Description mismatch) { + if (matchingField == null) { + mismatch.appendText("Field ") + .appendText(propertyName) + .appendText(" not present in ") + .appendText(taskTypeClass.getName()); + return Condition.notMatched(); + } - try { - return Condition.matched(getValue(value), mismatch); - } catch (RuntimeException re) { - mismatch.appendText("Failure while retrieving value for ").appendText(propertyName); - return Condition.notMatched(); - } - - } - }).and(new Condition.Step() { - @Override - public Condition apply(Object value, Description mismatch) { - if (value != null && !valueTypeClazz.isAssignableFrom(value.getClass())) { - mismatch.appendText("Expected a value of type ").appendText(valueTypeClazz.getName()) - .appendText(" but was ").appendText(value.getClass().getName()); - return Condition.notMatched(); - } - return Condition.matched(valueTypeClazz.cast(value), mismatch); - } - }).matching(matcher); + try { + return Condition.matched(getValue(value), mismatch); + } catch (RuntimeException re) { + mismatch.appendText("Failure while retrieving value for ") + .appendText(propertyName); + return Condition.notMatched(); + } + } + }) + .and(new Condition.Step() { + @Override + public Condition apply(Object value, Description mismatch) { + if (value != null && !valueTypeClazz.isAssignableFrom(value.getClass())) { + mismatch.appendText("Expected a value of type ") + .appendText(valueTypeClazz.getName()) + .appendText(" but was ") + .appendText(value.getClass().getName()); + return Condition.notMatched(); + } + return Condition.matched(valueTypeClazz.cast(value), mismatch); + } + }) + .matching(matcher); } @Override @@ -476,5 +579,4 @@ public class ConsumerStatesTest { } } } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/CrossAccountStreamConsumerIntegrationTest.java 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/CrossAccountStreamConsumerIntegrationTest.java index 264a43ed..d9d1371c 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/CrossAccountStreamConsumerIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/CrossAccountStreamConsumerIntegrationTest.java @@ -1,5 +1,6 @@ package software.amazon.kinesis.lifecycle; +import org.junit.Test; import software.amazon.kinesis.application.TestConsumer; import software.amazon.kinesis.config.KCLAppConfig; import software.amazon.kinesis.config.crossaccount.ReleaseCanaryCrossAccountMultiStreamPollingH2TestConfig; @@ -7,8 +8,6 @@ import software.amazon.kinesis.config.crossaccount.ReleaseCanaryCrossAccountMult import software.amazon.kinesis.config.crossaccount.ReleaseCanaryCrossAccountPollingH2TestConfig; import software.amazon.kinesis.config.crossaccount.ReleaseCanaryCrossAccountStreamingTestConfig; -import org.junit.Test; - public class CrossAccountStreamConsumerIntegrationTest { /** @@ -43,5 +42,4 @@ public class CrossAccountStreamConsumerIntegrationTest { TestConsumer consumer = new TestConsumer(consumerConfig); consumer.run(); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/MultiStreamConsumerIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/MultiStreamConsumerIntegrationTest.java index f6314eac..cbccf1f2 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/MultiStreamConsumerIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/MultiStreamConsumerIntegrationTest.java @@ -1,7 +1,6 @@ package software.amazon.kinesis.lifecycle; import org.junit.Test; - import software.amazon.kinesis.application.TestConsumer; import software.amazon.kinesis.config.KCLAppConfig; import 
software.amazon.kinesis.config.ReleaseCanaryMultiStreamPollingH2TestConfig; diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ProcessTaskTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ProcessTaskTest.java index 1e89d8cc..300ad832 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ProcessTaskTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ProcessTaskTest.java @@ -14,24 +14,6 @@ */ package software.amazon.kinesis.lifecycle; -import static org.hamcrest.CoreMatchers.allOf; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.beans.HasPropertyWithValue.hasProperty; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.io.ByteArrayOutputStream; import java.math.BigInteger; import java.nio.ByteBuffer; @@ -52,6 +34,9 @@ import java.util.concurrent.TimeUnit; import com.amazonaws.services.schemaregistry.common.Schema; import com.amazonaws.services.schemaregistry.deserializers.GlueSchemaRegistryDeserializer; import com.google.common.collect.ImmutableList; +import com.google.protobuf.ByteString; +import lombok.Data; +import lombok.Getter; import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; @@ -61,11 +46,6 @@ import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; 
import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - -import com.google.protobuf.ByteString; - -import lombok.Data; -import lombok.Getter; import software.amazon.awssdk.services.kinesis.model.HashKeyRange; import software.amazon.awssdk.services.kinesis.model.Shard; import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; @@ -83,6 +63,24 @@ import software.amazon.kinesis.retrieval.kpl.Messages; import software.amazon.kinesis.retrieval.kpl.Messages.AggregatedRecord; import software.amazon.kinesis.schemaregistry.SchemaRegistryDecoder; +import static org.hamcrest.CoreMatchers.allOf; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.beans.HasPropertyWithValue.hasProperty; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + @RunWith(MockitoJUnitRunner.class) public class ProcessTaskTest { private static final long IDLE_TIME_IN_MILLISECONDS = 100L; @@ -95,21 +93,24 @@ public class ProcessTaskTest { @Mock private ProcessRecordsInput processRecordsInput; + @Mock private ShardDetector shardDetector; @Mock private GlueSchemaRegistryDeserializer glueSchemaRegistryDeserializer; - private static final byte[] TEST_DATA = new byte[] { 1, 2, 3, 4 }; + private static final byte[] TEST_DATA = new byte[] {1, 2, 3, 4}; private final String shardId = "shard-test"; private final long taskBackoffTimeMillis = 1L; @Mock private ShardRecordProcessor 
shardRecordProcessor; + @Mock private ShardRecordProcessorCheckpointer checkpointer; + @Mock private ThrottlingReporter throttlingReporter; @@ -123,29 +124,43 @@ public class ProcessTaskTest { } private ProcessTask makeProcessTask(ProcessRecordsInput processRecordsInput) { - return makeProcessTask(processRecordsInput, new AggregatorUtil(), - skipShardSyncAtWorkerInitializationIfLeasesExist); + return makeProcessTask( + processRecordsInput, new AggregatorUtil(), skipShardSyncAtWorkerInitializationIfLeasesExist); } - private ProcessTask makeProcessTask(ProcessRecordsInput processRecordsInput, GlueSchemaRegistryDeserializer deserializer) { - return makeProcessTask(processRecordsInput, new AggregatorUtil(), skipShardSyncAtWorkerInitializationIfLeasesExist, + private ProcessTask makeProcessTask( + ProcessRecordsInput processRecordsInput, GlueSchemaRegistryDeserializer deserializer) { + return makeProcessTask( + processRecordsInput, + new AggregatorUtil(), + skipShardSyncAtWorkerInitializationIfLeasesExist, new SchemaRegistryDecoder(deserializer)); } - private ProcessTask makeProcessTask(ProcessRecordsInput processRecordsInput, AggregatorUtil aggregatorUtil, - boolean skipShardSync) { + private ProcessTask makeProcessTask( + ProcessRecordsInput processRecordsInput, AggregatorUtil aggregatorUtil, boolean skipShardSync) { return makeProcessTask(processRecordsInput, aggregatorUtil, skipShardSync, null); } - private ProcessTask makeProcessTask(ProcessRecordsInput processRecordsInput, AggregatorUtil aggregatorUtil, boolean skipShardSync, - SchemaRegistryDecoder schemaRegistryDecoder) { - return new ProcessTask(shardInfo, shardRecordProcessor, checkpointer, taskBackoffTimeMillis, - skipShardSync, shardDetector, throttlingReporter, - processRecordsInput, shouldCallProcessRecordsEvenForEmptyRecordList, IDLE_TIME_IN_MILLISECONDS, - aggregatorUtil, - new NullMetricsFactory(), - schemaRegistryDecoder - ); + private ProcessTask makeProcessTask( + ProcessRecordsInput 
processRecordsInput, + AggregatorUtil aggregatorUtil, + boolean skipShardSync, + SchemaRegistryDecoder schemaRegistryDecoder) { + return new ProcessTask( + shardInfo, + shardRecordProcessor, + checkpointer, + taskBackoffTimeMillis, + skipShardSync, + shardDetector, + throttlingReporter, + processRecordsInput, + shouldCallProcessRecordsEvenForEmptyRecordList, + IDLE_TIME_IN_MILLISECONDS, + aggregatorUtil, + new NullMetricsFactory(), + schemaRegistryDecoder); } @Test @@ -158,13 +173,23 @@ public class ProcessTaskTest { } private KinesisClientRecord makeKinesisClientRecord(String partitionKey, String sequenceNumber, Instant arrival) { - return KinesisClientRecord.builder().partitionKey(partitionKey).sequenceNumber(sequenceNumber) - .approximateArrivalTimestamp(arrival).data(ByteBuffer.wrap(TEST_DATA)).build(); + return KinesisClientRecord.builder() + .partitionKey(partitionKey) + .sequenceNumber(sequenceNumber) + .approximateArrivalTimestamp(arrival) + .data(ByteBuffer.wrap(TEST_DATA)) + .build(); } - private KinesisClientRecord makeKinesisClientRecord(String partitionKey, String sequenceNumber, Instant arrival, ByteBuffer data, Schema schema) { - return KinesisClientRecord.builder().partitionKey(partitionKey).sequenceNumber(sequenceNumber) - .approximateArrivalTimestamp(arrival).data(data).schema(schema).build(); + private KinesisClientRecord makeKinesisClientRecord( + String partitionKey, String sequenceNumber, Instant arrival, ByteBuffer data, Schema schema) { + return KinesisClientRecord.builder() + .partitionKey(partitionKey) + .sequenceNumber(sequenceNumber) + .approximateArrivalTimestamp(arrival) + .data(data) + .schema(schema) + .build(); } @Test @@ -199,13 +224,18 @@ public class ProcessTaskTest { final String sqn = new BigInteger(128, new Random()).toString(); final String pk = UUID.randomUUID().toString(); final Instant ts = Instant.now().minus(4, ChronoUnit.HOURS); - KinesisClientRecord record = 
KinesisClientRecord.builder().partitionKey("-").data(generateAggregatedRecord(pk)) - .sequenceNumber(sqn).approximateArrivalTimestamp(ts).build(); + KinesisClientRecord record = KinesisClientRecord.builder() + .partitionKey("-") + .data(generateAggregatedRecord(pk)) + .sequenceNumber(sqn) + .approximateArrivalTimestamp(ts) + .build(); processTask = makeProcessTask(processRecordsInput); ShardRecordProcessorOutcome outcome = testWithRecord(record); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); assertEquals(3, actualRecords.size()); for (KinesisClientRecord pr : actualRecords) { @@ -227,13 +257,17 @@ public class ProcessTaskTest { final String sqn = new BigInteger(128, new Random()).toString(); final String pk = UUID.randomUUID().toString(); - KinesisClientRecord record = KinesisClientRecord.builder().partitionKey("-").data(generateAggregatedRecord(pk)) - .sequenceNumber(sqn).build(); + KinesisClientRecord record = KinesisClientRecord.builder() + .partitionKey("-") + .data(generateAggregatedRecord(pk)) + .sequenceNumber(sqn) + .build(); processTask = makeProcessTask(processRecordsInput); ShardRecordProcessorOutcome outcome = testWithRecord(record); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); assertEquals(3, actualRecords.size()); for (KinesisClientRecord actualRecord : actualRecords) { @@ -251,11 +285,12 @@ public class ProcessTaskTest { final int numberOfRecords = 104; // Start these batch of records's sequence number that is greater than previous checkpoint value. 
final BigInteger startingSqn = previousCheckpointSqn.add(BigInteger.valueOf(10)); - final List records = generateConsecutiveRecords(numberOfRecords, "-", ByteBuffer.wrap(TEST_DATA), - new Date(), startingSqn); + final List records = + generateConsecutiveRecords(numberOfRecords, "-", ByteBuffer.wrap(TEST_DATA), new Date(), startingSqn); processTask = makeProcessTask(processRecordsInput); - ShardRecordProcessorOutcome outcome = testWithRecords(records, + ShardRecordProcessorOutcome outcome = testWithRecords( + records, new ExtendedSequenceNumber(previousCheckpointSqn.toString()), new ExtendedSequenceNumber(previousCheckpointSqn.toString())); @@ -269,12 +304,12 @@ public class ProcessTaskTest { // Some sequence number value from previous processRecords call. final BigInteger baseSqn = new BigInteger(128, new Random()); final ExtendedSequenceNumber lastCheckpointEspn = new ExtendedSequenceNumber(baseSqn.toString()); - final ExtendedSequenceNumber largestPermittedEsqn = new ExtendedSequenceNumber( - baseSqn.add(BigInteger.valueOf(100)).toString()); + final ExtendedSequenceNumber largestPermittedEsqn = + new ExtendedSequenceNumber(baseSqn.add(BigInteger.valueOf(100)).toString()); processTask = makeProcessTask(processRecordsInput); - ShardRecordProcessorOutcome outcome = testWithRecords(Collections.emptyList(), lastCheckpointEspn, - largestPermittedEsqn); + ShardRecordProcessorOutcome outcome = + testWithRecords(Collections.emptyList(), lastCheckpointEspn, largestPermittedEsqn); // Make sure that even with empty records, largest permitted sequence number does not change. assertEquals(largestPermittedEsqn, outcome.getCheckpointCall()); @@ -295,15 +330,20 @@ public class ProcessTaskTest { // Values for this processRecords call. 
String startingSqn = previousCheckpointSqn.toString(); String pk = UUID.randomUUID().toString(); - KinesisClientRecord record = KinesisClientRecord.builder().partitionKey("-").data(generateAggregatedRecord(pk)) - .sequenceNumber(startingSqn).build(); + KinesisClientRecord record = KinesisClientRecord.builder() + .partitionKey("-") + .data(generateAggregatedRecord(pk)) + .sequenceNumber(startingSqn) + .build(); processTask = makeProcessTask(processRecordsInput); - ShardRecordProcessorOutcome outcome = testWithRecords(Collections.singletonList(record), + ShardRecordProcessorOutcome outcome = testWithRecords( + Collections.singletonList(record), new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn), new ExtendedSequenceNumber(previousCheckpointSqn.toString(), previousCheckpointSsqn)); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); // First two records should be dropped - and only 1 remaining records should be there. assertThat(actualRecords.size(), equalTo(1)); @@ -316,8 +356,8 @@ public class ProcessTaskTest { assertThat(actualRecord.approximateArrivalTimestamp(), nullValue()); // Expected largest permitted sequence number will be last sub-record sequence number. 
- final ExtendedSequenceNumber expectedLargestPermittedEsqn = new ExtendedSequenceNumber( - previousCheckpointSqn.toString(), 2L); + final ExtendedSequenceNumber expectedLargestPermittedEsqn = + new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 2L); assertEquals(expectedLargestPermittedEsqn, outcome.getCheckpointCall()); } @@ -334,48 +374,58 @@ public class ProcessTaskTest { int recordIndex = 0; sequenceNumber = sequenceNumber.add(BigInteger.ONE); for (int i = 0; i < 5; ++i) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - recordIndex, approximateArrivalTime); + KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord( + sequenceNumber, aggregatedRecord, recordIndex, approximateArrivalTime); aggregatorUtil.addInRange(expectedRecord); recordIndex++; } sequenceNumber = sequenceNumber.add(BigInteger.ONE); for (int i = 0; i < 5; ++i) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - recordIndex, approximateArrivalTime); + KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord( + sequenceNumber, aggregatedRecord, recordIndex, approximateArrivalTime); aggregatorUtil.addBelowRange(expectedRecord); recordIndex++; } sequenceNumber = sequenceNumber.add(BigInteger.ONE); for (int i = 0; i < 5; ++i) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - recordIndex, approximateArrivalTime); + KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord( + sequenceNumber, aggregatedRecord, recordIndex, approximateArrivalTime); aggregatorUtil.addAboveRange(expectedRecord); recordIndex++; } byte[] payload = aggregatedRecord.build().toByteArray(); ByteArrayOutputStream bos = new ByteArrayOutputStream(); - bos.write(new byte[] { -13, -119, -102, -62 }); + bos.write(new byte[] {-13, -119, -102, -62}); bos.write(payload); bos.write(md5(payload)); ByteBuffer 
rawRecordData = ByteBuffer.wrap(bos.toByteArray()); - KinesisClientRecord rawRecord = KinesisClientRecord.builder().data(rawRecordData) - .approximateArrivalTimestamp(approximateArrivalTime).partitionKey("p-01") - .sequenceNumber(sequenceNumber.toString()).build(); + KinesisClientRecord rawRecord = KinesisClientRecord.builder() + .data(rawRecordData) + .approximateArrivalTimestamp(approximateArrivalTime) + .partitionKey("p-01") + .sequenceNumber(sequenceNumber.toString()) + .build(); - when(shardDetector.shard(any())).thenReturn(Shard.builder().shardId("Shard-01") - .hashKeyRange(HashKeyRange.builder().startingHashKey(lowHashKey).endingHashKey(highHashKey).build()) - .build()); + when(shardDetector.shard(any())) + .thenReturn(Shard.builder() + .shardId("Shard-01") + .hashKeyRange(HashKeyRange.builder() + .startingHashKey(lowHashKey) + .endingHashKey(highHashKey) + .build()) + .build()); when(processRecordsInput.records()).thenReturn(Collections.singletonList(rawRecord)); ProcessTask processTask = makeProcessTask(processRecordsInput, aggregatorUtil, false); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, - new ExtendedSequenceNumber(sequenceNumber.subtract(BigInteger.valueOf(100)).toString(), 0L), + ShardRecordProcessorOutcome outcome = testWithRecords( + processTask, + new ExtendedSequenceNumber( + sequenceNumber.subtract(BigInteger.valueOf(100)).toString(), 0L), new ExtendedSequenceNumber(sequenceNumber.toString(), recordIndex + 1L)); assertThat(outcome.processRecordsCall.records().size(), equalTo(0)); @@ -398,8 +448,8 @@ public class ProcessTaskTest { Instant approximateArrivalTime = Instant.now().minus(i + 4, ChronoUnit.SECONDS); sequenceNumber = sequenceNumber.add(BigInteger.ONE); for (int j = 0; j < 2; ++j) { - KinesisClientRecord expectedRecord = createAndRegisterAggregatedRecord(sequenceNumber, aggregatedRecord, - j, approximateArrivalTime); + KinesisClientRecord expectedRecord = + createAndRegisterAggregatedRecord(sequenceNumber, 
aggregatedRecord, j, approximateArrivalTime); aggregatorUtil.addInRange(expectedRecord); expectedRecords.add(expectedRecord); } @@ -412,21 +462,31 @@ public class ProcessTaskTest { ByteBuffer rawRecordData = ByteBuffer.wrap(bos.toByteArray()); - KinesisClientRecord rawRecord = KinesisClientRecord.builder().data(rawRecordData) - .approximateArrivalTimestamp(approximateArrivalTime).partitionKey("pa-" + i) - .sequenceNumber(sequenceNumber.toString()).build(); + KinesisClientRecord rawRecord = KinesisClientRecord.builder() + .data(rawRecordData) + .approximateArrivalTimestamp(approximateArrivalTime) + .partitionKey("pa-" + i) + .sequenceNumber(sequenceNumber.toString()) + .build(); rawRecords.add(rawRecord); } - when(shardDetector.shard(any())).thenReturn(Shard.builder().shardId("Shard-01") - .hashKeyRange(HashKeyRange.builder().startingHashKey(lowHashKey).endingHashKey(highHashKey).build()) - .build()); + when(shardDetector.shard(any())) + .thenReturn(Shard.builder() + .shardId("Shard-01") + .hashKeyRange(HashKeyRange.builder() + .startingHashKey(lowHashKey) + .endingHashKey(highHashKey) + .build()) + .build()); when(processRecordsInput.records()).thenReturn(rawRecords); ProcessTask processTask = makeProcessTask(processRecordsInput, aggregatorUtil, false); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, - new ExtendedSequenceNumber(sequenceNumber.subtract(BigInteger.valueOf(100)).toString(), 0L), + ShardRecordProcessorOutcome outcome = testWithRecords( + processTask, + new ExtendedSequenceNumber( + sequenceNumber.subtract(BigInteger.valueOf(100)).toString(), 0L), new ExtendedSequenceNumber(sequenceNumber.toString(), 0L)); assertThat(outcome.processRecordsCall.records(), equalTo(expectedRecords)); @@ -440,38 +500,32 @@ public class ProcessTaskTest { final String pk = UUID.randomUUID().toString(); final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS)); - //Payload set to SchemaRegistry encoded data 
and schema to null - //to mimic Schema Registry encoded message from Kinesis stream. - final KinesisClientRecord schemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); + // Payload set to SchemaRegistry encoded data and schema to null + // to mimic Schema Registry encoded message from Kinesis stream. + final KinesisClientRecord schemaRegistryRecord = makeKinesisClientRecord( + pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); - final KinesisClientRecord nonSchemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); + final KinesisClientRecord nonSchemaRegistryRecord = makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); - when(processRecordsInput.records()) - .thenReturn( - ImmutableList.of( - schemaRegistryRecord, - nonSchemaRegistryRecord - ) - ); + when(processRecordsInput.records()).thenReturn(ImmutableList.of(schemaRegistryRecord, nonSchemaRegistryRecord)); doReturn(true).when(glueSchemaRegistryDeserializer).canDeserialize(SCHEMA_REGISTRY_PAYLOAD); doReturn(TEST_DATA).when(glueSchemaRegistryDeserializer).getData(SCHEMA_REGISTRY_PAYLOAD); doReturn(SCHEMA_REGISTRY_SCHEMA).when(glueSchemaRegistryDeserializer).getSchema(SCHEMA_REGISTRY_PAYLOAD); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), - new ExtendedSequenceNumber(previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); + ShardRecordProcessorOutcome outcome = testWithRecords( + processTask, + new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), + new ExtendedSequenceNumber( + previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); - KinesisClientRecord decodedSchemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(TEST_DATA), SCHEMA_REGISTRY_SCHEMA); + KinesisClientRecord decodedSchemaRegistryRecord 
= makeKinesisClientRecord( + pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(TEST_DATA), SCHEMA_REGISTRY_SCHEMA); List expectedRecords = - ImmutableList.of( - decodedSchemaRegistryRecord, - nonSchemaRegistryRecord - ); + ImmutableList.of(decodedSchemaRegistryRecord, nonSchemaRegistryRecord); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); assertEquals(expectedRecords, actualRecords); @@ -488,35 +542,29 @@ public class ProcessTaskTest { final String pk = UUID.randomUUID().toString(); final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS)); - //Payload set to SchemaRegistry encoded data and schema to null - //to mimic Schema Registry encoded message from Kinesis stream. - final KinesisClientRecord schemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); + // Payload set to SchemaRegistry encoded data and schema to null + // to mimic Schema Registry encoded message from Kinesis stream. 
+ final KinesisClientRecord schemaRegistryRecord = makeKinesisClientRecord( + pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); - final KinesisClientRecord nonSchemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); + final KinesisClientRecord nonSchemaRegistryRecord = makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); - when(processRecordsInput.records()) - .thenReturn( - ImmutableList.of( - schemaRegistryRecord, - nonSchemaRegistryRecord - ) - ); + when(processRecordsInput.records()).thenReturn(ImmutableList.of(schemaRegistryRecord, nonSchemaRegistryRecord)); doThrow(new RuntimeException("Invalid data")) - .when(glueSchemaRegistryDeserializer).canDeserialize(SCHEMA_REGISTRY_PAYLOAD); + .when(glueSchemaRegistryDeserializer) + .canDeserialize(SCHEMA_REGISTRY_PAYLOAD); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), - new ExtendedSequenceNumber(previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); + ShardRecordProcessorOutcome outcome = testWithRecords( + processTask, + new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), + new ExtendedSequenceNumber( + previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); - List expectedRecords = - ImmutableList.of( - schemaRegistryRecord, - nonSchemaRegistryRecord - ); + List expectedRecords = ImmutableList.of(schemaRegistryRecord, nonSchemaRegistryRecord); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); assertEquals(expectedRecords, actualRecords); } @@ -529,61 +577,66 @@ public class ProcessTaskTest { final String pk = UUID.randomUUID().toString(); final Date ts = new Date(System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(4, TimeUnit.HOURS)); - //Payload set to SchemaRegistry encoded data and schema to null - //to mimic Schema Registry 
encoded message from Kinesis stream. - final KinesisClientRecord schemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); + // Payload set to SchemaRegistry encoded data and schema to null + // to mimic Schema Registry encoded message from Kinesis stream. + final KinesisClientRecord schemaRegistryRecord = makeKinesisClientRecord( + pk, sqn.toString(), ts.toInstant(), ByteBuffer.wrap(SCHEMA_REGISTRY_PAYLOAD), null); - final KinesisClientRecord nonSchemaRegistryRecord = - makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); + final KinesisClientRecord nonSchemaRegistryRecord = makeKinesisClientRecord(pk, sqn.toString(), ts.toInstant()); - when(processRecordsInput.records()) - .thenReturn( - ImmutableList.of( - schemaRegistryRecord, - nonSchemaRegistryRecord - ) - ); + when(processRecordsInput.records()).thenReturn(ImmutableList.of(schemaRegistryRecord, nonSchemaRegistryRecord)); - doReturn(true) - .when(glueSchemaRegistryDeserializer).canDeserialize(SCHEMA_REGISTRY_PAYLOAD); + doReturn(true).when(glueSchemaRegistryDeserializer).canDeserialize(SCHEMA_REGISTRY_PAYLOAD); doThrow(new RuntimeException("Cannot decode data")) - .when(glueSchemaRegistryDeserializer).getData(SCHEMA_REGISTRY_PAYLOAD); + .when(glueSchemaRegistryDeserializer) + .getData(SCHEMA_REGISTRY_PAYLOAD); - ShardRecordProcessorOutcome outcome = testWithRecords(processTask, new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), - new ExtendedSequenceNumber(previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); + ShardRecordProcessorOutcome outcome = testWithRecords( + processTask, + new ExtendedSequenceNumber(previousCheckpointSqn.toString(), 0L), + new ExtendedSequenceNumber( + previousCheckpointSqn.add(previousCheckpointSqn).toString(), 1L)); - List expectedRecords = - ImmutableList.of( - schemaRegistryRecord, - nonSchemaRegistryRecord - ); + List expectedRecords = 
ImmutableList.of(schemaRegistryRecord, nonSchemaRegistryRecord); - List actualRecords = outcome.getProcessRecordsCall().records(); + List actualRecords = + outcome.getProcessRecordsCall().records(); assertEquals(expectedRecords, actualRecords); } - private KinesisClientRecord createAndRegisterAggregatedRecord(BigInteger sequenceNumber, - AggregatedRecord.Builder aggregatedRecord, int i, Instant approximateArrivalTime) { + private KinesisClientRecord createAndRegisterAggregatedRecord( + BigInteger sequenceNumber, + AggregatedRecord.Builder aggregatedRecord, + int i, + Instant approximateArrivalTime) { byte[] dataArray = new byte[1024]; ThreadLocalRandom.current().nextBytes(dataArray); ByteBuffer data = ByteBuffer.wrap(dataArray); - KinesisClientRecord expectedRecord = KinesisClientRecord.builder().partitionKey("p-" + i) - .sequenceNumber(sequenceNumber.toString()).approximateArrivalTimestamp(approximateArrivalTime) - .data(data).subSequenceNumber(i).aggregated(true).build(); + KinesisClientRecord expectedRecord = KinesisClientRecord.builder() + .partitionKey("p-" + i) + .sequenceNumber(sequenceNumber.toString()) + .approximateArrivalTimestamp(approximateArrivalTime) + .data(data) + .subSequenceNumber(i) + .aggregated(true) + .build(); - Messages.Record kplRecord = Messages.Record.newBuilder().setData(ByteString.copyFrom(dataArray)) - .setPartitionKeyIndex(i).build(); + Messages.Record kplRecord = Messages.Record.newBuilder() + .setData(ByteString.copyFrom(dataArray)) + .setPartitionKeyIndex(i) + .build(); aggregatedRecord.addPartitionKeyTable(expectedRecord.partitionKey()).addRecords(kplRecord); return expectedRecord; } private enum RecordRangeState { - BELOW_RANGE, IN_RANGE, ABOVE_RANGE + BELOW_RANGE, + IN_RANGE, + ABOVE_RANGE } @Getter @@ -597,7 +650,10 @@ public class ProcessTaskTest { ControlledHashAggregatorUtil(String lowHashKey, String highHashKey) { this.lowHashKey = new BigInteger(lowHashKey); this.highHashKey = new BigInteger(highHashKey); - this.width = 
this.highHashKey.subtract(this.lowHashKey).mod(BigInteger.valueOf(Long.MAX_VALUE)).longValue() + this.width = this.highHashKey + .subtract(this.lowHashKey) + .mod(BigInteger.valueOf(Long.MAX_VALUE)) + .longValue() - 1; } @@ -623,38 +679,53 @@ public class ProcessTaskTest { assertThat(rangeState, not(nullValue())); switch (rangeState) { - case BELOW_RANGE: - return lowHashKey.subtract(BigInteger.valueOf(ThreadLocalRandom.current().nextInt()).abs()); - case IN_RANGE: - return lowHashKey.add(BigInteger.valueOf(ThreadLocalRandom.current().nextLong(width))); - case ABOVE_RANGE: - return highHashKey.add(BigInteger.ONE) - .add(BigInteger.valueOf(ThreadLocalRandom.current().nextInt()).abs()); - default: - throw new IllegalStateException("Unknown range state: " + rangeState); + case BELOW_RANGE: + return lowHashKey.subtract( + BigInteger.valueOf(ThreadLocalRandom.current().nextInt()) + .abs()); + case IN_RANGE: + return lowHashKey.add( + BigInteger.valueOf(ThreadLocalRandom.current().nextLong(width))); + case ABOVE_RANGE: + return highHashKey + .add(BigInteger.ONE) + .add(BigInteger.valueOf(ThreadLocalRandom.current().nextInt()) + .abs()); + default: + throw new IllegalStateException("Unknown range state: " + rangeState); } } } private ShardRecordProcessorOutcome testWithRecord(KinesisClientRecord record) { - return testWithRecords(Collections.singletonList(record), ExtendedSequenceNumber.TRIM_HORIZON, + return testWithRecords( + Collections.singletonList(record), + ExtendedSequenceNumber.TRIM_HORIZON, ExtendedSequenceNumber.TRIM_HORIZON); } - private ShardRecordProcessorOutcome testWithRecords(List records, - ExtendedSequenceNumber lastCheckpointValue, ExtendedSequenceNumber largestPermittedCheckpointValue) { + private ShardRecordProcessorOutcome testWithRecords( + List records, + ExtendedSequenceNumber lastCheckpointValue, + ExtendedSequenceNumber largestPermittedCheckpointValue) { return testWithRecords(records, lastCheckpointValue, largestPermittedCheckpointValue, new 
AggregatorUtil()); } - private ShardRecordProcessorOutcome testWithRecords(List records, ExtendedSequenceNumber lastCheckpointValue, - ExtendedSequenceNumber largestPermittedCheckpointValue, AggregatorUtil aggregatorUtil) { + private ShardRecordProcessorOutcome testWithRecords( + List records, + ExtendedSequenceNumber lastCheckpointValue, + ExtendedSequenceNumber largestPermittedCheckpointValue, + AggregatorUtil aggregatorUtil) { when(processRecordsInput.records()).thenReturn(records); return testWithRecords( makeProcessTask(processRecordsInput, aggregatorUtil, skipShardSyncAtWorkerInitializationIfLeasesExist), - lastCheckpointValue, largestPermittedCheckpointValue); + lastCheckpointValue, + largestPermittedCheckpointValue); } - private ShardRecordProcessorOutcome testWithRecords(ProcessTask processTask, ExtendedSequenceNumber lastCheckpointValue, + private ShardRecordProcessorOutcome testWithRecords( + ProcessTask processTask, + ExtendedSequenceNumber lastCheckpointValue, ExtendedSequenceNumber largestPermittedCheckpointValue) { when(checkpointer.lastCheckpointValue()).thenReturn(lastCheckpointValue); when(checkpointer.largestPermittedCheckpointValue()).thenReturn(largestPermittedCheckpointValue); @@ -668,12 +739,11 @@ public class ProcessTaskTest { verify(checkpointer).largestPermittedCheckpointValue(esnCaptor.capture()); return new ShardRecordProcessorOutcome(recordsCaptor.getValue(), esnCaptor.getValue()); - } /** * See the KPL documentation on GitHub for more details about the binary format. - * + * * @param pk * Partition key to use. All the records will have the same partition key. 
* @return ByteBuffer containing the serialized form of the aggregated record, along with the necessary header and @@ -681,13 +751,20 @@ public class ProcessTaskTest { */ private static ByteBuffer generateAggregatedRecord(String pk) { ByteBuffer bb = ByteBuffer.allocate(1024); - bb.put(new byte[] { -13, -119, -102, -62 }); + bb.put(new byte[] {-13, -119, -102, -62}); - Messages.Record r = Messages.Record.newBuilder().setData(ByteString.copyFrom(TEST_DATA)).setPartitionKeyIndex(0) + Messages.Record r = Messages.Record.newBuilder() + .setData(ByteString.copyFrom(TEST_DATA)) + .setPartitionKeyIndex(0) .build(); - byte[] payload = AggregatedRecord.newBuilder().addPartitionKeyTable(pk).addRecords(r).addRecords(r) - .addRecords(r).build().toByteArray(); + byte[] payload = AggregatedRecord.newBuilder() + .addPartitionKeyTable(pk) + .addRecords(r) + .addRecords(r) + .addRecords(r) + .build() + .toByteArray(); bb.put(payload); bb.put(md5(payload)); @@ -696,13 +773,21 @@ public class ProcessTaskTest { return bb; } - private static List generateConsecutiveRecords(int numberOfRecords, String partitionKey, ByteBuffer data, - Date arrivalTimestamp, BigInteger startSequenceNumber) { + private static List generateConsecutiveRecords( + int numberOfRecords, + String partitionKey, + ByteBuffer data, + Date arrivalTimestamp, + BigInteger startSequenceNumber) { List records = new ArrayList<>(); for (int i = 0; i < numberOfRecords; ++i) { String seqNum = startSequenceNumber.add(BigInteger.valueOf(i)).toString(); - KinesisClientRecord record = KinesisClientRecord.builder().partitionKey(partitionKey).data(data) - .sequenceNumber(seqNum).approximateArrivalTimestamp(arrivalTimestamp.toInstant()).build(); + KinesisClientRecord record = KinesisClientRecord.builder() + .partitionKey(partitionKey) + .data(data) + .sequenceNumber(seqNum) + .approximateArrivalTimestamp(arrivalTimestamp.toInstant()) + .build(); records.add(record); } return records; @@ -739,11 +824,11 @@ public class 
ProcessTaskTest { if (expected == null) { matchers = nullValue(TaskResult.class); } else { - matchers = allOf(notNullValue(TaskResult.class), + matchers = allOf( + notNullValue(TaskResult.class), hasProperty("shardEndReached", equalTo(expected.isShardEndReached())), hasProperty("exception", equalTo(expected.getException()))); } - } @Override diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ReshardIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ReshardIntegrationTest.java index aa08980e..38fab2ac 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ReshardIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ReshardIntegrationTest.java @@ -1,9 +1,9 @@ package software.amazon.kinesis.lifecycle; import org.junit.Test; +import software.amazon.kinesis.application.TestConsumer; import software.amazon.kinesis.config.KCLAppConfig; import software.amazon.kinesis.config.ReleaseCanaryStreamingReshardingTestConfig; -import software.amazon.kinesis.application.TestConsumer; public class ReshardIntegrationTest { @Test diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriberTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriberTest.java index 4299c163..440741b2 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriberTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerSubscriberTest.java @@ -14,6 +14,45 @@ */ package software.amazon.kinesis.lifecycle; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutorService; +import 
java.util.concurrent.Executors; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import lombok.NonNull; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.kinesis.common.InitialPositionInStreamExtended; +import software.amazon.kinesis.common.RequestDetails; +import software.amazon.kinesis.leases.ShardInfo; +import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; +import software.amazon.kinesis.retrieval.KinesisClientRecord; +import software.amazon.kinesis.retrieval.RecordsDeliveryAck; +import software.amazon.kinesis.retrieval.RecordsPublisher; +import software.amazon.kinesis.retrieval.RecordsRetrieved; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.nullValue; import static org.junit.Assert.assertEquals; @@ -29,47 +68,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import static software.amazon.kinesis.utils.ProcessRecordsInputMatcher.eqProcessRecordsInput; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.RejectedExecutionException; 
-import java.util.concurrent.TimeUnit; -import java.util.stream.Stream; - -import org.apache.commons.lang3.StringUtils; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.runners.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import lombok.NonNull; -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.common.RequestDetails; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.retrieval.KinesisClientRecord; -import software.amazon.kinesis.retrieval.RecordsDeliveryAck; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RecordsRetrieved; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - @Slf4j @RunWith(MockitoJUnitRunner.class) public class ShardConsumerSubscriberTest { @@ -84,6 +82,7 @@ public class ShardConsumerSubscriberTest { @Mock private ShardConsumer shardConsumer; + @Mock private RecordsRetrieved recordsRetrieved; @@ -100,16 +99,22 @@ public class ShardConsumerSubscriberTest { @Before public void before() { - executorService = Executors.newFixedThreadPool(8, new ThreadFactoryBuilder() - .setNameFormat("test-" + testName.getMethodName() + "-%04d").setDaemon(true).build()); + executorService = Executors.newFixedThreadPool( + 8, + new ThreadFactoryBuilder() + .setNameFormat("test-" + testName.getMethodName() + "-%04d") + .setDaemon(true) + .build()); recordsPublisher = new TestPublisher(); - ShardInfo shardInfo = new ShardInfo("shard-001", "", 
Collections.emptyList(), - ExtendedSequenceNumber.TRIM_HORIZON); + ShardInfo shardInfo = + new ShardInfo("shard-001", "", Collections.emptyList(), ExtendedSequenceNumber.TRIM_HORIZON); when(shardConsumer.shardInfo()).thenReturn(shardInfo); - processRecordsInput = ProcessRecordsInput.builder().records(Collections.emptyList()) - .cacheEntryTime(Instant.now()).build(); + processRecordsInput = ProcessRecordsInput.builder() + .records(Collections.emptyList()) + .cacheEntryTime(Instant.now()) + .build(); subscriber = new ShardConsumerSubscriber(recordsPublisher, executorService, bufferSize, shardConsumer, 0); when(recordsRetrieved.processRecordsInput()).thenReturn(processRecordsInput); @@ -139,7 +144,8 @@ public class ShardConsumerSubscriberTest { startSubscriptionsAndWait(); - verify(shardConsumer, times(100)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, times(100)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); } @Test @@ -149,29 +155,32 @@ public class ShardConsumerSubscriberTest { Throwable testException = new Throwable("ShardConsumerError"); doAnswer(new Answer() { - int expectedInvocations = recordsPublisher.responses.size(); + int expectedInvocations = recordsPublisher.responses.size(); - @Override - public Object answer(InvocationOnMock invocation) throws Throwable { - expectedInvocations--; - if (expectedInvocations == 10) { - throw testException; - } - if (expectedInvocations <= 0) { - synchronized (processedNotifier) { - processedNotifier.notifyAll(); + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + expectedInvocations--; + if (expectedInvocations == 10) { + throw testException; + } + if (expectedInvocations <= 0) { + synchronized (processedNotifier) { + processedNotifier.notifyAll(); + } + } + return null; } - } - return null; - } - }).when(shardConsumer).handleInput(any(ProcessRecordsInput.class), 
any(Subscription.class)); + }) + .when(shardConsumer) + .handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); startSubscriptionsAndWait(); assertThat(subscriber.getAndResetDispatchFailure(), equalTo(testException)); assertThat(subscriber.getAndResetDispatchFailure(), nullValue()); - verify(shardConsumer, times(20)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, times(20)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); } @Test @@ -192,7 +201,8 @@ public class ShardConsumerSubscriberTest { Thread.sleep(10); } - verify(shardConsumer, times(10)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, times(10)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); assertThat(subscriber.retrievalFailure(), equalTo(expected)); } @@ -225,14 +235,19 @@ public class ShardConsumerSubscriberTest { } assertThat(recordsPublisher.restartedFrom, equalTo(edgeRecord)); - verify(shardConsumer, times(20)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, times(20)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); } @Test public void restartAfterRequestTimerExpiresTest() throws Exception { - executorService = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder() - .setNameFormat("test-" + testName.getMethodName() + "-%04d").setDaemon(true).build()); + executorService = Executors.newFixedThreadPool( + 1, + new ThreadFactoryBuilder() + .setNameFormat("test-" + testName.getMethodName() + "-%04d") + .setDaemon(true) + .build()); subscriber = new ShardConsumerSubscriber(recordsPublisher, executorService, bufferSize, shardConsumer, 0); addUniqueItem(1); @@ -242,15 +257,18 @@ public class ShardConsumerSubscriberTest { List received = new ArrayList<>(); 
doAnswer(a -> { - ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class); - received.add(input); - if (input.records().stream().anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { - synchronized (processedNotifier) { - processedNotifier.notifyAll(); - } - } - return null; - }).when(shardConsumer).handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); + ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class); + received.add(input); + if (input.records().stream() + .anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { + synchronized (processedNotifier) { + processedNotifier.notifyAll(); + } + } + return null; + }) + .when(shardConsumer) + .handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); startSubscriptionsAndWait(); @@ -286,17 +304,29 @@ public class ShardConsumerSubscriberTest { processedNotifier.wait(DEFAULT_NOTIFIER_TIMEOUT); } - verify(shardConsumer, times(100)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, times(100)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); assertThat(received.size(), equalTo(recordsPublisher.responses.size())); - Stream.iterate(0, i -> i + 1).limit(received.size()).forEach(i -> assertThat(received.get(i), - eqProcessRecordsInput(recordsPublisher.responses.get(i).recordsRetrieved.processRecordsInput()))); + Stream.iterate(0, i -> i + 1) + .limit(received.size()) + .forEach(i -> assertThat( + received.get(i), + eqProcessRecordsInput(recordsPublisher + .responses + .get(i) + .recordsRetrieved + .processRecordsInput()))); } @Test public void restartAfterRequestTimerExpiresWhenNotGettingRecordsAfterInitialization() throws Exception { - executorService = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder() - .setNameFormat("test-" + testName.getMethodName() + "-%04d").setDaemon(true).build()); + 
executorService = Executors.newFixedThreadPool( + 1, + new ThreadFactoryBuilder() + .setNameFormat("test-" + testName.getMethodName() + "-%04d") + .setDaemon(true) + .build()); // Mock record publisher which doesn't publish any records on first try which simulates any scenario which // causes first subscription try to fail. @@ -306,22 +336,26 @@ public class ShardConsumerSubscriberTest { List received = new ArrayList<>(); doAnswer(a -> { - ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class); - received.add(input); - if (input.records().stream().anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { - synchronized (processedNotifier) { - processedNotifier.notifyAll(); - } - } - return null; - }).when(shardConsumer).handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); + ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class); + received.add(input); + if (input.records().stream() + .anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { + synchronized (processedNotifier) { + processedNotifier.notifyAll(); + } + } + return null; + }) + .when(shardConsumer) + .handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); // First try to start subscriptions. startSubscriptionsAndWait(100); // Verifying that there are no interactions with shardConsumer mock indicating no records were sent back and // subscription has not started correctly. - verify(shardConsumer, never()).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, never()) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); Stream.iterate(2, i -> i + 1).limit(98).forEach(this::addUniqueItem); @@ -341,43 +375,59 @@ public class ShardConsumerSubscriberTest { // Verify that received records in the subscriber are equal to the ones sent by the record publisher. 
assertThat(received.size(), equalTo(recordsPublisher.responses.size())); - Stream.iterate(0, i -> i + 1).limit(received.size()).forEach(i -> assertThat(received.get(i), - eqProcessRecordsInput(recordsPublisher.responses.get(i).recordsRetrieved.processRecordsInput()))); + Stream.iterate(0, i -> i + 1) + .limit(received.size()) + .forEach(i -> assertThat( + received.get(i), + eqProcessRecordsInput(recordsPublisher + .responses + .get(i) + .recordsRetrieved + .processRecordsInput()))); } @Test public void restartAfterRequestTimerExpiresWhenInitialTaskExecutionIsRejected() throws Exception { - executorService = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder() - .setNameFormat("test-" + testName.getMethodName() + "-%04d").setDaemon(true).build()); + executorService = Executors.newFixedThreadPool( + 1, + new ThreadFactoryBuilder() + .setNameFormat("test-" + testName.getMethodName() + "-%04d") + .setDaemon(true) + .build()); ExecutorService failingService = spy(executorService); doAnswer(invocation -> directlyExecuteRunnable(invocation)) .doThrow(new RejectedExecutionException()) .doCallRealMethod() - .when(failingService).execute(any()); + .when(failingService) + .execute(any()); subscriber = new ShardConsumerSubscriber(recordsPublisher, failingService, bufferSize, shardConsumer, 0); addUniqueItem(1); List received = new ArrayList<>(); doAnswer(a -> { - ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class); - received.add(input); - if (input.records().stream().anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { - synchronized (processedNotifier) { - processedNotifier.notifyAll(); - } - } - return null; - }).when(shardConsumer).handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); + ProcessRecordsInput input = a.getArgumentAt(0, ProcessRecordsInput.class); + received.add(input); + if (input.records().stream() + .anyMatch(r -> StringUtils.startsWith(r.partitionKey(), TERMINAL_MARKER))) { + synchronized 
(processedNotifier) { + processedNotifier.notifyAll(); + } + } + return null; + }) + .when(shardConsumer) + .handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); // First try to start subscriptions. startSubscriptionsAndWait(100); // Verifying that there are no interactions with shardConsumer mock indicating no records were sent back and // subscription has not started correctly. - verify(shardConsumer, never()).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, never()) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); Stream.iterate(2, i -> i + 1).limit(98).forEach(this::addUniqueItem); @@ -393,12 +443,20 @@ public class ShardConsumerSubscriberTest { } // Verify that shardConsumer mock was called 100 times and all 100 input records are processed. - verify(shardConsumer, times(100)).handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); + verify(shardConsumer, times(100)) + .handleInput(argThat(eqProcessRecordsInput(processRecordsInput)), any(Subscription.class)); // Verify that received records in the subscriber are equal to the ones sent by the record publisher. 
assertThat(received.size(), equalTo(recordsPublisher.responses.size())); - Stream.iterate(0, i -> i + 1).limit(received.size()).forEach(i -> assertThat(received.get(i), - eqProcessRecordsInput(recordsPublisher.responses.get(i).recordsRetrieved.processRecordsInput()))); + Stream.iterate(0, i -> i + 1) + .limit(received.size()) + .forEach(i -> assertThat( + received.get(i), + eqProcessRecordsInput(recordsPublisher + .responses + .get(i) + .recordsRetrieved + .processRecordsInput()))); } private Object directlyExecuteRunnable(InvocationOnMock invocation) { @@ -410,8 +468,11 @@ public class ShardConsumerSubscriberTest { private void addUniqueItem(int id) { RecordsRetrieved r = mock(RecordsRetrieved.class, "Record-" + id); - ProcessRecordsInput input = ProcessRecordsInput.builder().cacheEntryTime(Instant.now()) - .records(Collections.singletonList(KinesisClientRecord.builder().partitionKey("Record-" + id).build())) + ProcessRecordsInput input = ProcessRecordsInput.builder() + .cacheEntryTime(Instant.now()) + .records(Collections.singletonList(KinesisClientRecord.builder() + .partitionKey("Record-" + id) + .build())) .build(); when(r.processRecordsInput()).thenReturn(input); recordsPublisher.add(new ResponseItem(r)); @@ -420,9 +481,11 @@ public class ShardConsumerSubscriberTest { private ProcessRecordsInput addTerminalMarker(int id) { RecordsRetrieved terminalResponse = mock(RecordsRetrieved.class, TERMINAL_MARKER + "-" + id); ProcessRecordsInput terminalInput = ProcessRecordsInput.builder() - .records(Collections - .singletonList(KinesisClientRecord.builder().partitionKey(TERMINAL_MARKER + "-" + id).build())) - .cacheEntryTime(Instant.now()).build(); + .records(Collections.singletonList(KinesisClientRecord.builder() + .partitionKey(TERMINAL_MARKER + "-" + id) + .build())) + .cacheEntryTime(Instant.now()) + .build(); when(terminalResponse.processRecordsInput()).thenReturn(terminalInput); recordsPublisher.add(new ResponseItem(terminalResponse)); @@ -430,25 +493,28 @@ 
public class ShardConsumerSubscriberTest { } private void addItemsToReturn(int count) { - Stream.iterate(0, i -> i + 1).limit(count) + Stream.iterate(0, i -> i + 1) + .limit(count) .forEach(i -> recordsPublisher.add(new ResponseItem(recordsRetrieved))); } private void setupNotifierAnswer(int expected) { doAnswer(new Answer() { - int seen = expected; + int seen = expected; - @Override - public Object answer(InvocationOnMock invocation) throws Throwable { - seen--; - if (seen == 0) { - synchronized (processedNotifier) { - processedNotifier.notifyAll(); + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + seen--; + if (seen == 0) { + synchronized (processedNotifier) { + processedNotifier.notifyAll(); + } + } + return null; } - } - return null; - } - }).when(shardConsumer).handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); + }) + .when(shardConsumer) + .handleInput(any(ProcessRecordsInput.class), any(Subscription.class)); } private void startSubscriptionsAndWait() throws InterruptedException { @@ -515,10 +581,9 @@ public class ShardConsumerSubscriberTest { } @Override - public void start(ExtendedSequenceNumber extendedSequenceNumber, - InitialPositionInStreamExtended initialPositionInStreamExtended) { - - } + public void start( + ExtendedSequenceNumber extendedSequenceNumber, + InitialPositionInStreamExtended initialPositionInStreamExtended) {} @Override public void restartFrom(RecordsRetrieved recordsRetrieved) { @@ -531,18 +596,13 @@ public class ShardConsumerSubscriberTest { break; } } - } @Override - public void notify(RecordsDeliveryAck ack) { - - } + public void notify(RecordsDeliveryAck ack) {} @Override - public void shutdown() { - - } + public void shutdown() {} @Override public RequestDetails getLastSuccessfulRequestDetails() { @@ -594,7 +654,10 @@ public class ShardConsumerSubscriberTest { private int genericWarningLogged = 0; private int readTimeoutWarningLogged = 0; - 
TestShardConsumerSubscriber(RecordsPublisher recordsPublisher, ExecutorService executorService, int bufferSize, + TestShardConsumerSubscriber( + RecordsPublisher recordsPublisher, + ExecutorService executorService, + int bufferSize, ShardConsumer shardConsumer, // Setup test expectations int readTimeoutsToIgnoreBeforeWarning) { @@ -625,8 +688,8 @@ public class ShardConsumerSubscriberTest { int readTimeoutsToIgnore = 0; int expectedReadTimeoutLogs = 0; int expectedGenericLogs = 0; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicSuccess(consumer); mimicSuccess(consumer); @@ -651,8 +714,8 @@ public class ShardConsumerSubscriberTest { int readTimeoutsToIgnore = 0; int expectedReadTimeoutLogs = 2; int expectedGenericLogs = 0; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicSuccess(consumer); mimicSuccess(consumer); @@ -677,8 +740,8 @@ public class ShardConsumerSubscriberTest { int readTimeoutsToIgnore = 1; int expectedReadTimeoutLogs = 0; int expectedGenericLogs = 0; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, 
shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicSuccess(consumer); mimicSuccess(consumer); @@ -704,8 +767,8 @@ public class ShardConsumerSubscriberTest { int readTimeoutsToIgnore = 1; int expectedReadTimeoutLogs = 2; int expectedGenericLogs = 0; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicException(exceptionToThrow, consumer); mimicException(exceptionToThrow, consumer); @@ -731,8 +794,8 @@ public class ShardConsumerSubscriberTest { int readTimeoutsToIgnore = 2; int expectedReadTimeoutLogs = 3; int expectedGenericLogs = 0; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicException(exceptionToThrow, consumer); mimicException(exceptionToThrow, consumer); @@ -758,8 +821,8 @@ public class ShardConsumerSubscriberTest { int readTimeoutsToIgnore = 0; int expectedReadTimeoutLogs = 0; int expectedGenericLogs = 2; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicSuccess(consumer); mimicSuccess(consumer); @@ -787,8 +850,8 @@ 
public class ShardConsumerSubscriberTest { int readTimeoutsToIgnore = 2; int expectedReadTimeoutLogs = 0; int expectedGenericLogs = 2; - TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber(mock(RecordsPublisher.class), - Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); + TestShardConsumerSubscriber consumer = new TestShardConsumerSubscriber( + mock(RecordsPublisher.class), Executors.newFixedThreadPool(1), 8, shardConsumer, readTimeoutsToIgnore); consumer.startSubscriptions(); mimicSuccess(consumer); mimicSuccess(consumer); @@ -810,5 +873,4 @@ public class ShardConsumerSubscriberTest { // restart subscriptions to allow further requests to be mimiced consumer.startSubscriptions(); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerTest.java index 8db3d517..daab8efe 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShardConsumerTest.java @@ -15,6 +15,51 @@ package software.amazon.kinesis.lifecycle; +import java.time.Instant; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.BrokenBarrierException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; + +import 
com.google.common.util.concurrent.ThreadFactoryBuilder; +import lombok.extern.slf4j.Slf4j; +import org.junit.After; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.runners.MockitoJUnitRunner; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.kinesis.common.InitialPositionInStreamExtended; +import software.amazon.kinesis.common.RequestDetails; +import software.amazon.kinesis.leases.ShardInfo; +import software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState; +import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; +import software.amazon.kinesis.lifecycle.events.TaskExecutionListenerInput; +import software.amazon.kinesis.retrieval.RecordsDeliveryAck; +import software.amazon.kinesis.retrieval.RecordsPublisher; +import software.amazon.kinesis.retrieval.RecordsRetrieved; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; @@ -41,53 +86,6 @@ import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.verifyZeroInteractions; import static org.mockito.Mockito.when; -import java.time.Instant; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.CyclicBarrier; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingQueue; -import 
java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; - -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.runners.MockitoJUnitRunner; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.common.RequestDetails; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.lifecycle.events.TaskExecutionListenerInput; -import software.amazon.kinesis.lifecycle.ConsumerStates.ShardConsumerState; -import software.amazon.kinesis.retrieval.RecordsDeliveryAck; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RecordsRetrieved; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - /** * Unit tests of {@link ShardConsumer}. 
*/ @@ -106,42 +104,61 @@ public class ShardConsumerTest { private TaskExecutionListenerInput shutdownRequestedAwaitTaskInput; private ExecutorService executorService; + @Mock private RecordsPublisher recordsPublisher; + @Mock private ShutdownNotification shutdownNotification; + @Mock private ConsumerState blockedOnParentsState; + @Mock private ConsumerTask blockedOnParentsTask; + @Mock private ConsumerState initialState; + @Mock private ConsumerTask initializeTask; + @Mock private ConsumerState processingState; + @Mock private ConsumerTask processingTask; + @Mock private ConsumerState shutdownState; + @Mock private ConsumerTask shutdownTask; + @Mock private TaskResult initializeTaskResult; + @Mock private TaskResult processingTaskResult; + @Mock private TaskResult blockOnParentsTaskResult; + @Mock private ConsumerState shutdownCompleteState; + @Mock private ShardConsumerArgument shardConsumerArgument; + @Mock private ConsumerState shutdownRequestedState; + @Mock private ConsumerTask shutdownRequestedTask; + @Mock private ConsumerState shutdownRequestedAwaitState; + @Mock private TaskExecutionListener taskExecutionListener; @@ -156,20 +173,37 @@ public class ShardConsumerTest { public void before() { MockitoAnnotations.initMocks(this); shardInfo = new ShardInfo(shardId, concurrencyToken, null, ExtendedSequenceNumber.TRIM_HORIZON); - ThreadFactory factory = new ThreadFactoryBuilder().setNameFormat("test-" + testName.getMethodName() + "-%04d") - .setDaemon(true).build(); + ThreadFactory factory = new ThreadFactoryBuilder() + .setNameFormat("test-" + testName.getMethodName() + "-%04d") + .setDaemon(true) + .build(); executorService = new ThreadPoolExecutor(4, 4, 1, TimeUnit.SECONDS, new LinkedBlockingQueue<>(), factory); - processRecordsInput = ProcessRecordsInput.builder().isAtShardEnd(false).cacheEntryTime(Instant.now()) - .millisBehindLatest(1000L).records(Collections.emptyList()).build(); - initialTaskInput = 
TaskExecutionListenerInput.builder().shardInfo(shardInfo).taskType(TaskType.INITIALIZE) + processRecordsInput = ProcessRecordsInput.builder() + .isAtShardEnd(false) + .cacheEntryTime(Instant.now()) + .millisBehindLatest(1000L) + .records(Collections.emptyList()) .build(); - processTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo).taskType(TaskType.PROCESS).build(); - shutdownRequestedTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo) - .taskType(TaskType.SHUTDOWN_NOTIFICATION).build(); - shutdownRequestedAwaitTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo) - .taskType(TaskType.SHUTDOWN_COMPLETE).build(); - shutdownTaskInput = TaskExecutionListenerInput.builder().shardInfo(shardInfo).taskType(TaskType.SHUTDOWN) + initialTaskInput = TaskExecutionListenerInput.builder() + .shardInfo(shardInfo) + .taskType(TaskType.INITIALIZE) + .build(); + processTaskInput = TaskExecutionListenerInput.builder() + .shardInfo(shardInfo) + .taskType(TaskType.PROCESS) + .build(); + shutdownRequestedTaskInput = TaskExecutionListenerInput.builder() + .shardInfo(shardInfo) + .taskType(TaskType.SHUTDOWN_NOTIFICATION) + .build(); + shutdownRequestedAwaitTaskInput = TaskExecutionListenerInput.builder() + .shardInfo(shardInfo) + .taskType(TaskType.SHUTDOWN_COMPLETE) + .build(); + shutdownTaskInput = TaskExecutionListenerInput.builder() + .shardInfo(shardInfo) + .taskType(TaskType.SHUTDOWN) .build(); } @@ -193,32 +227,31 @@ public class ShardConsumerTest { TestPublisher(boolean enableCancelAwait) { doAnswer(a -> { - requestBarrier.await(); - return null; - }).when(subscription).request(anyLong()); + requestBarrier.await(); + return null; + }) + .when(subscription) + .request(anyLong()); doAnswer(a -> { - if (enableCancelAwait) { - requestBarrier.await(); - } - return null; - }).when(subscription).cancel(); + if (enableCancelAwait) { + requestBarrier.await(); + } + return null; + }) + .when(subscription) + .cancel(); } @Override - public 
void start(ExtendedSequenceNumber extendedSequenceNumber, - InitialPositionInStreamExtended initialPositionInStreamExtended) { - - } + public void start( + ExtendedSequenceNumber extendedSequenceNumber, + InitialPositionInStreamExtended initialPositionInStreamExtended) {} @Override - public void notify(RecordsDeliveryAck ack) { - - } + public void notify(RecordsDeliveryAck ack) {} @Override - public void shutdown() { - - } + public void shutdown() {} @Override public RequestDetails getLastSuccessfulRequestDetails() { @@ -237,9 +270,7 @@ public class ShardConsumerTest { } @Override - public void restartFrom(RecordsRetrieved recordsRetrieved) { - - } + public void restartFrom(RecordsRetrieved recordsRetrieved) {} public void awaitSubscription() throws InterruptedException, BrokenBarrierException { barrier.await(); @@ -307,9 +338,13 @@ public class ShardConsumerTest { verify(taskExecutionListener, times(2)).beforeTaskExecution(processTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + processTaskInput = + processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + shutdownTaskInput = shutdownTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); verify(taskExecutionListener, times(2)).afterTaskExecution(processTaskInput); @@ -365,9 +400,13 @@ public class ShardConsumerTest { verify(taskExecutionListener, times(1)).beforeTaskExecution(processTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - 
initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + processTaskInput = + processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + shutdownTaskInput = shutdownTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); verify(taskExecutionListener, times(1)).afterTaskExecution(processTaskInput); @@ -430,9 +469,13 @@ public class ShardConsumerTest { verify(taskExecutionListener, times(3)).beforeTaskExecution(processTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + processTaskInput = + processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + shutdownTaskInput = shutdownTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); verify(taskExecutionListener, times(3)).afterTaskExecution(processTaskInput); @@ -445,7 +488,8 @@ public class ShardConsumerTest { public final void testInitializationStateUponFailure() throws Exception { final ShardConsumer consumer = createShardConsumer(recordsPublisher); - when(initialState.createTask(eq(shardConsumerArgument), eq(consumer), any())).thenReturn(initializeTask); + 
when(initialState.createTask(eq(shardConsumerArgument), eq(consumer), any())) + .thenReturn(initializeTask); when(initializeTask.call()).thenReturn(new TaskResult(new Exception("Bad"))); when(initializeTask.taskType()).thenReturn(TaskType.INITIALIZE); when(initialState.failureTransition()).thenReturn(initialState); @@ -474,8 +518,7 @@ public class ShardConsumerTest { public final void testSuccessfulConsumerStateTransition() { ExecutorService directExecutorService = spy(executorService); - doAnswer(this::directlyExecuteRunnable) - .when(directExecutorService).execute(any()); + doAnswer(this::directlyExecuteRunnable).when(directExecutorService).execute(any()); final ShardConsumer consumer = createShardConsumer(directExecutorService, blockedOnParentsState); @@ -493,7 +536,9 @@ public class ShardConsumerTest { } } while (--arbitraryExecutionCount > 0); - assertEquals(ShardConsumerState.PROCESSING.consumerState().state(), consumer.currentState().state()); + assertEquals( + ShardConsumerState.PROCESSING.consumerState().state(), + consumer.currentState().state()); verify(directExecutorService, times(2)).execute(any()); } @@ -513,7 +558,8 @@ public class ShardConsumerTest { // Failing the initialization task and all other attempts after that. 
doAnswer(this::directlyExecuteRunnable) .doThrow(new RejectedExecutionException()) - .when(failingService).execute(any()); + .when(failingService) + .execute(any()); int arbitraryExecutionCount = 5; do { @@ -525,7 +571,9 @@ public class ShardConsumerTest { } } while (--arbitraryExecutionCount > 0); - assertEquals(ShardConsumerState.INITIALIZING.consumerState().state(), consumer.currentState().state()); + assertEquals( + ShardConsumerState.INITIALIZING.consumerState().state(), + consumer.currentState().state()); verify(failingService, times(5)).execute(any()); } @@ -548,7 +596,8 @@ public class ShardConsumerTest { .doThrow(new RejectedExecutionException()) .doThrow(new RejectedExecutionException()) .doAnswer(this::directlyExecuteRunnable) - .when(failingService).execute(any()); + .when(failingService) + .execute(any()); int arbitraryExecutionCount = 6; do { @@ -560,7 +609,9 @@ public class ShardConsumerTest { } } while (--arbitraryExecutionCount > 0); - assertEquals(ShardConsumerState.PROCESSING.consumerState().state(), consumer.currentState().state()); + assertEquals( + ShardConsumerState.PROCESSING.consumerState().state(), + consumer.currentState().state()); verify(failingService, times(5)).execute(any()); } @@ -576,8 +627,7 @@ public class ShardConsumerTest { mockSuccessfulInitializeWithFailureTransition(); // Failing the waiting_on_parents task and few other attempts after that. 
- doThrow(new RejectedExecutionException()) - .when(failingService).execute(any()); + doThrow(new RejectedExecutionException()).when(failingService).execute(any()); int arbitraryExecutionCount = 5; do { @@ -589,7 +639,9 @@ public class ShardConsumerTest { } } while (--arbitraryExecutionCount > 0); - assertEquals(ShardConsumerState.WAITING_ON_PARENT_SHARDS.consumerState().state(), consumer.currentState().state()); + assertEquals( + ShardConsumerState.WAITING_ON_PARENT_SHARDS.consumerState().state(), + consumer.currentState().state()); verify(failingService, times(5)).execute(any()); } @@ -648,12 +700,14 @@ public class ShardConsumerTest { when(shutdownRequestedState.shutdownTransition(eq(ShutdownReason.REQUESTED))) .thenReturn(shutdownRequestedAwaitState); - when(shutdownRequestedState.shutdownTransition(eq(ShutdownReason.LEASE_LOST))).thenReturn(shutdownState); + when(shutdownRequestedState.shutdownTransition(eq(ShutdownReason.LEASE_LOST))) + .thenReturn(shutdownState); when(shutdownRequestedAwaitState.requiresDataAvailability()).thenReturn(false); when(shutdownRequestedAwaitState.createTask(any(), any(), any())).thenReturn(null); when(shutdownRequestedAwaitState.shutdownTransition(eq(ShutdownReason.REQUESTED))) .thenReturn(shutdownRequestedState); - when(shutdownRequestedAwaitState.shutdownTransition(eq(ShutdownReason.LEASE_LOST))).thenReturn(shutdownState); + when(shutdownRequestedAwaitState.shutdownTransition(eq(ShutdownReason.LEASE_LOST))) + .thenReturn(shutdownState); when(shutdownRequestedAwaitState.taskType()).thenReturn(TaskType.SHUTDOWN_COMPLETE); mockSuccessfulShutdown(null); @@ -698,10 +752,16 @@ public class ShardConsumerTest { verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownRequestedAwaitTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = 
processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownRequestedTaskInput = shutdownRequestedTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + processTaskInput = + processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + shutdownRequestedTaskInput = shutdownRequestedTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); + shutdownTaskInput = shutdownTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); // No task is created/run for this shutdownRequestedAwaitState, so there's no task outcome. // shutdownNotification.shutdownComplete() should only be called for gracefulShutdown @@ -719,8 +779,17 @@ public class ShardConsumerTest { public void testExceptionInProcessingStopsRequests() throws Exception { TestPublisher cache = new TestPublisher(); - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, Optional.of(1L), - shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener, 0); + ShardConsumer consumer = new ShardConsumer( + cache, + executorService, + shardInfo, + Optional.of(1L), + shardConsumerArgument, + initialState, + Function.identity(), + 1, + taskExecutionListener, + 0); mockSuccessfulInitialize(null); mockSuccessfulProcessing(null); @@ -755,7 +824,8 @@ public class ShardConsumerTest { verify(taskExecutionListener, times(1)).beforeTaskExecution(initialTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(processTaskInput); - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); 
verifyNoMoreInteractions(taskExecutionListener); @@ -765,8 +835,17 @@ public class ShardConsumerTest { public void testLongRunningTasks() throws Exception { TestPublisher cache = new TestPublisher(); - ShardConsumer consumer = new ShardConsumer(cache, executorService, shardInfo, Optional.of(1L), - shardConsumerArgument, initialState, Function.identity(), 1, taskExecutionListener, 0); + ShardConsumer consumer = new ShardConsumer( + cache, + executorService, + shardInfo, + Optional.of(1L), + shardConsumerArgument, + initialState, + Function.identity(), + 1, + taskExecutionListener, + 0); CyclicBarrier taskArriveBarrier = new CyclicBarrier(2); CyclicBarrier taskDepartBarrier = new CyclicBarrier(2); @@ -845,9 +924,13 @@ public class ShardConsumerTest { verify(taskExecutionListener, times(2)).beforeTaskExecution(processTaskInput); verify(taskExecutionListener, times(1)).beforeTaskExecution(shutdownTaskInput); - initialTaskInput = initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - processTaskInput = processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); - shutdownTaskInput = shutdownTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + initialTaskInput = + initialTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + processTaskInput = + processTaskInput.toBuilder().taskOutcome(TaskOutcome.SUCCESSFUL).build(); + shutdownTaskInput = shutdownTaskInput.toBuilder() + .taskOutcome(TaskOutcome.SUCCESSFUL) + .build(); verify(taskExecutionListener, times(1)).afterTaskExecution(initialTaskInput); verify(taskExecutionListener, times(2)).afterTaskExecution(processTaskInput); @@ -860,8 +943,17 @@ public class ShardConsumerTest { final RecordsPublisher mockPublisher = mock(RecordsPublisher.class); final ExecutorService mockExecutor = mock(ExecutorService.class); final ConsumerState mockState = mock(ConsumerState.class); - final ShardConsumer consumer = new ShardConsumer(mockPublisher, mockExecutor, shardInfo, 
Optional.of(1L), - shardConsumerArgument, mockState, Function.identity(), 1, taskExecutionListener, 0); + final ShardConsumer consumer = new ShardConsumer( + mockPublisher, + mockExecutor, + shardInfo, + Optional.of(1L), + shardConsumerArgument, + mockState, + Function.identity(), + 1, + taskExecutionListener, + 0); when(mockState.state()).thenReturn(ShardConsumerState.WAITING_ON_PARENT_SHARDS); when(mockState.taskType()).thenReturn(TaskType.BLOCK_ON_PARENT_SHARDS); @@ -871,8 +963,8 @@ public class ShardConsumerTest { // and successful Initialize task execution when(mockTask.call()).thenReturn(new TaskResult(false)); - log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to initiate async" + - " processing of blocked on parent task"); + log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to initiate async" + + " processing of blocked on parent task"); consumer.executeLifecycle(); final ArgumentCaptor taskToExecute = ArgumentCaptor.forClass(Runnable.class); verify(mockExecutor, timeout(100)).execute(taskToExecute.capture()); @@ -880,8 +972,8 @@ public class ShardConsumerTest { log.info("RecordProcessor Thread: Simulated successful execution of Blocked on parent task"); reset(mockExecutor); - log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to move to InitializingState" + - " and initiate async processing of initialize task"); + log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to move to InitializingState" + + " and initiate async processing of initialize task"); when(mockState.successTransition()).thenReturn(mockState); when(mockState.state()).thenReturn(ShardConsumerState.INITIALIZING); when(mockState.taskType()).thenReturn(TaskType.INITIALIZE); @@ -890,8 +982,8 @@ public class ShardConsumerTest { log.info("RecordProcessor Thread: Simulated successful execution of Initialize task"); taskToExecute.getValue().run(); - log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to 
move to ProcessingState" + - " and mark initialization future as complete"); + log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to move to ProcessingState" + + " and mark initialization future as complete"); when(mockState.state()).thenReturn(ShardConsumerState.PROCESSING); consumer.executeLifecycle(); @@ -908,27 +1000,30 @@ public class ShardConsumerTest { // to control the precise timing of the thread execution, this is the best way final CountDownLatch processTaskLatch = new CountDownLatch(1); new Thread(() -> { - reset(mockState); - when(mockState.taskType()).thenReturn(TaskType.PROCESS); - final ConsumerTask mockProcessTask = mock(ConsumerTask.class); - when(mockState.createTask(any(), any(), any())).thenReturn(mockProcessTask); - when(mockProcessTask.call()).then(input -> { - // first we want to wait for subscribe to be called, - // but we cannot control the timing, so wait for 10 seconds - // to let the main thread invoke executeLifecyle which - // will perform subscribe - processTaskLatch.countDown(); - log.info("Record Processor Thread: Holding shardConsumer lock, waiting for 10 seconds to" + - " let subscribe be called by scheduler thread"); - Thread.sleep(10 * 1000); - log.info("RecordProcessor Thread: Done waiting"); - // then return shard end result - log.info("RecordProcessor Thread: Simulating execution of ProcessTask and returning shard-end result"); - return new TaskResult(true); - }); - final Subscription mockSubscription = mock(Subscription.class); - consumer.handleInput(ProcessRecordsInput.builder().isAtShardEnd(true).build(), mockSubscription); - }).start(); + reset(mockState); + when(mockState.taskType()).thenReturn(TaskType.PROCESS); + final ConsumerTask mockProcessTask = mock(ConsumerTask.class); + when(mockState.createTask(any(), any(), any())).thenReturn(mockProcessTask); + when(mockProcessTask.call()).then(input -> { + // first we want to wait for subscribe to be called, + // but we cannot control the timing, so wait 
for 10 seconds + // to let the main thread invoke executeLifecyle which + // will perform subscribe + processTaskLatch.countDown(); + log.info("Record Processor Thread: Holding shardConsumer lock, waiting for 10 seconds to" + + " let subscribe be called by scheduler thread"); + Thread.sleep(10 * 1000); + log.info("RecordProcessor Thread: Done waiting"); + // then return shard end result + log.info( + "RecordProcessor Thread: Simulating execution of ProcessTask and returning shard-end result"); + return new TaskResult(true); + }); + final Subscription mockSubscription = mock(Subscription.class); + consumer.handleInput( + ProcessRecordsInput.builder().isAtShardEnd(true).build(), mockSubscription); + }) + .start(); processTaskLatch.await(); @@ -949,13 +1044,13 @@ public class ShardConsumerTest { return mockState; }); when(mockState.state()).then(input -> { - if (successTransitionCalled.get() && shutdownTransitionCalled.get()) { - return ShardConsumerState.SHUTTING_DOWN; - } - return ShardConsumerState.PROCESSING; + if (successTransitionCalled.get() && shutdownTransitionCalled.get()) { + return ShardConsumerState.SHUTTING_DOWN; + } + return ShardConsumerState.PROCESSING; }); - log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to invoke subscribe and" + - " complete initialization"); + log.info("Scheduler Thread: Invoking ShardConsumer.executeLifecycle() to invoke subscribe and" + + " complete initialization"); consumer.executeLifecycle(); log.info("Scheduler Thread: Done initializing the ShardConsumer"); @@ -987,7 +1082,8 @@ public class ShardConsumerTest { } private void mockSuccessfulProcessing(CyclicBarrier taskCallBarrier, CyclicBarrier taskInterlockBarrier) { - when(processingState.createTask(eq(shardConsumerArgument), any(), any())).thenReturn(processingTask); + when(processingState.createTask(eq(shardConsumerArgument), any(), any())) + .thenReturn(processingTask); when(processingState.requiresDataAvailability()).thenReturn(true); 
when(processingState.taskType()).thenReturn(TaskType.PROCESS); when(processingTask.taskType()).thenReturn(TaskType.PROCESS); @@ -1024,7 +1120,6 @@ public class ShardConsumerTest { when(initialState.requiresDataAvailability()).thenReturn(false); when(initialState.successTransition()).thenReturn(processingState); when(initialState.state()).thenReturn(ConsumerStates.ShardConsumerState.INITIALIZING); - } private void mockSuccessfulUnblockOnParentsWithFailureTransition() { @@ -1033,7 +1128,8 @@ public class ShardConsumerTest { } private void mockSuccessfulUnblockOnParents() { - when(blockedOnParentsState.createTask(eq(shardConsumerArgument), any(), any())).thenReturn(blockedOnParentsTask); + when(blockedOnParentsState.createTask(eq(shardConsumerArgument), any(), any())) + .thenReturn(blockedOnParentsTask); when(blockedOnParentsState.taskType()).thenReturn(TaskType.BLOCK_ON_PARENT_SHARDS); when(blockedOnParentsTask.taskType()).thenReturn(TaskType.BLOCK_ON_PARENT_SHARDS); when(blockedOnParentsTask.call()).thenAnswer(i -> blockOnParentsTaskResult); @@ -1069,10 +1165,18 @@ public class ShardConsumerTest { return createShardConsumer(recordsPublisher, executorService, state); } - private ShardConsumer createShardConsumer(final RecordsPublisher publisher, - final ExecutorService executorService, final ConsumerState state) { - return new ShardConsumer(publisher, executorService, shardInfo, logWarningForTaskAfterMillis, - shardConsumerArgument, state, Function.identity(), 1, taskExecutionListener, 0); + private ShardConsumer createShardConsumer( + final RecordsPublisher publisher, final ExecutorService executorService, final ConsumerState state) { + return new ShardConsumer( + publisher, + executorService, + shardInfo, + logWarningForTaskAfterMillis, + shardConsumerArgument, + state, + Function.identity(), + 1, + taskExecutionListener, + 0); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownReasonTest.java 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownReasonTest.java index ce026f1d..f216a09a 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownReasonTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownReasonTest.java @@ -14,12 +14,12 @@ */ package software.amazon.kinesis.lifecycle; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - import org.junit.Assert; import org.junit.Test; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + /** * Unit tests of ShutdownReason enum class. */ @@ -42,5 +42,4 @@ public class ShutdownReasonTest { assertTrue(ShutdownReason.REQUESTED.canTransitionTo(ShutdownReason.LEASE_LOST)); assertTrue(ShutdownReason.REQUESTED.canTransitionTo(ShutdownReason.SHARD_END)); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownTaskTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownTaskTest.java index 390d3ef6..db64d198 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownTaskTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/lifecycle/ShutdownTaskTest.java @@ -14,22 +14,6 @@ */ package software.amazon.kinesis.lifecycle; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; -import static 
software.amazon.kinesis.lifecycle.ShutdownReason.LEASE_LOST; -import static software.amazon.kinesis.lifecycle.ShutdownReason.SHARD_END; - import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -44,7 +28,6 @@ import org.mockito.ArgumentCaptor; import org.mockito.Matchers; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.model.ChildShard; import software.amazon.kinesis.checkpoint.ShardRecordProcessorCheckpointer; import software.amazon.kinesis.common.InitialPositionInStream; @@ -63,8 +46,8 @@ import software.amazon.kinesis.leases.ShardObjectHelper; import software.amazon.kinesis.leases.UpdateField; import software.amazon.kinesis.leases.exceptions.CustomerApplicationException; import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.leases.exceptions.LeasePendingDeletion; import software.amazon.kinesis.leases.exceptions.InvalidStateException; +import software.amazon.kinesis.leases.exceptions.LeasePendingDeletion; import software.amazon.kinesis.leases.exceptions.ProvisionedThroughputException; import software.amazon.kinesis.lifecycle.events.LeaseLostInput; import software.amazon.kinesis.lifecycle.events.ShardEndedInput; @@ -75,6 +58,22 @@ import software.amazon.kinesis.processor.ShardRecordProcessor; import software.amazon.kinesis.retrieval.RecordsPublisher; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static 
org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; +import static software.amazon.kinesis.lifecycle.ShutdownReason.LEASE_LOST; +import static software.amazon.kinesis.lifecycle.ShutdownReason.SHARD_END; + /** * */ @@ -91,29 +90,39 @@ public class ShutdownTaskTest { * Shard id for the default-provided {@link ShardInfo} and {@link Lease}. */ private static final String SHARD_ID = "shardId-0"; - private static final ShardInfo SHARD_INFO = new ShardInfo(SHARD_ID, "concurrencyToken", - Collections.emptySet(), ExtendedSequenceNumber.LATEST); + + private static final ShardInfo SHARD_INFO = + new ShardInfo(SHARD_ID, "concurrencyToken", Collections.emptySet(), ExtendedSequenceNumber.LATEST); private ShutdownTask task; @Mock private RecordsPublisher recordsPublisher; + @Mock private ShardRecordProcessorCheckpointer recordProcessorCheckpointer; + @Mock private Checkpointer checkpointer; + @Mock private LeaseRefresher leaseRefresher; + @Mock private LeaseCoordinator leaseCoordinator; + @Mock private ShardDetector shardDetector; + @Mock private HierarchicalShardSyncer hierarchicalShardSyncer; + @Mock private ShardRecordProcessor shardRecordProcessor; + @Mock private LeaseCleanupManager leaseCleanupManager; + @Mock private ShutdownNotification shutdownNotification; @@ -123,7 +132,8 @@ public class ShutdownTaskTest { when(recordProcessorCheckpointer.lastCheckpointValue()).thenReturn(ExtendedSequenceNumber.SHARD_END); final Lease childLease = new Lease(); childLease.leaseKey("childShardLeaseKey"); - when(hierarchicalShardSyncer.createLeaseForChildShard(Matchers.any(ChildShard.class), Matchers.any(StreamIdentifier.class))) + when(hierarchicalShardSyncer.createLeaseForChildShard( + Matchers.any(ChildShard.class), Matchers.any(StreamIdentifier.class))) .thenReturn(childLease); setupLease(SHARD_ID, Collections.emptyList()); @@ -152,7 +162,8 @@ public class ShutdownTaskTest { */ @Test public final void testCallWhenCreatingNewLeasesThrows() throws Exception { - 
when(hierarchicalShardSyncer.createLeaseForChildShard(Matchers.any(ChildShard.class), Matchers.any(StreamIdentifier.class))) + when(hierarchicalShardSyncer.createLeaseForChildShard( + Matchers.any(ChildShard.class), Matchers.any(StreamIdentifier.class))) .thenThrow(new InvalidStateException("InvalidStateException is thrown")); final TaskResult result = task.call(); @@ -168,11 +179,15 @@ public class ShutdownTaskTest { * This test is for the scenario that ShutdownTask is created for ShardConsumer reaching the Shard End. */ @Test - public final void testCallWhenTrueShardEnd() throws DependencyException, InvalidStateException, ProvisionedThroughputException { + public final void testCallWhenTrueShardEnd() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { final TaskResult result = task.call(); assertNull(result.getException()); verifyShutdownAndNoDrop(); - verify(shardRecordProcessor).shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build()); + verify(shardRecordProcessor) + .shardEnded(ShardEndedInput.builder() + .checkpointer(recordProcessorCheckpointer) + .build()); verify(leaseRefresher).updateLeaseWithMetaInfo(Matchers.any(Lease.class), Matchers.any(UpdateField.class)); verify(leaseRefresher, times(2)).createLeaseIfNotExists(Matchers.any(Lease.class)); verify(leaseCleanupManager).enqueueForDeletion(any(LeasePendingDeletion.class)); @@ -199,11 +214,13 @@ public class ShutdownTaskTest { private void testMergeChildWhereOneParentHasLease(final boolean blockOnParent) throws Exception { // the @Before setup makes the `SHARD_ID` parent accessible final ChildShard mergeChild = constructChildFromMerge(); - final TaskResult result = createShutdownTaskSpy(blockOnParent, Collections.singletonList(mergeChild)).call(); + final TaskResult result = createShutdownTaskSpy(blockOnParent, Collections.singletonList(mergeChild)) + .call(); if (blockOnParent) { assertNotNull(result.getException()); - 
assertEquals(BlockedOnParentShardException.class, result.getException().getClass()); + assertEquals( + BlockedOnParentShardException.class, result.getException().getClass()); verify(leaseCoordinator, never()).dropLease(any(Lease.class)); verify(shardRecordProcessor, never()).leaseLost(any(LeaseLostInput.class)); @@ -242,7 +259,8 @@ public class ShutdownTaskTest { when(hierarchicalShardSyncer.createLeaseForChildShard(mergeChild, STREAM_IDENTIFIER)) .thenReturn(mockChildLease); - final TaskResult result = createShutdownTask(SHARD_END, Collections.singletonList(mergeChild)).call(); + final TaskResult result = createShutdownTask(SHARD_END, Collections.singletonList(mergeChild)) + .call(); assertNull(result.getException()); verify(leaseCleanupManager).enqueueForDeletion(any(LeasePendingDeletion.class)); @@ -263,7 +281,10 @@ public class ShutdownTaskTest { assertEquals(expectedShardIds, new HashSet<>(leaseKeyCaptor.getAllValues())); verifyShutdownAndNoDrop(); - verify(shardRecordProcessor).shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build()); + verify(shardRecordProcessor) + .shardEnded(ShardEndedInput.builder() + .checkpointer(recordProcessorCheckpointer) + .build()); } /** @@ -273,10 +294,11 @@ public class ShutdownTaskTest { @Test public final void testCallWhenShardNotFound() throws Exception { final Lease lease = setupLease("shardId-4", Collections.emptyList()); - final ShardInfo shardInfo = new ShardInfo(lease.leaseKey(), "concurrencyToken", Collections.emptySet(), - ExtendedSequenceNumber.LATEST); + final ShardInfo shardInfo = new ShardInfo( + lease.leaseKey(), "concurrencyToken", Collections.emptySet(), ExtendedSequenceNumber.LATEST); - final TaskResult result = createShutdownTask(SHARD_END, Collections.emptyList(), shardInfo).call(); + final TaskResult result = createShutdownTask(SHARD_END, Collections.emptyList(), shardInfo) + .call(); assertNull(result.getException()); verifyShutdownAndNoDrop(); @@ -288,12 +310,17 @@ public 
class ShutdownTaskTest { * This test is for the scenario that a ShutdownTask is created for the ShardConsumer losing the lease. */ @Test - public final void testCallWhenLeaseLost() throws DependencyException, InvalidStateException, ProvisionedThroughputException { - final TaskResult result = createShutdownTask(LEASE_LOST, Collections.emptyList()).call(); + public final void testCallWhenLeaseLost() + throws DependencyException, InvalidStateException, ProvisionedThroughputException { + final TaskResult result = + createShutdownTask(LEASE_LOST, Collections.emptyList()).call(); assertNull(result.getException()); verify(recordsPublisher).shutdown(); - verify(shardRecordProcessor, never()).shardEnded(ShardEndedInput.builder().checkpointer(recordProcessorCheckpointer).build()); + verify(shardRecordProcessor, never()) + .shardEnded(ShardEndedInput.builder() + .checkpointer(recordProcessorCheckpointer) + .build()); verify(shardRecordProcessor).leaseLost(LeaseLostInput.builder().build()); verify(leaseCoordinator, never()).getAssignments(); verify(leaseRefresher, never()).createLeaseIfNotExists(any(Lease.class)); @@ -312,7 +339,8 @@ public class ShutdownTaskTest { @Test public void testCallWhenShutdownNotificationIsNull() { - final TaskResult result = createShutdownTask(LEASE_LOST, Collections.emptyList()).call(); + final TaskResult result = + createShutdownTask(LEASE_LOST, Collections.emptyList()).call(); assertNull(result.getException()); verify(recordsPublisher).shutdown(); verify(shutdownNotification, never()).shutdownComplete(); @@ -345,15 +373,15 @@ public class ShutdownTaskTest { private List constructChildrenFromSplit() { List parentShards = Collections.singletonList(SHARD_ID); ChildShard leftChild = ChildShard.builder() - .shardId("ShardId-1") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) - .build(); + .shardId("ShardId-1") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) + 
.build(); ChildShard rightChild = ChildShard.builder() - .shardId("ShardId-2") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) - .build(); + .shardId("ShardId-2") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) + .build(); return Arrays.asList(leftChild, rightChild); } @@ -379,11 +407,24 @@ public class ShutdownTaskTest { return createShutdownTask(reason, childShards, SHARD_INFO); } - private ShutdownTask createShutdownTask(final ShutdownReason reason, final List childShards, - final ShardInfo shardInfo) { - return new ShutdownTask(shardInfo, shardDetector, shardRecordProcessor, recordProcessorCheckpointer, - reason, INITIAL_POSITION_TRIM_HORIZON, false, false, - leaseCoordinator, TASK_BACKOFF_TIME_MILLIS, recordsPublisher, hierarchicalShardSyncer, - NULL_METRICS_FACTORY, childShards, STREAM_IDENTIFIER, leaseCleanupManager); + private ShutdownTask createShutdownTask( + final ShutdownReason reason, final List childShards, final ShardInfo shardInfo) { + return new ShutdownTask( + shardInfo, + shardDetector, + shardRecordProcessor, + recordProcessorCheckpointer, + reason, + INITIAL_POSITION_TRIM_HORIZON, + false, + false, + leaseCoordinator, + TASK_BACKOFF_TIME_MILLIS, + recordsPublisher, + hierarchicalShardSyncer, + NULL_METRICS_FACTORY, + childShards, + STREAM_IDENTIFIER, + leaseCleanupManager); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/AccumulatingMetricsScopeTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/AccumulatingMetricsScopeTest.java index 578ed98d..a1782cf7 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/AccumulatingMetricsScopeTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/AccumulatingMetricsScopeTest.java @@ -16,11 +16,9 @@ package software.amazon.kinesis.metrics; import org.junit.Assert; import org.junit.Test; - import 
software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - public class AccumulatingMetricsScopeTest { private static class TestScope extends AccumulateByNameMetricsScope { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisherTest.java index 7f40266b..a1676aaf 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisherTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchMetricsPublisherTest.java @@ -28,7 +28,6 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.cloudwatch.CloudWatchAsyncClient; import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import software.amazon.awssdk.services.cloudwatch.model.PutMetricDataRequest; @@ -63,7 +62,7 @@ public class CloudWatchMetricsPublisherTest { List> dataToPublish = constructMetricDatumWithKeyList(25); List> expectedData = constructMetricDatumListMap(dataToPublish); publisher.publishMetrics(dataToPublish); - + ArgumentCaptor argument = ArgumentCaptor.forClass(PutMetricDataRequest.class); Mockito.verify(cloudWatchClient, Mockito.atLeastOnce()).putMetricData(argument.capture()); @@ -73,7 +72,6 @@ public class CloudWatchMetricsPublisherTest { for (int i = 0; i < requests.size(); i++) { assertMetricData(expectedData.get(i), requests.get(i)); } - } public static List> constructMetricDatumWithKeyList(int value) { @@ -89,7 +87,8 @@ public class CloudWatchMetricsPublisherTest { // batchSize is the number of metrics sent in a single request. // In CloudWatchMetricsPublisher this number is set to 20. 
- public List> constructMetricDatumListMap(List> data) { + public List> constructMetricDatumListMap( + List> data) { int batchSize = 20; List> dataList = new ArrayList>(); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnableTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnableTest.java index a6c29fb8..4596835d 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnableTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/CloudWatchPublisherRunnableTest.java @@ -24,7 +24,6 @@ import org.mockito.Mockito; import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - public class CloudWatchPublisherRunnableTest { private static final int MAX_QUEUE_SIZE = 5; @@ -36,7 +35,8 @@ public class CloudWatchPublisherRunnableTest { private static final int FLUSH_SIZE = 2; private static class TestHarness { - private List> data = new ArrayList>(); + private List> data = + new ArrayList>(); private int counter = 0; private CloudWatchMetricsPublisher publisher; private CloudWatchPublisherRunnable runnable; @@ -44,16 +44,12 @@ public class CloudWatchPublisherRunnableTest { TestHarness() { publisher = Mockito.mock(CloudWatchMetricsPublisher.class); - runnable = new CloudWatchPublisherRunnable(publisher, - MAX_BUFFER_TIME_MILLIS, - MAX_QUEUE_SIZE, - FLUSH_SIZE) { + runnable = new CloudWatchPublisherRunnable(publisher, MAX_BUFFER_TIME_MILLIS, MAX_QUEUE_SIZE, FLUSH_SIZE) { @Override protected long getTime() { return time; } - }; } @@ -67,12 +63,8 @@ public class CloudWatchPublisherRunnableTest { } private MetricDatumWithKey constructDatum(int value) { - MetricDatum datum = TestHelper.constructDatum("datum-" + Integer.toString(value), - StandardUnit.COUNT, - value, - value, - value, - 1); + MetricDatum datum = 
TestHelper.constructDatum( + "datum-" + Integer.toString(value), StandardUnit.COUNT, value, value, value, 1); return new MetricDatumWithKey(new CloudWatchMetricKey(datum), datum); } @@ -80,7 +72,7 @@ public class CloudWatchPublisherRunnableTest { /** * Run one iteration of the runnable and assert that it called CloudWatch with count records beginning with * record startIndex, and no more than that. - * + * * @param startIndex * @param count */ diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/EndingMetricsScopeTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/EndingMetricsScopeTest.java index a3d792ae..a4572de0 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/EndingMetricsScopeTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/EndingMetricsScopeTest.java @@ -15,14 +15,11 @@ package software.amazon.kinesis.metrics; import org.junit.Test; - import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; public class EndingMetricsScopeTest { - private static class TestScope extends EndingMetricsScope { - - } + private static class TestScope extends EndingMetricsScope {} @Test public void testAddDataNotEnded() { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/FilteringMetricsScopeTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/FilteringMetricsScopeTest.java index 5320588c..4478c07d 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/FilteringMetricsScopeTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/FilteringMetricsScopeTest.java @@ -16,13 +16,11 @@ package software.amazon.kinesis.metrics; import java.util.Set; +import com.google.common.collect.ImmutableSet; import lombok.AccessLevel; import lombok.NoArgsConstructor; import org.junit.Assert; import org.junit.Test; - -import 
com.google.common.collect.ImmutableSet; - import software.amazon.awssdk.services.cloudwatch.model.Dimension; import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; @@ -49,7 +47,9 @@ public class FilteringMetricsScopeTest { Assert.assertTrue(getDimensions().remove(dimension)); } - Assert.assertTrue("Dimensions should be empty at the end of assertDimensions", getDimensions().isEmpty()); + Assert.assertTrue( + "Dimensions should be empty at the end of assertDimensions", + getDimensions().isEmpty()); } } @@ -106,8 +106,9 @@ public class FilteringMetricsScopeTest { @Test public void testMetricsDimensionsAll() { - TestScope scope = new TestScope(MetricsLevel.DETAILED, ImmutableSet.of( - "ThisDoesNotMatter", MetricsScope.METRICS_DIMENSIONS_ALL, "ThisAlsoDoesNotMatter")); + TestScope scope = new TestScope( + MetricsLevel.DETAILED, + ImmutableSet.of("ThisDoesNotMatter", MetricsScope.METRICS_DIMENSIONS_ALL, "ThisAlsoDoesNotMatter")); scope.addDimension("ShardId", "shard-0001"); scope.addDimension("Operation", "ProcessRecords"); scope.addDimension("ShardId", "shard-0001"); diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/MetricAccumulatingQueueTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/MetricAccumulatingQueueTest.java index 0354a214..4472776f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/MetricAccumulatingQueueTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/MetricAccumulatingQueueTest.java @@ -21,12 +21,10 @@ import java.util.List; import org.junit.Assert; import org.junit.Before; import org.junit.Test; - import software.amazon.awssdk.services.cloudwatch.model.Dimension; import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; - public class MetricAccumulatingQueueTest { 
private static final int MAX_QUEUE_SIZE = 5; @@ -40,9 +38,9 @@ public class MetricAccumulatingQueueTest { private Dimension dim(String name, String value) { return Dimension.builder().name(name).value(value).build(); } - + /* - * Test whether the MetricDatums offered into the queue will accumulate data based on the same metricName and + * Test whether the MetricDatums offered into the queue will accumulate data based on the same metricName and * output those datums with the correctly accumulated output. */ @Test @@ -52,36 +50,46 @@ public class MetricAccumulatingQueueTest { String keyA = "a"; String keyB = "b"; - MetricDatum datum1 = - TestHelper.constructDatum(keyA, StandardUnit.COUNT, 10, 5, 15, 2).toBuilder().dimensions(dimensionsA).build(); + MetricDatum datum1 = TestHelper.constructDatum(keyA, StandardUnit.COUNT, 10, 5, 15, 2).toBuilder() + .dimensions(dimensionsA) + .build(); queue.offer(new CloudWatchMetricKey(datum1), datum1); - MetricDatum datum2 = - TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2).toBuilder().dimensions(dimensionsA).build(); + MetricDatum datum2 = TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2).toBuilder() + .dimensions(dimensionsA) + .build(); queue.offer(new CloudWatchMetricKey(datum2), datum2); - MetricDatum datum3 = - TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2).toBuilder().dimensions(dimensionsB).build(); + MetricDatum datum3 = TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2).toBuilder() + .dimensions(dimensionsB) + .build(); queue.offer(new CloudWatchMetricKey(datum3), datum3); MetricDatum datum4 = TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 2, 2); queue.offer(new CloudWatchMetricKey(datum4), datum4); queue.offer(new CloudWatchMetricKey(datum4), datum4); - MetricDatum datum5 = - TestHelper.constructDatum(keyB, StandardUnit.COUNT, 100, 10, 110, 2).toBuilder().dimensions(dimensionsA).build(); + MetricDatum datum5 = TestHelper.constructDatum(keyB, 
StandardUnit.COUNT, 100, 10, 110, 2).toBuilder() + .dimensions(dimensionsA) + .build(); queue.offer(new CloudWatchMetricKey(datum5), datum5); Assert.assertEquals(4, queue.size()); List> items = queue.drain(4); - Assert.assertEquals(items.get(0).datum, TestHelper.constructDatum(keyA, StandardUnit.COUNT, 10, 1, 17, 4) - .toBuilder().dimensions(dimensionsA).build()); + Assert.assertEquals( + items.get(0).datum, + TestHelper.constructDatum(keyA, StandardUnit.COUNT, 10, 1, 17, 4).toBuilder() + .dimensions(dimensionsA) + .build()); Assert.assertEquals(items.get(1).datum, datum3); Assert.assertEquals(items.get(2).datum, TestHelper.constructDatum(keyA, StandardUnit.COUNT, 1, 1, 4, 4)); - Assert.assertEquals(items.get(3).datum, TestHelper.constructDatum(keyB, StandardUnit.COUNT, 100, 10, 110, 2) - .toBuilder().dimensions(dimensionsA).build()); + Assert.assertEquals( + items.get(3).datum, + TestHelper.constructDatum(keyB, StandardUnit.COUNT, 100, 10, 110, 2).toBuilder() + .dimensions(dimensionsA) + .build()); } - + /* * Test that the number of MetricDatum that can be added to our queue is capped at the MAX_QUEUE_SIZE. * Therefore, any datums added to the queue that is greater than the capacity of our queue will be dropped. 
diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/TestHelper.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/TestHelper.java index 5d6c2389..e358d5ac 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/TestHelper.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/metrics/TestHelper.java @@ -14,25 +14,24 @@ */ package software.amazon.kinesis.metrics; - import software.amazon.awssdk.services.cloudwatch.model.Dimension; import software.amazon.awssdk.services.cloudwatch.model.MetricDatum; import software.amazon.awssdk.services.cloudwatch.model.StandardUnit; import software.amazon.awssdk.services.cloudwatch.model.StatisticSet; public class TestHelper { - public static MetricDatum constructDatum(String name, - StandardUnit unit, - double maximum, - double minimum, - double sum, - double count) { - return MetricDatum.builder().metricName(name) + public static MetricDatum constructDatum( + String name, StandardUnit unit, double maximum, double minimum, double sum, double count) { + return MetricDatum.builder() + .metricName(name) .unit(unit) - .statisticValues(StatisticSet.builder().maximum(maximum) + .statisticValues(StatisticSet.builder() + .maximum(maximum) .minimum(minimum) .sum(sum) - .sampleCount(count).build()).build(); + .sampleCount(count) + .build()) + .build(); } public static Dimension constructDimension(String name, String value) { diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/processor/SingleStreamTrackerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/processor/SingleStreamTrackerTest.java index 9ae19ba3..1ac8822f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/processor/SingleStreamTrackerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/processor/SingleStreamTrackerTest.java @@ -15,11 +15,6 @@ package software.amazon.kinesis.processor; -import static 
org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertThat; - import org.hamcrest.Matchers; import org.junit.Test; import software.amazon.kinesis.common.InitialPositionInStream; @@ -27,6 +22,11 @@ import software.amazon.kinesis.common.InitialPositionInStreamExtended; import software.amazon.kinesis.common.StreamConfig; import software.amazon.kinesis.common.StreamIdentifier; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertThat; + public class SingleStreamTrackerTest { private static final String STREAM_NAME = SingleStreamTrackerTest.class.getSimpleName(); @@ -43,8 +43,8 @@ public class SingleStreamTrackerTest { InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); assertNotEquals(expectedPosition, StreamTracker.DEFAULT_POSITION_IN_STREAM); - final StreamTracker tracker = new SingleStreamTracker( - StreamIdentifier.singleStreamInstance(STREAM_NAME), expectedPosition); + final StreamTracker tracker = + new SingleStreamTracker(StreamIdentifier.singleStreamInstance(STREAM_NAME), expectedPosition); validate(tracker, expectedPosition); } @@ -55,12 +55,12 @@ public class SingleStreamTrackerTest { private static void validate(StreamTracker tracker, InitialPositionInStreamExtended expectedPosition) { assertEquals(1, tracker.streamConfigList().size()); assertFalse(tracker.isMultiStream()); - assertThat(tracker.formerStreamsLeasesDeletionStrategy(), + assertThat( + tracker.formerStreamsLeasesDeletionStrategy(), Matchers.instanceOf(FormerStreamsLeasesDeletionStrategy.NoLeaseDeletionStrategy.class)); final StreamConfig config = tracker.streamConfigList().get(0); assertEquals(STREAM_NAME, config.streamIdentifier().streamName()); assertEquals(expectedPosition, config.initialPositionInStreamExtended()); } - -} \ 
No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/AWSExceptionManagerTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/AWSExceptionManagerTest.java index 030979df..c90f108b 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/AWSExceptionManagerTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/AWSExceptionManagerTest.java @@ -15,10 +15,9 @@ package software.amazon.kinesis.retrieval; -import org.junit.Test; - import lombok.Getter; import lombok.extern.slf4j.Slf4j; +import org.junit.Test; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.isA; @@ -100,5 +99,4 @@ public class AWSExceptionManagerTest { this.additionalMessage = additionalMessage; } } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/IteratorBuilderTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/IteratorBuilderTest.java index db28261e..458792af 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/IteratorBuilderTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/IteratorBuilderTest.java @@ -1,8 +1,5 @@ package software.amazon.kinesis.retrieval; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.MatcherAssert.assertThat; - import java.time.Instant; import java.util.Date; import java.util.function.Consumer; @@ -10,7 +7,6 @@ import java.util.function.Function; import java.util.function.Supplier; import org.junit.Test; - import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; import software.amazon.awssdk.services.kinesis.model.ShardIteratorType; import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest; @@ -18,6 +14,9 @@ import software.amazon.kinesis.checkpoint.SentinelCheckpoint; 
import software.amazon.kinesis.common.InitialPositionInStream; import software.amazon.kinesis.common.InitialPositionInStreamExtended; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; + public class IteratorBuilderTest { private static final String SHARD_ID = "Shard-001"; @@ -53,7 +52,11 @@ public class IteratorBuilderTest { @Test public void subscribeReconnectTest() { - sequenceNumber(this::stsBase, this::verifyStsBase, IteratorBuilder::reconnectRequest, WrappedRequest::wrapped, + sequenceNumber( + this::stsBase, + this::verifyStsBase, + IteratorBuilder::reconnectRequest, + WrappedRequest::wrapped, ShardIteratorType.AFTER_SEQUENCE_NUMBER); } @@ -64,7 +67,11 @@ public class IteratorBuilderTest { @Test public void getShardIteratorReconnectTest() { - sequenceNumber(this::gsiBase, this::verifyGsiBase, IteratorBuilder::reconnectRequest, WrappedRequest::wrapped, + sequenceNumber( + this::gsiBase, + this::verifyGsiBase, + IteratorBuilder::reconnectRequest, + WrappedRequest::wrapped, ShardIteratorType.AFTER_SEQUENCE_NUMBER); } @@ -78,55 +85,108 @@ public class IteratorBuilderTest { timeStampTest(this::gsiBase, this::verifyGsiBase, IteratorBuilder::request, WrappedRequest::wrapped); } - private interface IteratorApply { T apply(T base, String sequenceNumber, InitialPositionInStreamExtended initialPositionInStreamExtended); } - private void latestTest(Supplier supplier, Consumer baseVerifier, IteratorApply iteratorRequest, + private void latestTest( + Supplier supplier, + Consumer baseVerifier, + IteratorApply iteratorRequest, Function> toRequest) { String sequenceNumber = SentinelCheckpoint.LATEST.name(); - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.LATEST); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, sequenceNumber, initialPosition, - ShardIteratorType.LATEST, null, null); + InitialPositionInStreamExtended 
initialPosition = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); + updateTest( + supplier, + baseVerifier, + iteratorRequest, + toRequest, + sequenceNumber, + initialPosition, + ShardIteratorType.LATEST, + null, + null); } - private void trimHorizonTest(Supplier supplier, Consumer baseVerifier, - IteratorApply iteratorRequest, Function> toRequest) { + private void trimHorizonTest( + Supplier supplier, + Consumer baseVerifier, + IteratorApply iteratorRequest, + Function> toRequest) { String sequenceNumber = SentinelCheckpoint.TRIM_HORIZON.name(); - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, sequenceNumber, initialPosition, - ShardIteratorType.TRIM_HORIZON, null, null); + InitialPositionInStreamExtended initialPosition = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + updateTest( + supplier, + baseVerifier, + iteratorRequest, + toRequest, + sequenceNumber, + initialPosition, + ShardIteratorType.TRIM_HORIZON, + null, + null); } - private void sequenceNumber(Supplier supplier, Consumer baseVerifier, IteratorApply iteratorRequest, + private void sequenceNumber( + Supplier supplier, + Consumer baseVerifier, + IteratorApply iteratorRequest, Function> toRequest) { sequenceNumber(supplier, baseVerifier, iteratorRequest, toRequest, ShardIteratorType.AT_SEQUENCE_NUMBER); } - private void sequenceNumber(Supplier supplier, Consumer baseVerifier, IteratorApply iteratorRequest, - Function> toRequest, ShardIteratorType shardIteratorType) { - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, SEQUENCE_NUMBER, initialPosition, - shardIteratorType, "1234", null); + private void 
sequenceNumber( + Supplier supplier, + Consumer baseVerifier, + IteratorApply iteratorRequest, + Function> toRequest, + ShardIteratorType shardIteratorType) { + InitialPositionInStreamExtended initialPosition = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + updateTest( + supplier, + baseVerifier, + iteratorRequest, + toRequest, + SEQUENCE_NUMBER, + initialPosition, + shardIteratorType, + "1234", + null); } - private void timeStampTest(Supplier supplier, Consumer baseVerifier, IteratorApply iteratorRequest, + private void timeStampTest( + Supplier supplier, + Consumer baseVerifier, + IteratorApply iteratorRequest, Function> toRequest) { String sequenceNumber = SentinelCheckpoint.AT_TIMESTAMP.name(); - InitialPositionInStreamExtended initialPosition = InitialPositionInStreamExtended - .newInitialPositionAtTimestamp(new Date(TIMESTAMP.toEpochMilli())); - updateTest(supplier, baseVerifier, iteratorRequest, toRequest, sequenceNumber, initialPosition, - ShardIteratorType.AT_TIMESTAMP, null, TIMESTAMP); + InitialPositionInStreamExtended initialPosition = + InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(TIMESTAMP.toEpochMilli())); + updateTest( + supplier, + baseVerifier, + iteratorRequest, + toRequest, + sequenceNumber, + initialPosition, + ShardIteratorType.AT_TIMESTAMP, + null, + TIMESTAMP); } - private void updateTest(Supplier supplier, Consumer baseVerifier, IteratorApply iteratorRequest, - Function> toRequest, String sequenceNumber, - InitialPositionInStreamExtended initialPositionInStream, ShardIteratorType expectedShardIteratorType, - String expectedSequenceNumber, Instant expectedTimestamp) { + private void updateTest( + Supplier supplier, + Consumer baseVerifier, + IteratorApply iteratorRequest, + Function> toRequest, + String sequenceNumber, + InitialPositionInStreamExtended initialPositionInStream, + ShardIteratorType expectedShardIteratorType, + String expectedSequenceNumber, + Instant 
expectedTimestamp) { T base = supplier.get(); T updated = iteratorRequest.apply(base, sequenceNumber, initialPositionInStream); WrappedRequest request = toRequest.apply(updated); @@ -134,7 +194,6 @@ public class IteratorBuilderTest { assertThat(request.shardIteratorType(), equalTo(expectedShardIteratorType)); assertThat(request.sequenceNumber(), equalTo(expectedSequenceNumber)); assertThat(request.timestamp(), equalTo(expectedTimestamp)); - } private interface WrappedRequest { @@ -214,5 +273,4 @@ public class IteratorBuilderTest { private GetShardIteratorRequest.Builder gsiBase() { return GetShardIteratorRequest.builder().shardId(SHARD_ID).streamName(STREAM_NAME); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/RetrievalConfigTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/RetrievalConfigTest.java index 0f8273b8..31ede003 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/RetrievalConfigTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/RetrievalConfigTest.java @@ -3,15 +3,6 @@ package software.amazon.kinesis.retrieval; import java.util.Arrays; import java.util.Optional; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -import static software.amazon.kinesis.common.InitialPositionInStream.LATEST; -import static software.amazon.kinesis.common.InitialPositionInStream.TRIM_HORIZON; - import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -27,6 +18,15 @@ import software.amazon.kinesis.processor.MultiStreamTracker; import software.amazon.kinesis.processor.SingleStreamTracker; import software.amazon.kinesis.processor.StreamTracker; +import static org.junit.Assert.assertEquals; +import static 
org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static software.amazon.kinesis.common.InitialPositionInStream.LATEST; +import static software.amazon.kinesis.common.InitialPositionInStream.TRIM_HORIZON; + @RunWith(MockitoJUnitRunner.class) public class RetrievalConfigTest { @@ -54,7 +54,13 @@ public class RetrievalConfigTest { createConfig(streamArn), createConfig(new SingleStreamTracker(streamArn)))) { assertEquals(Optional.empty(), rc.appStreamTracker().left()); - assertEquals(streamName, rc.streamTracker().streamConfigList().get(0).streamIdentifier().streamName()); + assertEquals( + streamName, + rc.streamTracker() + .streamConfigList() + .get(0) + .streamIdentifier() + .streamName()); assertEquals(1, rc.streamTracker().streamConfigList().size()); assertFalse(rc.streamTracker().isMultiStream()); } @@ -65,7 +71,9 @@ public class RetrievalConfigTest { final StreamTracker mockMultiStreamTracker = mock(MultiStreamTracker.class); final RetrievalConfig configByMultiTracker = createConfig(mockMultiStreamTracker); assertEquals(Optional.empty(), configByMultiTracker.appStreamTracker().right()); - assertEquals(mockMultiStreamTracker, configByMultiTracker.appStreamTracker().left().get()); + assertEquals( + mockMultiStreamTracker, + configByMultiTracker.appStreamTracker().left().get()); assertEquals(mockMultiStreamTracker, configByMultiTracker.streamTracker()); } @@ -76,8 +84,7 @@ public class RetrievalConfigTest { for (final StreamConfig sc : config.streamTracker().streamConfigList()) { assertEquals(LATEST, sc.initialPositionInStreamExtended().getInitialPositionInStream()); } - config.initialPositionInStreamExtended( - InitialPositionInStreamExtended.newInitialPosition(TRIM_HORIZON)); + config.initialPositionInStreamExtended(InitialPositionInStreamExtended.newInitialPosition(TRIM_HORIZON)); for (final StreamConfig sc 
: config.streamTracker().streamConfigList()) { assertEquals(TRIM_HORIZON, sc.initialPositionInStreamExtended().getInitialPositionInStream()); } @@ -85,8 +92,8 @@ public class RetrievalConfigTest { @Test(expected = IllegalArgumentException.class) public void testUpdateInitialPositionInMultiStream() { - createConfig(mockMultiStreamTracker).initialPositionInStreamExtended( - InitialPositionInStreamExtended.newInitialPosition(TRIM_HORIZON)); + createConfig(mockMultiStreamTracker) + .initialPositionInStreamExtended(InitialPositionInStreamExtended.newInitialPosition(TRIM_HORIZON)); } /** @@ -133,5 +140,4 @@ public class RetrievalConfigTest { .resource("stream/" + streamName) .build(); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/ThrottlingReporterTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/ThrottlingReporterTest.java index f13f0ad0..29162805 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/ThrottlingReporterTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/ThrottlingReporterTest.java @@ -14,17 +14,17 @@ */ package software.amazon.kinesis.retrieval; -import static org.mockito.Matchers.anyString; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import org.slf4j.Logger; +import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + @RunWith(MockitoJUnitRunner.class) public class ThrottlingReporterTest { @@ -74,5 +74,4 @@ public class ThrottlingReporterTest { return throttleLog; } } - -} \ No newline at end of file +} diff --git 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConfigTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConfigTest.java index c5727e20..6684a22a 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConfigTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConfigTest.java @@ -15,6 +15,22 @@ package software.amazon.kinesis.retrieval.fanout; +import java.util.Optional; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.runners.MockitoJUnitRunner; +import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; +import software.amazon.kinesis.common.StreamConfig; +import software.amazon.kinesis.common.StreamIdentifier; +import software.amazon.kinesis.leases.ShardInfo; +import software.amazon.kinesis.leases.exceptions.DependencyException; +import software.amazon.kinesis.metrics.MetricsFactory; +import software.amazon.kinesis.retrieval.RetrievalFactory; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; @@ -28,23 +44,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyZeroInteractions; import static org.mockito.Mockito.when; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.kinesis.common.StreamConfig; -import software.amazon.kinesis.common.StreamIdentifier; -import software.amazon.kinesis.leases.ShardInfo; -import software.amazon.kinesis.leases.exceptions.DependencyException; -import software.amazon.kinesis.metrics.MetricsFactory; -import 
software.amazon.kinesis.retrieval.RetrievalFactory; - -import java.util.Optional; - @RunWith(MockitoJUnitRunner.class) public class FanOutConfigTest { @@ -55,10 +54,13 @@ public class FanOutConfigTest { @Mock private FanOutConsumerRegistration consumerRegistration; + @Mock private KinesisAsyncClient kinesisClient; + @Mock private StreamConfig streamConfig; + @Mock private StreamIdentifier streamIdentifier; @@ -70,7 +72,8 @@ public class FanOutConfigTest { // DRY: set the most commonly-used parameters .applicationName(TEST_APPLICATION_NAME) .streamName(TEST_STREAM_NAME); - doReturn(consumerRegistration).when(config) + doReturn(consumerRegistration) + .when(config) .createConsumerRegistration(eq(kinesisClient), anyString(), anyString()); when(streamConfig.streamIdentifier()).thenReturn(streamIdentifier); when(streamIdentifier.streamName()).thenReturn(TEST_STREAM_NAME); @@ -80,7 +83,8 @@ public class FanOutConfigTest { public void testNoRegisterIfConsumerArnSet() { config.consumerArn(TEST_CONSUMER_ARN) // unset common parameters - .applicationName(null).streamName(null); + .applicationName(null) + .streamName(null); RetrievalFactory retrievalFactory = config.retrievalFactory(); @@ -209,5 +213,4 @@ public class FanOutConfigTest { final RetrievalFactory factory = config.retrievalFactory(); factory.createGetRecordsCache(shardInfo, streamConfig, mock(MetricsFactory.class)); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistrationTest.java index fca6799d..e9d11b0a 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutConsumerRegistrationTest.java @@ -15,15 +15,6 @@ package 
software.amazon.kinesis.retrieval.fanout; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.concurrent.CompletableFuture; import org.apache.commons.lang3.StringUtils; @@ -32,7 +23,6 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.model.Consumer; import software.amazon.awssdk.services.kinesis.model.ConsumerDescription; @@ -49,6 +39,15 @@ import software.amazon.awssdk.services.kinesis.model.StreamDescriptionSummary; import software.amazon.awssdk.services.kinesis.model.StreamStatus; import software.amazon.kinesis.leases.exceptions.DependencyException; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.junit.Assert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + /** * */ @@ -70,19 +69,21 @@ public class FanOutConsumerRegistrationTest { @Before public void setup() { - consumerRegistration = new FanOutConsumerRegistration(client, STREAM_NAME, CONSUMER_NAME, MAX_DSS_RETRIES, - MAX_DSC_RETRIES, RSC_RETRIES, BACKOFF_MILLIS); + consumerRegistration = new FanOutConsumerRegistration( + client, STREAM_NAME, CONSUMER_NAME, MAX_DSS_RETRIES, MAX_DSC_RETRIES, RSC_RETRIES, BACKOFF_MILLIS); } @Test public void testConsumerAlreadyExists() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - 
.completedFuture(createDescribeStreamSummaryResponse()); - final CompletableFuture dscFuture = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dscFuture = + CompletableFuture.completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); + when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(dscFuture); final String consumerArn = consumerRegistration.getOrCreateStreamConsumerArn(); @@ -93,13 +94,15 @@ public class FanOutConsumerRegistrationTest { @Test public void testConsumerAlreadyExistsMultipleCalls() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); - final CompletableFuture dscFuture = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dscFuture = + CompletableFuture.completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); + when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(dscFuture); 
final String firstCall = consumerRegistration.getOrCreateStreamConsumerArn(); @@ -113,27 +116,28 @@ public class FanOutConsumerRegistrationTest { @Test(expected = LimitExceededException.class) public void testDescribeStreamConsumerThrottled() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); final CompletableFuture dscFuture = CompletableFuture.supplyAsync(() -> { throw LimitExceededException.builder().build(); }); - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); + when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(dscFuture); try { consumerRegistration.getOrCreateStreamConsumerArn(); } finally { - verify(client, times(MAX_DSC_RETRIES)) - .describeStreamConsumer(any(DescribeStreamConsumerRequest.class)); + verify(client, times(MAX_DSC_RETRIES)).describeStreamConsumer(any(DescribeStreamConsumerRequest.class)); } } @Test(expected = DependencyException.class) public void testRegisterStreamConsumerThrottled() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); final CompletableFuture dscFuture = CompletableFuture.supplyAsync(() -> { throw ResourceNotFoundException.builder().build(); }); @@ -141,36 +145,42 @@ public class FanOutConsumerRegistrationTest { throw LimitExceededException.builder().build(); }); - 
when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); - when(client.registerStreamConsumer(any(RegisterStreamConsumerRequest.class))).thenReturn(rscFuture); + when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(dscFuture); + when(client.registerStreamConsumer(any(RegisterStreamConsumerRequest.class))) + .thenReturn(rscFuture); try { consumerRegistration.getOrCreateStreamConsumerArn(); } finally { - verify(client, times(RSC_RETRIES)) - .registerStreamConsumer(any(RegisterStreamConsumerRequest.class)); + verify(client, times(RSC_RETRIES)).registerStreamConsumer(any(RegisterStreamConsumerRequest.class)); } } @Test public void testNewRegisterStreamConsumer() throws Exception { - final CompletableFuture dssFuture = CompletableFuture - .completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); final CompletableFuture failureResponse = CompletableFuture.supplyAsync(() -> { throw ResourceNotFoundException.builder().build(); }); - final CompletableFuture intermidateResponse = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.CREATING)); - final CompletableFuture successResponse = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); - final CompletableFuture rscFuture = CompletableFuture - .completedFuture(createRegisterStreamConsumerResponse()); + final CompletableFuture intermidateResponse = + CompletableFuture.completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.CREATING)); + final CompletableFuture successResponse = + 
CompletableFuture.completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.ACTIVE)); + final CompletableFuture rscFuture = + CompletableFuture.completedFuture(createRegisterStreamConsumerResponse()); - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(failureResponse) - .thenReturn(intermidateResponse).thenReturn(successResponse); - when(client.registerStreamConsumer(any(RegisterStreamConsumerRequest.class))).thenReturn(rscFuture); + when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(failureResponse) + .thenReturn(intermidateResponse) + .thenReturn(successResponse); + when(client.registerStreamConsumer(any(RegisterStreamConsumerRequest.class))) + .thenReturn(rscFuture); final long startTime = System.currentTimeMillis(); final String consumerArn = consumerRegistration.getOrCreateStreamConsumerArn(); @@ -184,23 +194,23 @@ public class FanOutConsumerRegistrationTest { @Test(expected = IllegalStateException.class) public void testStreamConsumerStuckInCreating() throws Exception { - final CompletableFuture dssFuture = CompletableFuture.completedFuture( - createDescribeStreamSummaryResponse()); - final CompletableFuture dscFuture = CompletableFuture - .completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.CREATING)); + final CompletableFuture dssFuture = + CompletableFuture.completedFuture(createDescribeStreamSummaryResponse()); + final CompletableFuture dscFuture = + CompletableFuture.completedFuture(createDescribeStreamConsumerResponse(ConsumerStatus.CREATING)); - when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))).thenReturn(dssFuture); - when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))).thenReturn(dscFuture); + 
when(client.describeStreamSummary(any(DescribeStreamSummaryRequest.class))) + .thenReturn(dssFuture); + when(client.describeStreamConsumer(any(DescribeStreamConsumerRequest.class))) + .thenReturn(dscFuture); try { consumerRegistration.getOrCreateStreamConsumerArn(); } finally { // Verify that the call to DSC was made for the max retry attempts and one for the initial response object. - verify(client, times(MAX_DSC_RETRIES + 1)) - .describeStreamConsumer(any(DescribeStreamConsumerRequest.class)); + verify(client, times(MAX_DSC_RETRIES + 1)).describeStreamConsumer(any(DescribeStreamConsumerRequest.class)); verify(client, never()).registerStreamConsumer(any(RegisterStreamConsumerRequest.class)); } - } private DescribeStreamSummaryRequest createDescribeStreamSummaryRequest() { @@ -208,29 +218,49 @@ public class FanOutConsumerRegistrationTest { } private DescribeStreamSummaryResponse createDescribeStreamSummaryResponse() { - return DescribeStreamSummaryResponse.builder().streamDescriptionSummary(StreamDescriptionSummary.builder() - .streamName(STREAM_NAME).streamARN(STREAM_ARN).streamStatus(StreamStatus.ACTIVE).build()).build(); + return DescribeStreamSummaryResponse.builder() + .streamDescriptionSummary(StreamDescriptionSummary.builder() + .streamName(STREAM_NAME) + .streamARN(STREAM_ARN) + .streamStatus(StreamStatus.ACTIVE) + .build()) + .build(); } private DescribeStreamConsumerRequest createDescribeStreamConsumerRequest(final String consumerArn) { if (StringUtils.isEmpty(consumerArn)) { - return DescribeStreamConsumerRequest.builder().streamARN(STREAM_ARN).consumerName(CONSUMER_NAME).build(); + return DescribeStreamConsumerRequest.builder() + .streamARN(STREAM_ARN) + .consumerName(CONSUMER_NAME) + .build(); } return DescribeStreamConsumerRequest.builder().consumerARN(consumerArn).build(); } private DescribeStreamConsumerResponse createDescribeStreamConsumerResponse(final ConsumerStatus status) { - return 
DescribeStreamConsumerResponse.builder().consumerDescription(ConsumerDescription.builder() - .consumerStatus(status).consumerARN(CONSUMER_ARN).consumerName(CONSUMER_NAME).build()).build(); + return DescribeStreamConsumerResponse.builder() + .consumerDescription(ConsumerDescription.builder() + .consumerStatus(status) + .consumerARN(CONSUMER_ARN) + .consumerName(CONSUMER_NAME) + .build()) + .build(); } private RegisterStreamConsumerRequest createRegisterStreamConsumerRequest() { - return RegisterStreamConsumerRequest.builder().streamARN(STREAM_ARN).consumerName(CONSUMER_NAME).build(); + return RegisterStreamConsumerRequest.builder() + .streamARN(STREAM_ARN) + .consumerName(CONSUMER_NAME) + .build(); } private RegisterStreamConsumerResponse createRegisterStreamConsumerResponse() { - return RegisterStreamConsumerResponse.builder().consumer(Consumer.builder().consumerName(CONSUMER_NAME) - .consumerARN(CONSUMER_ARN).consumerStatus(ConsumerStatus.CREATING).build()).build(); + return RegisterStreamConsumerResponse.builder() + .consumer(Consumer.builder() + .consumerName(CONSUMER_NAME) + .consumerARN(CONSUMER_ARN) + .consumerStatus(ConsumerStatus.CREATING) + .build()) + .build(); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisherTest.java index fc242fed..cf135159 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisherTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/fanout/FanOutRecordsPublisherTest.java @@ -1,5 +1,27 @@ package software.amazon.kinesis.retrieval.fanout; +import java.nio.ByteBuffer; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.BlockingQueue; 
+import java.util.concurrent.CompletionException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + import com.google.common.util.concurrent.ThreadFactoryBuilder; import io.netty.handler.timeout.ReadTimeoutException; import io.reactivex.rxjava3.core.Flowable; @@ -45,28 +67,6 @@ import software.amazon.kinesis.retrieval.RetryableRetrievalException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; import software.amazon.kinesis.utils.SubscribeToShardRequestMatcher; -import java.nio.ByteBuffer; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CompletionException; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.Stream; - import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; @@ -93,10 +93,13 @@ public class FanOutRecordsPublisherTest { @Mock private KinesisAsyncClient 
kinesisClient; + @Mock private SdkPublisher publisher; + @Mock private Subscription subscription; + @Mock private Subscriber subscriber; @@ -106,55 +109,62 @@ public class FanOutRecordsPublisherTest { public void testSimple() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); - source.subscribe(new ShardConsumerNotifyingSubscriber(new Subscriber() { - Subscription subscription; + source.subscribe(new ShardConsumerNotifyingSubscriber( + new Subscriber() { + Subscription subscription; - @Override public void onSubscribe(Subscription s) { - subscription = s; - subscription.request(1); - } + @Override + public void onSubscribe(Subscription s) { + subscription = s; + subscription.request(1); + } - @Override public void onNext(RecordsRetrieved input) { - receivedInput.add(input.processRecordsInput()); - subscription.request(1); - } + @Override + public void onNext(RecordsRetrieved input) { + receivedInput.add(input.processRecordsInput()); + subscription.request(1); + } - @Override public void onError(Throwable t) { - log.error("Caught throwable in subscriber", t); - fail("Caught throwable in subscriber"); - } + @Override + public void onError(Throwable t) { + log.error("Caught throwable in subscriber", t); + fail("Caught throwable in subscriber"); + } - 
@Override public void onComplete() { - fail("OnComplete called when not expected"); - } - }, source)); + @Override + public void onComplete() { + fail("OnComplete called when not expected"); + } + }, + source)); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); batchEvent = SubscribeToShardEvent.builder() - .millisBehindLatest(100L) - .records(records) - .continuationSequenceNumber("test") - .childShards(Collections.emptyList()) - .build(); + .millisBehindLatest(100L) + .records(records) + .continuationSequenceNumber("test") + .childShards(Collections.emptyList()) + .build(); captor.getValue().onNext(batchEvent); captor.getValue().onNext(batchEvent); @@ -175,53 +185,66 @@ public class FanOutRecordsPublisherTest { public void testInvalidEvent() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, - InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); + source.start( + ExtendedSequenceNumber.LATEST, + 
InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); - source.subscribe(new ShardConsumerNotifyingSubscriber(new Subscriber() { - Subscription subscription; + source.subscribe(new ShardConsumerNotifyingSubscriber( + new Subscriber() { + Subscription subscription; - @Override public void onSubscribe(Subscription s) { - subscription = s; - subscription.request(1); - } + @Override + public void onSubscribe(Subscription s) { + subscription = s; + subscription.request(1); + } - @Override public void onNext(RecordsRetrieved input) { - receivedInput.add(input.processRecordsInput()); - subscription.request(1); - } + @Override + public void onNext(RecordsRetrieved input) { + receivedInput.add(input.processRecordsInput()); + subscription.request(1); + } - @Override public void onError(Throwable t) { - log.error("Caught throwable in subscriber", t); - fail("Caught throwable in subscriber"); - } + @Override + public void onError(Throwable t) { + log.error("Caught throwable in subscriber", t); + fail("Caught throwable in subscriber"); + } - @Override public void onComplete() { - fail("OnComplete called when not expected"); - } - }, source)); + @Override + public void onComplete() { + fail("OnComplete called when not expected"); + } + }, + source)); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); - batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records) - .continuationSequenceNumber(CONTINUATION_SEQUENCE_NUMBER).build(); - SubscribeToShardEvent 
invalidEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L) - .records(records).childShards(Collections.emptyList()).build(); + batchEvent = SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .records(records) + .continuationSequenceNumber(CONTINUATION_SEQUENCE_NUMBER) + .build(); + SubscribeToShardEvent invalidEvent = SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .records(records) + .childShards(Collections.emptyList()) + .build(); captor.getValue().onNext(batchEvent); captor.getValue().onNext(invalidEvent); @@ -243,14 +266,15 @@ public class FanOutRecordsPublisherTest { public void testIfAllEventsReceivedWhenNoTasksRejectedByExecutor() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -259,30 +283,37 @@ public class FanOutRecordsPublisherTest { new Subscriber() { Subscription subscription; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); subscription.request(1); } - @Override public void onError(Throwable t) { + @Override + public void 
onError(Throwable t) { log.error("Caught throwable in subscriber", t); fail("Caught throwable in subscriber"); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); Scheduler testScheduler = getScheduler(getBlockingExecutor(getSpiedExecutor(getTestExecutor()))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); @@ -290,17 +321,16 @@ public class FanOutRecordsPublisherTest { captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); Stream.of("1000", "2000", "3000") - .map(contSeqNum -> - SubscribeToShardEvent.builder() - .millisBehindLatest(100L) - .continuationSequenceNumber(contSeqNum) - .records(records) - .childShards(Collections.emptyList()) - .build()) + .map(contSeqNum -> SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .continuationSequenceNumber(contSeqNum) + .records(records) + .childShards(Collections.emptyList()) + .build()) .forEach(batchEvent -> captor.getValue().onNext(batchEvent)); verify(subscription, times(4)).request(1); @@ -320,14 +350,15 @@ public class FanOutRecordsPublisherTest { public void testIfEventsAreNotDeliveredToShardConsumerWhenPreviousEventDeliveryTaskGetsRejected() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - 
.forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -336,30 +367,37 @@ public class FanOutRecordsPublisherTest { new Subscriber() { Subscription subscription; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); subscription.request(1); } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); fail("Caught throwable in subscriber"); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); Scheduler testScheduler = getScheduler(getOverwhelmedBlockingExecutor(getSpiedExecutor(getTestExecutor()))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(new SafeSubscriber<>(shardConsumerSubscriber)); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); @@ -367,17 +405,16 @@ public class FanOutRecordsPublisherTest { 
captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); Stream.of("1000", "2000", "3000") - .map(contSeqNum -> - SubscribeToShardEvent.builder() - .millisBehindLatest(100L) - .continuationSequenceNumber(contSeqNum) - .records(records) - .childShards(Collections.emptyList()) - .build()) + .map(contSeqNum -> SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .continuationSequenceNumber(contSeqNum) + .records(records) + .childShards(Collections.emptyList()) + .build()) .forEach(batchEvent -> captor.getValue().onNext(batchEvent)); verify(subscription, times(2)).request(1); @@ -397,15 +434,15 @@ public class FanOutRecordsPublisherTest { public void testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisher() throws Exception { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - Consumer servicePublisherAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(contSeqNum + "") .records(records) @@ -415,13 +452,16 @@ public class 
FanOutRecordsPublisherTest { CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(2); int totalServicePublisherEvents = 1000; int initialDemand = 0; - BackpressureAdheringServicePublisher servicePublisher = - new BackpressureAdheringServicePublisher(servicePublisherAction, totalServicePublisherEvents, - servicePublisherTaskCompletionLatch, initialDemand); + BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, + initialDemand); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -431,15 +471,18 @@ public class FanOutRecordsPublisherTest { private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); - assertEquals("" + ++lastSeenSeqNum, + assertEquals( + "" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber()); subscription.request(1); servicePublisher.request(1); @@ -448,29 +491,34 @@ public class FanOutRecordsPublisherTest { } } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); fail("Caught throwable in subscriber"); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); ExecutorService 
executorService = getTestExecutor(); Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); executorService.submit(servicePublisher); servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS); @@ -488,27 +536,31 @@ public class FanOutRecordsPublisherTest { } @Test - public void testIfStreamOfEventsAndOnCompleteAreDeliveredInOrderWithBackpressureAdheringServicePublisher() throws Exception { + public void testIfStreamOfEventsAndOnCompleteAreDeliveredInOrderWithBackpressureAdheringServicePublisher() + throws Exception { CountDownLatch onS2SCallLatch = new CountDownLatch(2); doAnswer(new Answer() { - @Override public Object answer(InvocationOnMock invocation) throws Throwable { - onS2SCallLatch.countDown(); - return null; - } - }).when(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), any()); + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + onS2SCallLatch.countDown(); + return null; + } + }) + .when(kinesisClient) + .subscribeToShard(any(SubscribeToShardRequest.class), any()); FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor 
flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - Consumer servicePublisherAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(contSeqNum + "") .records(records) @@ -520,13 +572,17 @@ public class FanOutRecordsPublisherTest { int initialDemand = 9; int triggerCompleteAtNthEvent = 200; BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( - servicePublisherAction, totalServicePublisherEvents, servicePublisherTaskCompletionLatch, + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, initialDemand); - servicePublisher.setCompleteTrigger(triggerCompleteAtNthEvent, () -> flowCaptor.getValue().complete()); + servicePublisher.setCompleteTrigger( + triggerCompleteAtNthEvent, () -> flowCaptor.getValue().complete()); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -536,15 +592,18 @@ public class FanOutRecordsPublisherTest { private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void 
onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); - assertEquals("" + ++lastSeenSeqNum, + assertEquals( + "" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber()); subscription.request(1); servicePublisher.request(1); @@ -553,21 +612,26 @@ public class FanOutRecordsPublisherTest { } } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); fail("Caught throwable in subscriber"); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); ExecutorService executorService = getTestExecutor(); Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient, times(1)).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); @@ -575,8 +639,8 @@ public class FanOutRecordsPublisherTest { flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); executorService.submit(servicePublisher); servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS); @@ -598,18 +662,19 @@ public class FanOutRecordsPublisherTest { } @Test - public void testIfShardEndEventAndOnCompleteAreDeliveredInOrderWithBackpressureAdheringServicePublisher() throws Exception { + public void 
testIfShardEndEventAndOnCompleteAreDeliveredInOrderWithBackpressureAdheringServicePublisher() + throws Exception { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - Consumer servicePublisherAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(contSeqNum + "") .records(records) @@ -620,19 +685,19 @@ public class FanOutRecordsPublisherTest { List parentShards = new ArrayList<>(); parentShards.add(SHARD_ID); ChildShard leftChild = ChildShard.builder() - .shardId("Shard-002") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) - .build(); + .shardId("Shard-002") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) + .build(); ChildShard rightChild = ChildShard.builder() - .shardId("Shard-003") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) - .build(); + .shardId("Shard-003") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) + .build(); childShards.add(leftChild); childShards.add(rightChild); - Consumer servicePublisherShardEndAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherShardEndAction = contSeqNum -> captor.getValue() + 
.onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(null) .records(records) @@ -645,34 +710,38 @@ public class FanOutRecordsPublisherTest { int initialDemand = 9; int triggerCompleteAtNthEvent = 200; BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( - servicePublisherAction, totalServicePublisherEvents, servicePublisherTaskCompletionLatch, + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, initialDemand); - servicePublisher - .setShardEndAndCompleteTrigger(triggerCompleteAtNthEvent, () -> flowCaptor.getValue().complete(), - servicePublisherShardEndAction); + servicePublisher.setShardEndAndCompleteTrigger( + triggerCompleteAtNthEvent, () -> flowCaptor.getValue().complete(), servicePublisherShardEndAction); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); - final boolean[] isOnCompleteTriggered = { false }; + final boolean[] isOnCompleteTriggered = {false}; Subscriber shardConsumerSubscriber = new ShardConsumerNotifyingSubscriber( new Subscriber() { private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); subscription.request(1); servicePublisher.request(1); @@ -681,22 +750,27 @@ public class FanOutRecordsPublisherTest { } } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in 
subscriber", t); fail("Caught throwable in subscriber"); } - @Override public void onComplete() { + @Override + public void onComplete() { isOnCompleteTriggered[0] = true; onCompleteLatch.countDown(); } - }, source); + }, + source); ExecutorService executorService = getTestExecutor(); Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient, times(1)).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); @@ -704,8 +778,8 @@ public class FanOutRecordsPublisherTest { flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); executorService.submit(servicePublisher); servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS); @@ -726,18 +800,19 @@ public class FanOutRecordsPublisherTest { } @Test - public void testIfStreamOfEventsAndOnErrorAreDeliveredInOrderWithBackpressureAdheringServicePublisher() throws Exception { + public void testIfStreamOfEventsAndOnErrorAreDeliveredInOrderWithBackpressureAdheringServicePublisher() + throws Exception { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor 
flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - Consumer servicePublisherAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(contSeqNum + "") .records(records) @@ -750,17 +825,21 @@ public class FanOutRecordsPublisherTest { int initialDemand = 9; int triggerErrorAtNthEvent = 241; BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( - servicePublisherAction, totalServicePublisherEvents, servicePublisherTaskCompletionLatch, + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, initialDemand); - servicePublisher.setErrorTrigger(triggerErrorAtNthEvent, + servicePublisher.setErrorTrigger( + triggerErrorAtNthEvent, () -> flowCaptor.getValue().exceptionOccurred(new RuntimeException("Service Exception"))); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); - final boolean[] isOnErrorThrown = { false }; + final boolean[] isOnErrorThrown = {false}; List receivedInput = new ArrayList<>(); @@ -769,15 +848,18 @@ public class FanOutRecordsPublisherTest { private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); - assertEquals("" + ++lastSeenSeqNum, + 
assertEquals( + "" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber()); subscription.request(1); servicePublisher.request(1); @@ -786,30 +868,35 @@ public class FanOutRecordsPublisherTest { } } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); isOnErrorThrown[0] = true; onErrorReceiveLatch.countDown(); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); ExecutorService executorService = getTestExecutor(); Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); executorService.submit(servicePublisher); servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS); @@ -829,18 +916,20 @@ public class FanOutRecordsPublisherTest { } @Test - public void testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstWithinLimit() throws Exception { + public void + testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstWithinLimit() + throws Exception { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, 
CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - Consumer servicePublisherAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(contSeqNum + "") .records(records) @@ -850,13 +939,16 @@ public class FanOutRecordsPublisherTest { CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(2); int totalServicePublisherEvents = 1000; int initialDemand = 9; - BackpressureAdheringServicePublisher servicePublisher = - new BackpressureAdheringServicePublisher(servicePublisherAction, totalServicePublisherEvents, - servicePublisherTaskCompletionLatch, initialDemand); + BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, + initialDemand); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -866,15 +958,18 @@ public class FanOutRecordsPublisherTest { private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); 
servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); - assertEquals("" + ++lastSeenSeqNum, + assertEquals( + "" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber()); subscription.request(1); servicePublisher.request(1); @@ -883,29 +978,34 @@ public class FanOutRecordsPublisherTest { } } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); fail("Caught throwable in subscriber"); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); ExecutorService executorService = getTestExecutor(); Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); executorService.submit(servicePublisher); servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS); @@ -923,18 +1023,20 @@ public class FanOutRecordsPublisherTest { } @Test - public void testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstOverLimit() throws Exception { + public void + 
testIfStreamOfEventsAreDeliveredInOrderWithBackpressureAdheringServicePublisherHavingInitialBurstOverLimit() + throws Exception { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - Consumer servicePublisherAction = contSeqNum -> captor.getValue().onNext( - SubscribeToShardEvent.builder() + Consumer servicePublisherAction = contSeqNum -> captor.getValue() + .onNext(SubscribeToShardEvent.builder() .millisBehindLatest(100L) .continuationSequenceNumber(contSeqNum + "") .records(records) @@ -944,13 +1046,16 @@ public class FanOutRecordsPublisherTest { CountDownLatch servicePublisherTaskCompletionLatch = new CountDownLatch(1); int totalServicePublisherEvents = 1000; int initialDemand = 11; - BackpressureAdheringServicePublisher servicePublisher = - new BackpressureAdheringServicePublisher(servicePublisherAction, totalServicePublisherEvents, - servicePublisherTaskCompletionLatch, initialDemand); + BackpressureAdheringServicePublisher servicePublisher = new BackpressureAdheringServicePublisher( + servicePublisherAction, + totalServicePublisherEvents, + servicePublisherTaskCompletionLatch, + initialDemand); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); @@ -961,44 +1066,52 @@ public class FanOutRecordsPublisherTest { 
private Subscription subscription; private int lastSeenSeqNum = 0; - @Override public void onSubscribe(Subscription s) { + @Override + public void onSubscribe(Subscription s) { subscription = s; subscription.request(1); servicePublisher.request(1); } - @Override public void onNext(RecordsRetrieved input) { + @Override + public void onNext(RecordsRetrieved input) { receivedInput.add(input.processRecordsInput()); - assertEquals("" + ++lastSeenSeqNum, + assertEquals( + "" + ++lastSeenSeqNum, ((FanOutRecordsPublisher.FanoutRecordsRetrieved) input).continuationSequenceNumber()); subscription.request(1); servicePublisher.request(1); } - @Override public void onError(Throwable t) { + @Override + public void onError(Throwable t) { log.error("Caught throwable in subscriber", t); onErrorSet.set(true); servicePublisherTaskCompletionLatch.countDown(); } - @Override public void onComplete() { + @Override + public void onComplete() { fail("OnComplete called when not expected"); } - }, source); + }, + source); ExecutorService executorService = getTestExecutor(); Scheduler testScheduler = getScheduler(getInitiallyBlockingExecutor(getSpiedExecutor(executorService))); int bufferSize = 8; - Flowable.fromPublisher(source).subscribeOn(testScheduler).observeOn(testScheduler, true, bufferSize) + Flowable.fromPublisher(source) + .subscribeOn(testScheduler) + .observeOn(testScheduler, true, bufferSize) .subscribe(shardConsumerSubscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); executorService.submit(servicePublisher); servicePublisherTaskCompletionLatch.await(5000, TimeUnit.MILLISECONDS); @@ -1018,8 +1131,12 @@ public class 
FanOutRecordsPublisherTest { } private ExecutorService getTestExecutor() { - return Executors.newFixedThreadPool(8, - new ThreadFactoryBuilder().setNameFormat("test-fanout-record-publisher-%04d").setDaemon(true).build()); + return Executors.newFixedThreadPool( + 8, + new ThreadFactoryBuilder() + .setNameFormat("test-fanout-record-publisher-%04d") + .setDaemon(true) + .build()); } private ExecutorService getSpiedExecutor(ExecutorService executorService) { @@ -1027,25 +1144,29 @@ public class FanOutRecordsPublisherTest { } private ExecutorService getBlockingExecutor(ExecutorService executorService) { - doAnswer(invocation -> directlyExecuteRunnable(invocation)).when(executorService).execute(any()); + doAnswer(invocation -> directlyExecuteRunnable(invocation)) + .when(executorService) + .execute(any()); return executorService; } private ExecutorService getInitiallyBlockingExecutor(ExecutorService executorService) { doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .doCallRealMethod() - .when(executorService).execute(any()); + .doAnswer(invocation -> directlyExecuteRunnable(invocation)) + .doCallRealMethod() + .when(executorService) + .execute(any()); return executorService; } private ExecutorService getOverwhelmedBlockingExecutor(ExecutorService executorService) { doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .doThrow(new RejectedExecutionException()) - .doAnswer(invocation -> directlyExecuteRunnable(invocation)) - .when(executorService).execute(any()); + .doAnswer(invocation -> directlyExecuteRunnable(invocation)) + .doAnswer(invocation -> directlyExecuteRunnable(invocation)) + .doThrow(new RejectedExecutionException()) + .doAnswer(invocation -> directlyExecuteRunnable(invocation)) + .when(executorService) + .execute(any()); return executorService; } @@ 
-1060,55 +1181,62 @@ public class FanOutRecordsPublisherTest { public void largeRequestTest() throws Exception { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); List receivedInput = new ArrayList<>(); - source.subscribe(new ShardConsumerNotifyingSubscriber(new Subscriber() { - Subscription subscription; + source.subscribe(new ShardConsumerNotifyingSubscriber( + new Subscriber() { + Subscription subscription; - @Override public void onSubscribe(Subscription s) { - subscription = s; - subscription.request(3); - } + @Override + public void onSubscribe(Subscription s) { + subscription = s; + subscription.request(3); + } - @Override public void onNext(RecordsRetrieved input) { - receivedInput.add(input.processRecordsInput()); - subscription.request(1); - } + @Override + public void onNext(RecordsRetrieved input) { + receivedInput.add(input.processRecordsInput()); + subscription.request(1); + } - @Override public void onError(Throwable t) { - log.error("Caught throwable in subscriber", t); - fail("Caught throwable in subscriber"); - } + @Override + public void onError(Throwable t) { + log.error("Caught throwable in subscriber", t); + fail("Caught throwable in subscriber"); + } - @Override public void onComplete() { - fail("OnComplete called when not expected"); - } - }, source)); + @Override + 
public void onComplete() { + fail("OnComplete called when not expected"); + } + }, + source)); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); batchEvent = SubscribeToShardEvent.builder() - .millisBehindLatest(100L) - .records(records) - .continuationSequenceNumber(CONTINUATION_SEQUENCE_NUMBER) - .childShards(Collections.emptyList()) - .build(); + .millisBehindLatest(100L) + .records(records) + .continuationSequenceNumber(CONTINUATION_SEQUENCE_NUMBER) + .childShards(Collections.emptyList()) + .build(); captor.getValue().onNext(batchEvent); captor.getValue().onNext(batchEvent); @@ -1129,15 +1257,16 @@ public class FanOutRecordsPublisherTest { public void testResourceNotFoundForShard() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); ArgumentCaptor inputCaptor = ArgumentCaptor.forClass(RecordsRetrieved.class); source.subscribe(subscriber); verify(kinesisClient).subscribeToShard(any(SubscribeToShardRequest.class), flowCaptor.capture()); FanOutRecordsPublisher.RecordFlow recordFlow = flowCaptor.getValue(); - recordFlow.exceptionOccurred(new RuntimeException(ResourceNotFoundException.builder().build())); + recordFlow.exceptionOccurred( + new RuntimeException(ResourceNotFoundException.builder().build())); verify(subscriber).onSubscribe(any()); verify(subscriber, never()).onError(any()); @@ -1153,8 
+1282,8 @@ public class FanOutRecordsPublisherTest { public void testReadTimeoutExceptionForShard() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); source.subscribe(subscriber); @@ -1172,52 +1301,67 @@ public class FanOutRecordsPublisherTest { public void testContinuesAfterSequence() { FanOutRecordsPublisher source = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(new ExtendedSequenceNumber("0"), + source.start( + new ExtendedSequenceNumber("0"), InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); NonFailingSubscriber nonFailingSubscriber = new NonFailingSubscriber(); source.subscribe(new ShardConsumerNotifyingSubscriber(nonFailingSubscriber, source)); - SubscribeToShardRequest expected = SubscribeToShardRequest.builder().consumerARN(CONSUMER_ARN).shardId(SHARD_ID) - .startingPosition(StartingPosition.builder().sequenceNumber("0") - .type(ShardIteratorType.AT_SEQUENCE_NUMBER).build()) + SubscribeToShardRequest expected = SubscribeToShardRequest.builder() + .consumerARN(CONSUMER_ARN) + .shardId(SHARD_ID) + .startingPosition(StartingPosition.builder() + .sequenceNumber("0") + .type(ShardIteratorType.AT_SEQUENCE_NUMBER) + .build()) .build(); - verify(kinesisClient).subscribeToShard(argThat(new 
SubscribeToShardRequestMatcher(expected)), flowCaptor.capture()); + verify(kinesisClient) + .subscribeToShard(argThat(new SubscribeToShardRequestMatcher(expected)), flowCaptor.capture()); flowCaptor.getValue().onEventStream(publisher); captor.getValue().onSubscribe(subscription); List records = Stream.of(1, 2, 3).map(this::makeRecord).collect(Collectors.toList()); - List matchers = records.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List matchers = + records.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); - batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(records) - .continuationSequenceNumber("3").childShards(Collections.emptyList()).build(); + batchEvent = SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .records(records) + .continuationSequenceNumber("3") + .childShards(Collections.emptyList()) + .build(); captor.getValue().onNext(batchEvent); captor.getValue().onComplete(); flowCaptor.getValue().complete(); - ArgumentCaptor nextSubscribeCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor nextFlowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor nextSubscribeCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor nextFlowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); - SubscribeToShardRequest nextExpected = SubscribeToShardRequest.builder().consumerARN(CONSUMER_ARN) - .shardId(SHARD_ID).startingPosition(StartingPosition.builder().sequenceNumber("3") - .type(ShardIteratorType.AFTER_SEQUENCE_NUMBER).build()) + SubscribeToShardRequest nextExpected = SubscribeToShardRequest.builder() + .consumerARN(CONSUMER_ARN) + .shardId(SHARD_ID) + .startingPosition(StartingPosition.builder() + .sequenceNumber("3") + .type(ShardIteratorType.AFTER_SEQUENCE_NUMBER) + .build()) .build(); - 
verify(kinesisClient).subscribeToShard(argThat(new SubscribeToShardRequestMatcher(nextExpected)), nextFlowCaptor.capture()); + verify(kinesisClient) + .subscribeToShard(argThat(new SubscribeToShardRequestMatcher(nextExpected)), nextFlowCaptor.capture()); reset(publisher); doNothing().when(publisher).subscribe(nextSubscribeCaptor.capture()); @@ -1225,11 +1369,15 @@ public class FanOutRecordsPublisherTest { nextSubscribeCaptor.getValue().onSubscribe(subscription); List nextRecords = Stream.of(4, 5, 6).map(this::makeRecord).collect(Collectors.toList()); - List nextMatchers = nextRecords.stream().map(KinesisClientRecordMatcher::new) - .collect(Collectors.toList()); + List nextMatchers = + nextRecords.stream().map(KinesisClientRecordMatcher::new).collect(Collectors.toList()); - batchEvent = SubscribeToShardEvent.builder().millisBehindLatest(100L).records(nextRecords) - .continuationSequenceNumber("6").childShards(Collections.emptyList()).build(); + batchEvent = SubscribeToShardEvent.builder() + .millisBehindLatest(100L) + .records(nextRecords) + .continuationSequenceNumber("6") + .childShards(Collections.emptyList()) + .build(); nextSubscribeCaptor.getValue().onNext(batchEvent); verify(subscription, times(4)).request(1); @@ -1242,41 +1390,60 @@ public class FanOutRecordsPublisherTest { @Test public void testIfBufferingRecordsWithinCapacityPublishesOneEvent() { - FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); + FanOutRecordsPublisher fanOutRecordsPublisher = + new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); RecordsRetrieved recordsRetrieved = ProcessRecordsInput.builder()::build; FanOutRecordsPublisher.RecordFlow recordFlow = new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "shard-001-001"); - final int[] totalRecordsRetrieved = { 0 }; + final int[] totalRecordsRetrieved = {0}; fanOutRecordsPublisher.subscribe(new Subscriber() { - @Override public void 
onSubscribe(Subscription subscription) {} - @Override public void onNext(RecordsRetrieved recordsRetrieved) { + @Override + public void onSubscribe(Subscription subscription) {} + + @Override + public void onNext(RecordsRetrieved recordsRetrieved) { totalRecordsRetrieved[0]++; } - @Override public void onError(Throwable throwable) {} - @Override public void onComplete() {} + + @Override + public void onError(Throwable throwable) {} + + @Override + public void onComplete() {} }); - IntStream.rangeClosed(1, 10).forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(recordsRetrieved, recordFlow)); + IntStream.rangeClosed(1, 10) + .forEach(i -> + fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(recordsRetrieved, recordFlow)); assertEquals(1, totalRecordsRetrieved[0]); } @Test public void testIfBufferingRecordsOverCapacityPublishesOneEventAndThrows() { - FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); + FanOutRecordsPublisher fanOutRecordsPublisher = + new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); RecordsRetrieved recordsRetrieved = ProcessRecordsInput.builder()::build; FanOutRecordsPublisher.RecordFlow recordFlow = new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "shard-001"); - final int[] totalRecordsRetrieved = { 0 }; + final int[] totalRecordsRetrieved = {0}; fanOutRecordsPublisher.subscribe(new Subscriber() { - @Override public void onSubscribe(Subscription subscription) {} - @Override public void onNext(RecordsRetrieved recordsRetrieved) { + @Override + public void onSubscribe(Subscription subscription) {} + + @Override + public void onNext(RecordsRetrieved recordsRetrieved) { totalRecordsRetrieved[0]++; } - @Override public void onError(Throwable throwable) {} - @Override public void onComplete() {} + + @Override + public void onError(Throwable throwable) {} + + @Override + public void onComplete() {} }); try { - 
IntStream.rangeClosed(1, 12).forEach( - i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired(recordsRetrieved, recordFlow)); + IntStream.rangeClosed(1, 12) + .forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( + recordsRetrieved, recordFlow)); fail("Should throw Queue full exception"); } catch (IllegalStateException e) { assertEquals("Queue full", e.getMessage()); @@ -1286,87 +1453,116 @@ public class FanOutRecordsPublisherTest { @Test public void testIfPublisherAlwaysPublishesWhenQueueIsEmpty() { - FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); + FanOutRecordsPublisher fanOutRecordsPublisher = + new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); FanOutRecordsPublisher.RecordFlow recordFlow = new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "shard-001"); - final int[] totalRecordsRetrieved = { 0 }; + final int[] totalRecordsRetrieved = {0}; fanOutRecordsPublisher.subscribe(new Subscriber() { - @Override public void onSubscribe(Subscription subscription) {} - @Override public void onNext(RecordsRetrieved recordsRetrieved) { + @Override + public void onSubscribe(Subscription subscription) {} + + @Override + public void onNext(RecordsRetrieved recordsRetrieved) { totalRecordsRetrieved[0]++; // This makes sure the queue is immediately made empty, so that the next event enqueued will // be the only element in the queue. 
- fanOutRecordsPublisher - .evictAckedEventAndScheduleNextEvent(() -> recordsRetrieved.batchUniqueIdentifier()); + fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent( + () -> recordsRetrieved.batchUniqueIdentifier()); } - @Override public void onError(Throwable throwable) {} - @Override public void onComplete() {} + + @Override + public void onError(Throwable throwable) {} + + @Override + public void onComplete() {} }); - IntStream.rangeClosed(1, 137).forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( - new FanOutRecordsPublisher.FanoutRecordsRetrieved(ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()), - recordFlow)); + IntStream.rangeClosed(1, 137) + .forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( + new FanOutRecordsPublisher.FanoutRecordsRetrieved( + ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()), + recordFlow)); assertEquals(137, totalRecordsRetrieved[0]); } @Test public void testIfPublisherIgnoresStaleEventsAndContinuesWithNextFlow() { - FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); + FanOutRecordsPublisher fanOutRecordsPublisher = + new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); FanOutRecordsPublisher.RecordFlow recordFlow = new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "shard-001"); - final int[] totalRecordsRetrieved = { 0 }; + final int[] totalRecordsRetrieved = {0}; fanOutRecordsPublisher.subscribe(new Subscriber() { - @Override public void onSubscribe(Subscription subscription) {} - @Override public void onNext(RecordsRetrieved recordsRetrieved) { + @Override + public void onSubscribe(Subscription subscription) {} + + @Override + public void onNext(RecordsRetrieved recordsRetrieved) { totalRecordsRetrieved[0]++; // This makes sure the queue is immediately made empty, so that the next event enqueued will // 
be the only element in the queue. - fanOutRecordsPublisher - .evictAckedEventAndScheduleNextEvent(() -> recordsRetrieved.batchUniqueIdentifier()); + fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent( + () -> recordsRetrieved.batchUniqueIdentifier()); // Send stale event periodically if (totalRecordsRetrieved[0] % 10 == 0) { fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent( () -> new BatchUniqueIdentifier("some_uuid_str", "some_old_flow")); } } - @Override public void onError(Throwable throwable) {} - @Override public void onComplete() {} + + @Override + public void onError(Throwable throwable) {} + + @Override + public void onComplete() {} }); - IntStream.rangeClosed(1, 100).forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( - new FanOutRecordsPublisher.FanoutRecordsRetrieved(ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()), - recordFlow)); + IntStream.rangeClosed(1, 100) + .forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( + new FanOutRecordsPublisher.FanoutRecordsRetrieved( + ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()), + recordFlow)); assertEquals(100, totalRecordsRetrieved[0]); } @Test public void testIfPublisherIgnoresStaleEventsAndContinuesWithNextFlowWhenDeliveryQueueIsNotEmpty() throws InterruptedException { - FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); + FanOutRecordsPublisher fanOutRecordsPublisher = + new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); FanOutRecordsPublisher.RecordFlow recordFlow = new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "shard-001"); - final int[] totalRecordsRetrieved = { 0 }; + final int[] totalRecordsRetrieved = {0}; BlockingQueue ackQueue = new LinkedBlockingQueue<>(); fanOutRecordsPublisher.subscribe(new Subscriber() { - @Override public void onSubscribe(Subscription 
subscription) {} - @Override public void onNext(RecordsRetrieved recordsRetrieved) { + @Override + public void onSubscribe(Subscription subscription) {} + + @Override + public void onNext(RecordsRetrieved recordsRetrieved) { totalRecordsRetrieved[0]++; // Enqueue the ack for bursty delivery ackQueue.add(recordsRetrieved.batchUniqueIdentifier()); // Send stale event periodically } - @Override public void onError(Throwable throwable) {} - @Override public void onComplete() {} + + @Override + public void onError(Throwable throwable) {} + + @Override + public void onComplete() {} }); - IntStream.rangeClosed(1, 10).forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( - new FanOutRecordsPublisher.FanoutRecordsRetrieved(ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()), - recordFlow)); + IntStream.rangeClosed(1, 10) + .forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( + new FanOutRecordsPublisher.FanoutRecordsRetrieved( + ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()), + recordFlow)); BatchUniqueIdentifier batchUniqueIdentifierQueued; int count = 0; // Now that we allowed upto 10 elements queued up, send a pair of good and stale ack to verify records // delivered as expected. 
while (count++ < 10 && (batchUniqueIdentifierQueued = ackQueue.take()) != null) { final BatchUniqueIdentifier batchUniqueIdentifierFinal = batchUniqueIdentifierQueued; - fanOutRecordsPublisher - .evictAckedEventAndScheduleNextEvent(() -> batchUniqueIdentifierFinal); + fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent(() -> batchUniqueIdentifierFinal); fanOutRecordsPublisher.evictAckedEventAndScheduleNextEvent( () -> new BatchUniqueIdentifier("some_uuid_str", "some_old_flow")); } @@ -1375,25 +1571,35 @@ public class FanOutRecordsPublisherTest { @Test(expected = IllegalStateException.class) public void testIfPublisherThrowsWhenMismatchAckforActiveFlowSeen() throws InterruptedException { - FanOutRecordsPublisher fanOutRecordsPublisher = new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); + FanOutRecordsPublisher fanOutRecordsPublisher = + new FanOutRecordsPublisher(kinesisClient, SHARD_ID, CONSUMER_ARN); FanOutRecordsPublisher.RecordFlow recordFlow = new FanOutRecordsPublisher.RecordFlow(fanOutRecordsPublisher, Instant.now(), "Shard-001-1"); - final int[] totalRecordsRetrieved = { 0 }; + final int[] totalRecordsRetrieved = {0}; BlockingQueue ackQueue = new LinkedBlockingQueue<>(); fanOutRecordsPublisher.subscribe(new Subscriber() { - @Override public void onSubscribe(Subscription subscription) {} - @Override public void onNext(RecordsRetrieved recordsRetrieved) { + @Override + public void onSubscribe(Subscription subscription) {} + + @Override + public void onNext(RecordsRetrieved recordsRetrieved) { totalRecordsRetrieved[0]++; // Enqueue the ack for bursty delivery ackQueue.add(recordsRetrieved.batchUniqueIdentifier()); // Send stale event periodically } - @Override public void onError(Throwable throwable) {} - @Override public void onComplete() {} + + @Override + public void onError(Throwable throwable) {} + + @Override + public void onComplete() {} }); - IntStream.rangeClosed(1, 10).forEach(i -> 
fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( - new FanOutRecordsPublisher.FanoutRecordsRetrieved(ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()), - recordFlow)); + IntStream.rangeClosed(1, 10) + .forEach(i -> fanOutRecordsPublisher.bufferCurrentEventAndScheduleIfRequired( + new FanOutRecordsPublisher.FanoutRecordsRetrieved( + ProcessRecordsInput.builder().build(), i + "", recordFlow.getSubscribeToShardId()), + recordFlow)); BatchUniqueIdentifier batchUniqueIdentifierQueued; int count = 0; // Now that we allowed upto 10 elements queued up, send a pair of good and stale ack to verify records @@ -1416,14 +1622,15 @@ public class FanOutRecordsPublisherTest { } }; - ArgumentCaptor captor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordSubscription.class); - ArgumentCaptor flowCaptor = ArgumentCaptor - .forClass(FanOutRecordsPublisher.RecordFlow.class); + ArgumentCaptor captor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordSubscription.class); + ArgumentCaptor flowCaptor = + ArgumentCaptor.forClass(FanOutRecordsPublisher.RecordFlow.class); doNothing().when(publisher).subscribe(captor.capture()); - source.start(ExtendedSequenceNumber.LATEST, + source.start( + ExtendedSequenceNumber.LATEST, InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST)); RecordingSubscriber subscriber = new RecordingSubscriber(); source.subscribe(subscriber); @@ -1432,28 +1639,32 @@ public class FanOutRecordsPublisherTest { Throwable exception = new CompletionException( "software.amazon.awssdk.core.exception.SdkClientException", - SdkClientException.create(null, new Throwable( - "Acquire operation took longer than the configured maximum time. This indicates that a " + - "request cannot get a connection from the pool within the specified maximum time. 
" + - "This can be due to high request rate.\n" + - "Consider taking any of the following actions to mitigate the issue: increase max " + - "connections, increase acquire timeout, or slowing the request rate.\n" + - "Increasing the max connections can increase client throughput (unless the network " + - "interface is already fully utilized), but can eventually start to hit operation " + - "system limitations on the number of file descriptors used by the process. " + - "If you already are fully utilizing your network interface or cannot further " + - "increase your connection count, increasing the acquire timeout gives extra time " + - "for requests to acquire a connection before timing out. " + - "If the connections doesn't free up, the subsequent requests will still timeout.\n" + - "If the above mechanisms are not able to fix the issue, try smoothing out your " + - "requests so that large traffic bursts cannot overload the client, being more " + - "efficient with the number of times you need to call AWS, or by increasing the " + - "number of hosts sending requests."))); + SdkClientException.create( + null, + new Throwable( + "Acquire operation took longer than the configured maximum time. This indicates that a " + + "request cannot get a connection from the pool within the specified maximum time. " + + "This can be due to high request rate.\n" + + "Consider taking any of the following actions to mitigate the issue: increase max " + + "connections, increase acquire timeout, or slowing the request rate.\n" + + "Increasing the max connections can increase client throughput (unless the network " + + "interface is already fully utilized), but can eventually start to hit operation " + + "system limitations on the number of file descriptors used by the process. 
" + + "If you already are fully utilizing your network interface or cannot further " + + "increase your connection count, increasing the acquire timeout gives extra time " + + "for requests to acquire a connection before timing out. " + + "If the connections doesn't free up, the subsequent requests will still timeout.\n" + + "If the above mechanisms are not able to fix the issue, try smoothing out your " + + "requests so that large traffic bursts cannot overload the client, being more " + + "efficient with the number of times you need to call AWS, or by increasing the " + + "number of hosts sending requests."))); flowCaptor.getValue().exceptionOccurred(exception); - Optional onErrorEvent = subscriber.events.stream().filter(e -> e instanceof OnErrorEvent) - .map(e -> (OnErrorEvent) e).findFirst(); + Optional onErrorEvent = subscriber.events.stream() + .filter(e -> e instanceof OnErrorEvent) + .map(e -> (OnErrorEvent) e) + .findFirst(); assertThat(onErrorEvent, equalTo(Optional.of(new OnErrorEvent(exception)))); assertThat(acquireTimeoutLogged.get(), equalTo(true)); @@ -1466,9 +1677,7 @@ public class FanOutRecordsPublisherTest { } } - private interface SubscriberEvent { - - } + private interface SubscriberEvent {} @Data private static class SubscribeEvent implements SubscriberEvent { @@ -1486,9 +1695,7 @@ public class FanOutRecordsPublisherTest { } @Data - private static class OnCompleteEvent implements SubscriberEvent { - - } + private static class OnCompleteEvent implements SubscriberEvent {} @Data private static class RequestEvent implements SubscriberEvent { @@ -1568,8 +1775,11 @@ public class FanOutRecordsPublisherTest { private Runnable errorAction; private Consumer shardEndAction; - BackpressureAdheringServicePublisher(Consumer action, Integer numOfTimes, - CountDownLatch taskCompletionLatch, Integer initialDemand) { + BackpressureAdheringServicePublisher( + Consumer action, + Integer numOfTimes, + CountDownLatch taskCompletionLatch, + Integer initialDemand) { 
this(action, numOfTimes, taskCompletionLatch, new Semaphore(initialDemand)); sendCompletionAt = Integer.MAX_VALUE; sendErrorAt = Integer.MAX_VALUE; @@ -1606,7 +1816,8 @@ public class FanOutRecordsPublisherTest { this.completeAction = completeAction; } - public void setShardEndAndCompleteTrigger(Integer sendCompletionAt, Runnable completeAction, Consumer shardEndAction) { + public void setShardEndAndCompleteTrigger( + Integer sendCompletionAt, Runnable completeAction, Consumer shardEndAction) { setCompleteTrigger(sendCompletionAt, completeAction); this.shardEndAction = shardEndAction; } @@ -1622,9 +1833,13 @@ public class FanOutRecordsPublisherTest { } private Record makeRecord(int sequenceNumber) { - SdkBytes buffer = SdkBytes.fromByteArray(new byte[] { 1, 2, 3 }); - return Record.builder().data(buffer).approximateArrivalTimestamp(Instant.now()) - .sequenceNumber(Integer.toString(sequenceNumber)).partitionKey("A").build(); + SdkBytes buffer = SdkBytes.fromByteArray(new byte[] {1, 2, 3}); + return Record.builder() + .data(buffer) + .approximateArrivalTimestamp(Instant.now()) + .sequenceNumber(Integer.toString(sequenceNumber)) + .partitionKey("A") + .build(); } private static class KinesisClientRecordMatcher extends TypeSafeDiagnosingMatcher { @@ -1641,23 +1856,25 @@ public class FanOutRecordsPublisherTest { sequenceNumberMatcher = equalTo(expected.sequenceNumber()); approximateArrivalMatcher = equalTo(expected.approximateArrivalTimestamp()); dataMatcher = equalTo(expected.data()); - } @Override protected boolean matchesSafely(KinesisClientRecord item, Description mismatchDescription) { - boolean matches = matchAndDescribe(partitionKeyMatcher, item.partitionKey(), "partitionKey", + boolean matches = + matchAndDescribe(partitionKeyMatcher, item.partitionKey(), "partitionKey", mismatchDescription); + matches &= matchAndDescribe( + sequenceNumberMatcher, item.sequenceNumber(), "sequenceNumber", mismatchDescription); + matches &= matchAndDescribe( + 
approximateArrivalMatcher, + item.approximateArrivalTimestamp(), + "approximateArrivalTimestamp", mismatchDescription); - matches &= matchAndDescribe(sequenceNumberMatcher, item.sequenceNumber(), "sequenceNumber", - mismatchDescription); - matches &= matchAndDescribe(approximateArrivalMatcher, item.approximateArrivalTimestamp(), - "approximateArrivalTimestamp", mismatchDescription); matches &= matchAndDescribe(dataMatcher, item.data(), "data", mismatchDescription); return matches; } - private boolean matchAndDescribe(Matcher matcher, T value, String field, - Description mismatchDescription) { + private boolean matchAndDescribe( + Matcher matcher, T value, String field, Description mismatchDescription) { if (!matcher.matches(value)) { mismatchDescription.appendText(field).appendText(": "); matcher.describeMismatch(value, mismatchDescription); @@ -1668,13 +1885,16 @@ public class FanOutRecordsPublisherTest { @Override public void describeTo(Description description) { - description.appendText("A kinesis client record with: ").appendText("PartitionKey: ") - .appendDescriptionOf(partitionKeyMatcher).appendText(" SequenceNumber: ") - .appendDescriptionOf(sequenceNumberMatcher).appendText(" Approximate Arrival Time: ") - .appendDescriptionOf(approximateArrivalMatcher).appendText(" Data: ") + description + .appendText("A kinesis client record with: ") + .appendText("PartitionKey: ") + .appendDescriptionOf(partitionKeyMatcher) + .appendText(" SequenceNumber: ") + .appendDescriptionOf(sequenceNumberMatcher) + .appendText(" Approximate Arrival Time: ") + .appendDescriptionOf(approximateArrivalMatcher) + .appendText(" Data: ") .appendDescriptionOf(dataMatcher); } - } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumberTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumberTest.java index dca68406..9dcba69a 100644 --- 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumberTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/kpl/ExtendedSequenceNumberTest.java @@ -14,11 +14,11 @@ */ package software.amazon.kinesis.retrieval.kpl; -import static org.junit.Assert.assertTrue; - import org.junit.Test; import software.amazon.kinesis.checkpoint.SentinelCheckpoint; +import static org.junit.Assert.assertTrue; + public class ExtendedSequenceNumberTest { @Test @@ -32,5 +32,4 @@ public class ExtendedSequenceNumberTest { assertTrue(sentinel.name(), esnWithSubsequence.isSentinelCheckpoint()); } } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyIntegrationTest.java index bd3b7047..2528f158 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyIntegrationTest.java @@ -14,16 +14,6 @@ */ package software.amazon.kinesis.retrieval.polling; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.core.IsEqual.equalTo; -import static org.junit.Assert.assertNull; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.concurrent.CompletionService; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ExecutorService; @@ -33,6 +23,7 @@ import java.util.concurrent.ThreadPoolExecutor; 
import java.util.concurrent.TimeUnit; import java.util.function.Supplier; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -41,15 +32,22 @@ import org.mockito.Mock; import org.mockito.invocation.InvocationOnMock; import org.mockito.runners.MockitoJUnitRunner; import org.mockito.stubbing.Answer; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; - import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.kinesis.retrieval.DataFetcherResult; import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.metrics.NullMetricsFactory; +import software.amazon.kinesis.retrieval.DataFetcherResult; + +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.junit.Assert.assertNull; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { @@ -65,8 +63,10 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { @Mock private Supplier> completionServiceSupplier; + @Mock private DataFetcherResult result; + @Mock private KinesisAsyncClient kinesisClient; @@ -79,7 +79,6 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { private RejectedExecutionHandler rejectedExecutionHandler; private int numberOfRecords = 10; - @Before public void setup() { dataFetcher = spy(new KinesisDataFetcherForTests(kinesisClient, streamName, shardId, numberOfRecords)); @@ -90,11 +89,14 @@ 
public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { TIME_TO_LIVE, TimeUnit.SECONDS, new LinkedBlockingQueue<>(1), - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("getrecords-worker-%d").build(), + new ThreadFactoryBuilder() + .setDaemon(true) + .setNameFormat("getrecords-worker-%d") + .build(), rejectedExecutionHandler)); completionService = spy(new ExecutorCompletionService(executorService)); - getRecordsRetrivalStrategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, executorService, - RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, "shardId-0001"); + getRecordsRetrivalStrategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, "shardId-0001"); getRecordsResponse = GetRecordsResponse.builder().build(); when(completionServiceSupplier.get()).thenReturn(completionService); @@ -112,7 +114,8 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { @Test public void multiRequestTest() { - ExecutorCompletionService completionService1 = spy(new ExecutorCompletionService(executorService)); + ExecutorCompletionService completionService1 = + spy(new ExecutorCompletionService(executorService)); when(completionServiceSupplier.get()).thenReturn(completionService1); GetRecordsResponse getRecordsResult = getRecordsRetrivalStrategy.getRecords(numberOfRecords); verify(dataFetcher, atLeast(getLeastNumberOfCalls())).getRecords(); @@ -120,7 +123,8 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { assertThat(getRecordsResult, equalTo(getRecordsResponse)); when(result.accept()).thenReturn(null); - ExecutorCompletionService completionService2 = spy(new ExecutorCompletionService(executorService)); + ExecutorCompletionService completionService2 = + spy(new ExecutorCompletionService(executorService)); when(completionServiceSupplier.get()).thenReturn(completionService2); getRecordsResult = 
getRecordsRetrivalStrategy.getRecords(numberOfRecords); assertThat(getRecordsResult, nullValue(GetRecordsResponse.class)); @@ -132,7 +136,9 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { @Override public DataFetcherResult answer(final InvocationOnMock invocationOnMock) throws Throwable { Thread.sleep(SLEEP_GET_RECORDS_IN_SECONDS * 1000); - throw ExpiredIteratorException.builder().message("ExpiredIterator").build(); + throw ExpiredIteratorException.builder() + .message("ExpiredIterator") + .build(); } }); @@ -162,8 +168,11 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { } private class KinesisDataFetcherForTests extends KinesisDataFetcher { - public KinesisDataFetcherForTests(final KinesisAsyncClient kinesisClient, final String streamName, - final String shardId, final int maxRecords) { + public KinesisDataFetcherForTests( + final KinesisAsyncClient kinesisClient, + final String streamName, + final String shardId, + final int maxRecords) { super(kinesisClient, streamName, shardId, maxRecords, NULL_METRICS_FACTORY); } @@ -178,5 +187,4 @@ public class AsynchronousGetRecordsRetrievalStrategyIntegrationTest { return result; } } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyTest.java index fc4b4fe1..13db2a4d 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/AsynchronousGetRecordsRetrievalStrategyTest.java @@ -14,18 +14,6 @@ */ package software.amazon.kinesis.retrieval.polling; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Matchers.any; -import static 
org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.concurrent.CompletionService; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -39,11 +27,22 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; import software.amazon.kinesis.retrieval.DataFetcherResult; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyBoolean; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + /** * */ @@ -52,18 +51,25 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { private static final long RETRY_GET_RECORDS_IN_SECONDS = 5; private static final String SHARD_ID = "ShardId-0001"; + @Mock private KinesisDataFetcher dataFetcher; + @Mock private ExecutorService executorService; + @Mock private Supplier> completionServiceSupplier; + @Mock private CompletionService completionService; + @Mock private Future successfulFuture; + @Mock private Future blockedFuture; + @Mock private DataFetcherResult dataFetcherResult; @@ -80,8 +86,8 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { @Test public void testSingleSuccessfulRequestFuture() throws 
Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); + AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); when(executorService.isShutdown()).thenReturn(false); when(completionService.submit(any())).thenReturn(successfulFuture); @@ -101,8 +107,8 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { @Test public void testBlockedAndSuccessfulFuture() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); + AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); when(executorService.isShutdown()).thenReturn(false); when(completionService.submit(any())).thenReturn(blockedFuture).thenReturn(successfulFuture); @@ -127,8 +133,8 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { @Test(expected = IllegalStateException.class) public void testStrategyIsShutdown() { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); + AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); when(executorService.isShutdown()).thenReturn(true); @@ -137,13 +143,18 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { @Test public void testPoolOutOfResources() throws 
Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); + AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); when(executorService.isShutdown()).thenReturn(false); - when(completionService.submit(any())).thenReturn(blockedFuture) - .thenThrow(new RejectedExecutionException("Rejected!")).thenReturn(successfulFuture); - when(completionService.poll(anyLong(), any())).thenReturn(null).thenReturn(null).thenReturn(successfulFuture); + when(completionService.submit(any())) + .thenReturn(blockedFuture) + .thenThrow(new RejectedExecutionException("Rejected!")) + .thenReturn(successfulFuture); + when(completionService.poll(anyLong(), any())) + .thenReturn(null) + .thenReturn(null) + .thenReturn(successfulFuture); when(successfulFuture.get()).thenReturn(dataFetcherResult); when(successfulFuture.cancel(anyBoolean())).thenReturn(false); when(blockedFuture.cancel(anyBoolean())).thenReturn(true); @@ -159,18 +170,21 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { assertThat(actualResult, equalTo(expectedResponses)); } - - @Test (expected = ExpiredIteratorException.class) + + @Test(expected = ExpiredIteratorException.class) public void testExpiredIteratorExceptionCase() throws Exception { - AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy(dataFetcher, - executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); + AsynchronousGetRecordsRetrievalStrategy strategy = new AsynchronousGetRecordsRetrievalStrategy( + dataFetcher, executorService, (int) RETRY_GET_RECORDS_IN_SECONDS, completionServiceSupplier, SHARD_ID); Future successfulFuture2 = mock(Future.class); 
when(executorService.isShutdown()).thenReturn(false); when(completionService.submit(any())).thenReturn(successfulFuture, successfulFuture2); when(completionService.poll(anyLong(), any())).thenReturn(null).thenReturn(successfulFuture); - when(successfulFuture.get()).thenThrow(new ExecutionException(ExpiredIteratorException.builder().message("ExpiredException").build())); - + when(successfulFuture.get()) + .thenThrow(new ExecutionException(ExpiredIteratorException.builder() + .message("ExpiredException") + .build())); + try { strategy.getRecords(10); } finally { @@ -181,5 +195,4 @@ public class AsynchronousGetRecordsRetrievalStrategyTest { verify(successfulFuture2).cancel(eq(true)); } } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcherTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcherTest.java index 4ac8bbf7..b3cd0c2a 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcherTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/KinesisDataFetcherTest.java @@ -14,21 +14,6 @@ */ package software.amazon.kinesis.retrieval.polling; -import static org.hamcrest.CoreMatchers.isA; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.reset; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -50,7 +35,6 @@ import org.junit.runner.RunWith; 
import org.mockito.ArgumentCaptor; import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - import software.amazon.awssdk.core.exception.SdkException; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; import software.amazon.awssdk.services.kinesis.model.ChildShard; @@ -75,6 +59,21 @@ import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; import software.amazon.kinesis.retrieval.RetryableRetrievalException; import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import static org.hamcrest.CoreMatchers.isA; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + /** * Unit tests for KinesisDataFetcher. 
*/ @@ -83,20 +82,22 @@ public class KinesisDataFetcherTest { private static final int MAX_RECORDS = 1; private static final String STREAM_NAME = "streamName"; private static final String SHARD_ID = "shardId-1"; - private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.LATEST); - private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = InitialPositionInStreamExtended - .newInitialPosition(InitialPositionInStream.TRIM_HORIZON); - private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP = InitialPositionInStreamExtended - .newInitialPositionAtTimestamp(new Date(1000)); + private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST); + private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON = + InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON); + private static final InitialPositionInStreamExtended INITIAL_POSITION_AT_TIMESTAMP = + InitialPositionInStreamExtended.newInitialPositionAtTimestamp(new Date(1000)); private static final MetricsFactory NULL_METRICS_FACTORY = new NullMetricsFactory(); private KinesisDataFetcher kinesisDataFetcher; @Mock private KinesisAsyncClient kinesisClient; + @Mock private CompletableFuture getRecordsResponseFuture; + @Mock private CompletableFuture getShardIteratorResponseFuture; @@ -105,8 +106,8 @@ public class KinesisDataFetcherTest { @Before public void setup() { - kinesisDataFetcher = new KinesisDataFetcher(kinesisClient, STREAM_NAME, SHARD_ID, MAX_RECORDS, - NULL_METRICS_FACTORY); + kinesisDataFetcher = + new KinesisDataFetcher(kinesisClient, STREAM_NAME, SHARD_ID, MAX_RECORDS, NULL_METRICS_FACTORY); } /** @@ -114,8 +115,8 @@ public class KinesisDataFetcherTest { */ @Test public final void testInitializeLatest() 
throws Exception { - testInitializeAndFetch(ShardIteratorType.LATEST.toString(), ShardIteratorType.LATEST.toString(), - INITIAL_POSITION_LATEST); + testInitializeAndFetch( + ShardIteratorType.LATEST.toString(), ShardIteratorType.LATEST.toString(), INITIAL_POSITION_LATEST); } /** @@ -123,7 +124,9 @@ public class KinesisDataFetcherTest { */ @Test public final void testInitializeTimeZero() throws Exception { - testInitializeAndFetch(ShardIteratorType.TRIM_HORIZON.toString(), ShardIteratorType.TRIM_HORIZON.toString(), + testInitializeAndFetch( + ShardIteratorType.TRIM_HORIZON.toString(), + ShardIteratorType.TRIM_HORIZON.toString(), INITIAL_POSITION_TRIM_HORIZON); } @@ -132,7 +135,9 @@ public class KinesisDataFetcherTest { */ @Test public final void testInitializeAtTimestamp() throws Exception { - testInitializeAndFetch(ShardIteratorType.AT_TIMESTAMP.toString(), ShardIteratorType.AT_TIMESTAMP.toString(), + testInitializeAndFetch( + ShardIteratorType.AT_TIMESTAMP.toString(), + ShardIteratorType.AT_TIMESTAMP.toString(), INITIAL_POSITION_AT_TIMESTAMP); } @@ -154,8 +159,8 @@ public class KinesisDataFetcherTest { } private CompletableFuture makeGetShardIteratorResponse(String shardIterator) { - return CompletableFuture - .completedFuture(GetShardIteratorResponse.builder().shardIterator(shardIterator).build()); + return CompletableFuture.completedFuture( + GetShardIteratorResponse.builder().shardIterator(shardIterator).build()); } @Test @@ -166,8 +171,8 @@ public class KinesisDataFetcherTest { final String seqA = "123"; final String seqB = "456"; - ArgumentCaptor shardIteratorRequestCaptor = ArgumentCaptor - .forClass(GetShardIteratorRequest.class); + ArgumentCaptor shardIteratorRequestCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); when(kinesisClient.getShardIterator(shardIteratorRequestCaptor.capture())) .thenReturn(makeGetShardIteratorResponse(iteratorA)) @@ -197,24 +202,30 @@ public class KinesisDataFetcherTest { } private GetShardIteratorRequest 
makeGetShardIteratorRequest(String shardIteratorType) { - return GetShardIteratorRequest.builder().shardIteratorType(shardIteratorType).streamName(STREAM_NAME) - .shardId(SHARD_ID).build(); + return GetShardIteratorRequest.builder() + .shardIteratorType(shardIteratorType) + .streamName(STREAM_NAME) + .shardId(SHARD_ID) + .build(); } @Test - public void testAdvanceIteratorToTrimHorizonLatestAndAtTimestamp(){ - final ArgumentCaptor requestCaptor = ArgumentCaptor - .forClass(GetShardIteratorRequest.class); + public void testAdvanceIteratorToTrimHorizonLatestAndAtTimestamp() { + final ArgumentCaptor requestCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); final String iteratorHorizon = "TRIM_HORIZON"; final String iteratorLatest = "LATEST"; final String iteratorAtTimestamp = "AT_TIMESTAMP"; - final Map requestsMap = Arrays - .stream(new String[] { iteratorHorizon, iteratorLatest, iteratorAtTimestamp }) + final Map requestsMap = Arrays.stream( + new String[] {iteratorHorizon, iteratorLatest, iteratorAtTimestamp}) .map(this::makeGetShardIteratorRequest) .collect(Collectors.toMap(r -> ShardIteratorType.valueOf(r.shardIteratorTypeAsString()), r -> r)); GetShardIteratorRequest tsReq = requestsMap.get(ShardIteratorType.AT_TIMESTAMP); - requestsMap.put(ShardIteratorType.AT_TIMESTAMP, - tsReq.toBuilder().timestamp(INITIAL_POSITION_AT_TIMESTAMP.getTimestamp().toInstant()).build()); + requestsMap.put( + ShardIteratorType.AT_TIMESTAMP, + tsReq.toBuilder() + .timestamp(INITIAL_POSITION_AT_TIMESTAMP.getTimestamp().toInstant()) + .build()); when(kinesisClient.getShardIterator(requestCaptor.capture())) .thenReturn(makeGetShardIteratorResponse(iteratorHorizon)) @@ -241,19 +252,22 @@ public class KinesisDataFetcherTest { } private GetRecordsRequest makeGetRecordsRequest(String shardIterator) { - return GetRecordsRequest.builder().shardIterator(shardIterator).limit(MAX_RECORDS).build(); + return GetRecordsRequest.builder() + .shardIterator(shardIterator) + 
.limit(MAX_RECORDS) + .build(); } @Test public void testGetRecordsWithResourceNotFoundException() throws Exception { - final ArgumentCaptor iteratorCaptor = ArgumentCaptor - .forClass(GetShardIteratorRequest.class); + final ArgumentCaptor iteratorCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); final ArgumentCaptor recordsCaptor = ArgumentCaptor.forClass(GetRecordsRequest.class); // Set up arguments used by proxy final String nextIterator = "TestShardIterator"; - final GetShardIteratorRequest expectedIteratorRequest = makeGetShardIteratorRequest( - ShardIteratorType.LATEST.name()); + final GetShardIteratorRequest expectedIteratorRequest = + makeGetShardIteratorRequest(ShardIteratorType.LATEST.name()); final GetRecordsRequest expectedRecordsRequest = makeGetRecordsRequest(nextIterator); final CompletableFuture future = mock(CompletableFuture.class); @@ -262,21 +276,27 @@ public class KinesisDataFetcherTest { when(kinesisClient.getShardIterator(iteratorCaptor.capture())) .thenReturn(makeGetShardIteratorResponse(nextIterator)); when(kinesisClient.getRecords(recordsCaptor.capture())).thenReturn(future); - when(future.get(anyLong(), any(TimeUnit.class))).thenThrow( - new ExecutionException(ResourceNotFoundException.builder().message("Test Exception").build())); + when(future.get(anyLong(), any(TimeUnit.class))) + .thenThrow(new ExecutionException(ResourceNotFoundException.builder() + .message("Test Exception") + .build())); // Create data fectcher and initialize it with latest type checkpoint kinesisDataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST); - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = new SynchronousGetRecordsRetrievalStrategy( - kinesisDataFetcher); + final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = + new SynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher); try { // Call records of dataFetcher which will throw an exception 
getRecordsRetrievalStrategy.getRecords(MAX_RECORDS); } finally { // Test shard has reached the end assertTrue("Shard should reach the end", kinesisDataFetcher.isShardEndReached()); - assertEquals(expectedIteratorRequest.startingSequenceNumber(), iteratorCaptor.getValue().startingSequenceNumber()); - assertEquals(expectedRecordsRequest.shardIterator(), recordsCaptor.getValue().shardIterator()); + assertEquals( + expectedIteratorRequest.startingSequenceNumber(), + iteratorCaptor.getValue().startingSequenceNumber()); + assertEquals( + expectedRecordsRequest.shardIterator(), + recordsCaptor.getValue().shardIterator()); } } @@ -285,19 +305,20 @@ public class KinesisDataFetcherTest { expectedExceptionRule.expect(SdkException.class); expectedExceptionRule.expectMessage("Test Exception"); - CompletableFuture getShardIteratorFuture = CompletableFuture - .completedFuture(GetShardIteratorResponse.builder().shardIterator("test").build()); + CompletableFuture getShardIteratorFuture = CompletableFuture.completedFuture( + GetShardIteratorResponse.builder().shardIterator("test").build()); // Set up proxy mock methods when(kinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(getShardIteratorFuture); when(kinesisClient.getRecords(any(GetRecordsRequest.class))).thenReturn(getRecordsResponseFuture); when(getRecordsResponseFuture.get(anyLong(), any(TimeUnit.class))) - .thenThrow(new ExecutionException(SdkException.builder().message("Test Exception").build())); + .thenThrow(new ExecutionException( + SdkException.builder().message("Test Exception").build())); // Create data fectcher and initialize it with latest type checkpoint kinesisDataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST); - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = new SynchronousGetRecordsRetrievalStrategy( - kinesisDataFetcher); + final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = + new 
SynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher); // Call records of dataFetcher which will throw an exception getRecordsRetrievalStrategy.getRecords(MAX_RECORDS); @@ -306,11 +327,11 @@ public class KinesisDataFetcherTest { @Test public void testNonNullGetRecords() throws Exception { final String nextIterator = "TestIterator"; - final ArgumentCaptor iteratorCaptor = ArgumentCaptor - .forClass(GetShardIteratorRequest.class); + final ArgumentCaptor iteratorCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); final ArgumentCaptor recordsCaptor = ArgumentCaptor.forClass(GetRecordsRequest.class); - final GetShardIteratorRequest expectedIteratorRequest = makeGetShardIteratorRequest( - ShardIteratorType.LATEST.name()); + final GetShardIteratorRequest expectedIteratorRequest = + makeGetShardIteratorRequest(ShardIteratorType.LATEST.name()); final GetRecordsRequest expectedRecordsRequest = makeGetRecordsRequest(nextIterator); final CompletableFuture future = mock(CompletableFuture.class); @@ -318,15 +339,20 @@ public class KinesisDataFetcherTest { when(kinesisClient.getShardIterator(iteratorCaptor.capture())) .thenReturn(makeGetShardIteratorResponse(nextIterator)); when(kinesisClient.getRecords(recordsCaptor.capture())).thenReturn(future); - when(future.get(anyLong(), any(TimeUnit.class))).thenThrow( - new ExecutionException(ResourceNotFoundException.builder().message("Test Exception").build())); + when(future.get(anyLong(), any(TimeUnit.class))) + .thenThrow(new ExecutionException(ResourceNotFoundException.builder() + .message("Test Exception") + .build())); kinesisDataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST); DataFetcherResult dataFetcherResult = kinesisDataFetcher.getRecords(); assertNotNull(dataFetcherResult); - assertEquals(expectedIteratorRequest.startingSequenceNumber(), iteratorCaptor.getValue().startingSequenceNumber()); - assertEquals(expectedRecordsRequest.shardIterator(), 
recordsCaptor.getValue().shardIterator()); + assertEquals( + expectedIteratorRequest.startingSequenceNumber(), + iteratorCaptor.getValue().startingSequenceNumber()); + assertEquals( + expectedRecordsRequest.shardIterator(), recordsCaptor.getValue().shardIterator()); } private CompletableFuture makeGetRecordsResponse(String nextIterator, List records) { @@ -334,8 +360,11 @@ public class KinesisDataFetcherTest { if (nextIterator == null) { childShards = createChildShards(); } - return CompletableFuture.completedFuture(GetRecordsResponse.builder().nextShardIterator(nextIterator) - .records(CollectionUtils.isNullOrEmpty(records) ? Collections.emptyList() : records).childShards(childShards).build()); + return CompletableFuture.completedFuture(GetRecordsResponse.builder() + .nextShardIterator(nextIterator) + .records(CollectionUtils.isNullOrEmpty(records) ? Collections.emptyList() : records) + .childShards(childShards) + .build()); } private List createChildShards() { @@ -343,15 +372,15 @@ public class KinesisDataFetcherTest { List parentShards = new ArrayList<>(); parentShards.add(SHARD_ID); ChildShard leftChild = ChildShard.builder() - .shardId("Shard-2") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) - .build(); + .shardId("Shard-2") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("0", "49")) + .build(); ChildShard rightChild = ChildShard.builder() - .shardId("Shard-3") - .parentShards(parentShards) - .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) - .build(); + .shardId("Shard-3") + .parentShards(parentShards) + .hashKeyRange(ShardObjectHelper.newHashKeyRange("50", "99")) + .build(); childShards.add(leftChild); childShards.add(rightChild); return childShards; @@ -359,26 +388,33 @@ public class KinesisDataFetcherTest { @Test public void testFetcherDoesNotAdvanceWithoutAccept() throws InterruptedException, ExecutionException { - final ArgumentCaptor iteratorCaptor = ArgumentCaptor 
- .forClass(GetShardIteratorRequest.class); + final ArgumentCaptor iteratorCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); final ArgumentCaptor recordsCaptor = ArgumentCaptor.forClass(GetRecordsRequest.class); final String initialIterator = "InitialIterator"; final String nextIterator1 = "NextIteratorOne"; final String nextIterator2 = "NextIteratorTwo"; final CompletableFuture nonAdvancingResult1 = makeGetRecordsResponse(initialIterator, null); final CompletableFuture nonAdvancingResult2 = makeGetRecordsResponse(nextIterator1, null); - final CompletableFuture finalNonAdvancingResult = makeGetRecordsResponse(nextIterator2, - null); + final CompletableFuture finalNonAdvancingResult = + makeGetRecordsResponse(nextIterator2, null); final CompletableFuture advancingResult1 = makeGetRecordsResponse(nextIterator1, null); final CompletableFuture advancingResult2 = makeGetRecordsResponse(nextIterator2, null); final CompletableFuture finalAdvancingResult = makeGetRecordsResponse(null, null); when(kinesisClient.getShardIterator(iteratorCaptor.capture())) .thenReturn(makeGetShardIteratorResponse(initialIterator)); - when(kinesisClient.getRecords(recordsCaptor.capture())).thenReturn(nonAdvancingResult1, advancingResult1, - nonAdvancingResult2, advancingResult2, finalNonAdvancingResult, finalAdvancingResult); + when(kinesisClient.getRecords(recordsCaptor.capture())) + .thenReturn( + nonAdvancingResult1, + advancingResult1, + nonAdvancingResult2, + advancingResult2, + finalNonAdvancingResult, + finalAdvancingResult); - kinesisDataFetcher.initialize("TRIM_HORIZON", + kinesisDataFetcher.initialize( + "TRIM_HORIZON", InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.TRIM_HORIZON)); assertNoAdvance(nonAdvancingResult1.get(), initialIterator); @@ -412,8 +448,8 @@ public class KinesisDataFetcherTest { @Ignore public void testRestartIterator() throws Exception { GetRecordsResponse getRecordsResult = mock(GetRecordsResponse.class); - 
GetRecordsResponse restartGetRecordsResponse = makeGetRecordsResponse(null, null).get(anyLong(), - any(TimeUnit.class)); + GetRecordsResponse restartGetRecordsResponse = + makeGetRecordsResponse(null, null).get(anyLong(), any(TimeUnit.class)); Record record = mock(Record.class); final String nextShardIterator = "NextShardIterator"; final String sequenceNumber = "SequenceNumber"; @@ -437,8 +473,8 @@ public class KinesisDataFetcherTest { final ArgumentCaptor shardIteratorRequestCaptor = ArgumentCaptor.forClass(GetShardIteratorRequest.class); - when(kinesisClient.getShardIterator(shardIteratorRequestCaptor.capture())). - thenReturn(makeGetShardIteratorResponse(iterator)); + when(kinesisClient.getShardIterator(shardIteratorRequestCaptor.capture())) + .thenReturn(makeGetShardIteratorResponse(iterator)); kinesisDataFetcher.initialize(sequenceNumber, INITIAL_POSITION_LATEST); kinesisDataFetcher.restartIterator(); @@ -448,11 +484,14 @@ public class KinesisDataFetcherTest { final List shardIteratorRequests = shardIteratorRequestCaptor.getAllValues(); assertEquals(3, shardIteratorRequests.size()); - assertEquals(ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), + assertEquals( + ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), shardIteratorRequests.get(0).shardIteratorTypeAsString()); - assertEquals(ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), + assertEquals( + ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), shardIteratorRequests.get(1).shardIteratorTypeAsString()); - assertEquals(ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), + assertEquals( + ShardIteratorType.AT_SEQUENCE_NUMBER.toString(), shardIteratorRequests.get(2).shardIteratorTypeAsString()); } @@ -483,8 +522,8 @@ public class KinesisDataFetcherTest { expectedExceptionRule.expectCause(isA(TimeoutException.class)); expectedExceptionRule.expectMessage("Timeout"); - CompletableFuture getShardIteratorFuture = CompletableFuture - 
.completedFuture(GetShardIteratorResponse.builder().shardIterator("test").build()); + CompletableFuture getShardIteratorFuture = CompletableFuture.completedFuture( + GetShardIteratorResponse.builder().shardIterator("test").build()); // Set up proxy mock methods when(kinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(getShardIteratorFuture); @@ -493,15 +532,15 @@ public class KinesisDataFetcherTest { // Create data fectcher and initialize it with latest type checkpoint kinesisDataFetcher.initialize(SentinelCheckpoint.LATEST.toString(), INITIAL_POSITION_LATEST); - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = new SynchronousGetRecordsRetrievalStrategy( - kinesisDataFetcher); + final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = + new SynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher); // Call records of dataFetcher which will throw an exception getRecordsRetrievalStrategy.getRecords(MAX_RECORDS); } - private DataFetcherResult assertAdvanced(GetRecordsResponse expectedResult, String previousValue, - String nextValue) { + private DataFetcherResult assertAdvanced( + GetRecordsResponse expectedResult, String previousValue, String nextValue) { DataFetcherResult acceptResult = kinesisDataFetcher.getRecords(); assertEquals(expectedResult, acceptResult.getResult()); @@ -527,19 +566,25 @@ public class KinesisDataFetcherTest { return noAcceptResult; } - private void testInitializeAndFetch(final String iteratorType, final String seqNo, - final InitialPositionInStreamExtended initialPositionInStream) throws Exception { - final ArgumentCaptor iteratorCaptor = ArgumentCaptor - .forClass(GetShardIteratorRequest.class); + private void testInitializeAndFetch( + final String iteratorType, + final String seqNo, + final InitialPositionInStreamExtended initialPositionInStream) + throws Exception { + final ArgumentCaptor iteratorCaptor = + ArgumentCaptor.forClass(GetShardIteratorRequest.class); final ArgumentCaptor 
recordsCaptor = ArgumentCaptor.forClass(GetRecordsRequest.class); final String iterator = "foo"; final List expectedRecords = Collections.emptyList(); GetShardIteratorRequest expectedIteratorRequest = makeGetShardIteratorRequest(iteratorType); if (iteratorType.equals(ShardIteratorType.AT_TIMESTAMP.toString())) { expectedIteratorRequest = expectedIteratorRequest.toBuilder() - .timestamp(initialPositionInStream.getTimestamp().toInstant()).build(); + .timestamp(initialPositionInStream.getTimestamp().toInstant()) + .build(); } else if (iteratorType.equals(ShardIteratorType.AT_SEQUENCE_NUMBER.toString())) { - expectedIteratorRequest = expectedIteratorRequest.toBuilder().startingSequenceNumber(seqNo).build(); + expectedIteratorRequest = expectedIteratorRequest.toBuilder() + .startingSequenceNumber(seqNo) + .build(); } when(kinesisClient.getShardIterator(iteratorCaptor.capture())) @@ -551,13 +596,14 @@ public class KinesisDataFetcherTest { Checkpointer checkpoint = mock(Checkpointer.class); when(checkpoint.getCheckpoint(SHARD_ID)).thenReturn(new ExtendedSequenceNumber(seqNo)); - final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = new SynchronousGetRecordsRetrievalStrategy( - kinesisDataFetcher); + final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy = + new SynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher); kinesisDataFetcher.initialize(seqNo, initialPositionInStream); - assertEquals(expectedRecords, getRecordsRetrievalStrategy.getRecords(MAX_RECORDS).records()); + assertEquals( + expectedRecords, + getRecordsRetrievalStrategy.getRecords(MAX_RECORDS).records()); verify(kinesisClient, times(1)).getShardIterator(any(GetShardIteratorRequest.class)); verify(kinesisClient, times(1)).getRecords(any(GetRecordsRequest.class)); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PollingConfigTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PollingConfigTest.java 
index eefba7a4..572bc0f0 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PollingConfigTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PollingConfigTest.java @@ -1,8 +1,5 @@ package software.amazon.kinesis.retrieval.polling; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -10,6 +7,9 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + @RunWith(MockitoJUnitRunner.class) public class PollingConfigTest { @@ -48,5 +48,4 @@ public class PollingConfigTest { public void testInvalidRecordLimit() { config.maxRecords(PollingConfig.DEFAULT_MAX_RECORDS + 1); } - -} \ No newline at end of file +} diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherIntegrationTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherIntegrationTest.java index 780ac4ad..c5340f97 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherIntegrationTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherIntegrationTest.java @@ -15,6 +15,41 @@ package software.amazon.kinesis.retrieval.polling; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import lombok.extern.slf4j.Slf4j; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; 
+import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; +import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; +import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; +import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse; +import software.amazon.awssdk.services.kinesis.model.Record; +import software.amazon.kinesis.common.InitialPositionInStreamExtended; +import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; +import software.amazon.kinesis.metrics.MetricsFactory; +import software.amazon.kinesis.metrics.NullMetricsFactory; +import software.amazon.kinesis.retrieval.DataFetcherResult; +import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; +import software.amazon.kinesis.retrieval.RecordsRetrieved; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; @@ -32,43 +67,6 @@ import static org.mockito.Mockito.when; import static software.amazon.kinesis.utils.BlockingUtils.blockUntilConditionSatisfied; import static software.amazon.kinesis.utils.BlockingUtils.blockUntilRecordsAvailable; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; 
-import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.runners.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; - -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; - -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; -import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.awssdk.services.kinesis.model.GetShardIteratorRequest; -import software.amazon.awssdk.services.kinesis.model.GetShardIteratorResponse; -import software.amazon.awssdk.services.kinesis.model.Record; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.metrics.MetricsFactory; -import software.amazon.kinesis.metrics.NullMetricsFactory; -import software.amazon.kinesis.retrieval.DataFetcherResult; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.RecordsRetrieved; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; - /** * These are the integration tests for the PrefetchRecordsPublisher class. 
*/ @@ -95,8 +93,10 @@ public class PrefetchRecordsPublisherIntegrationTest { @Mock private KinesisAsyncClient kinesisClient; + @Mock private ExtendedSequenceNumber extendedSequenceNumber; + @Mock private InitialPositionInStreamExtended initialPosition; @@ -109,10 +109,14 @@ public class PrefetchRecordsPublisherIntegrationTest { CompletableFuture future = mock(CompletableFuture.class); when(extendedSequenceNumber.sequenceNumber()).thenReturn("LATEST"); - when(future.get(anyLong(), any(TimeUnit.class))).thenReturn(GetShardIteratorResponse.builder().shardIterator("TestIterator").build()); + when(future.get(anyLong(), any(TimeUnit.class))) + .thenReturn(GetShardIteratorResponse.builder() + .shardIterator("TestIterator") + .build()); when(kinesisClient.getShardIterator(any(GetShardIteratorRequest.class))).thenReturn(future); - getRecordsCache = new PrefetchRecordsPublisher(MAX_SIZE, + getRecordsCache = new PrefetchRecordsPublisher( + MAX_SIZE, MAX_BYTE_SIZE, MAX_RECORDS_COUNT, MAX_RECORDS_PER_CALL, @@ -130,14 +134,16 @@ public class PrefetchRecordsPublisherIntegrationTest { getRecordsCache.start(extendedSequenceNumber, initialPosition); sleep(IDLE_MILLIS_BETWEEN_CALLS); - ProcessRecordsInput processRecordsInput1 = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) + ProcessRecordsInput processRecordsInput1 = blockUntilRecordsAvailable( + () -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) .processRecordsInput(); assertTrue(processRecordsInput1.records().isEmpty()); assertEquals(processRecordsInput1.millisBehindLatest(), new Long(1000)); assertNotNull(processRecordsInput1.cacheEntryTime()); - ProcessRecordsInput processRecordsInput2 = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) + ProcessRecordsInput processRecordsInput2 = blockUntilRecordsAvailable( + () -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) .processRecordsInput(); assertNotEquals(processRecordsInput1, 
processRecordsInput2); @@ -148,11 +154,14 @@ public class PrefetchRecordsPublisherIntegrationTest { getRecordsCache.start(extendedSequenceNumber, initialPosition); sleep(MAX_SIZE * IDLE_MILLIS_BETWEEN_CALLS); - assertEquals(getRecordsCache.getPublisherSession().prefetchRecordsQueue().size(), MAX_SIZE); + assertEquals( + getRecordsCache.getPublisherSession().prefetchRecordsQueue().size(), MAX_SIZE); - ProcessRecordsInput processRecordsInput1 = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) + ProcessRecordsInput processRecordsInput1 = blockUntilRecordsAvailable( + () -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) .processRecordsInput(); - ProcessRecordsInput processRecordsInput2 = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) + ProcessRecordsInput processRecordsInput2 = blockUntilRecordsAvailable( + () -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) .processRecordsInput(); assertNotEquals(processRecordsInput1, processRecordsInput2); @@ -162,10 +171,10 @@ public class PrefetchRecordsPublisherIntegrationTest { @Test public void testDifferentShardCaches() { final ExecutorService executorService2 = spy(Executors.newFixedThreadPool(1)); - final KinesisDataFetcher kinesisDataFetcher = spy(new KinesisDataFetcher(kinesisClient, streamName, shardId, - MAX_RECORDS_PER_CALL, NULL_METRICS_FACTORY)); + final KinesisDataFetcher kinesisDataFetcher = spy( + new KinesisDataFetcher(kinesisClient, streamName, shardId, MAX_RECORDS_PER_CALL, NULL_METRICS_FACTORY)); final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy2 = - spy(new AsynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher, 5 , 5, shardId)); + spy(new AsynchronousGetRecordsRetrievalStrategy(kinesisDataFetcher, 5, 5, shardId)); final PrefetchRecordsPublisher recordsPublisher2 = new PrefetchRecordsPublisher( MAX_SIZE, MAX_BYTE_SIZE, @@ -206,23 +215,28 @@ public class PrefetchRecordsPublisherIntegrationTest { 
recordsPublisher2.shutdown(); sleep(100L); verify(executorService2).shutdownNow(); -// verify(getRecordsRetrievalStrategy2).shutdown(); + // verify(getRecordsRetrievalStrategy2).shutdown(); } @Test public void testExpiredIteratorException() { - when(dataFetcher.getRecords()).thenAnswer(new Answer() { - @Override - public DataFetcherResult answer(final InvocationOnMock invocationOnMock) throws Throwable { - throw ExpiredIteratorException.builder().message("ExpiredIterator").build(); - } - }).thenCallRealMethod(); + when(dataFetcher.getRecords()) + .thenAnswer(new Answer() { + @Override + public DataFetcherResult answer(final InvocationOnMock invocationOnMock) throws Throwable { + throw ExpiredIteratorException.builder() + .message("ExpiredIterator") + .build(); + } + }) + .thenCallRealMethod(); doNothing().when(dataFetcher).restartIterator(); getRecordsCache.start(extendedSequenceNumber, initialPosition); sleep(IDLE_MILLIS_BETWEEN_CALLS); - ProcessRecordsInput processRecordsInput = blockUntilRecordsAvailable(() -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) + ProcessRecordsInput processRecordsInput = blockUntilRecordsAvailable( + () -> evictPublishedEvent(getRecordsCache, "shardId"), 1000L) .processRecordsInput(); assertNotNull(processRecordsInput); @@ -233,17 +247,26 @@ public class PrefetchRecordsPublisherIntegrationTest { @Test public void testExpiredIteratorExceptionWithInnerRestartIteratorException() { when(dataFetcher.getRecords()) - .thenThrow(ExpiredIteratorException.builder().message("ExpiredIterator").build()) + .thenThrow(ExpiredIteratorException.builder() + .message("ExpiredIterator") + .build()) .thenCallRealMethod() - .thenThrow(ExpiredIteratorException.builder().message("ExpiredIterator").build()) + .thenThrow(ExpiredIteratorException.builder() + .message("ExpiredIterator") + .build()) .thenCallRealMethod(); doThrow(IllegalStateException.class).when(dataFetcher).restartIterator(); getRecordsCache.start(extendedSequenceNumber, 
initialPosition); - final boolean conditionSatisfied = blockUntilConditionSatisfied(() -> - getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == MAX_SIZE, 5000); + final boolean conditionSatisfied = blockUntilConditionSatisfied( + () -> getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 5000); Assert.assertTrue(conditionSatisfied); // Asserts the exception was only thrown once for restartIterator verify(dataFetcher, times(2)).restartIterator(); @@ -258,27 +281,32 @@ public class PrefetchRecordsPublisherIntegrationTest { getRecordsCache.shutdown(); sleep(100L); verify(executorService).shutdown(); -// verify(getRecordsRetrievalStrategy).shutdown(); + // verify(getRecordsRetrievalStrategy).shutdown(); } private void sleep(long millis) { try { Thread.sleep(millis); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { + } } private class KinesisDataFetcherForTest extends KinesisDataFetcher { - public KinesisDataFetcherForTest(final KinesisAsyncClient kinesisClient, - final String streamName, - final String shardId, - final int maxRecords) { + public KinesisDataFetcherForTest( + final KinesisAsyncClient kinesisClient, + final String streamName, + final String shardId, + final int maxRecords) { super(kinesisClient, streamName, shardId, maxRecords, NULL_METRICS_FACTORY); } @Override public DataFetcherResult getRecords() { - GetRecordsResponse getRecordsResult = GetRecordsResponse.builder().records(new ArrayList<>(records)) - .nextShardIterator(nextShardIterator).millisBehindLatest(1000L).build(); + GetRecordsResponse getRecordsResult = GetRecordsResponse.builder() + .records(new ArrayList<>(records)) + .nextShardIterator(nextShardIterator) + .millisBehindLatest(1000L) + .build(); return new AdvancingResult(getRecordsResult); } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherTest.java 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherTest.java index 6e3a56cd..a046e6b9 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/PrefetchRecordsPublisherTest.java @@ -15,6 +15,62 @@ package software.amazon.kinesis.retrieval.polling; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import io.reactivex.rxjava3.core.Flowable; +import io.reactivex.rxjava3.schedulers.Schedulers; +import lombok.extern.slf4j.Slf4j; +import org.apache.commons.lang3.StringUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.stubbing.Answer; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import software.amazon.awssdk.core.SdkBytes; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.services.kinesis.model.ChildShard; +import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; +import 
software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; +import software.amazon.awssdk.services.kinesis.model.InvalidArgumentException; +import software.amazon.awssdk.services.kinesis.model.Record; +import software.amazon.kinesis.common.InitialPositionInStreamExtended; +import software.amazon.kinesis.common.StreamIdentifier; +import software.amazon.kinesis.leases.ShardObjectHelper; +import software.amazon.kinesis.lifecycle.ShardConsumerNotifyingSubscriber; +import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; +import software.amazon.kinesis.metrics.NullMetricsFactory; +import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; +import software.amazon.kinesis.retrieval.KinesisClientRecord; +import software.amazon.kinesis.retrieval.RecordsPublisher; +import software.amazon.kinesis.retrieval.RecordsRetrieved; +import software.amazon.kinesis.retrieval.RetryableRetrievalException; +import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; +import software.amazon.kinesis.utils.BlockingUtils; + import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; @@ -41,63 +97,6 @@ import static org.mockito.Mockito.when; import static software.amazon.kinesis.utils.BlockingUtils.blockUntilConditionSatisfied; import static software.amazon.kinesis.utils.ProcessRecordsInputMatcher.eqProcessRecordsInput; -import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicInteger; -import 
java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.Stream; - -import org.apache.commons.lang3.StringUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.runners.MockitoJUnitRunner; -import org.mockito.stubbing.Answer; -import org.reactivestreams.Subscriber; -import org.reactivestreams.Subscription; - -import io.reactivex.rxjava3.core.Flowable; -import io.reactivex.rxjava3.schedulers.Schedulers; -import lombok.extern.slf4j.Slf4j; -import software.amazon.awssdk.core.SdkBytes; -import software.amazon.awssdk.core.exception.SdkException; -import software.amazon.awssdk.services.kinesis.model.ChildShard; -import software.amazon.awssdk.services.kinesis.model.ExpiredIteratorException; -import software.amazon.awssdk.services.kinesis.model.GetRecordsResponse; -import software.amazon.awssdk.services.kinesis.model.InvalidArgumentException; -import software.amazon.awssdk.services.kinesis.model.Record; -import software.amazon.kinesis.common.InitialPositionInStreamExtended; -import software.amazon.kinesis.leases.ShardObjectHelper; -import software.amazon.kinesis.common.StreamIdentifier; -import software.amazon.kinesis.lifecycle.ShardConsumerNotifyingSubscriber; -import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import software.amazon.kinesis.metrics.NullMetricsFactory; -import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; -import software.amazon.kinesis.retrieval.KinesisClientRecord; -import software.amazon.kinesis.retrieval.RecordsPublisher; -import software.amazon.kinesis.retrieval.RecordsRetrieved; -import software.amazon.kinesis.retrieval.RetryableRetrievalException; -import software.amazon.kinesis.retrieval.kpl.ExtendedSequenceNumber; -import 
software.amazon.kinesis.utils.BlockingUtils; - /** * Test class for the PrefetchRecordsPublisher class. */ @@ -115,10 +114,13 @@ public class PrefetchRecordsPublisherTest { @Mock private GetRecordsRetrievalStrategy getRecordsRetrievalStrategy; + @Mock private DataFetcher dataFetcher; + @Mock private InitialPositionInStreamExtended initialPosition; + @Mock private ExtendedSequenceNumber sequenceNumber; @@ -137,8 +139,11 @@ public class PrefetchRecordsPublisherTest { getRecordsCache = createPrefetchRecordsPublisher(0L); spyQueue = spy(getRecordsCache.getPublisherSession().prefetchRecordsQueue()); records = spy(new ArrayList<>()); - getRecordsResponse = GetRecordsResponse.builder().records(records).nextShardIterator(NEXT_SHARD_ITERATOR) - .childShards(Collections.emptyList()).build(); + getRecordsResponse = GetRecordsResponse.builder() + .records(records) + .nextShardIterator(NEXT_SHARD_ITERATOR) + .childShards(Collections.emptyList()) + .build(); when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))).thenReturn(getRecordsResponse); } @@ -153,19 +158,40 @@ public class PrefetchRecordsPublisherTest { @Test public void testPrefetchPublisherInternalStateNotModifiedWhenPrefetcherThreadStartFails() { - doThrow(new RejectedExecutionException()).doThrow(new RejectedExecutionException()).doCallRealMethod() - .when(executorService).execute(any()); + doThrow(new RejectedExecutionException()) + .doThrow(new RejectedExecutionException()) + .doCallRealMethod() + .when(executorService) + .execute(any()); // Initialize try 1 tryPrefetchCacheStart(); - blockUntilConditionSatisfied(() -> getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == MAX_SIZE, 300); + blockUntilConditionSatisfied( + () -> getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 300); verifyInternalState(0); // Initialize try 2 tryPrefetchCacheStart(); - blockUntilConditionSatisfied(() -> 
getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == MAX_SIZE, 300); + blockUntilConditionSatisfied( + () -> getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 300); verifyInternalState(0); // Initialize try 3 tryPrefetchCacheStart(); - blockUntilConditionSatisfied(() -> getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == MAX_SIZE, 300); + blockUntilConditionSatisfied( + () -> getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 300); verifyInternalState(MAX_SIZE); verify(dataFetcher, times(3)).initialize(any(ExtendedSequenceNumber.class), any()); } @@ -179,7 +205,9 @@ public class PrefetchRecordsPublisherTest { } private void verifyInternalState(int queueSize) { - assertEquals(queueSize, getRecordsCache.getPublisherSession().prefetchRecordsQueue().size()); + assertEquals( + queueSize, + getRecordsCache.getPublisherSession().prefetchRecordsQueue().size()); } @Test @@ -188,8 +216,8 @@ public class PrefetchRecordsPublisherTest { when(records.size()).thenReturn(1000); - final List expectedRecords = records.stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); + final List expectedRecords = + records.stream().map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); getRecordsCache.start(sequenceNumber, initialPosition); ProcessRecordsInput result = blockUntilRecordsAvailable().processRecordsInput(); @@ -205,9 +233,10 @@ public class PrefetchRecordsPublisherTest { public void testGetRecordsWithInitialFailures_LessThanRequiredWait_Throws() { getRecordsCache = createPrefetchRecordsPublisher(Duration.ofSeconds(1).toMillis()); // Setup the retrieval strategy to fail initial calls before succeeding - when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))).thenThrow(new - RetryableRetrievalException("Timed out")).thenThrow(new - RetryableRetrievalException("Timed out again")).thenReturn(getRecordsResponse); 
+ when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))) + .thenThrow(new RetryableRetrievalException("Timed out")) + .thenThrow(new RetryableRetrievalException("Timed out again")) + .thenReturn(getRecordsResponse); record = Record.builder().data(createByteBufferWithSize(SIZE_512_KB)).build(); when(records.size()).thenReturn(1000); @@ -222,15 +251,16 @@ public class PrefetchRecordsPublisherTest { public void testGetRecordsWithInitialFailures_AdequateWait_Success() { getRecordsCache = createPrefetchRecordsPublisher(Duration.ofSeconds(1).toMillis()); // Setup the retrieval strategy to fail initial calls before succeeding - when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))).thenThrow(new - RetryableRetrievalException("Timed out")).thenThrow(new - RetryableRetrievalException("Timed out again")).thenReturn(getRecordsResponse); + when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))) + .thenThrow(new RetryableRetrievalException("Timed out")) + .thenThrow(new RetryableRetrievalException("Timed out again")) + .thenReturn(getRecordsResponse); record = Record.builder().data(createByteBufferWithSize(SIZE_512_KB)).build(); when(records.size()).thenReturn(1000); - final List expectedRecords = records.stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); + final List expectedRecords = + records.stream().map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); getRecordsCache.start(sequenceNumber, initialPosition); ProcessRecordsInput result = null; @@ -253,7 +283,8 @@ public class PrefetchRecordsPublisherTest { when(records.size()).thenReturn(1000); - GetRecordsResponse response = GetRecordsResponse.builder().records(records).build(); + GetRecordsResponse response = + GetRecordsResponse.builder().records(records).build(); when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))).thenReturn(response); when(dataFetcher.isShardEndReached()).thenReturn(false); @@ -288,7 +319,10 @@ 
public class PrefetchRecordsPublisherTest { childShards.add(leftChild); childShards.add(rightChild); - GetRecordsResponse response = GetRecordsResponse.builder().records(records).childShards(childShards).build(); + GetRecordsResponse response = GetRecordsResponse.builder() + .records(records) + .childShards(childShards) + .build(); when(getRecordsRetrievalStrategy.getRecords(eq(MAX_RECORDS_PER_CALL))).thenReturn(response); when(dataFetcher.isShardEndReached()).thenReturn(true); @@ -328,7 +362,7 @@ public class PrefetchRecordsPublisherTest { sleep(2000); - int callRate = (int) Math.ceil((double) MAX_RECORDS_COUNT/recordsSize); + int callRate = (int) Math.ceil((double) MAX_RECORDS_COUNT / recordsSize); // TODO: fix this verification // verify(getRecordsRetrievalStrategy, times(callRate)).getRecords(MAX_RECORDS_PER_CALL); // assertEquals(spyQueue.size(), callRate); @@ -357,8 +391,8 @@ public class PrefetchRecordsPublisherTest { record = Record.builder().data(createByteBufferWithSize(1024)).build(); IntStream.range(0, recordsSize).forEach(i -> records.add(record)); - final List expectedRecords = records.stream() - .map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); + final List expectedRecords = + records.stream().map(KinesisClientRecord::fromRecord).collect(Collectors.toList()); getRecordsCache.start(sequenceNumber, initialPosition); ProcessRecordsInput processRecordsInput = evictPublishedEvent().processRecordsInput(); @@ -387,9 +421,13 @@ public class PrefetchRecordsPublisherTest { @Test(expected = IllegalStateException.class) public void testRequestRecordsOnSubscriptionAfterShutdown() { - GetRecordsResponse response = GetRecordsResponse.builder().records( - Record.builder().data(SdkBytes.fromByteArray(new byte[] { 1, 2, 3 })).sequenceNumber("123").build()) - .nextShardIterator(NEXT_SHARD_ITERATOR).build(); + GetRecordsResponse response = GetRecordsResponse.builder() + .records(Record.builder() + .data(SdkBytes.fromByteArray(new byte[] {1, 2, 3})) + 
.sequenceNumber("123") + .build()) + .nextShardIterator(NEXT_SHARD_ITERATOR) + .build(); when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenReturn(response); getRecordsCache.start(sequenceNumber, initialPosition); @@ -406,7 +444,8 @@ public class PrefetchRecordsPublisherTest { @Test public void testExpiredIteratorException() { - when(getRecordsRetrievalStrategy.getRecords(MAX_RECORDS_PER_CALL)).thenThrow(ExpiredIteratorException.class) + when(getRecordsRetrievalStrategy.getRecords(MAX_RECORDS_PER_CALL)) + .thenThrow(ExpiredIteratorException.class) .thenReturn(getRecordsResponse); getRecordsCache.start(sequenceNumber, initialPosition); @@ -433,7 +472,13 @@ public class PrefetchRecordsPublisherTest { doThrow(new IllegalStateException()).when(dataFetcher).restartIterator(); getRecordsCache.start(sequenceNumber, initialPosition); - blockUntilConditionSatisfied(() -> getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == MAX_SIZE, 300); + blockUntilConditionSatisfied( + () -> getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 300); // verify restartIterator was called verify(dataFetcher, times(2)).restartIterator(); @@ -441,10 +486,14 @@ public class PrefetchRecordsPublisherTest { @Test public void testRetryableRetrievalExceptionContinues() { - GetRecordsResponse response = GetRecordsResponse.builder().millisBehindLatest(100L) - .records(Collections.emptyList()).nextShardIterator(NEXT_SHARD_ITERATOR).build(); + GetRecordsResponse response = GetRecordsResponse.builder() + .millisBehindLatest(100L) + .records(Collections.emptyList()) + .nextShardIterator(NEXT_SHARD_ITERATOR) + .build(); when(getRecordsRetrievalStrategy.getRecords(anyInt())) - .thenThrow(new RetryableRetrievalException("Timeout", new TimeoutException("Timeout"))).thenReturn(response); + .thenThrow(new RetryableRetrievalException("Timeout", new TimeoutException("Timeout"))) + .thenReturn(response); getRecordsCache.start(sequenceNumber, 
initialPosition); @@ -459,7 +508,13 @@ public class PrefetchRecordsPublisherTest { .thenReturn(getRecordsResponse); getRecordsCache.start(sequenceNumber, initialPosition); - blockUntilConditionSatisfied(() -> getRecordsCache.getPublisherSession().prefetchRecordsQueue().size() == MAX_SIZE, 300); + blockUntilConditionSatisfied( + () -> getRecordsCache + .getPublisherSession() + .prefetchRecordsQueue() + .size() + == MAX_SIZE, + 300); verify(dataFetcher, times(1)).restartIterator(); } @@ -473,11 +528,15 @@ public class PrefetchRecordsPublisherTest { // If the test times out before starting the subscriber it means something went wrong while filling the queue. // After the subscriber is started one of the things that can trigger a timeout is a deadlock. // - final int[] sequenceNumberInResponse = { 0 }; + final int[] sequenceNumberInResponse = {0}; - when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenAnswer( i -> GetRecordsResponse.builder().records( - Record.builder().data(SdkBytes.fromByteArray(new byte[] { 1, 2, 3 })).sequenceNumber(++sequenceNumberInResponse[0] + "").build()) - .nextShardIterator(NEXT_SHARD_ITERATOR).build()); + when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenAnswer(i -> GetRecordsResponse.builder() + .records(Record.builder() + .data(SdkBytes.fromByteArray(new byte[] {1, 2, 3})) + .sequenceNumber(++sequenceNumberInResponse[0] + "") + .build()) + .nextShardIterator(NEXT_SHARD_ITERATOR) + .build()); getRecordsCache.start(sequenceNumber, initialPosition); @@ -489,15 +548,17 @@ public class PrefetchRecordsPublisherTest { Thread.yield(); } - log.info("Queue is currently at {} starting subscriber", getRecordsCache.getPublisherSession().prefetchRecordsQueue().size()); + log.info( + "Queue is currently at {} starting subscriber", + getRecordsCache.getPublisherSession().prefetchRecordsQueue().size()); AtomicInteger receivedItems = new AtomicInteger(0); final int expectedItems = MAX_SIZE * 10; Object lock = new Object(); - final boolean[] 
isRecordNotInorder = { false }; - final String[] recordNotInOrderMessage = { "" }; + final boolean[] isRecordNotInorder = {false}; + final String[] recordNotInOrderMessage = {""}; Subscriber delegateSubscriber = new Subscriber() { Subscription sub; @@ -513,11 +574,12 @@ public class PrefetchRecordsPublisherTest { public void onNext(RecordsRetrieved recordsRetrieved) { receivedItems.incrementAndGet(); if (Integer.parseInt(((PrefetchRecordsPublisher.PrefetchRecordsRetrieved) recordsRetrieved) - .lastBatchSequenceNumber()) != ++receivedSeqNum) { + .lastBatchSequenceNumber()) + != ++receivedSeqNum) { isRecordNotInorder[0] = true; recordNotInOrderMessage[0] = "Expected : " + receivedSeqNum + " Actual : " + ((PrefetchRecordsPublisher.PrefetchRecordsRetrieved) recordsRetrieved) - .lastBatchSequenceNumber(); + .lastBatchSequenceNumber(); } if (receivedItems.get() >= expectedItems) { synchronized (lock) { @@ -542,12 +604,15 @@ public class PrefetchRecordsPublisherTest { } }; - Subscriber subscriber = new ShardConsumerNotifyingSubscriber(delegateSubscriber, getRecordsCache); + Subscriber subscriber = + new ShardConsumerNotifyingSubscriber(delegateSubscriber, getRecordsCache); synchronized (lock) { log.info("Awaiting notification"); - Flowable.fromPublisher(getRecordsCache).subscribeOn(Schedulers.computation()) - .observeOn(Schedulers.computation(), true, 8).subscribe(subscriber); + Flowable.fromPublisher(getRecordsCache) + .subscribeOn(Schedulers.computation()) + .observeOn(Schedulers.computation(), true, 8) + .subscribe(subscriber); try { lock.wait(); } catch (InterruptedException e) { @@ -566,9 +631,13 @@ public class PrefetchRecordsPublisherTest { // // This test is to verify that the data consumption is not stuck in the case of an failed event delivery // to the subscriber. 
- GetRecordsResponse response = GetRecordsResponse.builder().records( - Record.builder().data(SdkBytes.fromByteArray(new byte[] { 1, 2, 3 })).sequenceNumber("123").build()) - .nextShardIterator(NEXT_SHARD_ITERATOR).build(); + GetRecordsResponse response = GetRecordsResponse.builder() + .records(Record.builder() + .data(SdkBytes.fromByteArray(new byte[] {1, 2, 3})) + .sequenceNumber("123") + .build()) + .nextShardIterator(NEXT_SHARD_ITERATOR) + .build(); when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenReturn(response); getRecordsCache.start(sequenceNumber, initialPosition); @@ -581,7 +650,9 @@ public class PrefetchRecordsPublisherTest { Thread.yield(); } - log.info("Queue is currently at {} starting subscriber", getRecordsCache.getPublisherSession().prefetchRecordsQueue().size()); + log.info( + "Queue is currently at {} starting subscriber", + getRecordsCache.getPublisherSession().prefetchRecordsQueue().size()); AtomicInteger receivedItems = new AtomicInteger(0); final int expectedItems = MAX_SIZE * 20; @@ -627,8 +698,10 @@ public class PrefetchRecordsPublisherTest { synchronized (lock) { log.info("Awaiting notification"); - Flowable.fromPublisher(getRecordsCache).subscribeOn(Schedulers.computation()) - .observeOn(Schedulers.computation(), true, 8).subscribe(subscriber); + Flowable.fromPublisher(getRecordsCache) + .subscribeOn(Schedulers.computation()) + .observeOn(Schedulers.computation(), true, 8) + .subscribe(subscriber); try { lock.wait(); } catch (InterruptedException e) { @@ -641,22 +714,33 @@ public class PrefetchRecordsPublisherTest { @Test public void testResetClearsRemainingData() { - List responses = Stream.iterate(0, i -> i + 1).limit(10).map(i -> { - Record record = Record.builder().partitionKey("record-" + i).sequenceNumber("seq-" + i) - .data(SdkBytes.fromByteArray(new byte[] { 1, 2, 3 })).approximateArrivalTimestamp(Instant.now()) - .build(); - String nextIterator = "shard-iter-" + (i + 1); - return 
GetRecordsResponse.builder().records(record).nextShardIterator(nextIterator).build(); - }).collect(Collectors.toList()); + List responses = Stream.iterate(0, i -> i + 1) + .limit(10) + .map(i -> { + Record record = Record.builder() + .partitionKey("record-" + i) + .sequenceNumber("seq-" + i) + .data(SdkBytes.fromByteArray(new byte[] {1, 2, 3})) + .approximateArrivalTimestamp(Instant.now()) + .build(); + String nextIterator = "shard-iter-" + (i + 1); + return GetRecordsResponse.builder() + .records(record) + .nextShardIterator(nextIterator) + .build(); + }) + .collect(Collectors.toList()); RetrieverAnswer retrieverAnswer = new RetrieverAnswer(responses); when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenAnswer(retrieverAnswer); doAnswer(a -> { - String resetTo = a.getArgumentAt(0, String.class); - retrieverAnswer.resetIteratorTo(resetTo); - return null; - }).when(dataFetcher).resetIterator(anyString(), anyString(), any()); + String resetTo = a.getArgumentAt(0, String.class); + retrieverAnswer.resetIteratorTo(resetTo); + return null; + }) + .when(dataFetcher) + .resetIterator(anyString(), anyString(), any()); getRecordsCache.start(sequenceNumber, initialPosition); @@ -679,8 +763,11 @@ public class PrefetchRecordsPublisherTest { RecordsRetrieved postRestart = blockUntilRecordsAvailable(); assertThat(postRestart.processRecordsInput(), eqProcessRecordsInput(expected.processRecordsInput())); - verify(dataFetcher).resetIterator(eq(responses.get(0).nextShardIterator()), - eq(responses.get(0).records().get(0).sequenceNumber()), any()); + verify(dataFetcher) + .resetIterator( + eq(responses.get(0).nextShardIterator()), + eq(responses.get(0).records().get(0).sequenceNumber()), + any()); } /** @@ -694,7 +781,8 @@ public class PrefetchRecordsPublisherTest { try { // return a valid response to cause `lastSuccessfulCall` to initialize - when(getRecordsRetrievalStrategy.getRecords(anyInt())).thenReturn(GetRecordsResponse.builder().build()); + 
when(getRecordsRetrievalStrategy.getRecords(anyInt())) + .thenReturn(GetRecordsResponse.builder().build()); blockUntilRecordsAvailable(); } catch (RuntimeException re) { Assert.fail("first call should succeed"); @@ -702,7 +790,9 @@ public class PrefetchRecordsPublisherTest { try { when(getRecordsRetrievalStrategy.getRecords(anyInt())) - .thenThrow(SdkException.builder().message("lose yourself to dance").build()); + .thenThrow(SdkException.builder() + .message("lose yourself to dance") + .build()); blockUntilRecordsAvailable(); } finally { // the successful call is the +1 @@ -771,15 +861,21 @@ public class PrefetchRecordsPublisherTest { getDelegateSubscriber().onNext(recordsRetrieved); } else { log.info("Record Loss Triggered"); - CONSUMER_HEALTH_CHECKER.schedule(() -> { - getRecordsPublisher().restartFrom(recordsRetrieved); - Flowable.fromPublisher(getRecordsPublisher()).subscribeOn(Schedulers.computation()) - .observeOn(Schedulers.computation(), true, 8).subscribe(this); - }, 1000, TimeUnit.MILLISECONDS); + CONSUMER_HEALTH_CHECKER.schedule( + () -> { + getRecordsPublisher().restartFrom(recordsRetrieved); + Flowable.fromPublisher(getRecordsPublisher()) + .subscribeOn(Schedulers.computation()) + .observeOn(Schedulers.computation(), true, 8) + .subscribe(this); + }, + 1000, + TimeUnit.MILLISECONDS); } recordCounter++; } } + @After public void shutdown() { getRecordsCache.shutdown(); @@ -789,7 +885,8 @@ public class PrefetchRecordsPublisherTest { private void sleep(long millis) { try { Thread.sleep(millis); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { + } } private SdkBytes createByteBufferWithSize(int size) { @@ -810,5 +907,4 @@ public class PrefetchRecordsPublisherTest { "shardId", 1L); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/RecordsFetcherFactoryTest.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/RecordsFetcherFactoryTest.java index 
ddc25e21..ad4a6046 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/RecordsFetcherFactoryTest.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/retrieval/polling/RecordsFetcherFactoryTest.java @@ -14,16 +14,11 @@ */ package software.amazon.kinesis.retrieval.polling; -import static org.hamcrest.CoreMatchers.instanceOf; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.mockito.Mockito.when; - import org.junit.Before; import org.junit.Ignore; import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - import software.amazon.kinesis.common.StreamIdentifier; import software.amazon.kinesis.metrics.MetricsFactory; import software.amazon.kinesis.retrieval.DataFetchingStrategy; @@ -31,14 +26,20 @@ import software.amazon.kinesis.retrieval.GetRecordsRetrievalStrategy; import software.amazon.kinesis.retrieval.RecordsFetcherFactory; import software.amazon.kinesis.retrieval.RecordsPublisher; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.Mockito.when; + public class RecordsFetcherFactoryTest { private String shardId = "TestShard"; private RecordsFetcherFactory recordsFetcherFactory; @Mock private GetRecordsRetrievalStrategy getRecordsRetrievalStrategy; + @Mock private MetricsFactory metricsFactory; + @Mock private DataFetcher dataFetcher; @@ -52,18 +53,18 @@ public class RecordsFetcherFactoryTest { @Test @Ignore -// TODO: remove test no longer holds true + // TODO: remove test no longer holds true public void createDefaultRecordsFetcherTest() { - RecordsPublisher recordsCache = recordsFetcherFactory.createRecordsFetcher(getRecordsRetrievalStrategy, shardId, - metricsFactory, 1); + RecordsPublisher recordsCache = + recordsFetcherFactory.createRecordsFetcher(getRecordsRetrievalStrategy, shardId, metricsFactory, 1); assertThat(recordsCache, 
instanceOf(BlockingRecordsPublisher.class)); } @Test public void createPrefetchRecordsFetcherTest() { recordsFetcherFactory.dataFetchingStrategy(DataFetchingStrategy.PREFETCH_CACHED); - RecordsPublisher recordsCache = recordsFetcherFactory.createRecordsFetcher(getRecordsRetrievalStrategy, shardId, - metricsFactory, 1); + RecordsPublisher recordsCache = + recordsFetcherFactory.createRecordsFetcher(getRecordsRetrievalStrategy, shardId, metricsFactory, 1); assertThat(recordsCache, instanceOf(PrefetchRecordsPublisher.class)); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/AWSResourceManager.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/AWSResourceManager.java index 232a6859..f6be4692 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/AWSResourceManager.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/AWSResourceManager.java @@ -1,13 +1,12 @@ package software.amazon.kinesis.utils; -import software.amazon.kinesis.config.KCLAppConfig; - -import lombok.NoArgsConstructor; -import lombok.extern.slf4j.Slf4j; - import java.util.List; import java.util.concurrent.TimeUnit; +import lombok.NoArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import software.amazon.kinesis.config.KCLAppConfig; + @Slf4j @NoArgsConstructor public abstract class AWSResourceManager { @@ -55,7 +54,8 @@ public abstract class AWSResourceManager { } catch (Exception e) { try { Thread.sleep(TimeUnit.SECONDS.toMillis(10)); - } catch (InterruptedException e1) {} + } catch (InterruptedException e1) { + } log.info("Resource {} is not deleted yet, exception: ", resourceName); } } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/BlockingUtils.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/BlockingUtils.java index cd7ad8a6..a8c3b268 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/BlockingUtils.java +++ 
b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/BlockingUtils.java @@ -21,7 +21,7 @@ public class BlockingUtils { public static Records blockUntilRecordsAvailable(Supplier recordsSupplier, long timeoutMillis) { Records recordsRetrieved; - while ((recordsRetrieved = recordsSupplier.get()) == null && timeoutMillis > 0 ) { + while ((recordsRetrieved = recordsSupplier.get()) == null && timeoutMillis > 0) { try { Thread.sleep(100); } catch (InterruptedException e) { @@ -37,7 +37,7 @@ public class BlockingUtils { } public static boolean blockUntilConditionSatisfied(Supplier conditionSupplier, long timeoutMillis) { - while (!conditionSupplier.get() && timeoutMillis > 0 ) { + while (!conditionSupplier.get() && timeoutMillis > 0) { try { Thread.sleep(100); } catch (InterruptedException e) { @@ -47,5 +47,4 @@ public class BlockingUtils { } return conditionSupplier.get(); } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/LeaseTableManager.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/LeaseTableManager.java index 40d711bd..4f0672d6 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/LeaseTableManager.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/LeaseTableManager.java @@ -1,5 +1,12 @@ package software.amazon.kinesis.utils; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient; @@ -12,13 +19,6 @@ import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException; import software.amazon.awssdk.services.dynamodb.model.TableStatus; import software.amazon.kinesis.common.FutureUtils; -import java.time.Duration; -import 
java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; - @Slf4j @AllArgsConstructor public class LeaseTableManager extends AWSResourceManager { @@ -26,14 +26,17 @@ public class LeaseTableManager extends AWSResourceManager { private final DynamoDbAsyncClient dynamoClient; public boolean isResourceActive(String tableName) { - final DescribeTableRequest request = DescribeTableRequest.builder().tableName(tableName).build(); - final CompletableFuture describeTableResponseCompletableFuture = dynamoClient.describeTable(request); + final DescribeTableRequest request = + DescribeTableRequest.builder().tableName(tableName).build(); + final CompletableFuture describeTableResponseCompletableFuture = + dynamoClient.describeTable(request); try { final DescribeTableResponse response = describeTableResponseCompletableFuture.get(30, TimeUnit.SECONDS); boolean isActive = response.table().tableStatus().equals(TableStatus.ACTIVE); if (!isActive) { - throw new RuntimeException("Table is not active, instead in status: " + response.table().tableStatus()); + throw new RuntimeException("Table is not active, instead in status: " + + response.table().tableStatus()); } return true; } catch (ExecutionException e) { @@ -48,7 +51,8 @@ public class LeaseTableManager extends AWSResourceManager { } public void deleteResourceCall(String tableName) throws Exception { - final DeleteTableRequest request = DeleteTableRequest.builder().tableName(tableName).build(); + final DeleteTableRequest request = + DeleteTableRequest.builder().tableName(tableName).build(); FutureUtils.resolveOrCancelFuture(dynamoClient.deleteTable(request), Duration.ofSeconds(60)); } @@ -57,9 +61,12 @@ public class LeaseTableManager extends AWSResourceManager { List allTableNames = new ArrayList<>(); ListTablesResponse result = null; do { - result = 
FutureUtils.resolveOrCancelFuture(dynamoClient.listTables(listTableRequest), Duration.ofSeconds(60)); + result = FutureUtils.resolveOrCancelFuture( + dynamoClient.listTables(listTableRequest), Duration.ofSeconds(60)); allTableNames.addAll(result.tableNames()); - listTableRequest = ListTablesRequest.builder().exclusiveStartTableName(result.lastEvaluatedTableName()).build(); + listTableRequest = ListTablesRequest.builder() + .exclusiveStartTableName(result.lastEvaluatedTableName()) + .build(); } while (result.lastEvaluatedTableName() != null); return allTableNames; } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/ProcessRecordsInputMatcher.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/ProcessRecordsInputMatcher.java index 1aeddc60..5c2e2f3f 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/ProcessRecordsInputMatcher.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/ProcessRecordsInputMatcher.java @@ -15,16 +15,16 @@ package software.amazon.kinesis.utils; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; + import lombok.Data; import org.hamcrest.Description; import org.hamcrest.Matcher; import org.hamcrest.TypeSafeDiagnosingMatcher; import software.amazon.kinesis.lifecycle.events.ProcessRecordsInput; -import java.util.HashMap; -import java.util.Map; -import java.util.function.Function; - import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.nullValue; @@ -38,11 +38,11 @@ public class ProcessRecordsInputMatcher extends TypeSafeDiagnosingMatcher e.getValue().matcher.matches(e.getValue().accessor.apply(item))).anyMatch(e -> { + .filter(e -> e.getValue().matcher.matches(e.getValue().accessor.apply(item))) + .anyMatch(e -> { mismatchDescription.appendText(e.getKey()).appendText(" "); e.getValue().matcher.describeMismatch(e.getValue().accessor.apply(item), mismatchDescription); return true; 
diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidatorQueue.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidatorQueue.java index c6b9d6a6..50e993c9 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidatorQueue.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/RecordValidatorQueue.java @@ -1,14 +1,14 @@ package software.amazon.kinesis.utils; -import lombok.extern.slf4j.Slf4j; - +import java.util.ArrayList; import java.util.HashSet; import java.util.List; -import java.util.ArrayList; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import lombok.extern.slf4j.Slf4j; + /** * Class that maintains a dictionary that maps shard IDs to a list of records * that are processed by that shard. @@ -35,7 +35,8 @@ public class RecordValidatorQueue { for (String record : recordsPerShard) { int nextVal = Integer.parseInt(record); if (prevVal > nextVal) { - log.error("The records are not in increasing order. Saw record data {} before {}.", prevVal, nextVal); + log.error( + "The records are not in increasing order. Saw record data {} before {}.", prevVal, nextVal); return RecordValidationStatus.OUT_OF_ORDER; } prevVal = nextVal; @@ -52,12 +53,14 @@ public class RecordValidatorQueue { // If this is true, then there was some record that was missed during processing. if (actualRecordCount != expectedRecordCount) { - log.error("Failed to get correct number of records processed. Should be {} but was {}", expectedRecordCount, actualRecordCount); + log.error( + "Failed to get correct number of records processed. Should be {} but was {}", + expectedRecordCount, + actualRecordCount); return RecordValidationStatus.MISSING_RECORD; } // Record validation succeeded. 
return RecordValidationStatus.NO_ERROR; } - } diff --git a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/StreamExistenceManager.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/StreamExistenceManager.java index e7be5141..df0bcab3 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/StreamExistenceManager.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/StreamExistenceManager.java @@ -1,9 +1,20 @@ package software.amazon.kinesis.utils; +import java.io.IOException; +import java.net.URISyntaxException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + import lombok.Value; import lombok.extern.slf4j.Slf4j; import software.amazon.awssdk.arns.Arn; import software.amazon.awssdk.services.kinesis.KinesisAsyncClient; +import software.amazon.awssdk.services.kinesis.model.ConsumerStatus; import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest; import software.amazon.awssdk.services.kinesis.model.DeleteStreamRequest; import software.amazon.awssdk.services.kinesis.model.DescribeStreamConsumerRequest; @@ -17,21 +28,10 @@ import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerReque import software.amazon.awssdk.services.kinesis.model.RegisterStreamConsumerResponse; import software.amazon.awssdk.services.kinesis.model.ResourceNotFoundException; import software.amazon.awssdk.services.kinesis.model.StreamStatus; -import software.amazon.awssdk.services.kinesis.model.ConsumerStatus; import software.amazon.kinesis.common.FutureUtils; import software.amazon.kinesis.config.KCLAppConfig; import software.amazon.kinesis.config.RetrievalMode; -import java.io.IOException; -import java.net.URISyntaxException; -import java.time.Duration; -import java.util.ArrayList; -import java.util.HashMap; 
-import java.util.List; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; - @Value @Slf4j public class StreamExistenceManager extends AWSResourceManager { @@ -46,11 +46,13 @@ public class StreamExistenceManager extends AWSResourceManager { } public boolean isResourceActive(String streamName) { - final DescribeStreamSummaryRequest request = DescribeStreamSummaryRequest.builder().streamName(streamName).build(); + final DescribeStreamSummaryRequest request = + DescribeStreamSummaryRequest.builder().streamName(streamName).build(); try { final DescribeStreamSummaryResponse response = FutureUtils.resolveOrCancelFuture(client.describeStreamSummary(request), Duration.ofSeconds(60)); - final boolean isActive = response.streamDescriptionSummary().streamStatus().equals(StreamStatus.ACTIVE); + final boolean isActive = + response.streamDescriptionSummary().streamStatus().equals(StreamStatus.ACTIVE); return isActive; } catch (ExecutionException e) { if (e.getCause() instanceof ResourceNotFoundException) { @@ -64,11 +66,14 @@ public class StreamExistenceManager extends AWSResourceManager { } private boolean isConsumerActive(Arn consumerArn) { - final DescribeStreamConsumerRequest request = DescribeStreamConsumerRequest.builder().consumerARN(consumerArn.toString()).build(); + final DescribeStreamConsumerRequest request = DescribeStreamConsumerRequest.builder() + .consumerARN(consumerArn.toString()) + .build(); try { final DescribeStreamConsumerResponse response = FutureUtils.resolveOrCancelFuture(client.describeStreamConsumer(request), Duration.ofSeconds(60)); - final boolean isActive = response.consumerDescription().consumerStatus().equals(ConsumerStatus.ACTIVE); + final boolean isActive = + response.consumerDescription().consumerStatus().equals(ConsumerStatus.ACTIVE); return isActive; } catch (ExecutionException e) { if (e.getCause() instanceof ResourceNotFoundException) { @@ -82,7 +87,10 @@ public class 
StreamExistenceManager extends AWSResourceManager { } public void deleteResourceCall(String streamName) throws Exception { - final DeleteStreamRequest request = DeleteStreamRequest.builder().streamName(streamName).enforceConsumerDeletion(true).build(); + final DeleteStreamRequest request = DeleteStreamRequest.builder() + .streamName(streamName) + .enforceConsumerDeletion(true) + .build(); client.deleteStream(request).get(30, TimeUnit.SECONDS); } @@ -93,7 +101,9 @@ public class StreamExistenceManager extends AWSResourceManager { do { result = FutureUtils.resolveOrCancelFuture(client.listStreams(listStreamRequest), Duration.ofSeconds(60)); allStreamNames.addAll(result.streamNames()); - listStreamRequest = ListStreamsRequest.builder().exclusiveStartStreamName(result.nextToken()).build(); + listStreamRequest = ListStreamsRequest.builder() + .exclusiveStartStreamName(result.nextToken()) + .build(); } while (result.hasMoreStreams()); return allStreamNames; } @@ -109,7 +119,8 @@ public class StreamExistenceManager extends AWSResourceManager { if (testConfig.isCrossAccount()) { for (Arn streamArn : testConfig.getStreamArns()) { log.info("Putting cross account stream resource policy for stream {}", streamArn); - putResourcePolicyForCrossAccount(streamArn, + putResourcePolicyForCrossAccount( + streamArn, getCrossAccountStreamResourcePolicy(testConfig.getAccountIdForConsumer(), streamArn)); } } @@ -122,9 +133,10 @@ public class StreamExistenceManager extends AWSResourceManager { if (testConfig.isCrossAccount() && testConfig.getRetrievalMode().equals(RetrievalMode.STREAMING)) { final Map streamToConsumerArnsMap = new HashMap<>(); for (Arn streamArn : testConfig.getStreamArns()) { - final Arn consumerArn = registerConsumerAndWaitForActive(streamArn, - KCLAppConfig.CROSS_ACCOUNT_CONSUMER_NAME); - putResourcePolicyForCrossAccount(consumerArn, + final Arn consumerArn = + registerConsumerAndWaitForActive(streamArn, KCLAppConfig.CROSS_ACCOUNT_CONSUMER_NAME); + 
putResourcePolicyForCrossAccount( + consumerArn, getCrossAccountConsumerResourcePolicy(testConfig.getAccountIdForConsumer(), consumerArn)); streamToConsumerArnsMap.put(streamArn, consumerArn); } @@ -139,9 +151,10 @@ public class StreamExistenceManager extends AWSResourceManager { .resourceARN(resourceArn.toString()) .policy(policy) .build(); - FutureUtils.resolveOrCancelFuture(client.putResourcePolicy(putResourcePolicyRequest), Duration.ofSeconds(60)); + FutureUtils.resolveOrCancelFuture( + client.putResourcePolicy(putResourcePolicyRequest), Duration.ofSeconds(60)); } catch (Exception e) { - throw new RuntimeException("Failed to PutResourcePolicy " + policy + " on resource " + resourceArn, e); + throw new RuntimeException("Failed to PutResourcePolicy " + policy + " on resource " + resourceArn, e); } } @@ -173,9 +186,8 @@ public class StreamExistenceManager extends AWSResourceManager { .streamARN(streamArn.toString()) .consumerName(consumerName) .build(); - final RegisterStreamConsumerResponse response = - FutureUtils.resolveOrCancelFuture(client.registerStreamConsumer(registerStreamConsumerRequest), - Duration.ofSeconds(60)); + final RegisterStreamConsumerResponse response = FutureUtils.resolveOrCancelFuture( + client.registerStreamConsumer(registerStreamConsumerRequest), Duration.ofSeconds(60)); final Arn consumerArn = Arn.fromString(response.consumer().consumerARN()); int retries = 0; @@ -196,7 +208,10 @@ public class StreamExistenceManager extends AWSResourceManager { } private void createStream(String streamName, int shardCount) { - final CreateStreamRequest request = CreateStreamRequest.builder().streamName(streamName).shardCount(shardCount).build(); + final CreateStreamRequest request = CreateStreamRequest.builder() + .streamName(streamName) + .shardCount(shardCount) + .build(); try { client.createStream(request).get(30, TimeUnit.SECONDS); } catch (Exception e) { diff --git 
a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/SubscribeToShardRequestMatcher.java b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/SubscribeToShardRequestMatcher.java index 43c887a3..52c50e05 100644 --- a/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/SubscribeToShardRequestMatcher.java +++ b/amazon-kinesis-client/src/test/java/software/amazon/kinesis/utils/SubscribeToShardRequestMatcher.java @@ -6,6 +6,7 @@ import software.amazon.awssdk.services.kinesis.model.SubscribeToShardRequest; public class SubscribeToShardRequestMatcher extends ArgumentMatcher { private SubscribeToShardRequest left; + public SubscribeToShardRequestMatcher(SubscribeToShardRequest left) { super(); this.left = left; @@ -13,8 +14,8 @@ public class SubscribeToShardRequestMatcher extends ArgumentMatcher testStreamlets = new ArrayList<>(); - + /** * Constructor. */ @@ -61,5 +61,4 @@ public class TestStreamletFactory implements ShardRecordProcessorFactory { public List getTestStreamlets() { return testStreamlets; } - }