Replacing commons-logging with slf4j. Updating lombok to 1.16.20.

This commit is contained in:
Sahil Palvia 2018-03-12 15:11:00 -07:00
parent a53473d536
commit d3abb1ab20
55 changed files with 615 additions and 788 deletions

10
pom.xml
View file

@ -6,7 +6,7 @@
<artifactId>amazon-kinesis-client</artifactId>
<packaging>jar</packaging>
<name>Amazon Kinesis Client Library for Java</name>
<version>1.9.1-SNAPSHOT</version>
<version>2.0.0-SNAPSHOT</version>
<description>The Amazon Kinesis Client Library for Java enables Java developers to easily consume and process data
from Amazon Kinesis.
</description>
@ -63,14 +63,14 @@
<version>2.6</version>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>1.1.3</version>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.25</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<version>1.16.10</version>
<version>1.16.20</version>
<scope>provided</scope>
</dependency>

View file

@ -14,21 +14,21 @@
*/
package com.amazonaws.services.kinesis.clientlibrary.config;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.lang.reflect.Constructor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.AWSCredentialsProviderChain;
import lombok.extern.slf4j.Slf4j;
/**
* Get AWSCredentialsProvider property.
*/
@Slf4j
class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecoder<AWSCredentialsProvider> {
private static final Log LOG = LogFactory.getLog(AWSCredentialsProviderPropertyValueDecoder.class);
private static final String AUTH_PREFIX = "com.amazonaws.auth.";
private static final String LIST_DELIMITER = ",";
private static final String ARG_DELIMITER = "|";
@ -82,14 +82,14 @@ class AWSCredentialsProviderPropertyValueDecoder implements IPropertyValueDecode
credentialsProviders.add((AWSCredentialsProvider) c.newInstance(
Arrays.copyOfRange(nameAndArgs, 1, nameAndArgs.length)));
} catch (Exception e) {
LOG.debug("Can't find any credentials provider matching " + providerName + ".");
log.debug("Can't find any credentials provider matching {}.", providerName);
}
} else {
try {
Class<?> className = Class.forName(providerName);
credentialsProviders.add((AWSCredentialsProvider) className.newInstance());
} catch (Exception e) {
LOG.debug("Can't find any credentials provider matching " + providerName + ".");
log.debug("Can't find any credentials provider matching {}.", providerName);
}
}
}

View file

@ -28,12 +28,11 @@ import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import lombok.extern.slf4j.Slf4j;
/**
* KinesisClientLibConfigurator constructs a KinesisClientLibConfiguration from java properties file. The following
* three properties must be provided. 1) "applicationName" 2) "streamName" 3) "AWSCredentialsProvider"
@ -42,9 +41,8 @@ import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibC
* KinesisClientLibConfiguration and has a corresponding "with{variableName}" setter method, will be read in, and its
* value in properties file will be assigned to corresponding variable in KinesisClientLibConfiguration.
*/
@Slf4j
public class KinesisClientLibConfigurator {
private static final Log LOG = LogFactory.getLog(KinesisClientLibConfigurator.class);
private static final String PREFIX = "with";
// Required properties
@ -140,8 +138,8 @@ public class KinesisClientLibConfigurator {
String workerId = stringValueDecoder.decodeValue(properties.getProperty(PROP_WORKER_ID));
if (workerId == null || workerId.isEmpty()) {
workerId = UUID.randomUUID().toString();
LOG.info("Value of workerId is not provided in the properties. WorkerId is automatically "
+ "assigned as: " + workerId);
log.info("Value of workerId is not provided in the properties. WorkerId is automatically assigned as: {}",
workerId);
}
KinesisClientLibConfiguration config =
@ -202,38 +200,27 @@ public class KinesisClientLibConfigurator {
IPropertyValueDecoder<?> decoder = classToDecoder.get(paramType);
try {
method.invoke(config, decoder.decodeValue(propertyValue));
LOG.info(String.format("Successfully set property %s with value %s",
propertyKey,
propertyValue));
log.info("Successfully set property {} with value {}", propertyKey, propertyValue);
return;
} catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
// At this point, we really thought that we could call this method.
LOG.warn(String.format("Encountered an error while invoking method %s with value %s. "
+ "Exception was %s",
method,
propertyValue,
e));
log.warn("Encountered an error while invoking method {} with value {}. Exception was {}",
method, propertyValue, e);
} catch (UnsupportedOperationException e) {
LOG.warn(String.format("The property %s is not supported as type %s at this time.",
propertyKey,
paramType));
log.warn("The property {} is not supported as type {} at this time.", propertyKey,
paramType);
}
} else {
LOG.debug(String.format("No method for decoding parameters of type %s so method %s could not "
+ "be invoked.",
paramType,
method));
log.debug("No method for decoding parameters of type {} so method {} could not be invoked.",
paramType, method);
}
} else {
LOG.debug(String.format("Method %s doesn't look like it is appropriate for setting property %s. "
+ "Looking for something called %s.",
method,
propertyKey,
targetMethodName));
log.debug("Method {} doesn't look like it is appropriate for setting property {}. Looking for"
+ " something called {}.", method, propertyKey, targetMethodName);
}
}
} else {
LOG.debug(String.format("There was no appropriately named method for setting property %s.", propertyKey));
log.debug("There was no appropriately named method for setting property {}.", propertyKey);
}
}
}

View file

@ -35,12 +35,12 @@ import com.amazonaws.services.kinesis.model.GetRecordsResult;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import lombok.NonNull;
import lombok.extern.apachecommons.CommonsLog;
import lombok.extern.slf4j.Slf4j;
/**
*
*/
@CommonsLog
@Slf4j
public class AsynchronousGetRecordsRetrievalStrategy implements GetRecordsRetrievalStrategy {
private static final int TIME_TO_KEEP_ALIVE = 5;
private static final int CORE_THREAD_POOL_COUNT = 1;

View file

@ -14,14 +14,13 @@
*/
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.BlockedOnParentShardException;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
import lombok.extern.slf4j.Slf4j;
/**
* Task to block until processing of all data records in the parent shard(s) is completed.
* We check if we have checkpoint(s) for the parent shard(s).
@ -30,9 +29,8 @@ import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
* If we don't find a checkpoint for the parent shard(s), we assume they have been trimmed and directly
* proceed with processing data from the shard.
*/
@Slf4j
class BlockOnParentShardTask implements ITask {
private static final Log LOG = LogFactory.getLog(BlockOnParentShardTask.class);
private final ShardInfo shardInfo;
private final ILeaseManager<KinesisClientLease> leaseManager;
@ -67,31 +65,31 @@ class BlockOnParentShardTask implements ITask {
if (lease != null) {
ExtendedSequenceNumber checkpoint = lease.getCheckpoint();
if ((checkpoint == null) || (!checkpoint.equals(ExtendedSequenceNumber.SHARD_END))) {
LOG.debug("Shard " + shardId + " is not yet done. Its current checkpoint is " + checkpoint);
log.debug("Shard {} is not yet done. Its current checkpoint is {}", shardId, checkpoint);
blockedOnParentShard = true;
exception = new BlockedOnParentShardException("Parent shard not yet done");
break;
} else {
LOG.debug("Shard " + shardId + " has been completely processed.");
log.debug("Shard {} has been completely processed.", shardId);
}
} else {
LOG.info("No lease found for shard " + shardId + ". Not blocking on completion of this shard.");
log.info("No lease found for shard {}. Not blocking on completion of this shard.", shardId);
}
}
if (!blockedOnParentShard) {
LOG.info("No need to block on parents " + shardInfo.getParentShardIds() + " of shard "
+ shardInfo.getShardId());
log.info("No need to block on parents {} of shard {}", shardInfo.getParentShardIds(),
shardInfo.getShardId());
return new TaskResult(null);
}
} catch (Exception e) {
LOG.error("Caught exception when checking for parent shard checkpoint", e);
log.error("Caught exception when checking for parent shard checkpoint", e);
exception = e;
}
try {
Thread.sleep(parentShardPollIntervalMillis);
} catch (InterruptedException e) {
LOG.error("Sleep interrupted when waiting on parent shard(s) of " + shardInfo.getShardId(), e);
log.error("Sleep interrupted when waiting on parent shard(s) of {}", shardInfo.getShardId(), e);
}
return new TaskResult(exception);

View file

@ -15,19 +15,13 @@
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
import java.time.Duration;
import java.time.Instant;
import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
import com.amazonaws.services.kinesis.model.GetRecordsResult;
import lombok.extern.apachecommons.CommonsLog;
/**
* This is the BlockingGetRecordsCache class. This class blocks any calls to the getRecords on the
* GetRecordsRetrievalStrategy class.
*/
@CommonsLog
public class BlockingGetRecordsCache implements GetRecordsCache {
private final int maxRecordsPerCall;
private final GetRecordsRetrievalStrategy getRecordsRetrievalStrategy;

View file

@ -19,8 +19,7 @@ import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import lombok.extern.slf4j.Slf4j;
class GracefulShutdownCoordinator {
@ -36,10 +35,8 @@ class GracefulShutdownCoordinator {
return new GracefulShutdownCallable(startWorkerShutdown);
}
@Slf4j
static class GracefulShutdownCallable implements Callable<Boolean> {
private static final Log log = LogFactory.getLog(GracefulShutdownCallable.class);
private final Callable<GracefulShutdownContext> startWorkerShutdown;
GracefulShutdownCallable(Callable<GracefulShutdownContext> startWorkerShutdown) {
@ -83,8 +80,8 @@ class GracefulShutdownCoordinator {
}
}
} catch (InterruptedException ie) {
log.warn("Interrupted while waiting for notification complete, terminating shutdown. "
+ awaitingLogMessage(context));
log.warn("Interrupted while waiting for notification complete, terminating shutdown. {}",
awaitingLogMessage(context));
return false;
}
@ -120,8 +117,8 @@ class GracefulShutdownCoordinator {
}
}
} catch (InterruptedException ie) {
log.warn("Interrupted while waiting for shutdown completion, terminating shutdown. "
+ awaitingFinalShutdownMessage(context));
log.warn("Interrupted while waiting for shutdown completion, terminating shutdown. {}",
awaitingFinalShutdownMessage(context));
return false;
}
return true;
@ -138,10 +135,10 @@ class GracefulShutdownCoordinator {
private boolean workerShutdownWithRemaining(long outstanding, GracefulShutdownContext context) {
if (isWorkerShutdownComplete(context)) {
if (outstanding != 0) {
log.info("Shutdown completed, but shutdownCompleteLatch still had outstanding " + outstanding
+ " with a current value of " + context.getShutdownCompleteLatch().getCount() + ". shutdownComplete: "
+ context.getWorker().isShutdownComplete() + " -- Consumer Map: "
+ context.getWorker().getShardInfoShardConsumerMap().size());
log.info("Shutdown completed, but shutdownCompleteLatch still had outstanding {} with a current"
+ " value of {}. shutdownComplete: {} -- Consumer Map: {}", outstanding,
context.getShutdownCompleteLatch().getCount(), context.getWorker().isShutdownComplete(),
context.getWorker().getShardInfoShardConsumerMap().size());
return true;
}
}

View file

@ -14,9 +14,6 @@
*/
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.Checkpoint;
@ -25,13 +22,13 @@ import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
import lombok.extern.slf4j.Slf4j;
/**
* Task for initializing shard position and invoking the RecordProcessor initialize() API.
*/
@Slf4j
class InitializeTask implements ITask {
private static final Log LOG = LogFactory.getLog(InitializeTask.class);
private static final String RECORD_PROCESSOR_INITIALIZE_METRIC = "RecordProcessor.initialize";
private final ShardInfo shardInfo;
@ -78,7 +75,7 @@ class InitializeTask implements ITask {
Exception exception = null;
try {
LOG.debug("Initializing ShardId " + shardInfo.getShardId());
log.debug("Initializing ShardId {}", shardInfo.getShardId());
Checkpoint initialCheckpointObject = checkpoint.getCheckpointObject(shardInfo.getShardId());
ExtendedSequenceNumber initialCheckpoint = initialCheckpointObject.getCheckpoint();
@ -87,7 +84,7 @@ class InitializeTask implements ITask {
recordProcessorCheckpointer.setLargestPermittedCheckpointValue(initialCheckpoint);
recordProcessorCheckpointer.setInitialCheckpointValue(initialCheckpoint);
LOG.debug("Calling the record processor initialize().");
log.debug("Calling the record processor initialize().");
final InitializationInput initializationInput = new InitializationInput()
.withShardId(shardInfo.getShardId())
.withExtendedSequenceNumber(initialCheckpoint)
@ -95,7 +92,7 @@ class InitializeTask implements ITask {
final long recordProcessorStartTimeMillis = System.currentTimeMillis();
try {
recordProcessor.initialize(initializationInput);
LOG.debug("Record processor initialize() completed.");
log.debug("Record processor initialize() completed.");
} catch (Exception e) {
applicationException = true;
throw e;
@ -107,16 +104,16 @@ class InitializeTask implements ITask {
return new TaskResult(null);
} catch (Exception e) {
if (applicationException) {
LOG.error("Application initialize() threw exception: ", e);
log.error("Application initialize() threw exception: ", e);
} else {
LOG.error("Caught exception: ", e);
log.error("Caught exception: ", e);
}
exception = e;
// backoff if we encounter an exception.
try {
Thread.sleep(this.backoffTimeMillis);
} catch (InterruptedException ie) {
LOG.debug("Interrupted sleep", ie);
log.debug("Interrupted sleep", ie);
}
}

View file

@ -1200,6 +1200,7 @@ public class KinesisClientLibConfiguration {
* @param skipShardSyncAtStartupIfLeasesExist Should Worker skip syncing shards and leases at startup (Worker
* initialization).
* @return KinesisClientLibConfiguration
*/
public KinesisClientLibConfiguration withSkipShardSyncAtStartupIfLeasesExist(
boolean skipShardSyncAtStartupIfLeasesExist) {

View file

@ -22,9 +22,6 @@ import java.util.Objects;
import java.util.Set;
import java.util.UUID;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
@ -41,13 +38,13 @@ import com.amazonaws.services.kinesis.leases.impl.LeaseCoordinator;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
import lombok.extern.slf4j.Slf4j;
/**
* This class is used to coordinate/manage leases owned by this worker process and to get/set checkpoints.
*/
@Slf4j
class KinesisClientLibLeaseCoordinator extends LeaseCoordinator<KinesisClientLease> implements ICheckpoint {
private static final Log LOG = LogFactory.getLog(KinesisClientLibLeaseCoordinator.class);
private static final long DEFAULT_INITIAL_LEASE_TABLE_READ_CAPACITY = 10L;
private static final long DEFAULT_INITIAL_LEASE_TABLE_WRITE_CAPACITY = 10L;
@ -151,10 +148,8 @@ class KinesisClientLibLeaseCoordinator extends LeaseCoordinator<KinesisClientLea
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
KinesisClientLease lease = getCurrentlyHeldLease(shardId);
if (lease == null) {
LOG.info(String.format(
"Worker %s could not update checkpoint for shard %s because it does not hold the lease",
getWorkerIdentifier(),
shardId));
log.info("Worker {} could not update checkpoint for shard {} because it does not hold the lease",
getWorkerIdentifier(), shardId);
return false;
}
@ -180,7 +175,7 @@ class KinesisClientLibLeaseCoordinator extends LeaseCoordinator<KinesisClientLea
throw new ThrottlingException("Got throttled while updating checkpoint.", e);
} catch (InvalidStateException e) {
String message = "Unable to save checkpoint for shardId " + shardId;
LOG.error(message, e);
log.error(message, e);
throw new com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException(message, e);
} catch (DependencyException e) {
throw new KinesisClientLibDependencyException("Unable to save checkpoint for shardId " + shardId, e);
@ -196,7 +191,7 @@ class KinesisClientLibLeaseCoordinator extends LeaseCoordinator<KinesisClientLea
return leaseManager.getLease(shardId).getCheckpoint();
} catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) {
String message = "Unable to fetch checkpoint for shardId " + shardId;
LOG.error(message, e);
log.error(message, e);
throw new KinesisClientLibIOException(message, e);
}
}
@ -218,10 +213,8 @@ class KinesisClientLibLeaseCoordinator extends LeaseCoordinator<KinesisClientLea
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
KinesisClientLease lease = getCurrentlyHeldLease(shardId);
if (lease == null) {
LOG.info(String.format(
"Worker %s could not prepare checkpoint for shard %s because it does not hold the lease",
getWorkerIdentifier(),
shardId));
log.info("Worker {} could not prepare checkpoint for shard {} because it does not hold the lease",
getWorkerIdentifier(), shardId);
return false;
}
@ -248,7 +241,7 @@ class KinesisClientLibLeaseCoordinator extends LeaseCoordinator<KinesisClientLea
throw new ThrottlingException("Got throttled while preparing checkpoint.", e);
} catch (InvalidStateException e) {
String message = "Unable to prepare checkpoint for shardId " + shardId;
LOG.error(message, e);
log.error(message, e);
throw new com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException(message, e);
} catch (DependencyException e) {
throw new KinesisClientLibDependencyException("Unable to prepare checkpoint for shardId " + shardId, e);
@ -265,7 +258,7 @@ class KinesisClientLibLeaseCoordinator extends LeaseCoordinator<KinesisClientLea
return new Checkpoint(lease.getCheckpoint(), lease.getPendingCheckpoint());
} catch (DependencyException | InvalidStateException | ProvisionedThroughputException e) {
String message = "Unable to fetch checkpoint for shardId " + shardId;
LOG.error(message, e);
log.error(message, e);
throw new KinesisClientLibIOException(message, e);
}
}
@ -306,9 +299,8 @@ class KinesisClientLibLeaseCoordinator extends LeaseCoordinator<KinesisClientLea
final boolean newTableCreated =
leaseManager.createLeaseTableIfNotExists(initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity);
if (newTableCreated) {
LOG.info(String.format(
"Created new lease table for coordinator with initial read capacity of %d and write capacity of %d.",
initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity));
log.info("Created new lease table for coordinator with initial read capacity of {} and write capacity of {}.",
initialLeaseTableReadCapacity, initialLeaseTableWriteCapacity);
}
// Need to wait for table in active state.
final long secondsBetweenPolls = 10L;

View file

@ -18,8 +18,6 @@ import java.util.Collections;
import java.util.Date;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint.SentinelCheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
@ -32,14 +30,13 @@ import com.amazonaws.util.CollectionUtils;
import com.google.common.collect.Iterables;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
/**
* Used to get data from Amazon Kinesis. Tracks iterator state internally.
*/
@Slf4j
class KinesisDataFetcher {
private static final Log LOG = LogFactory.getLog(KinesisDataFetcher.class);
private String nextIterator;
private IKinesisProxy kinesisProxy;
private final String shardId;
@ -73,7 +70,7 @@ class KinesisDataFetcher {
try {
return new AdvancingResult(kinesisProxy.get(nextIterator, maxRecords));
} catch (ResourceNotFoundException e) {
LOG.info("Caught ResourceNotFoundException when fetching records for shard " + shardId);
log.info("Caught ResourceNotFoundException when fetching records for shard {}", shardId);
return TERMINAL_RESULT;
}
} else {
@ -134,14 +131,14 @@ class KinesisDataFetcher {
* @param initialPositionInStream The initialPositionInStream.
*/
public void initialize(String initialCheckpoint, InitialPositionInStreamExtended initialPositionInStream) {
LOG.info("Initializing shard " + shardId + " with " + initialCheckpoint);
log.info("Initializing shard {} with {}", shardId, initialCheckpoint);
advanceIteratorTo(initialCheckpoint, initialPositionInStream);
isInitialized = true;
}
public void initialize(ExtendedSequenceNumber initialCheckpoint,
InitialPositionInStreamExtended initialPositionInStream) {
LOG.info("Initializing shard " + shardId + " with " + initialCheckpoint.getSequenceNumber());
log.info("Initializing shard {} with {}", shardId, initialCheckpoint.getSequenceNumber());
advanceIteratorTo(initialCheckpoint.getSequenceNumber(), initialPositionInStream);
isInitialized = true;
}
@ -182,13 +179,13 @@ class KinesisDataFetcher {
private String getIterator(String iteratorType, String sequenceNumber) {
String iterator = null;
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Calling getIterator for " + shardId + ", iterator type " + iteratorType
+ " and sequence number " + sequenceNumber);
if (log.isDebugEnabled()) {
log.debug("Calling getIterator for {}, iterator type {} and sequence number {}", shardId, iteratorType,
sequenceNumber);
}
iterator = kinesisProxy.getIterator(shardId, iteratorType, sequenceNumber);
} catch (ResourceNotFoundException e) {
LOG.info("Caught ResourceNotFoundException when getting an iterator for shard " + shardId, e);
log.info("Caught ResourceNotFoundException when getting an iterator for shard {}", shardId, e);
}
return iterator;
}
@ -200,12 +197,12 @@ class KinesisDataFetcher {
private String getIterator(String iteratorType) {
String iterator = null;
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Calling getIterator for " + shardId + " and iterator type " + iteratorType);
if (log.isDebugEnabled()) {
log.debug("Calling getIterator for {} and iterator type {}", shardId, iteratorType);
}
iterator = kinesisProxy.getIterator(shardId, iteratorType);
} catch (ResourceNotFoundException e) {
LOG.info("Caught ResourceNotFoundException when getting an iterator for shard " + shardId, e);
log.info("Caught ResourceNotFoundException when getting an iterator for shard {}", shardId, e);
}
return iterator;
}
@ -217,12 +214,12 @@ class KinesisDataFetcher {
private String getIterator(Date timestamp) {
String iterator = null;
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Calling getIterator for " + shardId + " and timestamp " + timestamp);
if (log.isDebugEnabled()) {
log.debug("Calling getIterator for {} and timestamp {}", shardId, timestamp);
}
iterator = kinesisProxy.getIterator(shardId, timestamp);
} catch (ResourceNotFoundException e) {
LOG.info("Caught ResourceNotFoundException when getting an iterator for shard " + shardId, e);
log.info("Caught ResourceNotFoundException when getting an iterator for shard {}", shardId, e);
}
return iterator;
}

View file

@ -33,7 +33,7 @@ import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
import com.amazonaws.services.kinesis.model.GetRecordsResult;
import lombok.NonNull;
import lombok.extern.apachecommons.CommonsLog;
import lombok.extern.slf4j.Slf4j;
/**
* This is the prefetch caching class, this class spins up a thread if prefetching is enabled. That thread fetches the
@ -43,7 +43,7 @@ import lombok.extern.apachecommons.CommonsLog;
* be present in the cache across multiple GetRecordsResult object. If no data is available in the cache, the call from
* the record processor is blocked till records are retrieved from Kinesis.
*/
@CommonsLog
@Slf4j
public class PrefetchGetRecordsCache implements GetRecordsCache {
private static final String EXPIRED_ITERATOR_METRIC = "ExpiredIterator";
LinkedBlockingQueue<ProcessRecordsInput> getRecordsResultQueue;
@ -172,8 +172,8 @@ public class PrefetchGetRecordsCache implements GetRecordsCache {
} catch (InterruptedException e) {
log.info("Thread was interrupted, indicating shutdown was called on the cache.");
} catch (ExpiredIteratorException e) {
log.info(String.format("ShardId %s: getRecords threw ExpiredIteratorException - restarting"
+ " after greatest seqNum passed to customer", shardId), e);
log.info("ShardId {}: getRecords threw ExpiredIteratorException - restarting"
+ " after greatest seqNum passed to customer", shardId, e);
MetricsHelper.getMetricsScope().addData(EXPIRED_ITERATOR_METRIC, 1, StandardUnit.Count,
MetricsLevel.SUMMARY);
@ -246,14 +246,14 @@ public class PrefetchGetRecordsCache implements GetRecordsCache {
public synchronized void waitForConsumer() throws InterruptedException {
if (!shouldGetNewRecords()) {
log.debug("Queue is full waiting for consumer for " + idleMillisBetweenCalls + " ms");
log.debug("Queue is full waiting for consumer for {} ms", idleMillisBetweenCalls);
this.wait(idleMillisBetweenCalls);
}
}
public synchronized boolean shouldGetNewRecords() {
if (log.isDebugEnabled()) {
log.debug("Current Prefetch Counter States: " + this.toString());
log.debug("Current Prefetch Counter States: {}", this.toString());
}
return size < maxRecordsCount && byteSize < maxByteSize;
}

View file

@ -15,13 +15,9 @@
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
import java.math.BigInteger;
import java.util.Collections;
import java.util.List;
import java.util.ListIterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudwatch.model.StandardUnit;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
@ -33,18 +29,17 @@ import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope;
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
import com.amazonaws.services.kinesis.model.GetRecordsResult;
import com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException;
import com.amazonaws.services.kinesis.model.Record;
import com.amazonaws.services.kinesis.model.Shard;
import lombok.extern.slf4j.Slf4j;
/**
* Task for fetching data records and invoking processRecords() on the record processor instance.
*/
@Slf4j
class ProcessTask implements ITask {
private static final Log LOG = LogFactory.getLog(ProcessTask.class);
private static final String EXPIRED_ITERATOR_METRIC = "ExpiredIterator";
private static final String DATA_BYTES_PROCESSED_METRIC = "DataBytesProcessed";
private static final String RECORDS_PROCESSED_METRIC = "RecordsProcessed";
@ -130,7 +125,7 @@ class ProcessTask implements ITask {
this.shard = null;
}
if (this.shard == null && !skipShardSyncAtWorkerInitializationIfLeasesExist) {
LOG.warn("Cannot get the shard for this ProcessTask, so duplicate KPL user records "
log.warn("Cannot get the shard for this ProcessTask, so duplicate KPL user records "
+ "in the event of resharding will not be dropped during deaggregation of Amazon "
+ "Kinesis records.");
}
@ -152,7 +147,7 @@ class ProcessTask implements ITask {
try {
if (dataFetcher.isShardEndReached()) {
LOG.info("Reached end of shard " + shardInfo.getShardId());
log.info("Reached end of shard {}", shardInfo.getShardId());
return new TaskResult(null, true);
}
@ -181,7 +176,7 @@ class ProcessTask implements ITask {
backoff();
} catch (RuntimeException e) {
LOG.error("ShardId " + shardInfo.getShardId() + ": Caught exception: ", e);
log.error("ShardId {}: Caught exception: ", shardInfo.getShardId(), e);
exception = e;
backoff();
}
@ -197,7 +192,7 @@ class ProcessTask implements ITask {
try {
Thread.sleep(this.backoffTimeMillis);
} catch (InterruptedException ie) {
LOG.debug(shardInfo.getShardId() + ": Sleep was interrupted", ie);
log.debug("{}: Sleep was interrupted", shardInfo.getShardId(), ie);
}
}
@ -210,8 +205,8 @@ class ProcessTask implements ITask {
* the records to be dispatched. It's possible the records have been adjusted by KPL deaggregation.
*/
private void callProcessRecords(ProcessRecordsInput input, List<Record> records) {
LOG.debug("Calling application processRecords() with " + records.size() + " records from "
+ shardInfo.getShardId());
log.debug("Calling application processRecords() with {} records from {}", records.size(),
shardInfo.getShardId());
final ProcessRecordsInput processRecordsInput = new ProcessRecordsInput().withRecords(records)
.withCheckpointer(recordProcessorCheckpointer)
.withMillisBehindLatest(input.getMillisBehindLatest());
@ -220,9 +215,9 @@ class ProcessTask implements ITask {
try {
recordProcessor.processRecords(processRecordsInput);
} catch (Exception e) {
LOG.error("ShardId " + shardInfo.getShardId()
+ ": Application processRecords() threw an exception when processing shard ", e);
LOG.error("ShardId " + shardInfo.getShardId() + ": Skipping over the following data records: " + records);
log.error("ShardId {}: Application processRecords() threw an exception when processing shard ",
shardInfo.getShardId(), e);
log.error("ShardId {}: Skipping over the following data records: {}", shardInfo.getShardId(), records);
} finally {
MetricsHelper.addLatencyPerShard(shardInfo.getShardId(), RECORD_PROCESSOR_PROCESS_RECORDS_METRIC,
recordProcessorStartTimeMillis, MetricsLevel.SUMMARY);
@ -270,18 +265,18 @@ class ProcessTask implements ITask {
* the time when the task started
*/
private void handleNoRecords(long startTimeMillis) {
LOG.debug("Kinesis didn't return any records for shard " + shardInfo.getShardId());
log.debug("Kinesis didn't return any records for shard {}", shardInfo.getShardId());
long sleepTimeMillis = streamConfig.getIdleTimeInMilliseconds()
- (System.currentTimeMillis() - startTimeMillis);
if (sleepTimeMillis > 0) {
sleepTimeMillis = Math.max(sleepTimeMillis, streamConfig.getIdleTimeInMilliseconds());
try {
LOG.debug("Sleeping for " + sleepTimeMillis + " ms since there were no new records in shard "
+ shardInfo.getShardId());
log.debug("Sleeping for {} ms since there were no new records in shard {}", sleepTimeMillis,
shardInfo.getShardId());
Thread.sleep(sleepTimeMillis);
} catch (InterruptedException e) {
LOG.debug("ShardId " + shardInfo.getShardId() + ": Sleep was interrupted");
log.debug("ShardId {}: Sleep was interrupted", shardInfo.getShardId());
}
}
}
@ -316,8 +311,8 @@ class ProcessTask implements ITask {
if (extendedSequenceNumber.compareTo(lastCheckpointValue) <= 0) {
recordIterator.remove();
LOG.debug("removing record with ESN " + extendedSequenceNumber
+ " because the ESN is <= checkpoint (" + lastCheckpointValue + ")");
log.debug("removing record with ESN {} because the ESN is <= checkpoint ({})", extendedSequenceNumber,
lastCheckpointValue);
continue;
}
@ -342,9 +337,9 @@ class ProcessTask implements ITask {
return getRecordsResultAndRecordMillisBehindLatest();
} catch (ExpiredIteratorException e) {
// If we see a ExpiredIteratorException, try once to restart from the greatest remembered sequence number
LOG.info("ShardId " + shardInfo.getShardId()
log.info("ShardId {}"
+ ": getRecords threw ExpiredIteratorException - restarting after greatest seqNum "
+ "passed to customer", e);
+ "passed to customer", shardInfo.getShardId(), e);
MetricsHelper.getMetricsScope().addData(EXPIRED_ITERATOR_METRIC, 1, StandardUnit.Count,
MetricsLevel.SUMMARY);
@ -362,7 +357,7 @@ class ProcessTask implements ITask {
String msg =
"Shard " + shardInfo.getShardId()
+ ": getRecords threw ExpiredIteratorException with a fresh iterator.";
LOG.error(msg, ex);
log.error(msg, ex);
throw ex;
}
}

View file

@ -14,12 +14,6 @@
*/
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.impl.ThreadSafeMetricsDelegatingScope;
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException;
@ -30,17 +24,20 @@ import com.amazonaws.services.kinesis.clientlibrary.interfaces.IPreparedCheckpoi
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.clientlibrary.types.UserRecord;
import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.impl.ThreadSafeMetricsDelegatingScope;
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
import com.amazonaws.services.kinesis.model.Record;
import lombok.extern.slf4j.Slf4j;
/**
* This class is used to enable RecordProcessors to checkpoint their progress.
* The Amazon Kinesis Client Library will instantiate an object and provide a reference to the application
* RecordProcessor instance. Amazon Kinesis Client Library will create one instance per shard assignment.
*/
@Slf4j
class RecordProcessorCheckpointer implements IRecordProcessorCheckpointer {
private static final Log LOG = LogFactory.getLog(RecordProcessorCheckpointer.class);
private ICheckpoint checkpoint;
private ExtendedSequenceNumber largestPermittedCheckpointValue;
@ -78,9 +75,9 @@ class RecordProcessorCheckpointer implements IRecordProcessorCheckpointer {
@Override
public synchronized void checkpoint()
throws KinesisClientLibDependencyException, InvalidStateException, ThrottlingException, ShutdownException {
if (LOG.isDebugEnabled()) {
LOG.debug("Checkpointing " + shardInfo.getShardId() + ", " + " token " + shardInfo.getConcurrencyToken()
+ " at largest permitted value " + this.largestPermittedCheckpointValue);
if (log.isDebugEnabled()) {
log.debug("Checkpointing {}, token {} at largest permitted value {}", shardInfo.getShardId(),
shardInfo.getConcurrencyToken(), this.largestPermittedCheckpointValue);
}
advancePosition(this.largestPermittedCheckpointValue);
}
@ -126,9 +123,9 @@ class RecordProcessorCheckpointer implements IRecordProcessorCheckpointer {
// throws exception if sequence number shouldn't be checkpointed for this shard
sequenceNumberValidator.validateSequenceNumber(sequenceNumber);
if (LOG.isDebugEnabled()) {
LOG.debug("Validated checkpoint sequence number " + sequenceNumber + " for " + shardInfo.getShardId()
+ ", token " + shardInfo.getConcurrencyToken());
if (log.isDebugEnabled()) {
log.debug("Validated checkpoint sequence number {} for {}, token {}", sequenceNumber,
shardInfo.getShardId(), shardInfo.getConcurrencyToken());
}
/*
* If there isn't a last checkpoint value, we only care about checking the upper bound.
@ -138,9 +135,9 @@ class RecordProcessorCheckpointer implements IRecordProcessorCheckpointer {
if ((lastCheckpointValue == null || lastCheckpointValue.compareTo(newCheckpoint) <= 0)
&& newCheckpoint.compareTo(largestPermittedCheckpointValue) <= 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Checkpointing " + shardInfo.getShardId() + ", token " + shardInfo.getConcurrencyToken()
+ " at specific extended sequence number " + newCheckpoint);
if (log.isDebugEnabled()) {
log.debug("Checkpointing {}, token {} at specific extended sequence number {}", shardInfo.getShardId(),
shardInfo.getConcurrencyToken(), newCheckpoint);
}
this.advancePosition(newCheckpoint);
} else {
@ -201,9 +198,9 @@ class RecordProcessorCheckpointer implements IRecordProcessorCheckpointer {
// throws exception if sequence number shouldn't be checkpointed for this shard
sequenceNumberValidator.validateSequenceNumber(sequenceNumber);
if (LOG.isDebugEnabled()) {
LOG.debug("Validated prepareCheckpoint sequence number " + sequenceNumber + " for " + shardInfo.getShardId()
+ ", token " + shardInfo.getConcurrencyToken());
if (log.isDebugEnabled()) {
log.debug("Validated prepareCheckpoint sequence number {} for {}, token {}", sequenceNumber,
shardInfo.getShardId(), shardInfo.getConcurrencyToken());
}
/*
* If there isn't a last checkpoint value, we only care about checking the upper bound.
@ -213,10 +210,9 @@ class RecordProcessorCheckpointer implements IRecordProcessorCheckpointer {
if ((lastCheckpointValue == null || lastCheckpointValue.compareTo(pendingCheckpoint) <= 0)
&& pendingCheckpoint.compareTo(largestPermittedCheckpointValue) <= 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Preparing checkpoint " + shardInfo.getShardId()
+ ", token " + shardInfo.getConcurrencyToken()
+ " at specific extended sequence number " + pendingCheckpoint);
if (log.isDebugEnabled()) {
log.debug("Preparing checkpoint {}, token {} at specific extended sequence number {}",
shardInfo.getShardId(), shardInfo.getConcurrencyToken(), pendingCheckpoint);
}
return doPrepareCheckpoint(pendingCheckpoint);
} else {
@ -300,9 +296,9 @@ class RecordProcessorCheckpointer implements IRecordProcessorCheckpointer {
}
if (extendedSequenceNumber != null && !extendedSequenceNumber.equals(lastCheckpointValue)) {
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Setting " + shardInfo.getShardId() + ", token " + shardInfo.getConcurrencyToken()
+ " checkpoint to " + checkpointToRecord);
if (log.isDebugEnabled()) {
log.debug("Setting {}, token {} checkpoint to {}", shardInfo.getShardId(),
shardInfo.getConcurrencyToken(), checkpointToRecord);
}
checkpoint.setCheckpoint(shardInfo.getShardId(), checkpointToRecord, shardInfo.getConcurrencyToken());
lastCheckpointValue = checkpointToRecord;
@ -310,7 +306,7 @@ class RecordProcessorCheckpointer implements IRecordProcessorCheckpointer {
| KinesisClientLibDependencyException e) {
throw e;
} catch (KinesisClientLibException e) {
LOG.warn("Caught exception setting checkpoint.", e);
log.warn("Caught exception setting checkpoint.", e);
throw new KinesisClientLibDependencyException("Caught exception while checkpointing", e);
}
}
@ -367,7 +363,7 @@ class RecordProcessorCheckpointer implements IRecordProcessorCheckpointer {
| KinesisClientLibDependencyException e) {
throw e;
} catch (KinesisClientLibException e) {
LOG.warn("Caught exception setting prepareCheckpoint.", e);
log.warn("Caught exception setting prepareCheckpoint.", e);
throw new KinesisClientLibDependencyException("Caught exception while prepareCheckpointing", e);
}

View file

@ -14,9 +14,6 @@
*/
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException;
@ -26,6 +23,8 @@ import com.amazonaws.services.kinesis.model.InvalidArgumentException;
import com.amazonaws.services.kinesis.model.ProvisionedThroughputExceededException;
import com.amazonaws.services.kinesis.model.ShardIteratorType;
import lombok.extern.slf4j.Slf4j;
/**
* This class provides some methods for validating sequence numbers. It provides a method
* {@link #validateSequenceNumber(String)} which validates a sequence number by attempting to get an iterator from
@ -34,10 +33,8 @@ import com.amazonaws.services.kinesis.model.ShardIteratorType;
* which could prevent another shard consumer instance from processing the shard later on). This class also provides a
* utility function {@link #isDigits(String)} which is used to check whether a string is all digits
*/
@Slf4j
public class SequenceNumberValidator {
private static final Log LOG = LogFactory.getLog(SequenceNumberValidator.class);
private IKinesisProxy proxy;
private String shardId;
private boolean validateWithGetIterator;
@ -73,24 +70,24 @@ public class SequenceNumberValidator {
boolean atShardEnd = ExtendedSequenceNumber.SHARD_END.getSequenceNumber().equals(sequenceNumber);
if (!atShardEnd && !isDigits(sequenceNumber)) {
LOG.info("Sequence number must be numeric, but was " + sequenceNumber);
log.info("Sequence number must be numeric, but was {}", sequenceNumber);
throw new IllegalArgumentException("Sequence number must be numeric, but was " + sequenceNumber);
}
try {
if (!atShardEnd &&validateWithGetIterator) {
proxy.getIterator(shardId, ShardIteratorType.AFTER_SEQUENCE_NUMBER.toString(), sequenceNumber);
LOG.info("Validated sequence number " + sequenceNumber + " with shard id " + shardId);
log.info("Validated sequence number {} with shard id {}", sequenceNumber, shardId);
}
} catch (InvalidArgumentException e) {
LOG.info("Sequence number " + sequenceNumber + " is invalid for shard " + shardId, e);
log.info("Sequence number {} is invalid for shard {}", sequenceNumber, shardId, e);
throw new IllegalArgumentException("Sequence number " + sequenceNumber + " is invalid for shard "
+ shardId, e);
} catch (ProvisionedThroughputExceededException e) {
// clients should have back off logic in their checkpoint logic
LOG.info("Exceeded throughput while getting an iterator for shard " + shardId, e);
log.info("Exceeded throughput while getting an iterator for shard {}", shardId, e);
throw new ThrottlingException("Exceeded throughput while getting an iterator for shard " + shardId, e);
} catch (AmazonServiceException e) {
LOG.info("Encountered service exception while getting an iterator for shard " + shardId, e);
log.info("Encountered service exception while getting an iterator for shard {}", shardId, e);
if (e.getStatusCode() >= SERVER_SIDE_ERROR_CODE) {
// clients can choose whether to retry in their checkpoint logic
throw new KinesisClientLibDependencyException("Encountered service exception while getting an iterator"

View file

@ -20,9 +20,6 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.BlockedOnParentShardException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
@ -32,16 +29,15 @@ import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
import com.google.common.annotations.VisibleForTesting;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
/**
* Responsible for consuming data records of a (specified) shard.
* The instance should be shutdown when we lose the primary responsibility for a shard.
* A new instance should be created if the primary responsibility is reassigned back to this process.
*/
@Slf4j
class ShardConsumer {
private static final Log LOG = LogFactory.getLog(ShardConsumer.class);
private final StreamConfig streamConfig;
private final IRecordProcessor recordProcessor;
private final KinesisClientLibConfiguration config;
@ -269,30 +265,29 @@ class ShardConsumer {
future = executorService.submit(currentTask);
currentTaskSubmitTime = System.currentTimeMillis();
submittedNewTask = true;
LOG.debug("Submitted new " + currentTask.getTaskType()
+ " task for shard " + shardInfo.getShardId());
log.debug("Submitted new {} task for shard {}", currentTask.getTaskType(), shardInfo.getShardId());
} catch (RejectedExecutionException e) {
LOG.info(currentTask.getTaskType() + " task was not accepted for execution.", e);
log.info("{} task was not accepted for execution.", currentTask.getTaskType(), e);
} catch (RuntimeException e) {
LOG.info(currentTask.getTaskType() + " task encountered exception ", e);
log.info("{} task encountered exception ", currentTask.getTaskType(), e);
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("No new task to submit for shard %s, currentState %s",
if (log.isDebugEnabled()) {
log.debug("No new task to submit for shard {}, currentState {}",
shardInfo.getShardId(),
currentState.toString()));
currentState.toString());
}
}
} else {
final long timeElapsed = System.currentTimeMillis() - currentTaskSubmitTime;
final String commonMessage = String.format("Previous %s task still pending for shard %s since %d ms ago. ",
currentTask.getTaskType(), shardInfo.getShardId(), timeElapsed);
if (LOG.isDebugEnabled()) {
LOG.debug(commonMessage + "Not submitting new task.");
if (log.isDebugEnabled()) {
log.debug("{} Not submitting new task.", commonMessage);
}
config.getLogWarningForTaskAfterMillis().ifPresent(value -> {
if (timeElapsed > value) {
LOG.warn(commonMessage);
log.warn(commonMessage);
}
});
}
@ -328,14 +323,13 @@ class ShardConsumer {
}
private void logTaskException(TaskResult taskResult) {
if (LOG.isDebugEnabled()) {
if (log.isDebugEnabled()) {
Exception taskException = taskResult.getException();
if (taskException instanceof BlockedOnParentShardException) {
// No need to log the stack trace for this exception (it is very specific).
LOG.debug("Shard " + shardInfo.getShardId() + " is blocked on completion of parent shard.");
log.debug("Shard {} is blocked on completion of parent shard.", shardInfo.getShardId());
} else {
LOG.debug("Caught exception running " + currentTask.getTaskType() + " task: ",
taskResult.getException());
log.debug("Caught exception running {} task: ", currentTask.getTaskType(), taskResult.getException());
}
}
}
@ -419,10 +413,9 @@ class ShardConsumer {
if (currentState.getTaskType() == currentTask.getTaskType()) {
currentState = currentState.successTransition();
} else {
LOG.error("Current State task type of '" + currentState.getTaskType()
+ "' doesn't match the current tasks type of '" + currentTask.getTaskType()
+ "'. This shouldn't happen, and indicates a programming error. "
+ "Unable to safely transition to the next state.");
log.error("Current State task type of '{}' doesn't match the current tasks type of '{}'. This"
+ " shouldn't happen, and indicates a programming error. Unable to safely transition to the"
+ " next state.", currentState.getTaskType(), currentTask.getTaskType());
}
}
//

View file

@ -14,23 +14,20 @@
*/
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
import lombok.extern.slf4j.Slf4j;
/**
* This task syncs leases/activies with shards of the stream.
* It will create new leases/activites when it discovers new shards (e.g. setup/resharding).
* It will clean up leases/activities for shards that have been completely processed (if
* cleanupLeasesUponShardCompletion is true).
*/
@Slf4j
class ShardSyncTask implements ITask {
private static final Log LOG = LogFactory.getLog(ShardSyncTask.class);
private final IKinesisProxy kinesisProxy;
private final ILeaseManager<KinesisClientLease> leaseManager;
private InitialPositionInStreamExtended initialPosition;
@ -77,7 +74,7 @@ class ShardSyncTask implements ITask {
Thread.sleep(shardSyncTaskIdleTimeMillis);
}
} catch (Exception e) {
LOG.error("Caught exception while sync'ing Kinesis shards and leases", e);
log.error("Caught exception while sync'ing Kinesis shards and leases", e);
exception = e;
}

View file

@ -19,23 +19,20 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
import lombok.extern.slf4j.Slf4j;
/**
* The ShardSyncTaskManager is used to track the task to sync shards with leases (create leases for new
* Kinesis shards, remove obsolete leases). We'll have at most one outstanding sync task at any time.
* Worker will use this class to kick off a sync task when it finds shards which have been completely processed.
*/
@Slf4j
class ShardSyncTaskManager {
private static final Log LOG = LogFactory.getLog(ShardSyncTaskManager.class);
private ITask currentTask;
private Future<TaskResult> future;
private final IKinesisProxy kinesisProxy;
@ -90,11 +87,11 @@ class ShardSyncTaskManager {
try {
TaskResult result = future.get();
if (result.getException() != null) {
LOG.error("Caught exception running " + currentTask.getTaskType() + " task: ",
log.error("Caught exception running {} task: ", currentTask.getTaskType(),
result.getException());
}
} catch (InterruptedException | ExecutionException e) {
LOG.warn(currentTask.getTaskType() + " task encountered exception.", e);
log.warn("{} task encountered exception.", currentTask.getTaskType(), e);
}
}
@ -107,12 +104,12 @@ class ShardSyncTaskManager {
shardSyncIdleTimeMillis), metricsFactory);
future = executorService.submit(currentTask);
submittedNewTask = true;
if (LOG.isDebugEnabled()) {
LOG.debug("Submitted new " + currentTask.getTaskType() + " task.");
if (log.isDebugEnabled()) {
log.debug("Submitted new {} task.", currentTask.getTaskType());
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Previous " + currentTask.getTaskType() + " task still pending. Not submitting new task.");
if (log.isDebugEnabled()) {
log.debug("Previous {} task still pending. Not submitting new task.", currentTask.getTaskType());
}
}

View file

@ -26,8 +26,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.lang.StringUtils;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.KinesisClientLibIOException;
@ -42,16 +40,17 @@ import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
import com.amazonaws.services.kinesis.model.Shard;
import lombok.extern.slf4j.Slf4j;
/**
* Helper class to sync leases with shards of the Kinesis stream.
* It will create new leases/activities when it discovers new Kinesis shards (bootstrap/resharding).
* It deletes leases for shards that have been trimmed from Kinesis, or if we've completed processing it
* and begun processing it's child shards.
*/
@Slf4j
class ShardSyncer {
private static final Log LOG = LogFactory.getLog(ShardSyncer.class);
/**
* Note constructor is private: We use static synchronized methods - this is a utility class.
*/
@ -119,7 +118,7 @@ class ShardSyncer {
boolean ignoreUnexpectedChildShards)
throws DependencyException, InvalidStateException, ProvisionedThroughputException, KinesisClientLibIOException {
List<Shard> shards = getShardList(kinesisProxy);
LOG.debug("Num shards: " + shards.size());
log.debug("Num shards: {}", shards.size());
Map<String, Shard> shardIdToShardMap = constructShardIdToShardMap(shards);
Map<String, Set<String>> shardIdToChildShardIdsMap = constructShardIdToChildShardIdsMap(shardIdToShardMap);
@ -132,7 +131,7 @@ class ShardSyncer {
List<KinesisClientLease> newLeasesToCreate = determineNewLeasesToCreate(shards, currentLeases, initialPosition,
inconsistentShardIds);
LOG.debug("Num new leases to create: " + newLeasesToCreate.size());
log.debug("Num new leases to create: {}", newLeasesToCreate.size());
for (KinesisClientLease lease : newLeasesToCreate) {
long startTimeMillis = System.currentTimeMillis();
boolean success = false;
@ -228,7 +227,7 @@ class ShardSyncer {
for (String shardId : shardIdsOfClosedShards) {
Shard shard = shardIdToShardMap.get(shardId);
if (shard == null) {
LOG.info("Shard " + shardId + " is not present in Kinesis anymore.");
log.info("Shard {} is not present in Kinesis anymore.", shardId);
continue;
}
@ -381,7 +380,7 @@ class ShardSyncer {
Set<String> shardIdsOfCurrentLeases = new HashSet<String>();
for (KinesisClientLease lease : currentLeases) {
shardIdsOfCurrentLeases.add(lease.getLeaseKey());
LOG.debug("Existing lease: " + lease);
log.debug("Existing lease: {}", lease);
}
List<Shard> openShards = getOpenShards(shards);
@ -390,13 +389,13 @@ class ShardSyncer {
// Iterate over the open shards and find those that don't have any lease entries.
for (Shard shard : openShards) {
String shardId = shard.getShardId();
LOG.debug("Evaluating leases for open shard " + shardId + " and its ancestors.");
log.debug("Evaluating leases for open shard {} and its ancestors.", shardId);
if (shardIdsOfCurrentLeases.contains(shardId)) {
LOG.debug("Lease for shardId " + shardId + " already exists. Not creating a lease");
log.debug("Lease for shardId {} already exists. Not creating a lease", shardId);
} else if (inconsistentShardIds.contains(shardId)) {
LOG.info("shardId " + shardId + " is an inconsistent child. Not creating a lease");
log.info("shardId {} is an inconsistent child. Not creating a lease", shardId);
} else {
LOG.debug("Need to create a lease for shardId " + shardId);
log.debug("Need to create a lease for shardId {}", shardId);
KinesisClientLease newLease = newKCLLease(shard);
boolean isDescendant =
checkIfDescendantAndAddNewLeasesForAncestors(shardId,
@ -435,7 +434,7 @@ class ShardSyncer {
} else {
newLease.setCheckpoint(convertToCheckpoint(initialPosition));
}
LOG.debug("Set checkpoint of " + newLease.getLeaseKey() + " to " + newLease.getCheckpoint());
log.debug("Set checkpoint of {} to {}", newLease.getLeaseKey(), newLease.getCheckpoint());
shardIdToNewLeaseMap.put(shardId, newLease);
}
}
@ -511,9 +510,9 @@ class ShardSyncer {
memoizationContext)) {
isDescendant = true;
descendantParentShardIds.add(parentShardId);
LOG.debug("Parent shard " + parentShardId + " is a descendant.");
log.debug("Parent shard {} is a descendant.", parentShardId);
} else {
LOG.debug("Parent shard " + parentShardId + " is NOT a descendant.");
log.debug("Parent shard {} is NOT a descendant.", parentShardId);
}
}
@ -521,7 +520,7 @@ class ShardSyncer {
if (isDescendant) {
for (String parentShardId : parentShardIds) {
if (!shardIdsOfCurrentLeases.contains(parentShardId)) {
LOG.debug("Need to create a lease for shardId " + parentShardId);
log.debug("Need to create a lease for shardId {}", parentShardId);
KinesisClientLease lease = shardIdToLeaseMapOfNewShards.get(parentShardId);
if (lease == null) {
lease = newKCLLease(shardIdToShardMapOfAllKinesisShards.get(parentShardId));
@ -612,9 +611,8 @@ class ShardSyncer {
}
if (!garbageLeases.isEmpty()) {
LOG.info("Found " + garbageLeases.size()
+ " candidate leases for cleanup. Refreshing list of"
+ " Kinesis shards to pick up recent/latest shards");
log.info("Found {} candidate leases for cleanup. Refreshing list of"
+ " Kinesis shards to pick up recent/latest shards", garbageLeases.size());
List<Shard> currentShardList = getShardList(kinesisProxy);
Set<String> currentKinesisShardIds = new HashSet<>();
for (Shard shard : currentShardList) {
@ -623,8 +621,7 @@ class ShardSyncer {
for (KinesisClientLease lease : garbageLeases) {
if (isCandidateForCleanup(lease, currentKinesisShardIds)) {
LOG.info("Deleting lease for shard " + lease.getLeaseKey()
+ " as it is not present in Kinesis stream.");
log.info("Deleting lease for shard {} as it is not present in Kinesis stream.", lease.getLeaseKey());
leaseManager.deleteLease(lease);
}
}
@ -649,7 +646,7 @@ class ShardSyncer {
if (currentKinesisShardIds.contains(lease.getLeaseKey())) {
isCandidateForCleanup = false;
} else {
LOG.info("Found lease for non-existent shard: " + lease.getLeaseKey() + ". Checking its parent shards");
log.info("Found lease for non-existent shard: {}. Checking its parent shards", lease.getLeaseKey());
Set<String> parentShardIds = lease.getParentShardIds();
for (String parentShardId : parentShardIds) {
@ -659,7 +656,7 @@ class ShardSyncer {
String message =
"Parent shard " + parentShardId + " exists but not the child shard "
+ lease.getLeaseKey();
LOG.info(message);
log.info(message);
throw new KinesisClientLibIOException(message);
}
}
@ -760,8 +757,8 @@ class ShardSyncer {
}
if (okayToDelete) {
LOG.info("Deleting lease for shard " + leaseForClosedShard.getLeaseKey()
+ " as it has been completely processed and processing of child shards has begun.");
log.info("Deleting lease for shard {} as it has been completely processed and processing of child "
+ "shards has begun.", leaseForClosedShard.getLeaseKey());
leaseManager.deleteLease(leaseForClosedShard);
}
}
@ -817,7 +814,7 @@ class ShardSyncer {
String endingSequenceNumber = shard.getSequenceNumberRange().getEndingSequenceNumber();
if (endingSequenceNumber == null) {
openShards.add(shard);
LOG.debug("Found open shard: " + shard.getShardId());
log.debug("Found open shard: {}", shard.getShardId());
}
}
return openShards;

View file

@ -14,9 +14,6 @@
*/
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
@ -27,13 +24,13 @@ import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
import com.google.common.annotations.VisibleForTesting;
import lombok.extern.slf4j.Slf4j;
/**
* Task for invoking the RecordProcessor shutdown() callback.
*/
@Slf4j
class ShutdownTask implements ITask {
private static final Log LOG = LogFactory.getLog(ShutdownTask.class);
private static final String RECORD_PROCESSOR_SHUTDOWN_METRIC = "RecordProcessor.shutdown";
private final ShardInfo shardInfo;
@ -96,8 +93,8 @@ class ShutdownTask implements ITask {
recordProcessorCheckpointer.setLargestPermittedCheckpointValue(ExtendedSequenceNumber.SHARD_END);
}
LOG.debug("Invoking shutdown() for shard " + shardInfo.getShardId() + ", concurrencyToken "
+ shardInfo.getConcurrencyToken() + ". Shutdown reason: " + reason);
log.debug("Invoking shutdown() for shard {}, concurrencyToken {}. Shutdown reason: {}",
shardInfo.getShardId(), shardInfo.getConcurrencyToken(), reason);
final ShutdownInput shutdownInput = new ShutdownInput()
.withShutdownReason(reason)
.withCheckpointer(recordProcessorCheckpointer);
@ -113,9 +110,9 @@ class ShutdownTask implements ITask {
+ shardInfo.getShardId());
}
}
LOG.debug("Shutting down retrieval strategy.");
log.debug("Shutting down retrieval strategy.");
getRecordsCache.shutdown();
LOG.debug("Record processor completed shutdown() for shard " + shardInfo.getShardId());
log.debug("Record processor completed shutdown() for shard {}", shardInfo.getShardId());
} catch (Exception e) {
applicationException = true;
throw e;
@ -125,29 +122,29 @@ class ShutdownTask implements ITask {
}
if (reason == ShutdownReason.TERMINATE) {
LOG.debug("Looking for child shards of shard " + shardInfo.getShardId());
log.debug("Looking for child shards of shard {}", shardInfo.getShardId());
// create leases for the child shards
ShardSyncer.checkAndCreateLeasesForNewShards(kinesisProxy,
leaseManager,
initialPositionInStream,
cleanupLeasesOfCompletedShards,
ignoreUnexpectedChildShards);
LOG.debug("Finished checking for child shards of shard " + shardInfo.getShardId());
log.debug("Finished checking for child shards of shard {}", shardInfo.getShardId());
}
return new TaskResult(null);
} catch (Exception e) {
if (applicationException) {
LOG.error("Application exception. ", e);
log.error("Application exception. ", e);
} else {
LOG.error("Caught exception: ", e);
log.error("Caught exception: ", e);
}
exception = e;
// backoff if we encounter an exception.
try {
Thread.sleep(this.backoffTimeMillis);
} catch (InterruptedException ie) {
LOG.debug("Interrupted sleep", ie);
log.debug("Interrupted sleep", ie);
}
}

View file

@ -14,13 +14,13 @@
*/
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
import lombok.Getter;
import org.slf4j.Logger;
import lombok.RequiredArgsConstructor;
import lombok.extern.apachecommons.CommonsLog;
import org.apache.commons.logging.Log;
import lombok.extern.slf4j.Slf4j;
@RequiredArgsConstructor
@CommonsLog
@Slf4j
class ThrottlingReporter {
private final int maxConsecutiveWarnThrottles;
@ -45,7 +45,7 @@ class ThrottlingReporter {
consecutiveThrottles = 0;
}
protected Log getLog() {
protected Logger getLog() {
return log;
}

View file

@ -32,9 +32,6 @@ import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.RegionUtils;
import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
@ -62,16 +59,15 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
import lombok.Setter;
import lombok.experimental.Accessors;
import lombok.extern.slf4j.Slf4j;
/**
* Worker is the high level class that Kinesis applications use to start processing data. It initializes and oversees
* different components (e.g. syncing shard and lease information, tracking shard assignments, and processing data from
* the shards).
*/
@Slf4j
public class Worker implements Runnable {
private static final Log LOG = LogFactory.getLog(Worker.class);
private static final int MAX_INITIALIZATION_ATTEMPTS = 20;
private static final WorkerStateChangeListener DEFAULT_WORKER_STATE_CHANGE_LISTENER = new NoOpWorkerStateChangeListener();
@ -401,25 +397,25 @@ public class Worker implements Runnable {
if (config.getRegionName() != null) {
Region region = RegionUtils.getRegion(config.getRegionName());
kinesisClient.setRegion(region);
LOG.debug("The region of Amazon Kinesis client has been set to " + config.getRegionName());
log.debug("The region of Amazon Kinesis client has been set to {}", config.getRegionName());
dynamoDBClient.setRegion(region);
LOG.debug("The region of Amazon DynamoDB client has been set to " + config.getRegionName());
log.debug("The region of Amazon DynamoDB client has been set to {}", config.getRegionName());
}
// If a dynamoDB endpoint was explicitly specified, use it to set the DynamoDB endpoint.
if (config.getDynamoDBEndpoint() != null) {
dynamoDBClient.setEndpoint(config.getDynamoDBEndpoint());
LOG.debug("The endpoint of Amazon DynamoDB client has been set to " + config.getDynamoDBEndpoint());
log.debug("The endpoint of Amazon DynamoDB client has been set to {}", config.getDynamoDBEndpoint());
}
// If a kinesis endpoint was explicitly specified, use it to set the region of kinesis.
if (config.getKinesisEndpoint() != null) {
kinesisClient.setEndpoint(config.getKinesisEndpoint());
if (config.getRegionName() != null) {
LOG.warn("Received configuration for both region name as " + config.getRegionName()
+ ", and Amazon Kinesis endpoint as " + config.getKinesisEndpoint()
+ ". Amazon Kinesis endpoint will overwrite region name.");
LOG.debug("The region of Amazon Kinesis client has been overwritten to " + config.getKinesisEndpoint());
log.warn("Received configuration for both region name as {}, and Amazon Kinesis endpoint as {}"
+ ". Amazon Kinesis endpoint will overwrite region name.", config.getRegionName(),
config.getKinesisEndpoint());
log.debug("The region of Amazon Kinesis client has been overwritten to {}", config.getKinesisEndpoint());
} else {
LOG.debug("The region of Amazon Kinesis client has been set to " + config.getKinesisEndpoint());
log.debug("The region of Amazon Kinesis client has been set to {}", config.getKinesisEndpoint());
}
}
}
@ -564,9 +560,9 @@ public class Worker implements Runnable {
try {
initialize();
LOG.info("Initialization complete. Starting worker loop.");
log.info("Initialization complete. Starting worker loop.");
} catch (RuntimeException e1) {
LOG.error("Unable to initialize after " + MAX_INITIALIZATION_ATTEMPTS + " attempts. Shutting down.", e1);
log.error("Unable to initialize after {} attempts. Shutting down.", MAX_INITIALIZATION_ATTEMPTS, e1);
shutdown();
}
@ -575,7 +571,7 @@ public class Worker implements Runnable {
}
finalShutdown();
LOG.info("Worker loop is complete. Exiting from worker.");
log.info("Worker loop is complete. Exiting from worker.");
}
@VisibleForTesting
@ -603,12 +599,12 @@ public class Worker implements Runnable {
wlog.info("Sleeping ...");
Thread.sleep(idleTimeInMilliseconds);
} catch (Exception e) {
LOG.error(String.format("Worker.run caught exception, sleeping for %s milli seconds!",
String.valueOf(idleTimeInMilliseconds)), e);
log.error("Worker.run caught exception, sleeping for {} milli seconds!",
String.valueOf(idleTimeInMilliseconds), e);
try {
Thread.sleep(idleTimeInMilliseconds);
} catch (InterruptedException ex) {
LOG.info("Worker: sleep interrupted after catching exception ", ex);
log.info("Worker: sleep interrupted after catching exception ", ex);
}
}
wlog.resetInfoLogging();
@ -621,35 +617,35 @@ public class Worker implements Runnable {
for (int i = 0; (!isDone) && (i < MAX_INITIALIZATION_ATTEMPTS); i++) {
try {
LOG.info("Initialization attempt " + (i + 1));
LOG.info("Initializing LeaseCoordinator");
log.info("Initialization attempt {}", (i + 1));
log.info("Initializing LeaseCoordinator");
leaseCoordinator.initialize();
TaskResult result = null;
if (!skipShardSyncAtWorkerInitializationIfLeasesExist
|| leaseCoordinator.getLeaseManager().isLeaseTableEmpty()) {
LOG.info("Syncing Kinesis shard info");
log.info("Syncing Kinesis shard info");
ShardSyncTask shardSyncTask = new ShardSyncTask(streamConfig.getStreamProxy(),
leaseCoordinator.getLeaseManager(), initialPosition, cleanupLeasesUponShardCompletion,
config.shouldIgnoreUnexpectedChildShards(), 0L);
result = new MetricsCollectingTaskDecorator(shardSyncTask, metricsFactory).call();
} else {
LOG.info("Skipping shard sync per config setting (and lease table is not empty)");
log.info("Skipping shard sync per config setting (and lease table is not empty)");
}
if (result == null || result.getException() == null) {
if (!leaseCoordinator.isRunning()) {
LOG.info("Starting LeaseCoordinator");
log.info("Starting LeaseCoordinator");
leaseCoordinator.start();
} else {
LOG.info("LeaseCoordinator is already running. No need to start it.");
log.info("LeaseCoordinator is already running. No need to start it.");
}
isDone = true;
} else {
lastException = result.getException();
}
} catch (LeasingException e) {
LOG.error("Caught exception when initializing LeaseCoordinator", e);
log.error("Caught exception when initializing LeaseCoordinator", e);
lastException = e;
} catch (Exception e) {
lastException = e;
@ -658,7 +654,7 @@ public class Worker implements Runnable {
try {
Thread.sleep(parentShardPollIntervalMillis);
} catch (InterruptedException e) {
LOG.debug("Sleep interrupted while initializing worker.");
log.debug("Sleep interrupted while initializing worker.");
}
}
@ -899,10 +895,10 @@ public class Worker implements Runnable {
*/
public void shutdown() {
if (shutdown) {
LOG.warn("Shutdown requested a second time.");
log.warn("Shutdown requested a second time.");
return;
}
LOG.info("Worker shutdown requested.");
log.info("Worker shutdown requested.");
// Set shutdown flag, so Worker.run can start shutdown process.
shutdown = true;
@ -920,7 +916,7 @@ public class Worker implements Runnable {
* threads, etc.
*/
private void finalShutdown() {
LOG.info("Starting worker's final shutdown.");
log.info("Starting worker's final shutdown.");
if (executorService instanceof WorkerThreadPoolExecutor) {
// This should interrupt all active record processor tasks.
@ -941,16 +937,16 @@ public class Worker implements Runnable {
@VisibleForTesting
boolean shouldShutdown() {
if (executorService.isShutdown()) {
LOG.error("Worker executor service has been shutdown, so record processors cannot be shutdown.");
log.error("Worker executor service has been shutdown, so record processors cannot be shutdown.");
return true;
}
if (shutdown) {
if (shardInfoShardConsumerMap.isEmpty()) {
LOG.info("All record processors have been shutdown successfully.");
log.info("All record processors have been shutdown successfully.");
return true;
}
if ((System.currentTimeMillis() - shutdownStartTimeMillis) >= failoverTimeMillis) {
LOG.info("Lease failover time is reached, so forcing shutdown.");
log.info("Lease failover time is reached, so forcing shutdown.");
return true;
}
}
@ -1019,27 +1015,27 @@ public class Worker implements Runnable {
@SuppressWarnings("unused")
public void debug(Object message, Throwable t) {
LOG.debug(message, t);
log.debug("{}", message, t);
}
public void info(Object message) {
if (this.isInfoEnabled()) {
LOG.info(message);
log.info("{}", message);
}
}
public void infoForce(Object message) {
LOG.info(message);
log.info("{}", message);
}
@SuppressWarnings("unused")
public void warn(Object message) {
LOG.warn(message);
log.warn("{}", message);
}
@SuppressWarnings("unused")
public void error(Object message, Throwable t) {
LOG.error(message, t);
log.error("{}", message, t);
}
private boolean isInfoEnabled() {
@ -1049,7 +1045,7 @@ public class Worker implements Runnable {
private void resetInfoLogging() {
if (infoReporting) {
// We just logged at INFO level for a pass through worker loop
if (LOG.isInfoEnabled()) {
if (log.isInfoEnabled()) {
infoReporting = false;
nextReportTime = System.currentTimeMillis() + reportIntervalMillis;
} // else is DEBUG or TRACE so leave reporting true
@ -1082,7 +1078,7 @@ public class Worker implements Runnable {
if (config.getRegionName() != null) {
Region region = RegionUtils.getRegion(config.getRegionName());
cloudWatchClient.setRegion(region);
LOG.debug("The region of Amazon CloudWatch client has been set to " + config.getRegionName());
log.debug("The region of Amazon CloudWatch client has been set to {}", config.getRegionName());
}
metricsFactory = new WorkerCWMetricsFactory(cloudWatchClient, config.getApplicationName(),
config.getMetricsBufferTimeMillis(), config.getMetricsMaxQueueSize(), config.getMetricsLevel(),
@ -1216,28 +1212,28 @@ public class Worker implements Runnable {
if (config.getRegionName() != null) {
Region region = RegionUtils.getRegion(config.getRegionName());
cloudWatchClient.setRegion(region);
LOG.debug("The region of Amazon CloudWatch client has been set to " + config.getRegionName());
log.debug("The region of Amazon CloudWatch client has been set to {}", config.getRegionName());
kinesisClient.setRegion(region);
LOG.debug("The region of Amazon Kinesis client has been set to " + config.getRegionName());
log.debug("The region of Amazon Kinesis client has been set to {}", config.getRegionName());
dynamoDBClient.setRegion(region);
LOG.debug("The region of Amazon DynamoDB client has been set to " + config.getRegionName());
log.debug("The region of Amazon DynamoDB client has been set to {}", config.getRegionName());
}
// If a dynamoDB endpoint was explicitly specified, use it to set the DynamoDB endpoint.
if (config.getDynamoDBEndpoint() != null) {
dynamoDBClient.setEndpoint(config.getDynamoDBEndpoint());
LOG.debug("The endpoint of Amazon DynamoDB client has been set to " + config.getDynamoDBEndpoint());
log.debug("The endpoint of Amazon DynamoDB client has been set to {}", config.getDynamoDBEndpoint());
}
// If a kinesis endpoint was explicitly specified, use it to set the region of kinesis.
if (config.getKinesisEndpoint() != null) {
kinesisClient.setEndpoint(config.getKinesisEndpoint());
if (config.getRegionName() != null) {
LOG.warn("Received configuration for both region name as " + config.getRegionName()
+ ", and Amazon Kinesis endpoint as " + config.getKinesisEndpoint()
+ ". Amazon Kinesis endpoint will overwrite region name.");
LOG.debug("The region of Amazon Kinesis client has been overwritten to "
+ config.getKinesisEndpoint());
log.warn("Received configuration for both region name as {}, and Amazon Kinesis endpoint as {}"
+ ". Amazon Kinesis endpoint will overwrite region name.", config.getRegionName(),
config.getKinesisEndpoint());
log.debug("The region of Amazon Kinesis client has been overwritten to {}",
config.getKinesisEndpoint());
} else {
LOG.debug("The region of Amazon Kinesis client has been set to " + config.getKinesisEndpoint());
log.debug("The region of Amazon Kinesis client has been set to {}", config.getKinesisEndpoint());
}
}
if (metricsFactory == null) {

View file

@ -24,8 +24,6 @@ import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.kinesis.AmazonKinesis;
@ -51,14 +49,13 @@ import com.amazonaws.services.kinesis.model.ShardIteratorType;
import com.amazonaws.services.kinesis.model.StreamStatus;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
/**
* Kinesis proxy - used to make calls to Amazon Kinesis (e.g. fetch data records and list of shards).
*/
@Slf4j
public class KinesisProxy implements IKinesisProxyExtended {
private static final Log LOG = LogFactory.getLog(KinesisProxy.class);
private static final EnumSet<ShardIteratorType> EXPECTED_ITERATOR_TYPES = EnumSet
.of(ShardIteratorType.AT_SEQUENCE_NUMBER, ShardIteratorType.AFTER_SEQUENCE_NUMBER);
@ -153,7 +150,7 @@ public class KinesisProxy implements IKinesisProxyExtended {
maxDescribeStreamRetryAttempts,
listShardsBackoffTimeInMillis,
maxListShardsRetryAttempts);
LOG.debug("KinesisProxy has created a kinesisClient");
log.debug("KinesisProxy has created a kinesisClient");
}
/**
@ -181,7 +178,7 @@ public class KinesisProxy implements IKinesisProxyExtended {
this(streamName, kinesisClient, describeStreamBackoffTimeInMillis, maxDescribeStreamRetryAttempts,
listShardsBackoffTimeInMillis, maxListShardsRetryAttempts);
this.credentialsProvider = credentialProvider;
LOG.debug("KinesisProxy( " + streamName + ")");
log.debug("KinesisProxy( " + streamName + ")");
}
/**
@ -215,10 +212,10 @@ public class KinesisProxy implements IKinesisProxyExtended {
if (Class.forName("com.amazonaws.services.dynamodbv2.streamsadapter.AmazonDynamoDBStreamsAdapterClient")
.isAssignableFrom(client.getClass())) {
isKinesisClient = false;
LOG.debug("Client is DynamoDb client, will use DescribeStream.");
log.debug("Client is DynamoDb client, will use DescribeStream.");
}
} catch (ClassNotFoundException e) {
LOG.debug("Client is Kinesis Client, using ListShards instead of DescribeStream.");
log.debug("Client is Kinesis Client, using ListShards instead of DescribeStream.");
}
}
@ -259,12 +256,12 @@ public class KinesisProxy implements IKinesisProxyExtended {
try {
response = client.describeStream(describeStreamRequest);
} catch (LimitExceededException le) {
LOG.info("Got LimitExceededException when describing stream " + streamName + ". Backing off for "
+ this.describeStreamBackoffTimeInMillis + " millis.");
log.info("Got LimitExceededException when describing stream {}. Backing off for {} millis.", streamName,
this.describeStreamBackoffTimeInMillis);
try {
Thread.sleep(this.describeStreamBackoffTimeInMillis);
} catch (InterruptedException ie) {
LOG.debug("Stream " + streamName + " : Sleep was interrupted ", ie);
log.debug("Stream {} : Sleep was interrupted ", streamName, ie);
}
lastException = le;
}
@ -281,8 +278,8 @@ public class KinesisProxy implements IKinesisProxyExtended {
|| StreamStatus.UPDATING.toString().equals(response.getStreamDescription().getStreamStatus())) {
return response;
} else {
LOG.info("Stream is in status " + response.getStreamDescription().getStreamStatus()
+ ", KinesisProxy.DescribeStream returning null (wait until stream is Active or Updating");
log.info("Stream is in status {}, KinesisProxy.DescribeStream returning null (wait until stream is Active "
+ "or Updating", response.getStreamDescription().getStreamStatus());
return null;
}
}
@ -303,16 +300,16 @@ public class KinesisProxy implements IKinesisProxyExtended {
try {
result = client.listShards(request);
} catch (LimitExceededException e) {
LOG.info("Got LimitExceededException when listing shards " + streamName + ". Backing off for "
+ this.listShardsBackoffTimeInMillis + " millis.");
log.info("Got LimitExceededException when listing shards {}. Backing off for {} millis.", streamName,
this.listShardsBackoffTimeInMillis);
try {
Thread.sleep(this.listShardsBackoffTimeInMillis);
} catch (InterruptedException ie) {
LOG.debug("Stream " + streamName + " : Sleep was interrupted ", ie);
log.debug("Stream {} : Sleep was interrupted ", streamName, ie);
}
lastException = e;
} catch (ResourceInUseException e) {
LOG.info("Stream is not in Active/Updating status, returning null (wait until stream is in Active or"
log.info("Stream is not in Active/Updating status, returning null (wait until stream is in Active or"
+ " Updating)");
return null;
}
@ -344,7 +341,7 @@ public class KinesisProxy implements IKinesisProxyExtended {
}
}
LOG.warn("Cannot find the shard given the shardId " + shardId);
log.warn("Cannot find the shard given the shardId {}", shardId);
return null;
}
@ -426,12 +423,12 @@ public class KinesisProxy implements IKinesisProxyExtended {
try {
shardIteratorType = ShardIteratorType.fromValue(iteratorType);
} catch (IllegalArgumentException iae) {
LOG.error("Caught illegal argument exception while parsing iteratorType: " + iteratorType, iae);
log.error("Caught illegal argument exception while parsing iteratorType: {}", iteratorType, iae);
shardIteratorType = null;
}
if (!EXPECTED_ITERATOR_TYPES.contains(shardIteratorType)) {
LOG.info("This method should only be used for AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER "
log.info("This method should only be used for AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER "
+ "ShardIteratorTypes. For methods to use with other ShardIteratorTypes, see IKinesisProxy.java");
}
final GetShardIteratorRequest getShardIteratorRequest = new GetShardIteratorRequest();

View file

@ -24,19 +24,18 @@ import java.util.Date;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.model.Record;
import com.google.protobuf.InvalidProtocolBufferException;
import lombok.extern.slf4j.Slf4j;
/**
* This class represents a KPL user record.
*/
@SuppressWarnings("serial")
@Slf4j
public class UserRecord extends Record {
private static final Log LOG = LogFactory.getLog(UserRecord.class);
private static final byte[] AGGREGATED_RECORD_MAGIC = new byte[] {-13, -119, -102, -62 };
private static final int DIGEST_SIZE = 16;
private static final BigInteger SMALLEST_HASH_KEY = new BigInteger("0");
@ -286,7 +285,7 @@ public class UserRecord extends Record {
sb.append("Sequence number: ").append(r.getSequenceNumber()).append("\n")
.append("Raw data: ")
.append(javax.xml.bind.DatatypeConverter.printBase64Binary(messageData)).append("\n");
LOG.error(sb.toString(), e);
log.error(sb.toString(), e);
}
} catch (InvalidProtocolBufferException e) {
isAggregated = false;

View file

@ -14,9 +14,6 @@
*/
package com.amazonaws.services.kinesis.leases.impl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
@ -28,10 +25,6 @@ import com.amazonaws.services.kinesis.leases.interfaces.IKinesisClientLeaseManag
* An implementation of LeaseManager for the KinesisClientLibrary - takeLease updates the ownerSwitchesSinceCheckpoint field.
*/
public class KinesisClientLeaseManager extends LeaseManager<KinesisClientLease> implements IKinesisClientLeaseManager {
@SuppressWarnings("unused")
private static final Log LOG = LogFactory.getLog(KinesisClientLeaseManager.class);
/**
* Constructor.
*

View file

@ -26,9 +26,6 @@ import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
@ -44,12 +41,15 @@ import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope;
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import lombok.extern.slf4j.Slf4j;
/**
* LeaseCoordinator abstracts away LeaseTaker and LeaseRenewer from the application code that's using leasing. It owns
* the scheduling of the two previously mentioned components as well as informing LeaseRenewer when LeaseTaker takes new
* leases.
*
*/
@Slf4j
public class LeaseCoordinator<T extends Lease> {
/*
@ -58,8 +58,6 @@ public class LeaseCoordinator<T extends Lease> {
*/
public static final String WORKER_IDENTIFIER_METRIC = "WorkerIdentifier";
private static final Log LOG = LogFactory.getLog(LeaseCoordinator.class);
// Time to wait for in-flight Runnables to finish when calling .stop();
private static final long STOP_WAIT_TIME_MILLIS = 2000L;
@ -148,15 +146,14 @@ public class LeaseCoordinator<T extends Lease> {
this.takerIntervalMillis = (leaseDurationMillis + epsilonMillis) * 2;
this.metricsFactory = metricsFactory;
LOG.info(String.format(
"With failover time %d ms and epsilon %d ms, LeaseCoordinator will renew leases every %d ms, take" +
"leases every %d ms, process maximum of %d leases and steal %d lease(s) at a time.",
log.info("With failover time {} ms and epsilon {} ms, LeaseCoordinator will renew leases every {} ms, take"
+ "leases every {} ms, process maximum of {} leases and steal {} lease(s) at a time.",
leaseDurationMillis,
epsilonMillis,
renewerIntervalMillis,
takerIntervalMillis,
maxLeasesForWorker,
maxLeasesToStealAtOneTime));
maxLeasesToStealAtOneTime);
}
private class TakerRunnable implements Runnable {
@ -166,9 +163,9 @@ public class LeaseCoordinator<T extends Lease> {
try {
runTaker();
} catch (LeasingException e) {
LOG.error("LeasingException encountered in lease taking thread", e);
log.error("LeasingException encountered in lease taking thread", e);
} catch (Throwable t) {
LOG.error("Throwable encountered in lease taking thread", t);
log.error("Throwable encountered in lease taking thread", t);
}
}
@ -181,9 +178,9 @@ public class LeaseCoordinator<T extends Lease> {
try {
runRenewer();
} catch (LeasingException e) {
LOG.error("LeasingException encountered in lease renewing thread", e);
log.error("LeasingException encountered in lease renewing thread", e);
} catch (Throwable t) {
LOG.error("Throwable encountered in lease renewing thread", t);
log.error("Throwable encountered in lease renewing thread", t);
}
}
@ -296,19 +293,19 @@ public class LeaseCoordinator<T extends Lease> {
leaseCoordinatorThreadPool.shutdown();
try {
if (leaseCoordinatorThreadPool.awaitTermination(STOP_WAIT_TIME_MILLIS, TimeUnit.MILLISECONDS)) {
LOG.info(String.format("Worker %s has successfully stopped lease-tracking threads",
leaseTaker.getWorkerIdentifier()));
log.info("Worker {} has successfully stopped lease-tracking threads",
leaseTaker.getWorkerIdentifier());
} else {
leaseCoordinatorThreadPool.shutdownNow();
LOG.info(String.format("Worker %s stopped lease-tracking threads %dms after stop",
leaseTaker.getWorkerIdentifier(),
STOP_WAIT_TIME_MILLIS));
log.info("Worker {} stopped lease-tracking threads {} ms after stop",
leaseTaker.getWorkerIdentifier(),
STOP_WAIT_TIME_MILLIS);
}
} catch (InterruptedException e) {
LOG.debug("Encountered InterruptedException when awaiting threadpool termination");
log.debug("Encountered InterruptedException when awaiting threadpool termination");
}
} else {
LOG.debug("Threadpool was null, no need to shutdown/terminate threadpool.");
log.debug("Threadpool was null, no need to shutdown/terminate threadpool.");
}
leaseRenewalThreadpool.shutdownNow();

View file

@ -19,10 +19,6 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import com.amazonaws.services.kinesis.leases.util.DynamoUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
@ -50,13 +46,13 @@ import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputExc
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseSerializer;
import lombok.extern.slf4j.Slf4j;
/**
* An implementation of ILeaseManager that uses DynamoDB.
*/
@Slf4j
public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
private static final Log LOG = LogFactory.getLog(LeaseManager.class);
protected String table;
protected AmazonDynamoDB dynamoDBClient;
protected ILeaseSerializer<T> serializer;
@ -112,7 +108,7 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
//
// Something went wrong with DynamoDB
//
LOG.error("Failed to get table status for " + table, de);
log.error("Failed to get table status for {}", table, de);
}
CreateTableRequest request = new CreateTableRequest();
request.setTableName(table);
@ -127,7 +123,7 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
try {
dynamoDBClient.createTable(request);
} catch (ResourceInUseException e) {
LOG.info("Table " + table + " already exists.");
log.info("Table {} already exists.", table);
return false;
} catch (LimitExceededException e) {
throw new ProvisionedThroughputException("Capacity exceeded when creating table " + table, e);
@ -154,9 +150,8 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
try {
result = dynamoDBClient.describeTable(request);
} catch (ResourceNotFoundException e) {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Got ResourceNotFoundException for table %s in leaseTableExists, returning false.",
table));
if (log.isDebugEnabled()) {
log.debug("Got ResourceNotFoundException for table {} in leaseTableExists, returning false.", table);
}
return null;
} catch (AmazonClientException e) {
@ -164,8 +159,8 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
}
TableStatus tableStatus = TableStatus.fromValue(result.getTable().getTableStatus());
if (LOG.isDebugEnabled()) {
LOG.debug("Lease table exists and is in status " + tableStatus);
if (log.isDebugEnabled()) {
log.debug("Lease table exists and is in status {}", tableStatus);
}
return tableStatus;
@ -201,7 +196,7 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
try {
Thread.sleep(timeToSleepMillis);
} catch (InterruptedException e) {
LOG.debug("Interrupted while sleeping");
log.debug("Interrupted while sleeping");
}
return System.currentTimeMillis() - startTime;
@ -233,8 +228,8 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
* @throws ProvisionedThroughputException if DynamoDB scan fail due to exceeded capacity
*/
List<T> list(Integer limit) throws DependencyException, InvalidStateException, ProvisionedThroughputException {
if (LOG.isDebugEnabled()) {
LOG.debug("Listing leases from table " + table);
if (log.isDebugEnabled()) {
log.debug("Listing leases from table {}", table);
}
ScanRequest scanRequest = new ScanRequest();
@ -249,8 +244,8 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
while (scanResult != null) {
for (Map<String, AttributeValue> item : scanResult.getItems()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Got item " + item.toString() + " from DynamoDB.");
if (log.isDebugEnabled()) {
log.debug("Got item {} from DynamoDB.", item.toString());
}
result.add(serializer.fromDynamoRecord(item));
@ -260,23 +255,23 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
if (lastEvaluatedKey == null) {
// Signify that we're done.
scanResult = null;
if (LOG.isDebugEnabled()) {
LOG.debug("lastEvaluatedKey was null - scan finished.");
if (log.isDebugEnabled()) {
log.debug("lastEvaluatedKey was null - scan finished.");
}
} else {
// Make another request, picking up where we left off.
scanRequest.setExclusiveStartKey(lastEvaluatedKey);
if (LOG.isDebugEnabled()) {
LOG.debug("lastEvaluatedKey was " + lastEvaluatedKey + ", continuing scan.");
if (log.isDebugEnabled()) {
log.debug("lastEvaluatedKey was {}, continuing scan.", lastEvaluatedKey);
}
scanResult = dynamoDBClient.scan(scanRequest);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("Listed " + result.size() + " leases from table " + table);
if (log.isDebugEnabled()) {
log.debug("Listed {} leases from table {}", result.size(), table);
}
return result;
@ -297,8 +292,8 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
verifyNotNull(lease, "lease cannot be null");
if (LOG.isDebugEnabled()) {
LOG.debug("Creating lease " + lease);
if (log.isDebugEnabled()) {
log.debug("Creating lease {}", lease);
}
PutItemRequest request = new PutItemRequest();
@ -309,8 +304,8 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
try {
dynamoDBClient.putItem(request);
} catch (ConditionalCheckFailedException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Did not create lease " + lease + " because it already existed");
if (log.isDebugEnabled()) {
log.debug("Did not create lease {} because it already existed", lease);
}
return false;
@ -329,8 +324,8 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
verifyNotNull(leaseKey, "leaseKey cannot be null");
if (LOG.isDebugEnabled()) {
LOG.debug("Getting lease with key " + leaseKey);
if (log.isDebugEnabled()) {
log.debug("Getting lease with key {}", leaseKey);
}
GetItemRequest request = new GetItemRequest();
@ -343,15 +338,15 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
Map<String, AttributeValue> dynamoRecord = result.getItem();
if (dynamoRecord == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("No lease found with key " + leaseKey + ", returning null.");
if (log.isDebugEnabled()) {
log.debug("No lease found with key {}, returning null.", leaseKey);
}
return null;
} else {
T lease = serializer.fromDynamoRecord(dynamoRecord);
if (LOG.isDebugEnabled()) {
LOG.debug("Got lease " + lease);
if (log.isDebugEnabled()) {
log.debug("Got lease {}", lease);
}
return lease;
@ -369,8 +364,8 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
verifyNotNull(lease, "lease cannot be null");
if (LOG.isDebugEnabled()) {
LOG.debug("Renewing lease with key " + lease.getLeaseKey());
if (log.isDebugEnabled()) {
log.debug("Renewing lease with key {}", lease.getLeaseKey());
}
UpdateItemRequest request = new UpdateItemRequest();
@ -382,9 +377,9 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
try {
dynamoDBClient.updateItem(request);
} catch (ConditionalCheckFailedException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Lease renewal failed for lease with key " + lease.getLeaseKey()
+ " because the lease counter was not " + lease.getLeaseCounter());
if (log.isDebugEnabled()) {
log.debug("Lease renewal failed for lease with key {} because the lease counter was not {}",
lease.getLeaseKey(), lease.getLeaseCounter());
}
// If we had a spurious retry during the Dynamo update, then this conditional PUT failure
@ -398,8 +393,7 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
return false;
}
LOG.info("Detected spurious renewal failure for lease with key " + lease.getLeaseKey()
+ ", but recovered");
log.info("Detected spurious renewal failure for lease with key {}, but recovered", lease.getLeaseKey());
} catch (AmazonClientException e) {
throw convertAndRethrowExceptions("renew", lease.getLeaseKey(), e);
}
@ -417,11 +411,11 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
verifyNotNull(lease, "lease cannot be null");
verifyNotNull(owner, "owner cannot be null");
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Taking lease with leaseKey %s from %s to %s",
if (log.isDebugEnabled()) {
log.debug("Taking lease with leaseKey {} from {} to {}",
lease.getLeaseKey(),
lease.getLeaseOwner() == null ? "nobody" : lease.getLeaseOwner(),
owner));
owner);
}
UpdateItemRequest request = new UpdateItemRequest();
@ -436,9 +430,9 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
try {
dynamoDBClient.updateItem(request);
} catch (ConditionalCheckFailedException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Lease renewal failed for lease with key " + lease.getLeaseKey()
+ " because the lease counter was not " + lease.getLeaseCounter());
if (log.isDebugEnabled()) {
log.debug("Lease renewal failed for lease with key {} because the lease counter was not {}",
lease.getLeaseKey(), lease.getLeaseCounter());
}
return false;
@ -460,10 +454,10 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
verifyNotNull(lease, "lease cannot be null");
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Evicting lease with leaseKey %s owned by %s",
if (log.isDebugEnabled()) {
log.debug("Evicting lease with leaseKey {} owned by {}",
lease.getLeaseKey(),
lease.getLeaseOwner()));
lease.getLeaseOwner());
}
UpdateItemRequest request = new UpdateItemRequest();
@ -478,9 +472,9 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
try {
dynamoDBClient.updateItem(request);
} catch (ConditionalCheckFailedException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Lease eviction failed for lease with key " + lease.getLeaseKey()
+ " because the lease owner was not " + lease.getLeaseOwner());
if (log.isDebugEnabled()) {
log.debug("Lease eviction failed for lease with key {} because the lease owner was not {}",
lease.getLeaseKey(), lease.getLeaseOwner());
}
return false;
@ -499,7 +493,7 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
public void deleteAll() throws DependencyException, InvalidStateException, ProvisionedThroughputException {
List<T> allLeases = listLeases();
LOG.warn("Deleting " + allLeases.size() + " items from table " + table);
log.warn("Deleting {} items from table {}", allLeases.size(), table);
for (T lease : allLeases) {
DeleteItemRequest deleteRequest = new DeleteItemRequest();
@ -517,8 +511,8 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
public void deleteLease(T lease) throws DependencyException, InvalidStateException, ProvisionedThroughputException {
verifyNotNull(lease, "lease cannot be null");
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Deleting lease with leaseKey %s", lease.getLeaseKey()));
if (log.isDebugEnabled()) {
log.debug("Deleting lease with leaseKey {}", lease.getLeaseKey());
}
DeleteItemRequest deleteRequest = new DeleteItemRequest();
@ -540,8 +534,8 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
throws DependencyException, InvalidStateException, ProvisionedThroughputException {
verifyNotNull(lease, "lease cannot be null");
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Updating lease %s", lease));
if (log.isDebugEnabled()) {
log.debug("Updating lease {}", lease);
}
UpdateItemRequest request = new UpdateItemRequest();
@ -556,9 +550,9 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
try {
dynamoDBClient.updateItem(request);
} catch (ConditionalCheckFailedException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("Lease update failed for lease with key " + lease.getLeaseKey()
+ " because the lease counter was not " + lease.getLeaseCounter());
if (log.isDebugEnabled()) {
log.debug("Lease update failed for lease with key {} because the lease counter was not {}",
lease.getLeaseKey(), lease.getLeaseCounter());
}
return false;
@ -577,7 +571,7 @@ public class LeaseManager<T extends Lease> implements ILeaseManager<T> {
protected DependencyException convertAndRethrowExceptions(String operation, String leaseKey, AmazonClientException e)
throws ProvisionedThroughputException, InvalidStateException {
if (e instanceof ProvisionedThroughputExceededException) {
LOG.warn("Provisioned Throughput on the lease table has been exceeded. It's recommended that you increase the IOPs on the table. Failure to increase the IOPs may cause the application to not make progress.");
log.warn("Provisioned Throughput on the lease table has been exceeded. It's recommended that you increase the IOPs on the table. Failure to increase the IOPs may cause the application to not make progress.");
throw new ProvisionedThroughputException(e);
} else if (e instanceof ResourceNotFoundException) {
// @formatter:on

View file

@ -29,9 +29,6 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudwatch.model.StandardUnit;
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
@ -43,12 +40,13 @@ import com.amazonaws.services.kinesis.metrics.impl.ThreadSafeMetricsDelegatingSc
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope;
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
import lombok.extern.slf4j.Slf4j;
/**
* An implementation of ILeaseRenewer that uses DynamoDB via LeaseManager.
*/
@Slf4j
public class LeaseRenewer<T extends Lease> implements ILeaseRenewer<T> {
private static final Log LOG = LogFactory.getLog(LeaseRenewer.class);
private static final int RENEWAL_RETRIES = 2;
private final ILeaseManager<T> leaseManager;
@ -78,13 +76,13 @@ public class LeaseRenewer<T extends Lease> implements ILeaseRenewer<T> {
*/
@Override
public void renewLeases() throws DependencyException, InvalidStateException {
if (LOG.isDebugEnabled()) {
if (log.isDebugEnabled()) {
// Due to the eventually consistent nature of ConcurrentNavigableMap iterators, this log entry may become
// inaccurate during iteration.
LOG.debug(String.format("Worker %s holding %d leases: %s",
log.debug("Worker {} holding {} leases: {}",
workerIdentifier,
ownedLeases.size(),
ownedLeases));
ownedLeases);
}
/*
@ -112,11 +110,11 @@ public class LeaseRenewer<T extends Lease> implements ILeaseRenewer<T> {
lostLeases++;
}
} catch (InterruptedException e) {
LOG.info("Interrupted while waiting for a lease to renew.");
log.info("Interrupted while waiting for a lease to renew.");
leasesInUnknownState += 1;
Thread.currentThread().interrupt();
} catch (ExecutionException e) {
LOG.error("Encountered an exception while renewing a lease.", e.getCause());
log.error("Encountered an exception while renewing a lease.", e.getCause());
leasesInUnknownState += 1;
lastException = e;
}
@ -181,24 +179,24 @@ public class LeaseRenewer<T extends Lease> implements ILeaseRenewer<T> {
}
if (renewedLease) {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Worker %s successfully renewed lease with key %s",
if (log.isDebugEnabled()) {
log.debug("Worker {} successfully renewed lease with key {}",
workerIdentifier,
leaseKey));
leaseKey);
}
} else {
LOG.info(String.format("Worker %s lost lease with key %s", workerIdentifier, leaseKey));
log.info("Worker {} lost lease with key {}", workerIdentifier, leaseKey);
ownedLeases.remove(leaseKey);
}
success = true;
break;
} catch (ProvisionedThroughputException e) {
LOG.info(String.format("Worker %s could not renew lease with key %s on try %d out of %d due to capacity",
log.info("Worker {} could not renew lease with key {} on try {} out of {} due to capacity",
workerIdentifier,
leaseKey,
i,
RENEWAL_RETRIES));
RENEWAL_RETRIES);
}
}
} finally {
@ -252,8 +250,8 @@ public class LeaseRenewer<T extends Lease> implements ILeaseRenewer<T> {
}
if (copy.isExpired(leaseDurationNanos, now)) {
LOG.info(String.format("getCurrentlyHeldLease not returning lease with key %s because it is expired",
copy.getLeaseKey()));
log.info("getCurrentlyHeldLease not returning lease with key {} because it is expired",
copy.getLeaseKey());
return null;
} else {
return copy;
@ -275,9 +273,9 @@ public class LeaseRenewer<T extends Lease> implements ILeaseRenewer<T> {
T authoritativeLease = ownedLeases.get(leaseKey);
if (authoritativeLease == null) {
LOG.info(String.format("Worker %s could not update lease with key %s because it does not hold it",
log.info("Worker {} could not update lease with key {} because it does not hold it",
workerIdentifier,
leaseKey));
leaseKey);
return false;
}
@ -287,8 +285,8 @@ public class LeaseRenewer<T extends Lease> implements ILeaseRenewer<T> {
* called update.
*/
if (!authoritativeLease.getConcurrencyToken().equals(concurrencyToken)) {
LOG.info(String.format("Worker %s refusing to update lease with key %s because"
+ " concurrency tokens don't match", workerIdentifier, leaseKey));
log.info("Worker {} refusing to update lease with key {} because"
+ " concurrency tokens don't match", workerIdentifier, leaseKey);
return false;
}
@ -306,9 +304,9 @@ public class LeaseRenewer<T extends Lease> implements ILeaseRenewer<T> {
* If updateLease returns false, it means someone took the lease from us. Remove the lease
* from our set of owned leases pro-actively rather than waiting for a run of renewLeases().
*/
LOG.info(String.format("Worker %s lost lease with key %s - discovered during update",
log.info("Worker {} lost lease with key {} - discovered during update",
workerIdentifier,
leaseKey));
leaseKey);
/*
* Remove only if the value currently in the map is the same as the authoritative lease. We're
@ -345,8 +343,8 @@ public class LeaseRenewer<T extends Lease> implements ILeaseRenewer<T> {
for (T lease : newLeases) {
if (lease.getLastCounterIncrementNanos() == null) {
LOG.info(String.format("addLeasesToRenew ignoring lease with key %s because it does not have lastRenewalNanos set",
lease.getLeaseKey()));
log.info("addLeasesToRenew ignoring lease with key {} because it does not have lastRenewalNanos set",
lease.getLeaseKey());
continue;
}
@ -389,7 +387,7 @@ public class LeaseRenewer<T extends Lease> implements ILeaseRenewer<T> {
for (T lease : leases) {
if (workerIdentifier.equals(lease.getLeaseOwner())) {
LOG.info(String.format(" Worker %s found lease %s", workerIdentifier, lease));
log.info(" Worker {} found lease {}", workerIdentifier, lease);
// Okay to renew even if lease is expired, because we start with an empty list and we add the lease to
// our list only after a successful renew. So we don't need to worry about the edge case where we could
// continue renewing a lease after signaling a lease loss to the application.
@ -397,7 +395,7 @@ public class LeaseRenewer<T extends Lease> implements ILeaseRenewer<T> {
myLeases.add(lease);
}
} else {
LOG.debug(String.format("Worker %s ignoring lease %s ", workerIdentifier, lease));
log.debug("Worker {} ignoring lease {} ", workerIdentifier, lease);
}
}

View file

@ -26,9 +26,6 @@ import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudwatch.model.StandardUnit;
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
@ -39,13 +36,13 @@ import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope;
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
import lombok.extern.slf4j.Slf4j;
/**
* An implementation of ILeaseTaker that uses DynamoDB via LeaseManager.
*/
@Slf4j
public class LeaseTaker<T extends Lease> implements ILeaseTaker<T> {
private static final Log LOG = LogFactory.getLog(LeaseTaker.class);
private static final int TAKE_RETRIES = 3;
private static final int SCAN_RETRIES = 1;
@ -146,10 +143,10 @@ public class LeaseTaker<T extends Lease> implements ILeaseTaker<T> {
updateAllLeases(timeProvider);
success = true;
} catch (ProvisionedThroughputException e) {
LOG.info(String.format("Worker %s could not find expired leases on try %d out of %d",
log.info("Worker {} could not find expired leases on try {} out of {}",
workerIdentifier,
i,
TAKE_RETRIES));
TAKE_RETRIES);
lastException = e;
}
}
@ -158,8 +155,8 @@ public class LeaseTaker<T extends Lease> implements ILeaseTaker<T> {
}
if (lastException != null) {
LOG.error("Worker " + workerIdentifier
+ " could not scan leases table, aborting takeLeases. Exception caught by last retry:",
log.error("Worker {} could not scan leases table, aborting takeLeases. Exception caught by last retry:",
workerIdentifier,
lastException);
return takenLeases;
}
@ -187,11 +184,11 @@ public class LeaseTaker<T extends Lease> implements ILeaseTaker<T> {
success = true;
break;
} catch (ProvisionedThroughputException e) {
LOG.info(String.format("Could not take lease with key %s for worker %s on try %d out of %d due to capacity",
log.info("Could not take lease with key {} for worker {} on try {} out of {} due to capacity",
leaseKey,
workerIdentifier,
i,
TAKE_RETRIES));
TAKE_RETRIES);
}
}
} finally {
@ -200,17 +197,17 @@ public class LeaseTaker<T extends Lease> implements ILeaseTaker<T> {
}
if (takenLeases.size() > 0) {
LOG.info(String.format("Worker %s successfully took %d leases: %s",
log.info("Worker {} successfully took {} leases: {}",
workerIdentifier,
takenLeases.size(),
stringJoin(takenLeases.keySet(), ", ")));
stringJoin(takenLeases.keySet(), ", "));
}
if (untakenLeaseKeys.size() > 0) {
LOG.info(String.format("Worker %s failed to take %d leases: %s",
log.info("Worker {} failed to take {} leases: {}",
workerIdentifier,
untakenLeaseKeys.size(),
stringJoin(untakenLeaseKeys, ", ")));
stringJoin(untakenLeaseKeys, ", "));
}
MetricsHelper.getMetricsScope().addData(
@ -284,16 +281,16 @@ public class LeaseTaker<T extends Lease> implements ILeaseTaker<T> {
// if this new lease is unowned, it's never been renewed.
lease.setLastCounterIncrementNanos(0L);
if (LOG.isDebugEnabled()) {
LOG.debug("Treating new lease with key " + leaseKey
+ " as never renewed because it is new and unowned.");
if (log.isDebugEnabled()) {
log.debug("Treating new lease with key {} as never renewed because it is new and unowned.",
leaseKey);
}
} else {
// if this new lease is owned, treat it as renewed as of the scan
lease.setLastCounterIncrementNanos(lastScanTimeNanos);
if (LOG.isDebugEnabled()) {
LOG.debug("Treating new lease with key " + leaseKey
+ " as recently renewed because it is new and owned.");
if (log.isDebugEnabled()) {
log.debug("Treating new lease with key {} as recently renewed because it is new and owned.",
leaseKey);
}
}
}
@ -356,14 +353,14 @@ public class LeaseTaker<T extends Lease> implements ILeaseTaker<T> {
// exceed the max allowed for this worker.
int leaseSpillover = Math.max(0, target - maxLeasesForWorker);
if (target > maxLeasesForWorker) {
LOG.warn(String.format("Worker %s target is %d leases and maxLeasesForWorker is %d."
+ " Resetting target to %d, lease spillover is %d. "
log.warn("Worker {} target is {} leases and maxLeasesForWorker is {}."
+ " Resetting target to {}, lease spillover is {}. "
+ " Note that some shards may not be processed if no other workers are able to pick them up.",
workerIdentifier,
target,
maxLeasesForWorker,
maxLeasesForWorker,
leaseSpillover));
leaseSpillover);
target = maxLeasesForWorker;
}
metrics.addData("LeaseSpillover", leaseSpillover, StandardUnit.Count, MetricsLevel.SUMMARY);
@ -390,25 +387,25 @@ public class LeaseTaker<T extends Lease> implements ILeaseTaker<T> {
// If there are no expired leases and we need a lease, consider stealing.
List<T> leasesToSteal = chooseLeasesToSteal(leaseCounts, numLeasesToReachTarget, target);
for (T leaseToSteal : leasesToSteal) {
LOG.info(String.format("Worker %s needed %d leases but none were expired, so it will steal lease %s from %s",
log.info("Worker {} needed {} leases but none were expired, so it will steal lease {} from {}",
workerIdentifier,
numLeasesToReachTarget,
leaseToSteal.getLeaseKey(),
leaseToSteal.getLeaseOwner()));
leaseToSteal.getLeaseOwner());
leasesToTake.add(leaseToSteal);
}
}
if (!leasesToTake.isEmpty()) {
LOG.info(String.format("Worker %s saw %d total leases, %d available leases, %d "
+ "workers. Target is %d leases, I have %d leases, I will take %d leases",
log.info("Worker {} saw {} total leases, {} available leases, {} "
+ "workers. Target is {} leases, I have {} leases, I will take {} leases",
workerIdentifier,
numLeases,
originalExpiredLeasesSize,
numWorkers,
target,
myCount,
leasesToTake.size()));
leasesToTake.size());
}
metrics.addData("TotalLeases", numLeases, StandardUnit.Count, MetricsLevel.DETAILED);
@ -456,8 +453,8 @@ public class LeaseTaker<T extends Lease> implements ILeaseTaker<T> {
}
if (numLeasesToSteal <= 0) {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Worker %s not stealing from most loaded worker %s. He has %d,"
if (log.isDebugEnabled()) {
log.debug(String.format("Worker %s not stealing from most loaded worker %s. He has %d,"
+ " target is %d, and I need %d",
workerIdentifier,
mostLoadedWorker.getKey(),
@ -467,16 +464,16 @@ public class LeaseTaker<T extends Lease> implements ILeaseTaker<T> {
}
return leasesToSteal;
} else {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Worker %s will attempt to steal %d leases from most loaded worker %s. "
+ " He has %d leases, target is %d, I need %d, maxLeasesToSteatAtOneTime is %d.",
if (log.isDebugEnabled()) {
log.debug("Worker {} will attempt to steal {} leases from most loaded worker {}. "
+ " He has {} leases, target is {}, I need {}, maxLeasesToStealAtOneTime is {}.",
workerIdentifier,
numLeasesToSteal,
mostLoadedWorker.getKey(),
mostLoadedWorker.getValue(),
target,
needed,
maxLeasesToStealAtOneTime));
maxLeasesToStealAtOneTime);
}
}

View file

@ -18,19 +18,15 @@ import java.util.Collection;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import lombok.extern.slf4j.Slf4j;
/**
* A CWPublisherRunnable contains the logic of when to publish metrics.
*
* @param <KeyType>
*/
@Slf4j
public class CWPublisherRunnable<KeyType> implements Runnable {
private static final Log LOG = LogFactory.getLog(CWPublisherRunnable.class);
private final ICWMetricsPublisher<KeyType> metricsPublisher;
private final MetricAccumulatingQueue<KeyType> queue;
private final long bufferTimeMillis;
@ -68,12 +64,12 @@ public class CWPublisherRunnable<KeyType> implements Runnable {
int maxQueueSize,
int batchSize,
int maxJitter) {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Constructing CWPublisherRunnable with maxBufferTimeMillis %d maxQueueSize %d batchSize %d maxJitter %d",
if (log.isDebugEnabled()) {
log.debug("Constructing CWPublisherRunnable with maxBufferTimeMillis {} maxQueueSize {} batchSize {} maxJitter {}",
bufferTimeMillis,
maxQueueSize,
batchSize,
maxJitter));
maxJitter);
}
this.metricsPublisher = metricsPublisher;
@ -89,11 +85,11 @@ public class CWPublisherRunnable<KeyType> implements Runnable {
try {
runOnce();
} catch (Throwable t) {
LOG.error("Encountered throwable in CWPublisherRunable", t);
log.error("Encountered throwable in CWPublisherRunable", t);
}
}
LOG.info("CWPublication thread finished.");
log.info("CWPublication thread finished.");
}
/**
@ -112,13 +108,13 @@ public class CWPublisherRunnable<KeyType> implements Runnable {
long timeSinceFlush = Math.max(0, getTime() - lastFlushTime);
if (timeSinceFlush >= bufferTimeMillis || queue.size() >= flushSize || shuttingDown) {
dataToPublish = queue.drain(flushSize);
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Drained %d datums from queue", dataToPublish.size()));
if (log.isDebugEnabled()) {
log.debug("Drained {} datums from queue", dataToPublish.size());
}
if (shuttingDown) {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Shutting down with %d datums left on the queue", queue.size()));
if (log.isDebugEnabled()) {
log.debug("Shutting down with {} datums left on the queue", queue.size());
}
// If we're shutting down, we successfully shut down only when the queue is empty.
@ -126,9 +122,9 @@ public class CWPublisherRunnable<KeyType> implements Runnable {
}
} else {
long waitTime = bufferTimeMillis - timeSinceFlush;
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Waiting up to %dms for %d more datums to appear.", waitTime, flushSize
- queue.size()));
if (log.isDebugEnabled()) {
log.debug("Waiting up to {} ms for {} more datums to appear.", waitTime, flushSize
- queue.size());
}
try {
@ -143,7 +139,7 @@ public class CWPublisherRunnable<KeyType> implements Runnable {
try {
metricsPublisher.publishMetrics(dataToPublish);
} catch (Throwable t) {
LOG.error("Caught exception thrown by metrics Publisher in CWPublisherRunnable", t);
log.error("Caught exception thrown by metrics Publisher in CWPublisherRunnable", t);
}
// Changing the value of lastFlushTime will change the time when metrics are flushed next.
lastFlushTime = getTime() + nextJitterValueToUse;
@ -162,7 +158,7 @@ public class CWPublisherRunnable<KeyType> implements Runnable {
}
public void shutdown() {
LOG.info("Shutting down CWPublication thread.");
log.info("Shutting down CWPublication thread.");
synchronized (queue) {
shuttingDown = true;
queue.notify();
@ -181,17 +177,17 @@ public class CWPublisherRunnable<KeyType> implements Runnable {
public void enqueue(Collection<MetricDatumWithKey<KeyType>> data) {
synchronized (queue) {
if (shuttingDown) {
LOG.warn(String.format("Dropping metrics %s because CWPublisherRunnable is shutting down.", data));
log.warn("Dropping metrics {} because CWPublisherRunnable is shutting down.", data);
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Enqueueing %d datums for publication", data.size()));
if (log.isDebugEnabled()) {
log.debug("Enqueueing {} datums for publication", data.size());
}
for (MetricDatumWithKey<KeyType> datumWithKey : data) {
if (!queue.offer(datumWithKey.key, datumWithKey.datum)) {
LOG.warn("Metrics queue full - dropping metric " + datumWithKey.datum);
log.warn("Metrics queue full - dropping metric {}", datumWithKey.datum);
}
}

View file

@ -17,22 +17,18 @@ package com.amazonaws.services.kinesis.metrics.impl;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.AmazonClientException;
import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
import com.amazonaws.services.cloudwatch.model.MetricDatum;
import com.amazonaws.services.cloudwatch.model.PutMetricDataRequest;
import lombok.extern.slf4j.Slf4j;
/**
* Default implementation for publishing metrics to CloudWatch.
*/
@Slf4j
public class DefaultCWMetricsPublisher implements ICWMetricsPublisher<CWMetricKey> {
private static final Log LOG = LogFactory.getLog(CWPublisherRunnable.class);
// CloudWatch API has a limit of 20 MetricDatums per request
private static final int BATCH_SIZE = 20;
@ -62,9 +58,9 @@ public class DefaultCWMetricsPublisher implements ICWMetricsPublisher<CWMetricKe
try {
cloudWatchClient.putMetricData(request);
LOG.debug(String.format("Successfully published %d datums.", endIndex - startIndex));
log.debug("Successfully published {} datums.", endIndex - startIndex);
} catch (AmazonClientException e) {
LOG.warn(String.format("Could not publish %d datums to CloudWatch", endIndex - startIndex), e);
log.warn("Could not publish {} datums to CloudWatch", endIndex - startIndex, e);
}
}
}

View file

@ -14,20 +14,17 @@
*/
package com.amazonaws.services.kinesis.metrics.impl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudwatch.model.Dimension;
import com.amazonaws.services.cloudwatch.model.MetricDatum;
import com.amazonaws.services.cloudwatch.model.StatisticSet;
import lombok.extern.slf4j.Slf4j;
/**
* An AccumulatingMetricsScope that outputs via log4j.
*/
@Slf4j
public class LogMetricsScope extends AccumulateByNameMetricsScope {
private static final Log LOG = LogFactory.getLog(LogMetricsScope.class);
@Override
public void end() {
StringBuilder output = new StringBuilder();
@ -53,6 +50,6 @@ public class LogMetricsScope extends AccumulateByNameMetricsScope {
datum.getUnit()));
}
LOG.info(output.toString());
log.info(output.toString());
}
}

View file

@ -14,21 +14,19 @@
*/
package com.amazonaws.services.kinesis.metrics.impl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.cloudwatch.model.StandardUnit;
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsFactory;
import com.amazonaws.services.kinesis.metrics.interfaces.IMetricsScope;
import com.amazonaws.services.kinesis.metrics.interfaces.MetricsLevel;
import lombok.extern.slf4j.Slf4j;
/**
* MetricsHelper assists with common metrics operations, most notably the storage of IMetricsScopes objects in a
* ThreadLocal so we don't have to pass one throughout the whole call stack.
*/
@Slf4j
public class MetricsHelper {
private static final Log LOG = LogFactory.getLog(MetricsHelper.class);
private static final NullMetricsScope NULL_METRICS_SCOPE = new NullMetricsScope();
private static final ThreadLocal<IMetricsScope> currentScope = new ThreadLocal<IMetricsScope>();
@ -98,8 +96,8 @@ public class MetricsHelper {
public static IMetricsScope getMetricsScope() {
IMetricsScope result = currentScope.get();
if (result == null) {
LOG.warn(String.format("No metrics scope set in thread %s, getMetricsScope returning NullMetricsScope.",
Thread.currentThread().getName()));
log.warn("No metrics scope set in thread {}, getMetricsScope returning NullMetricsScope.",
Thread.currentThread().getName());
return NULL_METRICS_SCOPE;
} else {

View file

@ -16,22 +16,19 @@ package com.amazonaws.services.kinesis.multilang;
import java.io.BufferedReader;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import lombok.extern.slf4j.Slf4j;
/**
* Reads lines off the STDERR of the child process and prints them to this process's (the JVM's) STDERR and log.
*/
@Slf4j
class DrainChildSTDERRTask extends LineReaderTask<Boolean> {
private static final Log LOG = LogFactory.getLog(DrainChildSTDERRTask.class);
DrainChildSTDERRTask() {
}
@Override
protected HandleLineResult<Boolean> handleLine(String line) {
LOG.error("Received error line from subprocess [" + line + "] for shard " + getShardId());
log.error("Received error line from subprocess [{}] for shard {}", line, getShardId());
System.err.println(line);
return new HandleLineResult<Boolean>();
}

View file

@ -16,8 +16,7 @@ package com.amazonaws.services.kinesis.multilang;
import java.io.BufferedReader;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import lombok.extern.slf4j.Slf4j;
/**
* This class is used to drain the STDOUT of the child process. After the child process has been given a shutdown
@ -36,22 +35,20 @@ import org.apache.commons.logging.LogFactory;
* To prevent the child process from becoming blocked in this way, it is the responsibility of the parent process to
* drain the child process's STDOUT. We reprint each drained line to our log to permit debugging.
*/
@Slf4j
class DrainChildSTDOUTTask extends LineReaderTask<Boolean> {
private static final Log LOG = LogFactory.getLog(DrainChildSTDOUTTask.class);
DrainChildSTDOUTTask() {
}
@Override
protected HandleLineResult<Boolean> handleLine(String line) {
LOG.info("Drained line for shard " + getShardId() + ": " + line);
log.info("Drained line for shard {}: {}", getShardId(), line);
return new HandleLineResult<Boolean>();
}
@Override
protected Boolean returnAfterException(Exception e) {
LOG.info("Encountered exception while draining STDOUT of child process for shard " + getShardId(), e);
log.info("Encountered exception while draining STDOUT of child process for shard {}", getShardId(), e);
return false;
}

View file

@ -17,21 +17,17 @@ package com.amazonaws.services.kinesis.multilang;
import java.io.BufferedReader;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.multilang.messages.Message;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
/**
* Gets the next message off the STDOUT of the child process. Throws an exception if a message is not found before the
* end of the input stream is reached.
*/
@Slf4j
class GetNextMessageTask extends LineReaderTask<Message> {
private static final Log LOG = LogFactory.getLog(GetNextMessageTask.class);
private ObjectMapper objectMapper;
private static final String EMPTY_LINE = "";
@ -68,7 +64,7 @@ class GetNextMessageTask extends LineReaderTask<Message> {
return new HandleLineResult<Message>(objectMapper.readValue(line, Message.class));
}
} catch (IOException e) {
LOG.info("Skipping unexpected line on STDOUT for shard " + getShardId() + ": " + line);
log.info("Skipping unexpected line on STDOUT for shard {}: {}", getShardId(), line);
}
return new HandleLineResult<Message>();
}

View file

@ -20,8 +20,7 @@ import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.concurrent.Callable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import lombok.extern.slf4j.Slf4j;
/**
* This abstract class captures the process of reading from an input stream. Three methods must be provided for
@ -34,10 +33,8 @@ import org.apache.commons.logging.LogFactory;
*
* @param <T>
*/
@Slf4j
abstract class LineReaderTask<T> implements Callable<T> {
private static final Log LOG = LogFactory.getLog(LineReaderTask.class);
private BufferedReader reader;
private String description;
@ -56,7 +53,7 @@ abstract class LineReaderTask<T> implements Callable<T> {
public T call() throws Exception {
String nextLine = null;
try {
LOG.info("Starting: " + description);
log.info("Starting: {}", description);
while ((nextLine = reader.readLine()) != null) {
HandleLineResult<T> result = handleLine(nextLine);
if (result.hasReturnValue()) {
@ -66,7 +63,7 @@ abstract class LineReaderTask<T> implements Callable<T> {
} catch (IOException e) {
return returnAfterException(e);
}
LOG.info("Stopping: " + description);
log.info("Stopping: {}", description);
return returnAfterEndOfInput();
}

View file

@ -22,29 +22,24 @@ import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ShutdownReason;
import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
import com.amazonaws.services.kinesis.multilang.messages.CheckpointMessage;
import com.amazonaws.services.kinesis.multilang.messages.InitializeMessage;
import com.amazonaws.services.kinesis.multilang.messages.Message;
import com.amazonaws.services.kinesis.multilang.messages.ProcessRecordsMessage;
import com.amazonaws.services.kinesis.multilang.messages.ShutdownMessage;
import com.amazonaws.services.kinesis.multilang.messages.ShutdownRequestedMessage;
import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
/**
* Defines methods for writing {@link Message} objects to the child process's STDIN.
*/
@Slf4j
class MessageWriter {
private static final Log LOG = LogFactory.getLog(MessageWriter.class);
private BufferedWriter writer;
private volatile boolean open = true;
@ -82,7 +77,7 @@ class MessageWriter {
writer.write(System.lineSeparator(), 0, System.lineSeparator().length());
writer.flush();
}
LOG.info("Message size == " + message.getBytes().length + " bytes for shard " + shardId);
log.info("Message size == {} bytes for shard {}", message.getBytes().length, shardId);
} catch (IOException e) {
open = false;
}
@ -94,7 +89,7 @@ class MessageWriter {
return this.executorService.submit(writeMessageToOutputTask);
} else {
String errorMessage = "Cannot write message " + message + " because writer is closed for shard " + shardId;
LOG.info(errorMessage);
log.info(errorMessage);
throw new IllegalStateException(errorMessage);
}
}
@ -106,7 +101,7 @@ class MessageWriter {
* @return
*/
private Future<Boolean> writeMessage(Message message) {
LOG.info("Writing " + message.getClass().getSimpleName() + " to child process for shard " + shardId);
log.info("Writing {} to child process for shard {}", message.getClass().getSimpleName(), shardId);
try {
String jsonText = objectMapper.writeValueAsString(message);
return writeMessageToOutput(jsonText);
@ -114,7 +109,7 @@ class MessageWriter {
String errorMessage =
String.format("Encountered I/O error while writing %s action to subprocess", message.getClass()
.getSimpleName());
LOG.error(errorMessage, e);
log.error(errorMessage, e);
throw new RuntimeException(errorMessage, e);
}
}

View file

@ -16,7 +16,6 @@ package com.amazonaws.services.kinesis.multilang;
import java.io.IOException;
import java.io.PrintStream;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
@ -24,13 +23,12 @@ import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker;
import lombok.extern.slf4j.Slf4j;
/**
* Main app that launches the worker that runs the multi-language record processor.
*
@ -58,10 +56,8 @@ import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker;
* AWSCredentialsProvider = DefaultAWSCredentialsProviderChain
* </pre>
*/
@Slf4j
public class MultiLangDaemon implements Callable<Integer> {
private static final Log LOG = LogFactory.getLog(MultiLangDaemon.class);
private Worker worker;
/**
@ -113,7 +109,7 @@ public class MultiLangDaemon implements Callable<Integer> {
try {
worker.run();
} catch (Throwable t) {
LOG.error("Caught throwable while processing data.", t);
log.error("Caught throwable while processing data.", t);
exitCode = 1;
}
return exitCode;
@ -152,13 +148,13 @@ public class MultiLangDaemon implements Callable<Integer> {
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
LOG.info("Process terminanted, will initiate shutdown.");
log.info("Process terminated, will initiate shutdown.");
try {
Future<Void> fut = daemon.worker.requestShutdown();
fut.get(shutdownGraceMillis, TimeUnit.MILLISECONDS);
LOG.info("Process shutdown is complete.");
log.info("Process shutdown is complete.");
} catch (InterruptedException | ExecutionException | TimeoutException e) {
LOG.error("Encountered an error during shutdown.", e);
log.error("Encountered an error during shutdown.", e);
}
}
});
@ -167,7 +163,7 @@ public class MultiLangDaemon implements Callable<Integer> {
try {
System.exit(future.get());
} catch (InterruptedException | ExecutionException e) {
LOG.error("Encountered an error while running daemon", e);
log.error("Encountered an error while running daemon", e);
}
System.exit(1);
}

View file

@ -26,20 +26,17 @@ import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.config.KinesisClientLibConfigurator;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import lombok.extern.slf4j.Slf4j;
/**
* This class captures the configuration needed to run the MultiLangDaemon.
*/
@Slf4j
public class MultiLangDaemonConfig {
private static final Log LOG = LogFactory.getLog(MultiLangDaemonConfig.class);
private static final String USER_AGENT = "amazon-kinesis-multi-lang-daemon";
private static final String VERSION = "1.0.1";
@ -102,8 +99,8 @@ public class MultiLangDaemonConfig {
executorService = buildExecutorService(properties);
recordProcessorFactory = new MultiLangRecordProcessorFactory(executableName, executorService, kinesisClientLibConfig);
LOG.info("Running " + kinesisClientLibConfig.getApplicationName() + " to process stream "
+ kinesisClientLibConfig.getStreamName() + " with executable " + executableName);
log.info("Running {} to process stream {} with executable {}", kinesisClientLibConfig.getApplicationName(),
kinesisClientLibConfig.getStreamName(), executableName);
prepare(processingLanguage);
}
@ -111,9 +108,9 @@ public class MultiLangDaemonConfig {
// Ensure the JVM will refresh the cached IP values of AWS resources (e.g. service endpoints).
java.security.Security.setProperty("networkaddress.cache.ttl", "60");
LOG.info("Using workerId: " + kinesisClientLibConfig.getWorkerIdentifier());
LOG.info("Using credentials with access key id: "
+ kinesisClientLibConfig.getKinesisCredentialsProvider().getCredentials().getAWSAccessKeyId());
log.info("Using workerId: {}", kinesisClientLibConfig.getWorkerIdentifier());
log.info("Using credentials with access key id: {}",
kinesisClientLibConfig.getKinesisCredentialsProvider().getCredentials().getAWSAccessKeyId());
StringBuilder userAgent = new StringBuilder(KinesisClientLibConfiguration.KINESIS_CLIENT_LIB_USER_AGENT);
userAgent.append(" ");
@ -131,8 +128,7 @@ public class MultiLangDaemonConfig {
userAgent.append(recordProcessorFactory.getCommandArray()[0]);
}
LOG.info(String.format("MultiLangDaemon is adding the following fields to the User Agent: %s",
userAgent.toString()));
log.info("MultiLangDaemon is adding the following fields to the User Agent: {}", userAgent.toString());
kinesisClientLibConfig.withUserAgent(userAgent.toString());
}
@ -174,13 +170,13 @@ public class MultiLangDaemonConfig {
private static ExecutorService buildExecutorService(Properties properties) {
int maxActiveThreads = getMaxActiveThreads(properties);
ThreadFactoryBuilder builder = new ThreadFactoryBuilder().setNameFormat("multi-lang-daemon-%04d");
LOG.debug(String.format("Value for %s property is %d", PROP_MAX_ACTIVE_THREADS, maxActiveThreads));
log.debug("Value for {} property is {}", PROP_MAX_ACTIVE_THREADS, maxActiveThreads);
if (maxActiveThreads <= 0) {
LOG.info("Using a cached thread pool.");
log.info("Using a cached thread pool.");
return new ThreadPoolExecutor(0, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
builder.build());
} else {
LOG.info(String.format("Using a fixed thread pool with %d max active threads.", maxActiveThreads));
log.info("Using a fixed thread pool with {} max active threads.", maxActiveThreads);
return new ThreadPoolExecutor(maxActiveThreads, maxActiveThreads, 0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<Runnable>(), builder.build());
}

View file

@ -14,6 +14,12 @@
*/
package com.amazonaws.services.kinesis.multilang;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
@ -27,18 +33,13 @@ import com.amazonaws.services.kinesis.multilang.messages.ProcessRecordsMessage;
import com.amazonaws.services.kinesis.multilang.messages.ShutdownMessage;
import com.amazonaws.services.kinesis.multilang.messages.ShutdownRequestedMessage;
import com.amazonaws.services.kinesis.multilang.messages.StatusMessage;
import lombok.extern.apachecommons.CommonsLog;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import lombok.extern.slf4j.Slf4j;
/**
* An implementation of the multi language protocol.
*/
@CommonsLog
@Slf4j
class MultiLangProtocol {
private MessageReader messageReader;
@ -142,13 +143,10 @@ class MultiLangProtocol {
boolean writerIsStillOpen = writeFuture.get();
return statusWasCorrect && writerIsStillOpen;
} catch (InterruptedException e) {
log.error(String.format("Interrupted while writing %s message for shard %s", action,
initializationInput.getShardId()));
log.error("Interrupted while writing {} message for shard {}", action, initializationInput.getShardId());
return false;
} catch (ExecutionException e) {
log.error(
String.format("Failed to write %s message for shard %s", action, initializationInput.getShardId()),
e);
log.error("Failed to write {} message for shard {}", action, initializationInput.getShardId(), e);
return false;
}
}
@ -196,15 +194,15 @@ class MultiLangProtocol {
try {
return Optional.of(fm.get());
} catch (InterruptedException e) {
log.error(String.format("Interrupted while waiting for %s message for shard %s", action,
initializationInput.getShardId()), e);
log.error("Interrupted while waiting for {} message for shard {}", action,
initializationInput.getShardId(), e);
} catch (ExecutionException e) {
log.error(String.format("Failed to get status message for %s action for shard %s", action,
initializationInput.getShardId()), e);
log.error("Failed to get status message for {} action for shard {}", action,
initializationInput.getShardId(), e);
} catch (TimeoutException e) {
log.error(String.format("Timedout to get status message for %s action for shard %s. Terminating...",
log.error("Timed out getting status message for {} action for shard {}. Terminating...",
action,
initializationInput.getShardId()),
initializationInput.getShardId(),
e);
haltJvm(1);
}
@ -229,8 +227,8 @@ class MultiLangProtocol {
* @return Whether or not this operation succeeded.
*/
private boolean validateStatusMessage(StatusMessage statusMessage, String action) {
log.info("Received response " + statusMessage + " from subprocess while waiting for " + action
+ " while processing shard " + initializationInput.getShardId());
log.info("Received response {} from subprocess while waiting for {}"
+ " while processing shard {}", statusMessage, action, initializationInput.getShardId());
return !(statusMessage == null || statusMessage.getResponseFor() == null || !statusMessage.getResponseFor()
.equals(action));

View file

@ -20,20 +20,17 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IShutdownNotificationAware;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
/**
* A record processor that manages creating a child process that implements the multi language protocol and connecting
@ -41,9 +38,8 @@ import com.fasterxml.jackson.databind.ObjectMapper;
* that object when its corresponding {@link #initialize}, {@link #processRecords}, and {@link #shutdown} methods are
* called.
*/
@Slf4j
public class MultiLangRecordProcessor implements IRecordProcessor, IShutdownNotificationAware {
private static final Log LOG = LogFactory.getLog(MultiLangRecordProcessor.class);
private static final int EXIT_VALUE = 1;
/** Whether or not record processor initialization is successful. Defaults to false. */
@ -118,7 +114,7 @@ public class MultiLangRecordProcessor implements IRecordProcessor, IShutdownNoti
// In cases where KCL loses lease for the shard after creating record processor instance but before
// record processor initialize() is called, then shutdown() may be called directly before initialize().
if (!initialized) {
LOG.info("Record processor was not initialized and will not have a child process, "
log.info("Record processor was not initialized and will not have a child process, "
+ "so not invoking child process shutdown.");
this.state = ProcessState.SHUTDOWN;
return;
@ -132,7 +128,7 @@ public class MultiLangRecordProcessor implements IRecordProcessor, IShutdownNoti
childProcessShutdownSequence();
} else {
LOG.warn("Shutdown was called but this processor is already shutdown. Not doing anything.");
log.warn("Shutdown was called but this processor is already shutdown. Not doing anything.");
}
} catch (Throwable t) {
if (ProcessState.ACTIVE.equals(this.state)) {
@ -146,14 +142,14 @@ public class MultiLangRecordProcessor implements IRecordProcessor, IShutdownNoti
@Override
public void shutdownRequested(IRecordProcessorCheckpointer checkpointer) {
LOG.info("Shutdown is requested.");
log.info("Shutdown is requested.");
if (!initialized) {
LOG.info("Record processor was not initialized so no need to initiate a final checkpoint.");
log.info("Record processor was not initialized so no need to initiate a final checkpoint.");
return;
}
LOG.info("Requesting a checkpoint on shutdown notification.");
log.info("Requesting a checkpoint on shutdown notification.");
if (!protocol.shutdownRequested(checkpointer)) {
LOG.error("Child process failed to complete shutdown notification.");
log.error("Child process failed to complete shutdown notification.");
}
}
@ -228,7 +224,7 @@ public class MultiLangRecordProcessor implements IRecordProcessor, IShutdownNoti
messageWriter.close();
}
} catch (IOException e) {
LOG.error("Encountered exception while trying to close output stream.", e);
log.error("Encountered exception while trying to close output stream.", e);
}
// We should drain the STDOUT and STDERR of the child process. If we don't, the child process might remain
@ -245,9 +241,9 @@ public class MultiLangRecordProcessor implements IRecordProcessor, IShutdownNoti
* sure that it exits before we finish.
*/
try {
LOG.info("Child process exited with value: " + process.waitFor());
log.info("Child process exited with value: {}", process.waitFor());
} catch (InterruptedException e) {
LOG.error("Interrupted before process finished exiting. Attempting to kill process.");
log.error("Interrupted before process finished exiting. Attempting to kill process.");
process.destroy();
}
@ -258,7 +254,7 @@ public class MultiLangRecordProcessor implements IRecordProcessor, IShutdownNoti
try {
inputStream.close();
} catch (IOException e) {
LOG.error("Encountered exception while trying to close " + name + " stream.", e);
log.error("Encountered exception while trying to close {} stream.", name, e);
}
}
@ -273,7 +269,7 @@ public class MultiLangRecordProcessor implements IRecordProcessor, IShutdownNoti
try {
future.get();
} catch (InterruptedException | ExecutionException e) {
LOG.error("Encountered error while " + whatThisFutureIsDoing + " for shard " + shardId, e);
log.error("Encountered error while {} for shard {}", whatThisFutureIsDoing, shardId, e);
}
}
@ -286,12 +282,12 @@ public class MultiLangRecordProcessor implements IRecordProcessor, IShutdownNoti
*/
private void stopProcessing(String message, Throwable reason) {
try {
LOG.error(message, reason);
log.error(message, reason);
if (!state.equals(ProcessState.SHUTDOWN)) {
childProcessShutdownSequence();
}
} catch (Throwable t) {
LOG.error("Encountered error while trying to shutdown", t);
log.error("Encountered error while trying to shutdown", t);
}
exit();
}

View file

@ -16,21 +16,18 @@ package com.amazonaws.services.kinesis.multilang;
import java.util.concurrent.ExecutorService;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessorFactory;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.KinesisClientLibConfiguration;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
/**
* Creates {@link MultiLangRecordProcessor}'s.
*/
@Slf4j
public class MultiLangRecordProcessorFactory implements IRecordProcessorFactory {
private static final Log LOG = LogFactory.getLog(MultiLangRecordProcessorFactory.class);
private static final String COMMAND_DELIMETER_REGEX = " +";
private final String command;
@ -67,7 +64,7 @@ public class MultiLangRecordProcessorFactory implements IRecordProcessorFactory
@Override
public IRecordProcessor createProcessor() {
LOG.debug(String.format("Creating new record processor for client executable: %s", command));
log.debug("Creating new record processor for client executable: {}", command);
/*
* Giving ProcessBuilder the command as an array of Strings allows users to specify command line arguments.
*/

View file

@ -17,20 +17,17 @@ package com.amazonaws.services.kinesis.clientlibrary.lib.checkpoint;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.ICheckpoint;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import lombok.extern.slf4j.Slf4j;
/**
* Everything is stored in memory and there is no fault-tolerance.
*/
@Slf4j
public class InMemoryCheckpointImpl implements ICheckpoint {
private static final Log LOG = LogFactory.getLog(InMemoryCheckpointImpl.class);
private Map<String, ExtendedSequenceNumber> checkpoints = new HashMap<>();
private Map<String, ExtendedSequenceNumber> flushpoints = new HashMap<>();
private Map<String, ExtendedSequenceNumber> pendingCheckpoints = new HashMap<>();
@ -51,13 +48,13 @@ public class InMemoryCheckpointImpl implements ICheckpoint {
if (checkpoint == null) {
checkpoint = new ExtendedSequenceNumber(startingSequenceNumber);
}
LOG.debug("getLastCheckpoint shardId: " + shardId + " checkpoint: " + checkpoint);
log.debug("getLastCheckpoint shardId: {} checkpoint: {}", shardId, checkpoint);
return checkpoint;
}
ExtendedSequenceNumber getLastFlushpoint(String shardId) {
ExtendedSequenceNumber flushpoint = flushpoints.get(shardId);
LOG.debug("getLastFlushpoint shardId: " + shardId + " flushpoint: " + flushpoint);
log.debug("getLastFlushpoint shardId: {} flushpoint: {}", shardId, flushpoint);
return flushpoint;
}
@ -73,8 +70,8 @@ public class InMemoryCheckpointImpl implements ICheckpoint {
ExtendedSequenceNumber getGreatestPrimaryFlushpoint(String shardId) throws KinesisClientLibException {
verifyNotEmpty(shardId, "shardId must not be null.");
ExtendedSequenceNumber greatestFlushpoint = getLastFlushpoint(shardId);
if (LOG.isDebugEnabled()) {
LOG.debug("getGreatestPrimaryFlushpoint value for shardId " + shardId + " = " + greatestFlushpoint);
if (log.isDebugEnabled()) {
log.debug("getGreatestPrimaryFlushpoint value for shardId {} = {}", shardId, greatestFlushpoint);
}
return greatestFlushpoint;
};
@ -82,8 +79,8 @@ public class InMemoryCheckpointImpl implements ICheckpoint {
ExtendedSequenceNumber getRestartPoint(String shardId) {
verifyNotEmpty(shardId, "shardId must not be null.");
ExtendedSequenceNumber restartPoint = getLastCheckpoint(shardId);
if (LOG.isDebugEnabled()) {
LOG.debug("getRestartPoint value for shardId " + shardId + " = " + restartPoint);
if (log.isDebugEnabled()) {
log.debug("getRestartPoint value for shardId {} = {}", shardId, restartPoint);
}
return restartPoint;
}
@ -98,8 +95,8 @@ public class InMemoryCheckpointImpl implements ICheckpoint {
flushpoints.put(shardId, checkpointValue);
pendingCheckpoints.remove(shardId);
if (LOG.isDebugEnabled()) {
LOG.debug("shardId: " + shardId + " checkpoint: " + checkpointValue);
if (log.isDebugEnabled()) {
log.debug("shardId: {} checkpoint: {}", shardId, checkpointValue);
}
}
@ -110,7 +107,7 @@ public class InMemoryCheckpointImpl implements ICheckpoint {
@Override
public ExtendedSequenceNumber getCheckpoint(String shardId) throws KinesisClientLibException {
ExtendedSequenceNumber checkpoint = flushpoints.get(shardId);
LOG.debug("getCheckpoint shardId: " + shardId + " checkpoint: " + checkpoint);
log.debug("getCheckpoint shardId: {} checkpoint: {}", shardId, checkpoint);
return checkpoint;
}
@ -126,7 +123,7 @@ public class InMemoryCheckpointImpl implements ICheckpoint {
ExtendedSequenceNumber pendingCheckpoint = pendingCheckpoints.get(shardId);
Checkpoint checkpointObj = new Checkpoint(checkpoint, pendingCheckpoint);
LOG.debug("getCheckpointObject shardId: " + shardId + ", " + checkpointObj);
log.debug("getCheckpointObject shardId: {}, {}", shardId, checkpointObj);
return checkpointObj;
}

View file

@ -20,8 +20,6 @@ import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
@ -40,8 +38,6 @@ import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
*
*/
public class BlockOnParentShardTaskTest {
private static final Log LOG = LogFactory.getLog(BlockOnParentShardTaskTest.class);
private final long backoffTimeInMillis = 50L;
private final String shardId = "shardId-97";
private final String concurrencyToken = "testToken";

View file

@ -17,21 +17,20 @@ package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
import com.amazonaws.services.kinesis.leases.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputException;
import com.amazonaws.services.kinesis.leases.impl.KinesisClientLease;
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
import lombok.extern.slf4j.Slf4j;
/**
* Mock Lease Manager by randomly throwing Leasing Exceptions.
*
*/
@Slf4j
class ExceptionThrowingLeaseManager implements ILeaseManager<KinesisClientLease> {
private static final Log LOG = LogFactory.getLog(ExceptionThrowingLeaseManager.class);
private static final Throwable EXCEPTION_MSG = new Throwable("Test Exception");
// Use array below to control in what situations we want to throw exceptions.
@ -113,7 +112,7 @@ class ExceptionThrowingLeaseManager implements ILeaseManager<KinesisClientLease>
if (method.equals(methodThrowingException)
&& (leaseManagerMethodCallingCount[method.getIndex()] == timeThrowingException)) {
// Throw Dependency Exception if all conditions are satisfied.
LOG.debug("Throwing DependencyException in " + methodName);
log.debug("Throwing DependencyException in {}", methodName);
throw new DependencyException(EXCEPTION_MSG);
}
}

View file

@ -50,8 +50,6 @@ import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.hamcrest.Description;
import org.hamcrest.Matcher;
import org.hamcrest.TypeSafeMatcher;
@ -80,14 +78,14 @@ import com.amazonaws.services.kinesis.model.Record;
import com.amazonaws.services.kinesis.model.Shard;
import com.amazonaws.services.kinesis.model.ShardIteratorType;
import lombok.extern.slf4j.Slf4j;
/**
* Unit tests of {@link ShardConsumer}.
*/
@RunWith(MockitoJUnitRunner.class)
@Slf4j
public class ShardConsumerTest {
private static final Log LOG = LogFactory.getLog(ShardConsumerTest.class);
private final IMetricsFactory metricsFactory = new NullMetricsFactory();
private final boolean callProcessRecordsForEmptyRecordList = false;
private final long taskBackoffTimeMillis = 500L;
@ -385,7 +383,7 @@ public class ShardConsumerTest {
for (int i = 0; i < numRecs;) {
boolean newTaskSubmitted = consumer.consumeShard();
if (newTaskSubmitted) {
LOG.debug("New processing task was submitted, call # " + i);
log.debug("New processing task was submitted, call # {}", i);
assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.PROCESSING)));
// CHECKSTYLE:IGNORE ModifiedControlVariable FOR NEXT 1 LINES
i += maxRecords;
@ -529,7 +527,7 @@ public class ShardConsumerTest {
for (int i = 0; i < numRecs;) {
boolean newTaskSubmitted = consumer.consumeShard();
if (newTaskSubmitted) {
LOG.debug("New processing task was submitted, call # " + i);
log.debug("New processing task was submitted, call # {}", i);
assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.PROCESSING)));
// CHECKSTYLE:IGNORE ModifiedControlVariable FOR NEXT 1 LINES
i += maxRecords;
@ -667,7 +665,7 @@ public class ShardConsumerTest {
for (int i = 0; i < numRecs;) {
boolean newTaskSubmitted = consumer.consumeShard();
if (newTaskSubmitted) {
LOG.debug("New processing task was submitted, call # " + i);
log.debug("New processing task was submitted, call # {}", i);
assertThat(consumer.getCurrentState(), is(equalTo(ConsumerStates.ShardConsumerState.PROCESSING)));
// CHECKSTYLE:IGNORE ModifiedControlVariable FOR NEXT 1 LINES
i += maxRecords;

View file

@ -21,20 +21,17 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentSkipListSet;
import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.model.Shard;
import junit.framework.Assert;
import lombok.extern.slf4j.Slf4j;
/**
* Helper class to verify shard lineage in unit tests that use TestStreamlet.
* Verifies that parent shard processors were shutdown before child shard processor was initialized.
*/
@Slf4j
class ShardSequenceVerifier {
private static final Log LOG = LogFactory.getLog(ShardSequenceVerifier.class);
private Map<String, Shard> shardIdToShards = new HashMap<String, Shard>();
private ConcurrentSkipListSet<String> initializedShards = new ConcurrentSkipListSet<>();
private ConcurrentSkipListSet<String> shutdownShards = new ConcurrentSkipListSet<>();
@ -56,7 +53,7 @@ class ShardSequenceVerifier {
if (!shutdownShards.contains(parentShardId)) {
String message = "Parent shard " + parentShardId + " was not shutdown before shard "
+ shardId + " was initialized.";
LOG.error(message);
log.error(message);
validationFailures.add(message);
}
}
@ -72,7 +69,7 @@ class ShardSequenceVerifier {
void verify() {
for (String message : validationFailures) {
LOG.error(message);
log.error(message);
}
Assert.assertTrue(validationFailures.isEmpty());
}

View file

@ -25,9 +25,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import com.amazonaws.services.dynamodbv2.local.embedded.DynamoDBEmbedded;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@ -35,6 +32,7 @@ import org.junit.BeforeClass;
import org.junit.Test;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.local.embedded.DynamoDBEmbedded;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.internal.KinesisClientLibIOException;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.ExceptionThrowingLeaseManager.ExceptionThrowingLeaseManagerMethods;
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
@ -53,13 +51,14 @@ import com.amazonaws.services.kinesis.model.SequenceNumberRange;
import com.amazonaws.services.kinesis.model.Shard;
import junit.framework.Assert;
import lombok.extern.slf4j.Slf4j;
/**
*
*/
// CHECKSTYLE:IGNORE JavaNCSS FOR NEXT 800 LINES
@Slf4j
public class ShardSyncerTest {
private static final Log LOG = LogFactory.getLog(ShardSyncer.class);
private static final InitialPositionInStreamExtended INITIAL_POSITION_LATEST =
InitialPositionInStreamExtended.newInitialPosition(InitialPositionInStream.LATEST);
private static final InitialPositionInStreamExtended INITIAL_POSITION_TRIM_HORIZON =
@ -96,7 +95,7 @@ public class ShardSyncerTest {
public void setUp() throws Exception {
boolean created = leaseManager.createLeaseTableIfNotExists(1L, 1L);
if (created) {
LOG.info("New table created.");
log.info("New table created.");
}
leaseManager.deleteAll();
}
@ -467,7 +466,7 @@ public class ShardSyncerTest {
cleanupLeasesOfCompletedShards);
return;
} catch (LeasingException e) {
LOG.debug("Catch leasing exception", e);
log.debug("Catch leasing exception", e);
}
// Clear throwing exception scenario every time after calling ShardSyncer
exceptionThrowingLeaseManager.clearLeaseManagerThrowingExceptionScenario();

View file

@ -21,11 +21,6 @@ import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IShutdownNotificationAware;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.model.Record;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.InvalidStateException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibDependencyException;
import com.amazonaws.services.kinesis.clientlibrary.exceptions.KinesisClientLibNonRetryableException;
@ -33,17 +28,19 @@ import com.amazonaws.services.kinesis.clientlibrary.exceptions.ShutdownException
import com.amazonaws.services.kinesis.clientlibrary.exceptions.ThrottlingException;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor;
import com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IShutdownNotificationAware;
import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput;
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownInput;
import com.amazonaws.services.kinesis.model.Record;
import lombok.extern.slf4j.Slf4j;
/**
* Streamlet that tracks records it's seen - useful for testing.
*/
@Slf4j
class TestStreamlet implements IRecordProcessor, IShutdownNotificationAware {
private static final Log LOG = LogFactory.getLog(TestStreamlet.class);
private List<Record> records = new ArrayList<Record>();
private Set<String> processedSeqNums = new HashSet<String>(); // used for deduping
@ -91,7 +88,7 @@ class TestStreamlet implements IRecordProcessor, IShutdownNotificationAware {
IRecordProcessorCheckpointer checkpointer = input.getCheckpointer();
if ((dataRecords != null) && (!dataRecords.isEmpty())) {
for (Record record : dataRecords) {
LOG.debug("Processing record: " + record);
log.debug("Processing record: {}", record);
String seqNum = record.getSequenceNumber();
if (!processedSeqNums.contains(seqNum)) {
records.add(record);
@ -108,7 +105,7 @@ class TestStreamlet implements IRecordProcessor, IShutdownNotificationAware {
| KinesisClientLibDependencyException | InvalidStateException e) {
// Continue processing records and checkpoint next time if we get a transient error.
// Don't checkpoint if the processor has been shutdown.
LOG.debug("Caught exception while checkpointing: ", e);
log.debug("Caught exception while checkpointing: ", e);
}
if (sem != null) {
@ -128,7 +125,7 @@ class TestStreamlet implements IRecordProcessor, IShutdownNotificationAware {
try {
checkpointer.checkpoint();
} catch (KinesisClientLibNonRetryableException e) {
LOG.error("Caught exception when checkpointing while shutdown.", e);
log.error("Caught exception when checkpointing while shutdown.", e);
throw new RuntimeException(e);
}
}

View file

@ -14,16 +14,16 @@
*/
package com.amazonaws.services.kinesis.clientlibrary.lib.worker;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import org.apache.commons.logging.Log;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import org.slf4j.Logger;
@RunWith(MockitoJUnitRunner.class)
public class ThrottlingReporterTest {
@ -31,14 +31,14 @@ public class ThrottlingReporterTest {
private static final String SHARD_ID = "Shard-001";
@Mock
private Log throttleLog;
private Logger throttleLog;
@Test
public void testLessThanMaxThrottles() {
ThrottlingReporter reporter = new LogTestingThrottingReporter(5, SHARD_ID);
reporter.throttled();
verify(throttleLog).warn(any(Object.class));
verify(throttleLog, never()).error(any(Object.class));
verify(throttleLog).warn(anyString());
verify(throttleLog, never()).error(anyString());
}
@ -47,8 +47,8 @@ public class ThrottlingReporterTest {
ThrottlingReporter reporter = new LogTestingThrottingReporter(1, SHARD_ID);
reporter.throttled();
reporter.throttled();
verify(throttleLog).warn(any(Object.class));
verify(throttleLog).error(any(Object.class));
verify(throttleLog).warn(anyString());
verify(throttleLog).error(anyString());
}
@Test
@ -60,8 +60,8 @@ public class ThrottlingReporterTest {
reporter.throttled();
reporter.success();
reporter.throttled();
verify(throttleLog, times(2)).warn(any(Object.class));
verify(throttleLog, times(3)).error(any(Object.class));
verify(throttleLog, times(2)).warn(anyString());
verify(throttleLog, times(3)).error(anyString());
}
@ -72,7 +72,7 @@ public class ThrottlingReporterTest {
}
@Override
protected Log getLog() {
protected Logger getLog() {
return throttleLog;
}
}

View file

@ -64,8 +64,6 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.hamcrest.Condition;
import org.hamcrest.Description;
import org.hamcrest.Matcher;
@ -92,8 +90,8 @@ import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker.WorkerCWMe
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.Worker.WorkerThreadPoolExecutor;
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.WorkerStateChangeListener.WorkerState;
import com.amazonaws.services.kinesis.clientlibrary.proxies.IKinesisProxy;
import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy;
import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisLocalFileProxy;
import com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy;
import com.amazonaws.services.kinesis.clientlibrary.proxies.util.KinesisLocalFileDataCreator;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.clientlibrary.types.InitializationInput;
@ -115,15 +113,14 @@ import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
* Unit tests of Worker.
*/
@RunWith(MockitoJUnitRunner.class)
@Slf4j
public class WorkerTest {
private static final Log LOG = LogFactory.getLog(WorkerTest.class);
// @Rule
// public Timeout timeout = new Timeout((int)TimeUnit.SECONDS.toMillis(30));
@ -707,18 +704,18 @@ public class WorkerTest {
final long startTimeMillis = System.currentTimeMillis();
long elapsedTimeMillis = 0;
LOG.info("Entering sleep @ " + startTimeMillis + " with elapsedMills: " + elapsedTimeMillis);
log.info("Entering sleep @ {} with elapsedMills: {}", startTimeMillis, elapsedTimeMillis);
shutdownBlocker.acquire();
try {
actionBlocker.acquire();
} catch (InterruptedException e) {
LOG.info("Sleep interrupted @ " + System.currentTimeMillis() + " elapsedMillis: "
+ (System.currentTimeMillis() - startTimeMillis));
log.info("Sleep interrupted @ {} elapsedMillis: {}", System.currentTimeMillis(),
(System.currentTimeMillis() - startTimeMillis));
recordProcessorInterrupted.getAndSet(true);
}
shutdownBlocker.release();
elapsedTimeMillis = System.currentTimeMillis() - startTimeMillis;
LOG.info("Sleep completed @ " + System.currentTimeMillis() + " elapsedMillis: " + elapsedTimeMillis);
log.info("Sleep completed @ {} elapsedMillis: {}", System.currentTimeMillis(), elapsedTimeMillis);
return null;
}
@ -2097,7 +2094,7 @@ public class WorkerTest {
String shardId = shard.getShardId();
String endingSequenceNumber = shard.getSequenceNumberRange().getEndingSequenceNumber();
if (endingSequenceNumber != null) {
LOG.info("Closed shard " + shardId + " has an endingSequenceNumber " + endingSequenceNumber);
log.info("Closed shard {} has an endingSequenceNumber {}", shardId, endingSequenceNumber);
Assert.assertEquals(ShutdownReason.TERMINATE, shardsLastProcessorShutdownReason.get(shardId));
}
}

View file

@ -33,9 +33,6 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.services.kinesis.model.DescribeStreamResult;
import com.amazonaws.services.kinesis.model.ExpiredIteratorException;
import com.amazonaws.services.kinesis.model.GetRecordsResult;
@ -48,11 +45,14 @@ import com.amazonaws.services.kinesis.model.Shard;
import com.amazonaws.services.kinesis.model.ShardIteratorType;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
/**
* This is a (temporary) test utility class, to mimic Kinesis without having to integrate with Alpha.
* In future, we should consider moving this to the Kinesis client/sampleApp package (if useful to
* other Kinesis clients).
*/
@Slf4j
public class KinesisLocalFileProxy implements IKinesisProxy {
/**
@ -84,8 +84,6 @@ public class KinesisLocalFileProxy implements IKinesisProxy {
}
};
private static final Log LOG = LogFactory.getLog(KinesisLocalFileProxy.class);
private static final String ITERATOR_DELIMITER = ":";
private static final int NUM_FIELDS_IN_FILE = LocalFileFields.values().length;
@ -385,9 +383,9 @@ public class KinesisLocalFileProxy implements IKinesisProxy {
*/
response.setNextShardIterator(serializeIterator(iterator.shardId, lastRecordsSeqNo.add(BigInteger.ONE)
.toString()));
LOG.debug("Returning a non null iterator for shard " + iterator.shardId);
log.debug("Returning a non null iterator for shard {}", iterator.shardId);
} else {
LOG.info("Returning null iterator for shard " + iterator.shardId);
log.info("Returning null iterator for shard {}", iterator.shardId);
}
return response;

View file

@ -27,11 +27,8 @@ import java.util.Map;
import javax.swing.*;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.kinesis.clientlibrary.types.ExtendedSequenceNumber;
import com.amazonaws.services.kinesis.leases.exceptions.DependencyException;
@ -41,9 +38,10 @@ import com.amazonaws.services.kinesis.leases.exceptions.ProvisionedThroughputExc
import com.amazonaws.services.kinesis.leases.interfaces.ILeaseManager;
import com.amazonaws.services.kinesis.metrics.impl.CWMetricsFactory;
public class LeaseCoordinatorExerciser {
import lombok.extern.slf4j.Slf4j;
private static final Log LOG = LogFactory.getLog(LeaseCoordinatorExerciser.class);
@Slf4j
public class LeaseCoordinatorExerciser {
public static void main(String[] args)
throws InterruptedException, DependencyException, InvalidStateException, ProvisionedThroughputException,
@ -61,9 +59,9 @@ public class LeaseCoordinatorExerciser {
ILeaseManager<KinesisClientLease> leaseManager = new KinesisClientLeaseManager("nagl_ShardProgress", ddb);
if (leaseManager.createLeaseTableIfNotExists(10L, 50L)) {
LOG.info("Waiting for newly created lease table");
log.info("Waiting for newly created lease table");
if (!leaseManager.waitUntilLeaseTableExists(10, 300)) {
LOG.error("Table was not created in time");
log.error("Table was not created in time");
return;
}
}
@ -116,7 +114,7 @@ public class LeaseCoordinatorExerciser {
try {
coord.start();
} catch (LeasingException e) {
LOG.error(e);
log.error("{}", e);
}
button.setLabel("Stop " + coord.getWorkerIdentifier());
}

View file

@ -14,11 +14,6 @@
*/
package com.amazonaws.services.kinesis.leases.impl;
import java.util.logging.Logger;
import com.amazonaws.services.kinesis.leases.exceptions.LeasingException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.rules.TestWatcher;
@ -29,15 +24,16 @@ import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient;
import com.amazonaws.services.kinesis.metrics.impl.MetricsHelper;
import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory;
import lombok.extern.slf4j.Slf4j;
@Ignore
@Slf4j
public class LeaseIntegrationTest {
protected static KinesisClientLeaseManager leaseManager;
protected static AmazonDynamoDBClient ddbClient =
new AmazonDynamoDBClient(new DefaultAWSCredentialsProviderChain());
private static final Log LOG = LogFactory.getLog(LeaseIntegrationTest.class);
@Rule
public TestWatcher watcher = new TestWatcher() {
@ -53,20 +49,20 @@ public class LeaseIntegrationTest {
try {
if (!leaseManager.leaseTableExists()) {
LOG.info("Creating lease table");
log.info("Creating lease table");
leaseManager.createLeaseTableIfNotExists(10L, 10L);
leaseManager.waitUntilLeaseTableExists(10, 500);
}
LOG.info("Beginning test case " + description.getMethodName());
log.info("Beginning test case {}", description.getMethodName());
for (KinesisClientLease lease : leaseManager.listLeases()) {
leaseManager.deleteLease(lease);
}
} catch (Exception e) {
String message =
"Test case " + description.getMethodName() + " fails because of exception during init: " + e;
LOG.error(message);
log.error(message);
throw new RuntimeException(message, e);
}
}