/*
 * Copyright (c) 2018 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
 * associated documentation files (the "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is furnished to do
 * so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
 * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
// The implementation is derived from https://github.com/patrobinson/gokini
//
// Copyright 2018 Patrick Robinson
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

package worker

import (
    "errors"
    "sync"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/kinesis"
    "github.com/aws/aws-sdk-go/service/kinesis/kinesisiface"

    chk "github.com/vmware/vmware-go-kcl/clientlibrary/checkpoint"
    "github.com/vmware/vmware-go-kcl/clientlibrary/config"
    kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
    "github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
    par "github.com/vmware/vmware-go-kcl/clientlibrary/partition"
)

/**
 * Worker is the high level class that Kinesis applications use to start processing data. It initializes and oversees
 * different components (e.g. syncing shard and lease information, tracking shard assignments, and processing data from
 * the shards).
 */
type Worker struct {
    streamName       string
    regionName       string
    workerID         string
    processorFactory kcl.IRecordProcessorFactory
    kclConfig        *config.KinesisClientLibConfiguration
    kc               kinesisiface.KinesisAPI
    checkpointer     chk.Checkpointer

    stop      *chan struct{}
    waitGroup *sync.WaitGroup
    done      bool

    shardStatus map[string]*par.ShardStatus

    metricsConfig *metrics.MonitoringConfiguration
    mService      metrics.MonitoringService
}

// NewWorker constructs a Worker instance for processing Kinesis stream data.
func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisClientLibConfiguration, metricsConfig *metrics.MonitoringConfiguration) *Worker {
    w := &Worker{
        streamName:       kclConfig.StreamName,
        regionName:       kclConfig.RegionName,
        workerID:         kclConfig.WorkerID,
        processorFactory: factory,
        kclConfig:        kclConfig,
        metricsConfig:    metricsConfig,
        done:             false,
    }

    if w.metricsConfig == nil {
        // "" means noop monitor service, i.e. not emitting any metrics.
        w.metricsConfig = &metrics.MonitoringConfiguration{MonitoringService: ""}
    }
    return w
}
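
// A minimal usage sketch (not part of the library API): construct a Worker and
// run it until the application decides to stop. It assumes an application-provided
// recordProcessorFactory implementing kcl.IRecordProcessorFactory, and uses
// config.NewKinesisClientLibConfig from this library's config package; the
// application/stream/region/worker names are placeholders.
//
//    kclConfig := config.NewKinesisClientLibConfig("test-app", "test-stream", "us-west-2", "worker-1")
//    worker := NewWorker(recordProcessorFactory, kclConfig, nil)
//    if err := worker.Start(); err != nil {
//        // handle startup failure
//    }
//    // ... consume records until it is time to stop ...
//    worker.Shutdown()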

// WithKinesis is used to provide a Kinesis service for either a custom implementation or unit testing.
func (w *Worker) WithKinesis(svc kinesisiface.KinesisAPI) *Worker {
    w.kc = svc
    return w
}

// WithCheckpointer is used to provide a custom checkpointer service for a non-DynamoDB implementation
// or unit testing.
func (w *Worker) WithCheckpointer(checker chk.Checkpointer) *Worker {
    w.checkpointer = checker
    return w
}
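
// A sketch of wiring in fakes for unit testing. The mockKinesis and
// mockCheckpointer types are hypothetical application-side fakes satisfying
// kinesisiface.KinesisAPI and chk.Checkpointer respectively:
//
//    worker := NewWorker(recordProcessorFactory, kclConfig, nil).
//        WithKinesis(&mockKinesis{}).
//        WithCheckpointer(&mockCheckpointer{})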

// Start starts consuming data from the stream and passes it to the application's record processors.
func (w *Worker) Start() error {
    log := w.kclConfig.Logger
    if err := w.initialize(); err != nil {
        log.Errorf("Failed to initialize Worker: %+v", err)
        return err
    }

    // Start monitoring service
    log.Infof("Starting monitoring service.")
    if err := w.mService.Start(); err != nil {
        log.Errorf("Failed to start monitoring service: %+v", err)
        return err
    }

    log.Infof("Starting worker event loop.")
    // entering event loop
    go w.eventLoop()
    return nil
}

// Shutdown signals the worker to shut down. The worker will try to initiate shutdown of all record processors.
func (w *Worker) Shutdown() {
    log := w.kclConfig.Logger
    log.Infof("Worker shutdown requested.")

    if w.done {
        return
    }

    close(*w.stop)
    w.done = true
    w.waitGroup.Wait()

    w.mService.Shutdown()
    log.Infof("Worker loop is complete. Exiting from worker.")
}
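
// A common pattern is to call Shutdown from an OS signal handler so leases are
// released and record processors stop cleanly. A sketch, assuming the os,
// os/signal, and syscall packages are imported and worker is a started Worker:
//
//    sigs := make(chan os.Signal, 1)
//    signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
//    <-sigs
//    worker.Shutdown()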

// Publish writes some data into the stream. This function is mainly used for testing purposes.
func (w *Worker) Publish(streamName, partitionKey string, data []byte) error {
    log := w.kclConfig.Logger
    _, err := w.kc.PutRecord(&kinesis.PutRecordInput{
        Data:         data,
        StreamName:   aws.String(streamName),
        PartitionKey: aws.String(partitionKey),
    })
    if err != nil {
        log.Errorf("Error in publishing data to %s/%s. Error: %+v", streamName, partitionKey, err)
    }
    return err
}
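
// For example, a test can seed the stream before consuming from it. Note that
// the Kinesis client must already be set, either via WithKinesis or after Start
// has initialized the worker; the stream name, key, and payload are placeholders:
//
//    if err := worker.Publish("test-stream", "test-key", []byte("hello")); err != nil {
//        // handle publish failure
//    }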

// initialize sets up the Kinesis client, the checkpointer, the monitoring service, and internal state.
func (w *Worker) initialize() error {
    log := w.kclConfig.Logger
    log.Infof("Worker initialization in progress...")

    // Create default Kinesis session
    if w.kc == nil {
        // create session for Kinesis
        log.Infof("Creating Kinesis session")

        s, err := session.NewSession(&aws.Config{
            Region:      aws.String(w.regionName),
            Endpoint:    &w.kclConfig.KinesisEndpoint,
            Credentials: w.kclConfig.KinesisCredentials,
        })
        if err != nil {
            // no need to move forward
            log.Fatalf("Failed in getting Kinesis session for creating Worker: %+v", err)
        }
        w.kc = kinesis.New(s)
    } else {
        log.Infof("Use custom Kinesis service.")
    }

    // Create default DynamoDB-based checkpointer implementation
    if w.checkpointer == nil {
        log.Infof("Creating DynamoDB based checkpointer")
        w.checkpointer = chk.NewDynamoCheckpoint(w.kclConfig)
    } else {
        log.Infof("Use custom checkpointer implementation.")
    }

    err := w.metricsConfig.Init(w.kclConfig.ApplicationName, w.streamName, w.workerID)
    if err != nil {
        log.Errorf("Failed to start monitoring service: %+v", err)
    }
    w.mService = w.metricsConfig.GetMonitoringService()

    log.Infof("Initializing Checkpointer")
    if err := w.checkpointer.Init(); err != nil {
        log.Errorf("Failed to start Checkpointer: %+v", err)
        return err
    }

    w.shardStatus = make(map[string]*par.ShardStatus)

    stopChan := make(chan struct{})
    w.stop = &stopChan

    w.waitGroup = &sync.WaitGroup{}

    log.Infof("Initialization complete.")

    return nil
}

// newShardConsumer creates a shard consumer instance
func (w *Worker) newShardConsumer(shard *par.ShardStatus) *ShardConsumer {
    return &ShardConsumer{
        streamName:      w.streamName,
        shard:           shard,
        kc:              w.kc,
        checkpointer:    w.checkpointer,
        recordProcessor: w.processorFactory.CreateProcessor(),
        kclConfig:       w.kclConfig,
        consumerID:      w.workerID,
        stop:            w.stop,
        mService:        w.mService,
        state:           WAITING_ON_PARENT_SHARDS,
    }
}

// eventLoop is the main worker loop: it periodically syncs shard information, acquires leases, and spawns shard consumers.
func (w *Worker) eventLoop() {
    log := w.kclConfig.Logger
    for {
        err := w.syncShard()
        if err != nil {
            log.Errorf("Error getting Kinesis shards: %+v", err)
            time.Sleep(time.Duration(w.kclConfig.ShardSyncIntervalMillis) * time.Millisecond)
            continue
        }

        log.Infof("Found %d shards", len(w.shardStatus))

        // Count the number of leases held by this worker, excluding shards that have been fully processed.
        counter := 0
        for _, shard := range w.shardStatus {
            if shard.GetLeaseOwner() == w.workerID && shard.Checkpoint != chk.SHARD_END {
                counter++
            }
        }

        // max number of leases has not been reached yet
        if counter < w.kclConfig.MaxLeasesForWorker {
            for _, shard := range w.shardStatus {
                // already the owner of the shard
                if shard.GetLeaseOwner() == w.workerID {
                    continue
                }

                err := w.checkpointer.FetchCheckpoint(shard)
                if err != nil {
                    // a checkpoint that does not exist yet is not an error condition
                    if err != chk.ErrSequenceIDNotFound {
                        log.Errorf("Error: %+v", err)
                        // move on to the next shard
                        continue
                    }
                }

                // The shard is closed and we have processed all records
                if shard.Checkpoint == chk.SHARD_END {
                    continue
                }

                err = w.checkpointer.GetLease(shard, w.workerID)
                if err != nil {
                    // cannot get a lease on the shard
                    if err.Error() != chk.ErrLeaseNotAquired {
                        log.Errorf("Cannot get lease: %+v", err)
                    }
                    continue
                }

                // log metrics on lease gained
                w.mService.LeaseGained(shard.ID)

                log.Infof("Start Shard Consumer for shard: %v", shard.ID)
                sc := w.newShardConsumer(shard)
                w.waitGroup.Add(1)
                go func() {
                    defer w.waitGroup.Done()
                    if err := sc.getRecords(shard); err != nil {
                        log.Errorf("Error in getRecords: %+v", err)
                    }
                }()
                // exit from the loop and do not grab more shards for now
                break
            }
        }

        select {
        case <-*w.stop:
            log.Infof("Shutting down...")
            return
        case <-time.After(time.Duration(w.kclConfig.ShardSyncIntervalMillis) * time.Millisecond):
        }
    }
}

// getShardIDs lists all shards of the (ACTIVE) stream and stores them in the shardStatus map.
// If a shard has been removed, it needs to be excluded from the cached shard status.
func (w *Worker) getShardIDs(startShardID string, shardInfo map[string]bool) error {
    log := w.kclConfig.Logger
    // The default pagination limit is 100.
    args := &kinesis.DescribeStreamInput{
        StreamName: aws.String(w.streamName),
    }

    if startShardID != "" {
        args.ExclusiveStartShardId = aws.String(startShardID)
    }

    streamDesc, err := w.kc.DescribeStream(args)
    if err != nil {
        log.Errorf("Error in DescribeStream: %s Error: %+v Request: %s", w.streamName, err, args)
        return err
    }

    if *streamDesc.StreamDescription.StreamStatus != "ACTIVE" {
        log.Warnf("Stream %s is not active", w.streamName)
        return errors.New("stream not active")
    }

    var lastShardID string
    for _, s := range streamDesc.StreamDescription.Shards {
        // record available shard IDs from a fresh read from Kinesis
        shardInfo[*s.ShardId] = true

        // found a new shard
        if _, ok := w.shardStatus[*s.ShardId]; !ok {
            log.Infof("Found new shard with id %s", *s.ShardId)
            w.shardStatus[*s.ShardId] = &par.ShardStatus{
                ID:                     *s.ShardId,
                ParentShardId:          aws.StringValue(s.ParentShardId),
                Mux:                    &sync.Mutex{},
                StartingSequenceNumber: aws.StringValue(s.SequenceNumberRange.StartingSequenceNumber),
                EndingSequenceNumber:   aws.StringValue(s.SequenceNumberRange.EndingSequenceNumber),
            }
        }
        lastShardID = *s.ShardId
    }

    if *streamDesc.StreamDescription.HasMoreShards {
        err := w.getShardIDs(lastShardID, shardInfo)
        if err != nil {
            log.Errorf("Error in getShardIDs: %s Error: %+v", lastShardID, err)
            return err
        }
    }

    return nil
}

// syncShard syncs the cached shard info with the actual shard info from Kinesis
func (w *Worker) syncShard() error {
    log := w.kclConfig.Logger
    shardInfo := make(map[string]bool)
    err := w.getShardIDs("", shardInfo)
    if err != nil {
        return err
    }

    for _, shard := range w.shardStatus {
        // The cached shard no longer exists; remove it.
        if _, ok := shardInfo[shard.ID]; !ok {
            // remove the shard from the local status cache
            delete(w.shardStatus, shard.ID)

            // remove the shard entry in DynamoDB as well
            // Note: syncShard runs periodically; we don't need to do anything in case of an error here.
            if err := w.checkpointer.RemoveLeaseInfo(shard.ID); err != nil {
                log.Errorf("Failed to remove shard lease info: %s Error: %+v", shard.ID, err)
            }
        }
    }

    return nil
}