Expose monitoring service (#49)
* Remove MonitoringConfiguration and export a no-op service. MonitoringConfiguration is no longer needed: users either implement their own monitoring service or use one of the default constructors.
* Provide a constructor for CloudWatchMonitoringService; unexport all fields.
* Provide a constructor for PrometheusMonitoringService; unexport fields.
* Remove all CloudWatch-specific configuration from the config package.
* NewWorker accepts a metrics.MonitoringService.
* Fix tests.
* Add WithMonitoringService to config. Instead of adding a parameter to NewWorker so the user can provide their own MonitoringService, WithMonitoringService is added to the configuration. This is much cleaner and stays in line with the rest of the current API.
* Fix tests after the introduction of WithMonitoringService; also fix tests that should have been fixed in earlier commits.
* Move Prometheus into its own package and rename it to prometheus.MonitoringService, so "Prometheus" need not be repeated at the call site.
* Move CloudWatch metrics into their own package and rename the type to cloudwatch.MonitoringService, so "CloudWatch" need not be repeated at the call site.
* Remove references to CloudWatch in comments.

All commits signed off by: Aurélien Rainone <aurelien.rainone@gmail.com>
parent d6369e48c2
commit 21980a54e3
12 changed files with 313 additions and 389 deletions
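Read together, the commits replace the string-keyed MonitoringConfiguration with a plain interface value carried by the configuration. A minimal sketch of the resulting API, assuming the import paths used in the diff below (the clientlibrary/interfaces path and the record-processor factory are assumptions, not shown in this diff):

import (
    cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config"
    kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
    "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/prometheus"
    wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker"
    "github.com/vmware/vmware-go-kcl/logger"
)

func startWorker(factory kcl.IRecordProcessorFactory) error {
    // Before: a metrics.MonitoringConfiguration selected the back-end by name
    // and was passed to NewWorker as a third argument.
    // After: construct a concrete service and attach it to the configuration.
    mService := prometheus.NewMonitoringService(":8080", "us-west-2", logger.GetDefaultLogger())

    kclConfig := cfg.NewKinesisClientLibConfig("appName", "streamName", "us-west-2", "workerID").
        WithMonitoringService(mService)

    return wk.NewWorker(factory, kclConfig).Start()
}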
@@ -67,9 +67,8 @@ func TestGetLeaseNotAquired(t *testing.T) {
 		WithMaxRecords(10).
 		WithMaxLeasesForWorker(1).
 		WithShardSyncIntervalMillis(5000).
-		WithFailoverTimeMillis(300000).
-		WithMetricsBufferTimeMillis(10000).
-		WithMetricsMaxQueueSize(20)
+		WithFailoverTimeMillis(300000)

 	checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc)
 	checkpoint.Init()
 	err := checkpoint.GetLease(&par.ShardStatus{

@@ -98,9 +97,8 @@ func TestGetLeaseAquired(t *testing.T) {
 		WithMaxRecords(10).
 		WithMaxLeasesForWorker(1).
 		WithShardSyncIntervalMillis(5000).
-		WithFailoverTimeMillis(300000).
-		WithMetricsBufferTimeMillis(10000).
-		WithMetricsMaxQueueSize(20)
+		WithFailoverTimeMillis(300000)

 	checkpoint := NewDynamoCheckpoint(kclConfig).WithDynamoDB(svc)
 	checkpoint.Init()
 	marshalledCheckpoint := map[string]*dynamodb.AttributeValue{
@@ -67,7 +67,7 @@ var errorMap = map[ErrorCode]ClientLibraryError{
 	KinesisClientLibRetryableError: {ErrorCode: KinesisClientLibRetryableError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Retryable exceptions (e.g. transient errors). The request/operation is expected to succeed upon (back off and) retry."},
 	KinesisClientLibIOError: {ErrorCode: KinesisClientLibIOError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Error in reading/writing information (e.g. shard information from Kinesis may not be current/complete)."},
 	BlockedOnParentShardError: {ErrorCode: BlockedOnParentShardError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Cannot start processing data for a shard because the data from the parent shard has not been completely processed (yet)."},
-	KinesisClientLibDependencyError: {ErrorCode: KinesisClientLibDependencyError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Cannot talk to its dependencies (e.g. fetching data from Kinesis, DynamoDB table reads/writes, emitting metrics to CloudWatch)."},
+	KinesisClientLibDependencyError: {ErrorCode: KinesisClientLibDependencyError, Retryable: true, Status: http.StatusServiceUnavailable, Msg: "Cannot talk to its dependencies (e.g. fetching data from Kinesis, DynamoDB table reads/writes)."},
 	ThrottlingError: {ErrorCode: ThrottlingError, Retryable: true, Status: http.StatusTooManyRequests, Msg: "Requests are throttled by a service (e.g. DynamoDB when storing a checkpoint)."},

 	// Non-Retryable
@@ -41,6 +41,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	creds "github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
 	"github.com/vmware/vmware-go-kcl/logger"
 )

@@ -88,12 +89,6 @@ const (
 	// Backoff time in milliseconds for Amazon Kinesis Client Library tasks (in the event of failures).
 	DEFAULT_TASK_BACKOFF_TIME_MILLIS = 500

-	// Buffer metrics for at most this long before publishing to CloudWatch.
-	DEFAULT_METRICS_BUFFER_TIME_MILLIS = 10000
-
-	// Buffer at most this many metrics before publishing to CloudWatch.
-	DEFAULT_METRICS_MAX_QUEUE_SIZE = 10000
-
 	// KCL will validate client provided sequence numbers with a call to Amazon Kinesis before
 	// checkpointing for calls to {@link RecordProcessorCheckpointer#checkpoint(String)} by default.
 	DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING = true

@@ -174,9 +169,6 @@ type (
 	// DynamoDBCredentials is used to access DynamoDB
 	DynamoDBCredentials *creds.Credentials

-	// CloudWatchCredentials is used to access CloudWatch
-	CloudWatchCredentials *creds.Credentials
-
 	// TableName is name of the dynamo db table for managing kinesis stream default to ApplicationName
 	TableName string

@@ -192,7 +184,7 @@ type (
 	// InitialPositionInStreamExtended provides actual AT_TMESTAMP value
 	InitialPositionInStreamExtended InitialPositionInStreamExtended

-	// credentials to access Kinesis/Dynamo/CloudWatch: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/
+	// credentials to access Kinesis/Dynamo: https://docs.aws.amazon.com/sdk-for-go/api/aws/credentials/
 	// Note: No need to configure here. Use NewEnvCredentials for testing and EC2RoleProvider for production

 	// FailoverTimeMillis Lease duration (leases not renewed within this period will be claimed by others)

@@ -219,18 +211,11 @@ type (
 	// kinesisClientConfig Client Configuration used by Kinesis client
 	// dynamoDBClientConfig Client Configuration used by DynamoDB client
-	// cloudWatchClientConfig Client Configuration used by CloudWatch client
 	// Note: we will use default client provided by AWS SDK

 	// TaskBackoffTimeMillis Backoff period when tasks encounter an exception
 	TaskBackoffTimeMillis int

-	// MetricsBufferTimeMillis Metrics are buffered for at most this long before publishing to CloudWatch
-	MetricsBufferTimeMillis int
-
-	// MetricsMaxQueueSize Max number of metrics to buffer before publishing to CloudWatch
-	MetricsMaxQueueSize int
-
 	// ValidateSequenceNumberBeforeCheckpointing whether KCL should validate client provided sequence numbers
 	ValidateSequenceNumberBeforeCheckpointing bool

@@ -260,6 +245,9 @@ type (
 	// Logger used to log message.
 	Logger logger.Logger
+
+	// MonitoringService publishes per worker-scoped metrics.
+	MonitoringService metrics.MonitoringService
 	}
 )
@@ -32,12 +32,10 @@ func TestConfig(t *testing.T) {
 		WithInitialPositionInStream(TRIM_HORIZON).
 		WithIdleTimeBetweenReadsInMillis(20).
 		WithCallProcessRecordsEvenForEmptyRecordList(true).
-		WithTaskBackoffTimeMillis(10).
-		WithMetricsBufferTimeMillis(500).
-		WithMetricsMaxQueueSize(200)
+		WithTaskBackoffTimeMillis(10)

 	assert.Equal(t, "appName", kclConfig.ApplicationName)
 	assert.Equal(t, 500, kclConfig.FailoverTimeMillis)
 	assert.Equal(t, 500, kclConfig.TaskBackoffTimeMillis)

 	contextLogger := kclConfig.Logger.WithFields(logger.Fields{"key1": "value1"})
 	contextLogger.Debugf("Starting with default logger")
@@ -37,26 +37,28 @@ import (
 	"log"
 	"time"

+	"github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
+
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/vmware/vmware-go-kcl/clientlibrary/utils"
 	"github.com/vmware/vmware-go-kcl/logger"
 )

-// NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields.
+// NewKinesisClientLibConfig creates a default KinesisClientLibConfiguration based on the required fields.
 func NewKinesisClientLibConfig(applicationName, streamName, regionName, workerID string) *KinesisClientLibConfiguration {
 	return NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID,
-		nil, nil, nil)
+		nil, nil)
 }

-// NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields.
+// NewKinesisClientLibConfigWithCredential creates a default KinesisClientLibConfiguration based on the required fields and unique credentials.
 func NewKinesisClientLibConfigWithCredential(applicationName, streamName, regionName, workerID string,
 	creds *credentials.Credentials) *KinesisClientLibConfiguration {
-	return NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID, creds, creds, creds)
+	return NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID, creds, creds)
 }

-// NewKinesisClientLibConfig to create a default KinesisClientLibConfiguration based on the required fields.
+// NewKinesisClientLibConfigWithCredentials creates a default KinesisClientLibConfiguration based on the required fields and specific credentials for each service.
 func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regionName, workerID string,
-	kiniesisCreds, dynamodbCreds, cloudwatchCreds *credentials.Credentials) *KinesisClientLibConfiguration {
+	kiniesisCreds, dynamodbCreds *credentials.Credentials) *KinesisClientLibConfiguration {
 	checkIsValueNotEmpty("ApplicationName", applicationName)
 	checkIsValueNotEmpty("StreamName", streamName)
 	checkIsValueNotEmpty("RegionName", regionName)

@@ -70,7 +72,6 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regio
 		ApplicationName:     applicationName,
 		KinesisCredentials:  kiniesisCreds,
 		DynamoDBCredentials: dynamodbCreds,
-		CloudWatchCredentials: cloudwatchCreds,
 		TableName:           applicationName,
 		StreamName:          streamName,
 		RegionName:          regionName,

@@ -85,8 +86,6 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regio
 		ShardSyncIntervalMillis:             DEFAULT_SHARD_SYNC_INTERVAL_MILLIS,
 		CleanupTerminatedShardsBeforeExpiry: DEFAULT_CLEANUP_LEASES_UPON_SHARDS_COMPLETION,
 		TaskBackoffTimeMillis:               DEFAULT_TASK_BACKOFF_TIME_MILLIS,
-		MetricsBufferTimeMillis:             DEFAULT_METRICS_BUFFER_TIME_MILLIS,
-		MetricsMaxQueueSize:                 DEFAULT_METRICS_MAX_QUEUE_SIZE,
 		ValidateSequenceNumberBeforeCheckpointing: DEFAULT_VALIDATE_SEQUENCE_NUMBER_BEFORE_CHECKPOINTING,
 		ShutdownGraceMillis:                 DEFAULT_SHUTDOWN_GRACE_MILLIS,
 		MaxLeasesForWorker:                  DEFAULT_MAX_LEASES_FOR_WORKER,

@@ -191,20 +190,6 @@ func (c *KinesisClientLibConfiguration) WithTaskBackoffTimeMillis(taskBackoffTim
 	return c
 }

-// WithMetricsBufferTimeMillis configures Metrics are buffered for at most this long before publishing to CloudWatch
-func (c *KinesisClientLibConfiguration) WithMetricsBufferTimeMillis(metricsBufferTimeMillis int) *KinesisClientLibConfiguration {
-	checkIsValuePositive("MetricsBufferTimeMillis", metricsBufferTimeMillis)
-	c.MetricsBufferTimeMillis = metricsBufferTimeMillis
-	return c
-}
-
-// WithMetricsMaxQueueSize configures Max number of metrics to buffer before publishing to CloudWatch
-func (c *KinesisClientLibConfiguration) WithMetricsMaxQueueSize(metricsMaxQueueSize int) *KinesisClientLibConfiguration {
-	checkIsValuePositive("MetricsMaxQueueSize", metricsMaxQueueSize)
-	c.MetricsMaxQueueSize = metricsMaxQueueSize
-	return c
-}
-
 func (c *KinesisClientLibConfiguration) WithLogger(logger logger.Logger) *KinesisClientLibConfiguration {
 	if logger == nil {
 		log.Panic("Logger cannot be null")
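Callers migrating from the old three-credential signature simply drop the CloudWatch argument. A hedged sketch of the new call (the credential values are placeholders):

import (
    "github.com/aws/aws-sdk-go/aws/credentials"

    cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config"
)

func newConfig() *cfg.KinesisClientLibConfiguration {
    envCreds := credentials.NewEnvCredentials()
    // The third *credentials.Credentials parameter (CloudWatch) is gone;
    // only Kinesis and DynamoDB credentials remain.
    return cfg.NewKinesisClientLibConfigWithCredentials(
        "appName", "streamName", "us-west-2", "workerID",
        envCreds, // Kinesis
        envCreds, // DynamoDB
    )
}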
@@ -212,3 +197,11 @@ func (c *KinesisClientLibConfiguration) WithLogger(logger logger.Logger) *Kinesi
 	c.Logger = logger
 	return c
 }
+
+// WithMonitoringService sets the monitoring service used to publish metrics.
+func (c *KinesisClientLibConfiguration) WithMonitoringService(mService metrics.MonitoringService) *KinesisClientLibConfiguration {
+	// The nil case is handled downstream (at worker creation), so there is no need to handle it here.
+	// Besides, the user might want to be explicit about passing a nil monitoring service.
+	c.MonitoringService = mService
+	return c
+}
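With the option in place, wiring metrics into the configuration is one more link in the builder chain. A minimal sketch (values are arbitrary; nil is shown deliberately, since the comment above allows it):

import (
    cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config"
    "github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
)

func newConfigWithMetrics(m metrics.MonitoringService) *cfg.KinesisClientLibConfiguration {
    return cfg.NewKinesisClientLibConfig("appName", "streamName", "us-west-2", "workerID").
        WithFailoverTimeMillis(300000).
        WithMonitoringService(m) // m may be nil: the worker falls back to a no-op service
}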
@@ -25,7 +25,7 @@
 // The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
 //
 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-package metrics
+package cloudwatch

 import (
 	"sync"

@@ -34,23 +34,25 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/session"
-	"github.com/aws/aws-sdk-go/service/cloudwatch"
+	cwatch "github.com/aws/aws-sdk-go/service/cloudwatch"
 	"github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface"

 	"github.com/vmware/vmware-go-kcl/logger"
 )

-type CloudWatchMonitoringService struct {
-	Namespace     string
-	KinesisStream string
-	WorkerID      string
-	Region        string
-	Credentials   *credentials.Credentials
-	Logger        logger.Logger
-
-	// control how often to pusblish to CloudWatch
-	MetricsBufferTimeMillis int
-	MetricsMaxQueueSize     int
+// Buffer metrics for at most this long before publishing to CloudWatch.
+const DEFAULT_CLOUDWATCH_METRICS_BUFFER_DURATION = 10 * time.Second
+
+type MonitoringService struct {
+	appName     string
+	streamName  string
+	workerID    string
+	region      string
+	credentials *credentials.Credentials
+	logger      logger.Logger
+
+	// control how often to publish to CloudWatch
+	bufferDuration time.Duration

 	stop      *chan struct{}
 	waitGroup *sync.WaitGroup

@@ -59,6 +61,8 @@ type CloudWatchMonitoringService struct {
 }

 type cloudWatchMetrics struct {
+	sync.Mutex
+
 	processedRecords   int64
 	processedBytes     int64
 	behindLatestMillis []float64

@@ -66,18 +70,33 @@ type cloudWatchMetrics struct {
 	leaseRenewals      int64
 	getRecordsTime     []float64
 	processRecordsTime []float64
-	sync.Mutex
 }

-func (cw *CloudWatchMonitoringService) Init() error {
-	cfg := &aws.Config{Region: aws.String(cw.Region)}
-	cfg.Credentials = cw.Credentials
+// NewMonitoringService returns a Monitoring service publishing metrics to CloudWatch.
+func NewMonitoringService(region string, creds *credentials.Credentials) *MonitoringService {
+	return NewMonitoringServiceWithOptions(region, creds, logger.GetDefaultLogger(), DEFAULT_CLOUDWATCH_METRICS_BUFFER_DURATION)
+}
+
+// NewMonitoringServiceWithOptions returns a Monitoring service publishing metrics to
+// CloudWatch with the provided credentials, buffering duration and logger.
+func NewMonitoringServiceWithOptions(region string, creds *credentials.Credentials, logger logger.Logger, bufferDur time.Duration) *MonitoringService {
+	return &MonitoringService{
+		region:         region,
+		credentials:    creds,
+		logger:         logger,
+		bufferDuration: bufferDur,
+	}
+}
+
+func (cw *MonitoringService) Init(appName, streamName, workerID string) error {
+	cfg := &aws.Config{Region: aws.String(cw.region)}
+	cfg.Credentials = cw.credentials
 	s, err := session.NewSession(cfg)
 	if err != nil {
-		cw.Logger.Errorf("Error in creating session for cloudwatch. %+v", err)
+		cw.logger.Errorf("Error in creating session for cloudwatch. %+v", err)
 		return err
 	}
-	cw.svc = cloudwatch.New(s)
+	cw.svc = cwatch.New(s)
 	cw.shardMetrics = new(sync.Map)

 	stopChan := make(chan struct{})
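The two constructors split defaults from full control. A usage sketch; note the clientlibrary/metrics/cloudwatch import path is an assumption here, mirroring the prometheus package created later in this diff:

import (
    "time"

    "github.com/aws/aws-sdk-go/aws/credentials"

    "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/cloudwatch"
    "github.com/vmware/vmware-go-kcl/logger"
)

func newCloudWatchMetrics() *cloudwatch.MonitoringService {
    envCreds := credentials.NewEnvCredentials()

    // Default logger and default 10s buffering:
    _ = cloudwatch.NewMonitoringService("us-west-2", envCreds)

    // Explicit logger and a slower flush cadence:
    return cloudwatch.NewMonitoringServiceWithOptions(
        "us-west-2", envCreds, logger.GetDefaultLogger(), 30*time.Second)
}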
@@ -88,71 +107,71 @@ func (cw *CloudWatchMonitoringService) Init() error {
 	return nil
 }

-func (cw *CloudWatchMonitoringService) Start() error {
+func (cw *MonitoringService) Start() error {
 	cw.waitGroup.Add(1)
 	// entering eventloop for sending metrics to CloudWatch
 	go cw.eventloop()
 	return nil
 }

-func (cw *CloudWatchMonitoringService) Shutdown() {
-	cw.Logger.Infof("Shutting down cloudwatch metrics system...")
+func (cw *MonitoringService) Shutdown() {
+	cw.logger.Infof("Shutting down cloudwatch metrics system...")
 	close(*cw.stop)
 	cw.waitGroup.Wait()
-	cw.Logger.Infof("Cloudwatch metrics system has been shutdown.")
+	cw.logger.Infof("Cloudwatch metrics system has been shutdown.")
 }

 // Start daemon to flush metrics periodically
-func (cw *CloudWatchMonitoringService) eventloop() {
+func (cw *MonitoringService) eventloop() {
 	defer cw.waitGroup.Done()

 	for {
 		if err := cw.flush(); err != nil {
-			cw.Logger.Errorf("Error sending metrics to CloudWatch. %+v", err)
+			cw.logger.Errorf("Error sending metrics to CloudWatch. %+v", err)
 		}

 		select {
 		case <-*cw.stop:
-			cw.Logger.Infof("Shutting down monitoring system")
+			cw.logger.Infof("Shutting down monitoring system")
 			if err := cw.flush(); err != nil {
-				cw.Logger.Errorf("Error sending metrics to CloudWatch. %+v", err)
+				cw.logger.Errorf("Error sending metrics to CloudWatch. %+v", err)
 			}
 			return
-		case <-time.After(time.Duration(cw.MetricsBufferTimeMillis) * time.Millisecond):
+		case <-time.After(cw.bufferDuration):
 		}
 	}
 }

-func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWatchMetrics) bool {
+func (cw *MonitoringService) flushShard(shard string, metric *cloudWatchMetrics) bool {
 	metric.Lock()
-	defaultDimensions := []*cloudwatch.Dimension{
+	defaultDimensions := []*cwatch.Dimension{
 		{
 			Name:  aws.String("Shard"),
 			Value: &shard,
 		},
 		{
 			Name:  aws.String("KinesisStreamName"),
-			Value: &cw.KinesisStream,
+			Value: &cw.streamName,
 		},
 	}

-	leaseDimensions := []*cloudwatch.Dimension{
+	leaseDimensions := []*cwatch.Dimension{
 		{
 			Name:  aws.String("Shard"),
 			Value: &shard,
 		},
 		{
 			Name:  aws.String("KinesisStreamName"),
-			Value: &cw.KinesisStream,
+			Value: &cw.streamName,
 		},
 		{
 			Name:  aws.String("WorkerID"),
-			Value: &cw.WorkerID,
+			Value: &cw.workerID,
 		},
 	}
 	metricTimestamp := time.Now()

-	data := []*cloudwatch.MetricDatum{
+	data := []*cwatch.MetricDatum{
 		{
 			Dimensions: defaultDimensions,
 			MetricName: aws.String("RecordsProcessed"),

@@ -184,12 +203,12 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat
 	}

 	if len(metric.behindLatestMillis) > 0 {
-		data = append(data, &cloudwatch.MetricDatum{
+		data = append(data, &cwatch.MetricDatum{
 			Dimensions: defaultDimensions,
 			MetricName: aws.String("MillisBehindLatest"),
 			Unit:       aws.String("Milliseconds"),
 			Timestamp:  &metricTimestamp,
-			StatisticValues: &cloudwatch.StatisticSet{
+			StatisticValues: &cwatch.StatisticSet{
 				SampleCount: aws.Float64(float64(len(metric.behindLatestMillis))),
 				Sum:         sumFloat64(metric.behindLatestMillis),
 				Maximum:     maxFloat64(metric.behindLatestMillis),

@@ -198,12 +217,12 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat
 	}

 	if len(metric.getRecordsTime) > 0 {
-		data = append(data, &cloudwatch.MetricDatum{
+		data = append(data, &cwatch.MetricDatum{
 			Dimensions: defaultDimensions,
 			MetricName: aws.String("KinesisDataFetcher.getRecords.Time"),
 			Unit:       aws.String("Milliseconds"),
 			Timestamp:  &metricTimestamp,
-			StatisticValues: &cloudwatch.StatisticSet{
+			StatisticValues: &cwatch.StatisticSet{
 				SampleCount: aws.Float64(float64(len(metric.getRecordsTime))),
 				Sum:         sumFloat64(metric.getRecordsTime),
 				Maximum:     maxFloat64(metric.getRecordsTime),

@@ -212,12 +231,12 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat
 	}

 	if len(metric.processRecordsTime) > 0 {
-		data = append(data, &cloudwatch.MetricDatum{
+		data = append(data, &cwatch.MetricDatum{
 			Dimensions: defaultDimensions,
 			MetricName: aws.String("RecordProcessor.processRecords.Time"),
 			Unit:       aws.String("Milliseconds"),
 			Timestamp:  &metricTimestamp,
-			StatisticValues: &cloudwatch.StatisticSet{
+			StatisticValues: &cwatch.StatisticSet{
 				SampleCount: aws.Float64(float64(len(metric.processRecordsTime))),
 				Sum:         sumFloat64(metric.processRecordsTime),
 				Maximum:     maxFloat64(metric.processRecordsTime),

@@ -226,8 +245,8 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat
 	}

 	// Publish metrics data to cloud watch
-	_, err := cw.svc.PutMetricData(&cloudwatch.PutMetricDataInput{
-		Namespace:  aws.String(cw.Namespace),
+	_, err := cw.svc.PutMetricData(&cwatch.PutMetricDataInput{
+		Namespace:  aws.String(cw.appName),
 		MetricData: data,
 	})

@@ -239,15 +258,15 @@ func (cw *CloudWatchMonitoringService) flushShard(shard string, metric *cloudWat
 		metric.getRecordsTime = []float64{}
 		metric.processRecordsTime = []float64{}
 	} else {
-		cw.Logger.Errorf("Error in publishing cloudwatch metrics. Error: %+v", err)
+		cw.logger.Errorf("Error in publishing cloudwatch metrics. Error: %+v", err)
 	}

 	metric.Unlock()
 	return true
 }

-func (cw *CloudWatchMonitoringService) flush() error {
-	cw.Logger.Debugf("Flushing metrics data. Stream: %s, Worker: %s", cw.KinesisStream, cw.WorkerID)
+func (cw *MonitoringService) flush() error {
+	cw.logger.Debugf("Flushing metrics data. Stream: %s, Worker: %s", cw.streamName, cw.workerID)
 	// publish per shard metrics
 	cw.shardMetrics.Range(func(k, v interface{}) bool {
 		shard, metric := k.(string), v.(*cloudWatchMetrics)

@@ -257,62 +276,62 @@ func (cw *CloudWatchMonitoringService) flush() error {
 	return nil
 }

-func (cw *CloudWatchMonitoringService) IncrRecordsProcessed(shard string, count int) {
+func (cw *MonitoringService) IncrRecordsProcessed(shard string, count int) {
 	m := cw.getOrCreatePerShardMetrics(shard)
 	m.Lock()
 	defer m.Unlock()
 	m.processedRecords += int64(count)
 }

-func (cw *CloudWatchMonitoringService) IncrBytesProcessed(shard string, count int64) {
+func (cw *MonitoringService) IncrBytesProcessed(shard string, count int64) {
 	m := cw.getOrCreatePerShardMetrics(shard)
 	m.Lock()
 	defer m.Unlock()
 	m.processedBytes += count
 }

-func (cw *CloudWatchMonitoringService) MillisBehindLatest(shard string, millSeconds float64) {
+func (cw *MonitoringService) MillisBehindLatest(shard string, millSeconds float64) {
 	m := cw.getOrCreatePerShardMetrics(shard)
 	m.Lock()
 	defer m.Unlock()
 	m.behindLatestMillis = append(m.behindLatestMillis, millSeconds)
 }

-func (cw *CloudWatchMonitoringService) LeaseGained(shard string) {
+func (cw *MonitoringService) LeaseGained(shard string) {
 	m := cw.getOrCreatePerShardMetrics(shard)
 	m.Lock()
 	defer m.Unlock()
 	m.leasesHeld++
 }

-func (cw *CloudWatchMonitoringService) LeaseLost(shard string) {
+func (cw *MonitoringService) LeaseLost(shard string) {
 	m := cw.getOrCreatePerShardMetrics(shard)
 	m.Lock()
 	defer m.Unlock()
 	m.leasesHeld--
 }

-func (cw *CloudWatchMonitoringService) LeaseRenewed(shard string) {
+func (cw *MonitoringService) LeaseRenewed(shard string) {
 	m := cw.getOrCreatePerShardMetrics(shard)
 	m.Lock()
 	defer m.Unlock()
 	m.leaseRenewals++
 }

-func (cw *CloudWatchMonitoringService) RecordGetRecordsTime(shard string, time float64) {
+func (cw *MonitoringService) RecordGetRecordsTime(shard string, time float64) {
 	m := cw.getOrCreatePerShardMetrics(shard)
 	m.Lock()
 	defer m.Unlock()
 	m.getRecordsTime = append(m.getRecordsTime, time)
 }
-func (cw *CloudWatchMonitoringService) RecordProcessRecordsTime(shard string, time float64) {
+func (cw *MonitoringService) RecordProcessRecordsTime(shard string, time float64) {
 	m := cw.getOrCreatePerShardMetrics(shard)
 	m.Lock()
 	defer m.Unlock()
 	m.processRecordsTime = append(m.processRecordsTime, time)
 }

-func (cw *CloudWatchMonitoringService) getOrCreatePerShardMetrics(shard string) *cloudWatchMetrics {
+func (cw *MonitoringService) getOrCreatePerShardMetrics(shard string) *cloudWatchMetrics {
 	var i interface{}
 	var ok bool
 	if i, ok = cw.shardMetrics.Load(shard); !ok {
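flushShard collapses each buffered sample slice into a CloudWatch StatisticSet via sumFloat64 and maxFloat64, whose bodies fall outside the hunks above. A plausible sketch inferred from the call sites (the SDK's StatisticSet fields are *float64, hence the pointer returns); treat these as assumptions, not the file's actual code:

// Sketch only: these helpers exist in the file but are not part of this diff.
func sumFloat64(xs []float64) *float64 {
    var sum float64
    for _, x := range xs {
        sum += x
    }
    return &sum
}

func maxFloat64(xs []float64) *float64 {
    // Callers guard with len(...) > 0, so xs[0] is safe.
    max := xs[0]
    for _, x := range xs[1:] {
        if x > max {
            max = x
        }
    }
    return &max
}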
@@ -27,23 +27,8 @@
 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 package metrics

-import (
-	"fmt"
-	"github.com/vmware/vmware-go-kcl/logger"
-)
-
-// MonitoringConfiguration allows you to configure how record processing metrics are exposed
-type MonitoringConfiguration struct {
-	MonitoringService string // Type of monitoring to expose. Supported types are "prometheus"
-	Region            string
-	Prometheus        PrometheusMonitoringService
-	CloudWatch        CloudWatchMonitoringService
-	service           MonitoringService
-	Logger            logger.Logger
-}
-
 type MonitoringService interface {
-	Init() error
+	Init(appName, streamName, workerID string) error
 	Start() error
 	IncrRecordsProcessed(string, int)
 	IncrBytesProcessed(string, int64)

@@ -56,53 +41,18 @@ type MonitoringService interface {
 	Shutdown()
 }

-func (m *MonitoringConfiguration) Init(nameSpace, streamName string, workerID string) error {
-	if m.MonitoringService == "" {
-		m.service = &noopMonitoringService{}
-		return nil
-	}
-
-	// Config with default logger if logger is not specified.
-	if m.Logger == nil {
-		m.Logger = logger.GetDefaultLogger()
-	}
-
-	switch m.MonitoringService {
-	case "prometheus":
-		m.Prometheus.Namespace = nameSpace
-		m.Prometheus.KinesisStream = streamName
-		m.Prometheus.WorkerID = workerID
-		m.Prometheus.Region = m.Region
-		m.Prometheus.Logger = m.Logger
-		m.service = &m.Prometheus
-	case "cloudwatch":
-		m.CloudWatch.Namespace = nameSpace
-		m.CloudWatch.KinesisStream = streamName
-		m.CloudWatch.WorkerID = workerID
-		m.CloudWatch.Region = m.Region
-		m.CloudWatch.Logger = m.Logger
-		m.service = &m.CloudWatch
-	default:
-		return fmt.Errorf("Invalid monitoring service type %s", m.MonitoringService)
-	}
-	return m.service.Init()
-}
-
-func (m *MonitoringConfiguration) GetMonitoringService() MonitoringService {
-	return m.service
-}
-
-type noopMonitoringService struct{}
-
-func (n *noopMonitoringService) Init() error  { return nil }
-func (n *noopMonitoringService) Start() error { return nil }
-func (n *noopMonitoringService) Shutdown()    {}
-
-func (n *noopMonitoringService) IncrRecordsProcessed(shard string, count int)         {}
-func (n *noopMonitoringService) IncrBytesProcessed(shard string, count int64)         {}
-func (n *noopMonitoringService) MillisBehindLatest(shard string, millSeconds float64) {}
-func (n *noopMonitoringService) LeaseGained(shard string)                             {}
-func (n *noopMonitoringService) LeaseLost(shard string)                               {}
-func (n *noopMonitoringService) LeaseRenewed(shard string)                            {}
-func (n *noopMonitoringService) RecordGetRecordsTime(shard string, time float64)      {}
-func (n *noopMonitoringService) RecordProcessRecordsTime(shard string, time float64)  {}
+// NoopMonitoringService implements MonitoringService by doing nothing.
+type NoopMonitoringService struct{}
+
+func (NoopMonitoringService) Init(appName, streamName, workerID string) error { return nil }
+func (NoopMonitoringService) Start() error                                    { return nil }
+func (NoopMonitoringService) Shutdown()                                       {}
+
+func (NoopMonitoringService) IncrRecordsProcessed(shard string, count int)         {}
+func (NoopMonitoringService) IncrBytesProcessed(shard string, count int64)         {}
+func (NoopMonitoringService) MillisBehindLatest(shard string, millSeconds float64) {}
+func (NoopMonitoringService) LeaseGained(shard string)                             {}
+func (NoopMonitoringService) LeaseLost(shard string)                               {}
+func (NoopMonitoringService) LeaseRenewed(shard string)                            {}
+func (NoopMonitoringService) RecordGetRecordsTime(shard string, time float64)      {}
+func (NoopMonitoringService) RecordProcessRecordsTime(shard string, time float64)  {}
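With MonitoringConfiguration gone, custom metrics back-ends just implement the interface; embedding the now-exported NoopMonitoringService supplies no-op defaults so only the interesting methods need overriding. A sketch (lagOnlyService is hypothetical, not part of this change):

import "github.com/vmware/vmware-go-kcl/clientlibrary/metrics"

// lagOnlyService records how far behind each shard is and ignores the rest
// of the interface through the embedded no-op implementation.
type lagOnlyService struct {
    metrics.NoopMonitoringService
    lag map[string]float64
}

func (s *lagOnlyService) MillisBehindLatest(shard string, millis float64) {
    s.lag[shard] = millis
}

// Compile-time check that the interface is satisfied.
var _ metrics.MonitoringService = (*lagOnlyService)(nil)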
@@ -1,155 +0,0 @@
-/*
- * Copyright (c) 2018 VMware, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
- * associated documentation files (the "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is furnished to do
- * so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
- * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-// The implementation is derived from https://github.com/patrobinson/gokini
-//
-// Copyright 2018 Patrick robinson
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-package metrics
-
-import (
-	"net/http"
-
-	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/promhttp"
-
-	"github.com/vmware/vmware-go-kcl/logger"
-)
-
-// PrometheusMonitoringService to start Prometheus as metrics system.
-// It might be trick if the service onboarding with KCL also uses Prometheus.
-// Therefore, we should start cloudwatch metrics by default instead.
-type PrometheusMonitoringService struct {
-	ListenAddress string
-
-	Namespace     string
-	KinesisStream string
-	WorkerID      string
-	Region        string
-	Logger        logger.Logger
-
-	processedRecords   *prometheus.CounterVec
-	processedBytes     *prometheus.CounterVec
-	behindLatestMillis *prometheus.GaugeVec
-	leasesHeld         *prometheus.GaugeVec
-	leaseRenewals      *prometheus.CounterVec
-	getRecordsTime     *prometheus.HistogramVec
-	processRecordsTime *prometheus.HistogramVec
-}
-
-func (p *PrometheusMonitoringService) Init() error {
-	p.processedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
-		Name: p.Namespace + `_processed_bytes`,
-		Help: "Number of bytes processed",
-	}, []string{"kinesisStream", "shard"})
-	p.processedRecords = prometheus.NewCounterVec(prometheus.CounterOpts{
-		Name: p.Namespace + `_processed_records`,
-		Help: "Number of records processed",
-	}, []string{"kinesisStream", "shard"})
-	p.behindLatestMillis = prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: p.Namespace + `_behind_latest_millis`,
-		Help: "The amount of milliseconds processing is behind",
-	}, []string{"kinesisStream", "shard"})
-	p.leasesHeld = prometheus.NewGaugeVec(prometheus.GaugeOpts{
-		Name: p.Namespace + `_leases_held`,
-		Help: "The number of leases held by the worker",
-	}, []string{"kinesisStream", "shard", "workerID"})
-	p.leaseRenewals = prometheus.NewCounterVec(prometheus.CounterOpts{
-		Name: p.Namespace + `_lease_renewals`,
-		Help: "The number of successful lease renewals",
-	}, []string{"kinesisStream", "shard", "workerID"})
-	p.getRecordsTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{
-		Name: p.Namespace + `_get_records_duration_milliseconds`,
-		Help: "The time taken to fetch records and process them",
-	}, []string{"kinesisStream", "shard"})
-	p.processRecordsTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{
-		Name: p.Namespace + `_process_records_duration_milliseconds`,
-		Help: "The time taken to process records",
-	}, []string{"kinesisStream", "shard"})
-
-	metrics := []prometheus.Collector{
-		p.processedBytes,
-		p.processedRecords,
-		p.behindLatestMillis,
-		p.leasesHeld,
-		p.leaseRenewals,
-		p.getRecordsTime,
-		p.processRecordsTime,
-	}
-	for _, metric := range metrics {
-		err := prometheus.Register(metric)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (p *PrometheusMonitoringService) Start() error {
-	http.Handle("/metrics", promhttp.Handler())
-	go func() {
-		p.Logger.Infof("Starting Prometheus listener on %s", p.ListenAddress)
-		err := http.ListenAndServe(p.ListenAddress, nil)
-		if err != nil {
-			p.Logger.Errorf("Error starting Prometheus metrics endpoint. %+v", err)
-		}
-		p.Logger.Infof("Stopped metrics server")
-	}()
-
-	return nil
-}
-
-func (p *PrometheusMonitoringService) Shutdown() {}
-
-func (p *PrometheusMonitoringService) IncrRecordsProcessed(shard string, count int) {
-	p.processedRecords.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Add(float64(count))
-}
-
-func (p *PrometheusMonitoringService) IncrBytesProcessed(shard string, count int64) {
-	p.processedBytes.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Add(float64(count))
-}
-
-func (p *PrometheusMonitoringService) MillisBehindLatest(shard string, millSeconds float64) {
-	p.behindLatestMillis.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Set(millSeconds)
-}
-
-func (p *PrometheusMonitoringService) LeaseGained(shard string) {
-	p.leasesHeld.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream, "workerID": p.WorkerID}).Inc()
-}
-
-func (p *PrometheusMonitoringService) LeaseLost(shard string) {
-	p.leasesHeld.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream, "workerID": p.WorkerID}).Dec()
-}
-
-func (p *PrometheusMonitoringService) LeaseRenewed(shard string) {
-	p.leaseRenewals.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream, "workerID": p.WorkerID}).Inc()
-}
-
-func (p *PrometheusMonitoringService) RecordGetRecordsTime(shard string, time float64) {
-	p.getRecordsTime.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Observe(time)
-}
-
-func (p *PrometheusMonitoringService) RecordProcessRecordsTime(shard string, time float64) {
-	p.processRecordsTime.With(prometheus.Labels{"shard": shard, "kinesisStream": p.KinesisStream}).Observe(time)
-}

clientlibrary/metrics/prometheus/prometheus.go (new file, 166 lines)
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2018 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+ * associated documentation files (the "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is furnished to do
+ * so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+ * NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+// The implementation is derived from https://github.com/patrobinson/gokini
+//
+// Copyright 2018 Patrick robinson
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+package prometheus
+
+import (
+	"net/http"
+
+	prom "github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+
+	"github.com/vmware/vmware-go-kcl/logger"
+)
+
+// MonitoringService publishes kcl metrics to Prometheus.
+// It might be tricky if the service onboarding with KCL already uses Prometheus.
+type MonitoringService struct {
+	listenAddress string
+	namespace     string
+	streamName    string
+	workerID      string
+	region        string
+	logger        logger.Logger
+
+	processedRecords   *prom.CounterVec
+	processedBytes     *prom.CounterVec
+	behindLatestMillis *prom.GaugeVec
+	leasesHeld         *prom.GaugeVec
+	leaseRenewals      *prom.CounterVec
+	getRecordsTime     *prom.HistogramVec
+	processRecordsTime *prom.HistogramVec
+}
+
+// NewMonitoringService returns a Monitoring service publishing metrics to Prometheus.
+func NewMonitoringService(listenAddress, region string, logger logger.Logger) *MonitoringService {
+	return &MonitoringService{
+		listenAddress: listenAddress,
+		region:        region,
+		logger:        logger,
+	}
+}
+
+func (p *MonitoringService) Init(appName, streamName, workerID string) error {
+	p.namespace = appName
+	p.streamName = streamName
+	p.workerID = workerID
+
+	p.processedBytes = prom.NewCounterVec(prom.CounterOpts{
+		Name: p.namespace + `_processed_bytes`,
+		Help: "Number of bytes processed",
+	}, []string{"kinesisStream", "shard"})
+	p.processedRecords = prom.NewCounterVec(prom.CounterOpts{
+		Name: p.namespace + `_processed_records`,
+		Help: "Number of records processed",
+	}, []string{"kinesisStream", "shard"})
+	p.behindLatestMillis = prom.NewGaugeVec(prom.GaugeOpts{
+		Name: p.namespace + `_behind_latest_millis`,
+		Help: "The amount of milliseconds processing is behind",
+	}, []string{"kinesisStream", "shard"})
+	p.leasesHeld = prom.NewGaugeVec(prom.GaugeOpts{
+		Name: p.namespace + `_leases_held`,
+		Help: "The number of leases held by the worker",
+	}, []string{"kinesisStream", "shard", "workerID"})
+	p.leaseRenewals = prom.NewCounterVec(prom.CounterOpts{
+		Name: p.namespace + `_lease_renewals`,
+		Help: "The number of successful lease renewals",
+	}, []string{"kinesisStream", "shard", "workerID"})
+	p.getRecordsTime = prom.NewHistogramVec(prom.HistogramOpts{
+		Name: p.namespace + `_get_records_duration_milliseconds`,
+		Help: "The time taken to fetch records and process them",
+	}, []string{"kinesisStream", "shard"})
+	p.processRecordsTime = prom.NewHistogramVec(prom.HistogramOpts{
+		Name: p.namespace + `_process_records_duration_milliseconds`,
+		Help: "The time taken to process records",
+	}, []string{"kinesisStream", "shard"})
+
+	metrics := []prom.Collector{
+		p.processedBytes,
+		p.processedRecords,
+		p.behindLatestMillis,
+		p.leasesHeld,
+		p.leaseRenewals,
+		p.getRecordsTime,
+		p.processRecordsTime,
+	}
+	for _, metric := range metrics {
+		err := prom.Register(metric)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (p *MonitoringService) Start() error {
+	http.Handle("/metrics", promhttp.Handler())
+	go func() {
+		p.logger.Infof("Starting Prometheus listener on %s", p.listenAddress)
+		err := http.ListenAndServe(p.listenAddress, nil)
+		if err != nil {
+			p.logger.Errorf("Error starting Prometheus metrics endpoint. %+v", err)
+		}
+		p.logger.Infof("Stopped metrics server")
+	}()
+
+	return nil
+}
+
+func (p *MonitoringService) Shutdown() {}
+
+func (p *MonitoringService) IncrRecordsProcessed(shard string, count int) {
+	p.processedRecords.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Add(float64(count))
+}
+
+func (p *MonitoringService) IncrBytesProcessed(shard string, count int64) {
+	p.processedBytes.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Add(float64(count))
+}
+
+func (p *MonitoringService) MillisBehindLatest(shard string, millSeconds float64) {
+	p.behindLatestMillis.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Set(millSeconds)
+}
+
+func (p *MonitoringService) LeaseGained(shard string) {
+	p.leasesHeld.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName, "workerID": p.workerID}).Inc()
+}
+
+func (p *MonitoringService) LeaseLost(shard string) {
+	p.leasesHeld.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName, "workerID": p.workerID}).Dec()
+}
+
+func (p *MonitoringService) LeaseRenewed(shard string) {
+	p.leaseRenewals.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName, "workerID": p.workerID}).Inc()
+}
+
+func (p *MonitoringService) RecordGetRecordsTime(shard string, time float64) {
+	p.getRecordsTime.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Observe(time)
+}
+
+func (p *MonitoringService) RecordProcessRecordsTime(shard string, time float64) {
+	p.processRecordsTime.With(prom.Labels{"shard": shard, "kinesisStream": p.streamName}).Observe(time)
+}
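After the move, construction reads without repeating "Prometheus", which is what the rename was after; metric names end up prefixed with the appName later passed to Init by the worker. A short sketch:

import (
    "github.com/vmware/vmware-go-kcl/clientlibrary/metrics/prometheus"
    "github.com/vmware/vmware-go-kcl/logger"
)

func newPromMetrics() *prometheus.MonitoringService {
    // The worker calls Init and Start; Start serves /metrics on the listen address.
    return prometheus.NewMonitoringService(":8080", "us-west-2", logger.GetDefaultLogger())
}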
@@ -58,34 +58,32 @@ type Worker struct {
 	kclConfig    *config.KinesisClientLibConfiguration
 	kc           kinesisiface.KinesisAPI
 	checkpointer chk.Checkpointer
+	mService     metrics.MonitoringService

 	stop      *chan struct{}
 	waitGroup *sync.WaitGroup
 	done      bool

 	shardStatus map[string]*par.ShardStatus
-
-	metricsConfig *metrics.MonitoringConfiguration
-	mService      metrics.MonitoringService
 }

 // NewWorker constructs a Worker instance for processing Kinesis stream data.
-func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisClientLibConfiguration, metricsConfig *metrics.MonitoringConfiguration) *Worker {
-	w := &Worker{
+func NewWorker(factory kcl.IRecordProcessorFactory, kclConfig *config.KinesisClientLibConfiguration) *Worker {
+	mService := kclConfig.MonitoringService
+	if mService == nil {
+		// Replace nil with a no-op monitoring service (does not emit any metrics).
+		mService = metrics.NoopMonitoringService{}
+	}
+
+	return &Worker{
 		streamName:       kclConfig.StreamName,
 		regionName:       kclConfig.RegionName,
 		workerID:         kclConfig.WorkerID,
 		processorFactory: factory,
 		kclConfig:        kclConfig,
-		metricsConfig:    metricsConfig,
+		mService:         mService,
 		done:             false,
 	}
-
-	if w.metricsConfig == nil {
-		// "" means noop monitor service. i.e. not emitting any metrics.
-		w.metricsConfig = &metrics.MonitoringConfiguration{MonitoringService: ""}
-	}
-	return w
 }

 // WithKinesis is used to provide Kinesis service for either custom implementation or unit testing.

@@ -186,11 +184,10 @@ func (w *Worker) initialize() error {
 		log.Infof("Use custom checkpointer implementation.")
 	}

-	err := w.metricsConfig.Init(w.kclConfig.ApplicationName, w.streamName, w.workerID)
+	err := w.mService.Init(w.kclConfig.ApplicationName, w.streamName, w.workerID)
 	if err != nil {
 		log.Errorf("Failed to start monitoring service: %+v", err)
 	}
-	w.mService = w.metricsConfig.GetMonitoringService()

 	log.Infof("Initializing Checkpointer")
 	if err := w.checkpointer.Init(); err != nil {
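The nil fallback means metrics stay strictly opt-in. A sketch of the end-to-end wiring after this change (the clientlibrary/interfaces import path for the factory type is an assumption; factory construction is application code):

import (
    cfg "github.com/vmware/vmware-go-kcl/clientlibrary/config"
    kcl "github.com/vmware/vmware-go-kcl/clientlibrary/interfaces"
    "github.com/vmware/vmware-go-kcl/clientlibrary/metrics"
    wk "github.com/vmware/vmware-go-kcl/clientlibrary/worker"
)

func runWorker(factory kcl.IRecordProcessorFactory, m metrics.MonitoringService) error {
    kclConfig := cfg.NewKinesisClientLibConfig("appName", "streamName", "us-west-2", "workerID").
        WithMonitoringService(m) // nil is fine: NewWorker substitutes NoopMonitoringService

    return wk.NewWorker(factory, kclConfig).Start()
}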
@@ -44,10 +44,7 @@ func TestWorkerInjectCheckpointer(t *testing.T) {
 		WithMaxRecords(10).
 		WithMaxLeasesForWorker(1).
 		WithShardSyncIntervalMillis(5000).
-		WithFailoverTimeMillis(300000).
-		WithMetricsBufferTimeMillis(10000).
-		WithMetricsMaxQueueSize(20)
-
+		WithFailoverTimeMillis(300000)
 	log.SetOutput(os.Stdout)
 	log.SetLevel(log.DebugLevel)

@@ -55,13 +52,13 @@ func TestWorkerInjectCheckpointer(t *testing.T) {
 	assert.Equal(t, streamName, kclConfig.StreamName)

 	// configure cloudwatch as metrics system
-	metricsConfig := getMetricsConfig(kclConfig, metricsSystem)
+	kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem))

 	// custom checkpointer or a mock checkpointer.
 	checkpointer := chk.NewDynamoCheckpoint(kclConfig)

 	// Inject a custom checkpointer into the worker.
-	worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig).
+	worker := wk.NewWorker(recordProcessorFactory(t), kclConfig).
 		WithCheckpointer(checkpointer)

 	err := worker.Start()

@@ -101,9 +98,7 @@ func TestWorkerInjectKinesis(t *testing.T) {
 		WithMaxRecords(10).
 		WithMaxLeasesForWorker(1).
 		WithShardSyncIntervalMillis(5000).
-		WithFailoverTimeMillis(300000).
-		WithMetricsBufferTimeMillis(10000).
-		WithMetricsMaxQueueSize(20)
+		WithFailoverTimeMillis(300000)

 	log.SetOutput(os.Stdout)
 	log.SetLevel(log.DebugLevel)

@@ -112,7 +107,7 @@ func TestWorkerInjectKinesis(t *testing.T) {
 	assert.Equal(t, streamName, kclConfig.StreamName)

 	// configure cloudwatch as metrics system
-	metricsConfig := getMetricsConfig(kclConfig, metricsSystem)
+	kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem))

 	// create custom Kinesis
 	s, err := session.NewSession(&aws.Config{

@@ -122,7 +117,7 @@ func TestWorkerInjectKinesis(t *testing.T) {
 	kc := kinesis.New(s)

 	// Inject a custom checkpointer into the worker.
-	worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig).
+	worker := wk.NewWorker(recordProcessorFactory(t), kclConfig).
 		WithKinesis(kc)

 	err = worker.Start()

@@ -148,9 +143,7 @@ func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) {
 		WithMaxRecords(10).
 		WithMaxLeasesForWorker(1).
 		WithShardSyncIntervalMillis(5000).
-		WithFailoverTimeMillis(300000).
-		WithMetricsBufferTimeMillis(10000).
-		WithMetricsMaxQueueSize(20)
+		WithFailoverTimeMillis(300000)

 	log.SetOutput(os.Stdout)
 	log.SetLevel(log.DebugLevel)

@@ -159,7 +152,7 @@ func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) {
 	assert.Equal(t, streamName, kclConfig.StreamName)

 	// configure cloudwatch as metrics system
-	metricsConfig := getMetricsConfig(kclConfig, metricsSystem)
+	kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem))

 	// create custom Kinesis
 	s, err := session.NewSession(&aws.Config{

@@ -172,7 +165,7 @@ func TestWorkerInjectKinesisAndCheckpointer(t *testing.T) {
 	checkpointer := chk.NewDynamoCheckpoint(kclConfig)

 	// Inject both custom checkpointer and kinesis into the worker.
-	worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig).
+	worker := wk.NewWorker(recordProcessorFactory(t), kclConfig).
 		WithKinesis(kc).
 		WithCheckpointer(checkpointer)
@@ -75,8 +75,6 @@ func TestWorker(t *testing.T) {
 		WithMaxLeasesForWorker(1).
 		WithShardSyncIntervalMillis(5000).
 		WithFailoverTimeMillis(300000).
-		WithMetricsBufferTimeMillis(10000).
-		WithMetricsMaxQueueSize(20).
 		WithLogger(log)

 	runTest(kclConfig, false, t)

@@ -99,8 +97,6 @@ func TestWorkerWithTimestamp(t *testing.T) {
 		WithMaxLeasesForWorker(1).
 		WithShardSyncIntervalMillis(5000).
 		WithFailoverTimeMillis(300000).
-		WithMetricsBufferTimeMillis(10000).
-		WithMetricsMaxQueueSize(20).
 		WithLogger(log)

 	runTest(kclConfig, false, t)

@@ -131,8 +127,6 @@ func TestWorkerWithSigInt(t *testing.T) {
 		WithMaxLeasesForWorker(1).
 		WithShardSyncIntervalMillis(5000).
 		WithFailoverTimeMillis(300000).
-		WithMetricsBufferTimeMillis(10000).
-		WithMetricsMaxQueueSize(20).
 		WithLogger(log)

 	runTest(kclConfig, true, t)

@@ -148,9 +142,7 @@ func TestWorkerStatic(t *testing.T) {
 		WithMaxRecords(10).
 		WithMaxLeasesForWorker(1).
 		WithShardSyncIntervalMillis(5000).
-		WithFailoverTimeMillis(300000).
-		WithMetricsBufferTimeMillis(10000).
-		WithMetricsMaxQueueSize(20)
+		WithFailoverTimeMillis(300000)

 	runTest(kclConfig, false, t)
 }

@@ -172,9 +164,7 @@ func TestWorkerAssumeRole(t *testing.T) {
 		WithMaxRecords(10).
 		WithMaxLeasesForWorker(1).
 		WithShardSyncIntervalMillis(5000).
-		WithFailoverTimeMillis(300000).
-		WithMetricsBufferTimeMillis(10000).
-		WithMetricsMaxQueueSize(20)
+		WithFailoverTimeMillis(300000)

 	runTest(kclConfig, false, t)
 }

@@ -184,9 +174,9 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t
 	assert.Equal(t, streamName, kclConfig.StreamName)

 	// configure cloudwatch as metrics system
-	metricsConfig := getMetricsConfig(kclConfig, metricsSystem)
+	kclConfig.WithMonitoringService(getMetricsConfig(kclConfig, metricsSystem))

-	worker := wk.NewWorker(recordProcessorFactory(t), kclConfig, metricsConfig)
+	worker := wk.NewWorker(recordProcessorFactory(t), kclConfig)

 	err := worker.Start()
 	assert.Nil(t, err)

@@ -223,7 +213,7 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t
 	// wait a few seconds before shutdown processing
 	time.Sleep(10 * time.Second)

-	if metricsConfig != nil && metricsConfig.MonitoringService == "prometheus" {
+	if metricsSystem == "prometheus" {
 		res, err := http.Get("http://localhost:8080/metrics")
 		if err != nil {
 			t.Fatalf("Error scraping Prometheus endpoint %s", err)

@@ -244,30 +234,17 @@ func runTest(kclConfig *cfg.KinesisClientLibConfiguration, triggersig bool, t *t
 }

 // configure different metrics system
-func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service string) *metrics.MonitoringConfiguration {
+func getMetricsConfig(kclConfig *cfg.KinesisClientLibConfiguration, service string) metrics.MonitoringService {

 	if service == "cloudwatch" {
-		return &metrics.MonitoringConfiguration{
-			MonitoringService: "cloudwatch",
-			Region:            regionName,
-			Logger:            kclConfig.Logger,
-			CloudWatch: metrics.CloudWatchMonitoringService{
-				Credentials: kclConfig.CloudWatchCredentials,
-				// Those value should come from kclConfig
-				MetricsBufferTimeMillis: kclConfig.MetricsBufferTimeMillis,
-				MetricsMaxQueueSize:     kclConfig.MetricsMaxQueueSize,
-			},
-		}
+		return metrics.NewDetailedCloudWatchMonitoringService(kclConfig.RegionName,
+			kclConfig.KinesisCredentials,
+			kclConfig.Logger,
+			metrics.DEFAULT_CLOUDWATCH_METRICS_BUFFER_DURATION)
 	}

 	if service == "prometheus" {
-		return &metrics.MonitoringConfiguration{
-			MonitoringService: "prometheus",
-			Region:            regionName,
-			Logger:            kclConfig.Logger,
-			Prometheus: metrics.PrometheusMonitoringService{
-				ListenAddress: ":8080",
-			},
-		}
+		return metrics.NewPrometheusMonitoringService(":8080", regionName, kclConfig.Logger)
 	}

 	return nil