improve gofmt
This commit is contained in:
parent
6f0fbfe4c7
commit
f9ced84cbd
7 changed files with 56 additions and 57 deletions
|
|
@ -70,7 +70,7 @@ func NewKinesisClientLibConfigWithCredentials(applicationName, streamName, regio
|
|||
}
|
||||
|
||||
// populate the KCL configuration with default values
|
||||
return &KinesisClientLibConfiguration {
|
||||
return &KinesisClientLibConfiguration{
|
||||
ApplicationName: applicationName,
|
||||
KinesisCredentials: kinesisCreds,
|
||||
DynamoDBCredentials: dynamodbCreds,
|
||||
|
|
@ -167,25 +167,24 @@ func (c *KinesisClientLibConfiguration) WithMaxLeasesForWorker(n int) *KinesisCl
|
|||
return c
|
||||
}
|
||||
|
||||
/* WithIdleTimeBetweenReadsInMillis
|
||||
Controls how long the KCL will sleep if no records are returned from Kinesis
|
||||
|
||||
<p>
|
||||
This value is only used when no records are returned; if records are returned, the {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ProcessTask} will
|
||||
immediately retrieve the next set of records after the call to
|
||||
{@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#processRecords(ProcessRecordsInput)}
|
||||
has returned. Setting this value to high may result in the KCL being unable to catch up. If you are changing this
|
||||
value it's recommended that you enable {@link #withCallProcessRecordsEvenForEmptyRecordList(boolean)}, and
|
||||
monitor how far behind the records retrieved are by inspecting
|
||||
{@link com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput#getMillisBehindLatest()}, and the
|
||||
<a href=
|
||||
"http://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html#kinesis-metrics-stream">CloudWatch
|
||||
Metric: GetRecords.MillisBehindLatest</a>
|
||||
</p>
|
||||
|
||||
@param IdleTimeBetweenReadsInMillis: how long to sleep between GetRecords calls when no records are returned.
|
||||
@return KinesisClientLibConfiguration
|
||||
*/
|
||||
// WithIdleTimeBetweenReadsInMillis
|
||||
// Controls how long the KCL will sleep if no records are returned from Kinesis
|
||||
//
|
||||
// <p>
|
||||
// This value is only used when no records are returned; if records are returned, the {@link com.amazonaws.services.kinesis.clientlibrary.lib.worker.ProcessTask} will
|
||||
// immediately retrieve the next set of records after the call to
|
||||
// {@link com.amazonaws.services.kinesis.clientlibrary.interfaces.v2.IRecordProcessor#processRecords(ProcessRecordsInput)}
|
||||
// has returned. Setting this value to high may result in the KCL being unable to catch up. If you are changing this
|
||||
// value it's recommended that you enable {@link #withCallProcessRecordsEvenForEmptyRecordList(boolean)}, and
|
||||
// monitor how far behind the records retrieved are by inspecting
|
||||
// {@link com.amazonaws.services.kinesis.clientlibrary.types.ProcessRecordsInput#getMillisBehindLatest()}, and the
|
||||
// <a href=
|
||||
// "http://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html#kinesis-metrics-stream">CloudWatch
|
||||
// Metric: GetRecords.MillisBehindLatest</a>
|
||||
// </p>
|
||||
//
|
||||
// @param IdleTimeBetweenReadsInMillis: how long to sleep between GetRecords calls when no records are returned.
|
||||
// @return KinesisClientLibConfiguration
|
||||
func (c *KinesisClientLibConfiguration) WithIdleTimeBetweenReadsInMillis(idleTimeBetweenReadsInMillis int) *KinesisClientLibConfiguration {
|
||||
checkIsValuePositive("IdleTimeBetweenReadsInMillis", idleTimeBetweenReadsInMillis)
|
||||
c.IdleTimeBetweenReadsInMillis = idleTimeBetweenReadsInMillis
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ func generateKinesisRecord(data []byte) types.Record {
|
|||
encryptionType := types.EncryptionTypeNone
|
||||
partitionKey := "1234"
|
||||
sequenceNumber := "21269319989900637946712965403778482371"
|
||||
return types.Record {
|
||||
return types.Record{
|
||||
ApproximateArrivalTimestamp: &currentTime,
|
||||
Data: data,
|
||||
EncryptionType: encryptionType,
|
||||
|
|
|
|||
|
|
@ -47,7 +47,7 @@ func NewKinesisClient(t *testing.T, regionName, endpoint string, creds *credenti
|
|||
t.Logf("Creating Kinesis client")
|
||||
|
||||
resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
|
||||
return aws.Endpoint {
|
||||
return aws.Endpoint{
|
||||
PartitionID: "aws",
|
||||
URL: endpoint,
|
||||
SigningRegion: regionName,
|
||||
|
|
@ -79,7 +79,7 @@ func NewKinesisClient(t *testing.T, regionName, endpoint string, creds *credenti
|
|||
// NewDynamoDBClient to create a Kinesis Client.
|
||||
func NewDynamoDBClient(t *testing.T, regionName, endpoint string, creds *credentials.StaticCredentialsProvider) *dynamodb.Client {
|
||||
resolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
|
||||
return aws.Endpoint {
|
||||
return aws.Endpoint{
|
||||
PartitionID: "aws",
|
||||
URL: endpoint,
|
||||
SigningRegion: regionName,
|
||||
|
|
@ -111,7 +111,7 @@ func continuouslyPublishSomeData(t *testing.T, kc *kinesis.Client) func() {
|
|||
var shards []types.Shard
|
||||
var nextToken *string
|
||||
for {
|
||||
out, err := kc.ListShards(context.TODO(), &kinesis.ListShardsInput {
|
||||
out, err := kc.ListShards(context.TODO(), &kinesis.ListShardsInput{
|
||||
StreamName: aws.String(streamName),
|
||||
NextToken: nextToken,
|
||||
})
|
||||
|
|
@ -185,7 +185,7 @@ func publishSomeData(t *testing.T, kc *kinesis.Client) {
|
|||
|
||||
// publishRecord to put a record into Kinesis stream using PutRecord API.
|
||||
func publishRecord(t *testing.T, kc *kinesis.Client, hashKey *string) {
|
||||
input := &kinesis.PutRecordInput {
|
||||
input := &kinesis.PutRecordInput{
|
||||
Data: []byte(specstr),
|
||||
StreamName: aws.String(streamName),
|
||||
PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)),
|
||||
|
|
@ -207,7 +207,7 @@ func publishRecords(t *testing.T, kc *kinesis.Client) {
|
|||
records := make([]types.PutRecordsRequestEntry, 5)
|
||||
|
||||
for i := 0; i < 5; i++ {
|
||||
record := types.PutRecordsRequestEntry {
|
||||
record := types.PutRecordsRequestEntry{
|
||||
Data: []byte(specstr),
|
||||
PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)),
|
||||
}
|
||||
|
|
@ -228,7 +228,7 @@ func publishRecords(t *testing.T, kc *kinesis.Client) {
|
|||
func publishAggregateRecord(t *testing.T, kc *kinesis.Client) {
|
||||
data := generateAggregateRecord(5, specstr)
|
||||
// Use random string as partition key to ensure even distribution across shards
|
||||
_, err := kc.PutRecord(context.TODO(), &kinesis.PutRecordInput {
|
||||
_, err := kc.PutRecord(context.TODO(), &kinesis.PutRecordInput{
|
||||
Data: data,
|
||||
StreamName: aws.String(streamName),
|
||||
PartitionKey: aws.String(utils.RandStringBytesMaskImpr(10)),
|
||||
|
|
|
|||
Loading…
Reference in a new issue