Bump github.com/aws/aws-sdk-go-v2/service/kinesis from 1.27.4 to 1.27.8
Bumps [github.com/aws/aws-sdk-go-v2/service/kinesis](https://github.com/aws/aws-sdk-go-v2) from 1.27.4 to 1.27.8.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.27.4...config/v1.27.8)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/service/kinesis
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Parent: fa5681358b
Commit: 10dc9d7031
43 changed files with 974 additions and 621 deletions
go.mod (2 changes)

@@ -11,7 +11,7 @@ require (
 	github.com/aws/aws-sdk-go-v2/credentials v1.17.16
 	github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.14
 	github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.0
-	github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4
+	github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.8
 	github.com/awslabs/kinesis-aggregation/go/v2 v2.0.0-20230808105340-e631fe742486
 	github.com/go-sql-driver/mysql v1.8.1
 	github.com/lib/pq v1.10.9
go.sum (4 changes)

@@ -42,8 +42,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.8/go.mod h
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9 h1:Wx0rlZoEJR7JwlSZcHnEa7CNjrSIyVxMFWGAaXy4fJY=
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9/go.mod h1:aVMHdE0aHO3v+f/iw01fmXV/5DbfQ3Bi9nN7nd9bE9Y=
 github.com/aws/aws-sdk-go-v2/service/kinesis v1.6.0/go.mod h1:9O7UG2pELnP0hq35+Gd7XDjOLBkg7tmgRQ0y14ZjoJI=
-github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4 h1:Oe8awBiS/iitcsRJB5+DHa3iCxoA0KwJJf0JNrYMINY=
-github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4/go.mod h1:RCZCSFbieSgNG1RKegO26opXV4EXyef/vNBVJsUyHuw=
+github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.8 h1:U1X1JiulWfr3lyIpdx0YCVANbF2UoMVhfv3DiDKBKwc=
+github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.8/go.mod h1:YxRRhvHMl4YR2OZR3369QQUc2iLqTc3KUCv9ayD8758=
 github.com/aws/aws-sdk-go-v2/service/sso v1.20.9 h1:aD7AGQhvPuAxlSUfo0CWU7s6FpkbyykMhGYMvlqTjVs=
 github.com/aws/aws-sdk-go-v2/service/sso v1.20.9/go.mod h1:c1qtZUWtygI6ZdvKppzCSXsDOq5I4luJPZ0Ud3juFCA=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.3 h1:Pav5q3cA260Zqez42T9UhIlsd9QeypszRPwC9LdSSsQ=
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/CHANGELOG.md (16 changes, generated, vendored)

@@ -1,3 +1,19 @@
+# v1.27.8 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.27.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.5 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
 # v1.27.4 (2024-03-29)
 
 * **Dependency Update**: Updated to the latest SDK module versions
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_AddTagsToStream.go (16 changes, generated, vendored)

@@ -12,12 +12,16 @@ import (
 )
 
 // Adds or updates tags for the specified Kinesis data stream. You can assign up
-// to 50 tags to a data stream. When invoking this API, you must use either the
-// StreamARN or the StreamName parameter, or both. It is recommended that you use
-// the StreamARN input parameter when you invoke this API. If tags have already
-// been assigned to the stream, AddTagsToStream overwrites any existing tags that
-// correspond to the specified tag keys. AddTagsToStream has a limit of five
-// transactions per second per account.
+// to 50 tags to a data stream.
+//
+// When invoking this API, you must use either the StreamARN or the StreamName
+// parameter, or both. It is recommended that you use the StreamARN input
+// parameter when you invoke this API.
+//
+// If tags have already been assigned to the stream, AddTagsToStream overwrites
+// any existing tags that correspond to the specified tag keys.
+//
+// AddTagsToStreamhas a limit of five transactions per second per account.
 func (c *Client) AddTagsToStream(ctx context.Context, params *AddTagsToStreamInput, optFns ...func(*Options)) (*AddTagsToStreamOutput, error) {
 	if params == nil {
 		params = &AddTagsToStreamInput{}
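As an illustration of the operation documented above (not part of the commit): a minimal sketch of a caller using the bumped module. The Region, account ID, stream ARN, and tag values are placeholder assumptions.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// Tags for existing keys are overwritten, and the call is limited to
	// five transactions per second per account.
	_, err = client.AddTagsToStream(context.TODO(), &kinesis.AddTagsToStreamInput{
		StreamARN: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"),
		Tags:      map[string]string{"team": "data", "env": "prod"},
	})
	if err != nil {
		log.Fatal(err)
	}
}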
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_CreateStream.go (66 changes, generated, vendored)

@@ -14,37 +14,47 @@ import (
 // Creates a Kinesis data stream. A stream captures and transports data records
 // that are continuously emitted from different data sources or producers.
 // Scale-out within a stream is explicitly supported by means of shards, which are
-// uniquely identified groups of data records in a stream. You can create your data
-// stream using either on-demand or provisioned capacity mode. Data streams with an
-// on-demand mode require no capacity planning and automatically scale to handle
-// gigabytes of write and read throughput per minute. With the on-demand mode,
-// Kinesis Data Streams automatically manages the shards in order to provide the
-// necessary throughput. For the data streams with a provisioned mode, you must
-// specify the number of shards for the data stream. Each shard can support reads
-// up to five transactions per second, up to a maximum data read total of 2 MiB per
-// second. Each shard can support writes up to 1,000 records per second, up to a
-// maximum data write total of 1 MiB per second. If the amount of data input
-// increases or decreases, you can add or remove shards. The stream name identifies
-// the stream. The name is scoped to the Amazon Web Services account used by the
-// application. It is also scoped by Amazon Web Services Region. That is, two
-// streams in two different accounts can have the same name, and two streams in the
-// same account, but in two different Regions, can have the same name. CreateStream
-// is an asynchronous operation. Upon receiving a CreateStream request, Kinesis
-// Data Streams immediately returns and sets the stream status to CREATING . After
-// the stream is created, Kinesis Data Streams sets the stream status to ACTIVE .
-// You should perform read and write operations only on an ACTIVE stream. You
-// receive a LimitExceededException when making a CreateStream request when you
-// try to do one of the following:
+// uniquely identified groups of data records in a stream.
+//
+// You can create your data stream using either on-demand or provisioned capacity
+// mode. Data streams with an on-demand mode require no capacity planning and
+// automatically scale to handle gigabytes of write and read throughput per minute.
+// With the on-demand mode, Kinesis Data Streams automatically manages the shards
+// in order to provide the necessary throughput. For the data streams with a
+// provisioned mode, you must specify the number of shards for the data stream.
+// Each shard can support reads up to five transactions per second, up to a maximum
+// data read total of 2 MiB per second. Each shard can support writes up to 1,000
+// records per second, up to a maximum data write total of 1 MiB per second. If the
+// amount of data input increases or decreases, you can add or remove shards.
+//
+// The stream name identifies the stream. The name is scoped to the Amazon Web
+// Services account used by the application. It is also scoped by Amazon Web
+// Services Region. That is, two streams in two different accounts can have the
+// same name, and two streams in the same account, but in two different Regions,
+// can have the same name.
+//
+// CreateStream is an asynchronous operation. Upon receiving a CreateStream
+// request, Kinesis Data Streams immediately returns and sets the stream status to
+// CREATING . After the stream is created, Kinesis Data Streams sets the stream
+// status to ACTIVE . You should perform read and write operations only on an
+// ACTIVE stream.
+//
+// You receive a LimitExceededException when making a CreateStream request when
+// you try to do one of the following:
 //
 //   - Have more than five streams in the CREATING state at any point in time.
+//
 //   - Create more shards than are authorized for your account.
 //
-// For the default shard limit for an Amazon Web Services account, see Amazon
-// Kinesis Data Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
-// in the Amazon Kinesis Data Streams Developer Guide. To increase this limit,
-// contact Amazon Web Services Support (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html)
-// . You can use DescribeStreamSummary to check the stream status, which is
-// returned in StreamStatus . CreateStream has a limit of five transactions per
-// second per account.
+// For the default shard limit for an Amazon Web Services account, see [Amazon Kinesis Data Streams Limits] in the
+// Amazon Kinesis Data Streams Developer Guide. To increase this limit, [contact Amazon Web Services Support].
+//
+// You can use DescribeStreamSummary to check the stream status, which is returned in StreamStatus .
+//
+// CreateStreamhas a limit of five transactions per second per account.
+//
+// [contact Amazon Web Services Support]: https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html
+// [Amazon Kinesis Data Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
 func (c *Client) CreateStream(ctx context.Context, params *CreateStreamInput, optFns ...func(*Options)) (*CreateStreamOutput, error) {
 	if params == nil {
 		params = &CreateStreamInput{}
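For orientation (not from the commit): a minimal sketch of the create-then-poll flow the comment above describes, assuming an on-demand stream. The stream name and polling interval are placeholders; DescribeStreamSummary is used to watch StreamStatus move from CREATING to ACTIVE.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// On-demand mode needs no shard count; provisioned mode would set ShardCount.
	_, err = client.CreateStream(context.TODO(), &kinesis.CreateStreamInput{
		StreamName: aws.String("example-stream"),
		StreamModeDetails: &types.StreamModeDetails{
			StreamMode: types.StreamModeOnDemand,
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// CreateStream is asynchronous: poll until the stream leaves CREATING.
	for {
		out, err := client.DescribeStreamSummary(context.TODO(), &kinesis.DescribeStreamSummaryInput{
			StreamName: aws.String("example-stream"),
		})
		if err != nil {
			log.Fatal(err)
		}
		if out.StreamDescriptionSummary.StreamStatus == types.StreamStatusActive {
			break
		}
		time.Sleep(5 * time.Second)
	}
}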
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DecreaseStreamRetentionPeriod.go (generated, vendored)

@@ -13,12 +13,15 @@ import (
 
 // Decreases the Kinesis data stream's retention period, which is the length of
 // time data records are accessible after they are added to the stream. The minimum
-// value of a stream's retention period is 24 hours. When invoking this API, you
-// must use either the StreamARN or the StreamName parameter, or both. It is
-// recommended that you use the StreamARN input parameter when you invoke this
-// API. This operation may result in lost data. For example, if the stream's
-// retention period is 48 hours and is decreased to 24 hours, any data already in
-// the stream that is older than 24 hours is inaccessible.
+// value of a stream's retention period is 24 hours.
+//
+// When invoking this API, you must use either the StreamARN or the StreamName
+// parameter, or both. It is recommended that you use the StreamARN input
+// parameter when you invoke this API.
+//
+// This operation may result in lost data. For example, if the stream's retention
+// period is 48 hours and is decreased to 24 hours, any data already in the stream
+// that is older than 24 hours is inaccessible.
 func (c *Client) DecreaseStreamRetentionPeriod(ctx context.Context, params *DecreaseStreamRetentionPeriodInput, optFns ...func(*Options)) (*DecreaseStreamRetentionPeriodOutput, error) {
 	if params == nil {
 		params = &DecreaseStreamRetentionPeriodInput{}
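A short sketch (not from the commit) of the retention change described above; the stream ARN is a placeholder and the 24-hour value mirrors the documented minimum.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// Dropping retention from, say, 48h to 24h makes records older than 24h
	// inaccessible, so decrease only when that data is no longer needed.
	_, err = client.DecreaseStreamRetentionPeriod(context.TODO(), &kinesis.DecreaseStreamRetentionPeriodInput{
		StreamARN:            aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"),
		RetentionPeriodHours: aws.Int32(24),
	})
	if err != nil {
		log.Fatal(err)
	}
}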
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeleteResourcePolicy.go (2 changes, generated, vendored)

@@ -13,7 +13,9 @@ import (
 
 // Delete a policy for the specified data stream or consumer. Request patterns can
 // be one of the following:
+//
 //   - Data stream pattern: arn:aws.*:kinesis:.*:\d{12}:.*stream/\S+
+//
 //   - Consumer pattern:
 //     ^(arn):aws.*:kinesis:.*:\d{12}:.*stream\/[a-zA-Z0-9_.-]+\/consumer\/[a-zA-Z0-9_.-]+:[0-9]+
 func (c *Client) DeleteResourcePolicy(ctx context.Context, params *DeleteResourcePolicyInput, optFns ...func(*Options)) (*DeleteResourcePolicyOutput, error) {
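Illustrative placeholder ARNs (not from the commit) that match the two request patterns quoted above; the account ID, Region, names, and consumer-creation timestamp are made up.

package main

const (
	// Matches the data stream pattern: arn:aws.*:kinesis:.*:\d{12}:.*stream/\S+
	streamARN = "arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"

	// Matches the consumer pattern (the trailing number is the consumer
	// creation timestamp).
	consumerARN = "arn:aws:kinesis:us-east-1:123456789012:stream/example-stream/consumer/example-consumer:1616000000"
)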
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeleteStream.go (33 changes, generated, vendored)

@@ -14,18 +14,27 @@ import (
 // Deletes a Kinesis data stream and all its shards and data. You must shut down
 // any applications that are operating on the stream before you delete the stream.
 // If an application attempts to operate on a deleted stream, it receives the
-// exception ResourceNotFoundException . When invoking this API, you must use
-// either the StreamARN or the StreamName parameter, or both. It is recommended
-// that you use the StreamARN input parameter when you invoke this API. If the
-// stream is in the ACTIVE state, you can delete it. After a DeleteStream request,
-// the specified stream is in the DELETING state until Kinesis Data Streams
-// completes the deletion. Note: Kinesis Data Streams might continue to accept data
-// read and write operations, such as PutRecord , PutRecords , and GetRecords , on
-// a stream in the DELETING state until the stream deletion is complete. When you
-// delete a stream, any shards in that stream are also deleted, and any tags are
-// dissociated from the stream. You can use the DescribeStreamSummary operation to
-// check the state of the stream, which is returned in StreamStatus . DeleteStream
-// has a limit of five transactions per second per account.
+// exception ResourceNotFoundException .
+//
+// When invoking this API, you must use either the StreamARN or the StreamName
+// parameter, or both. It is recommended that you use the StreamARN input
+// parameter when you invoke this API.
+//
+// If the stream is in the ACTIVE state, you can delete it. After a DeleteStream
+// request, the specified stream is in the DELETING state until Kinesis Data
+// Streams completes the deletion.
+//
+// Note: Kinesis Data Streams might continue to accept data read and write
+// operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING state until the
+// stream deletion is complete.
+//
+// When you delete a stream, any shards in that stream are also deleted, and any
+// tags are dissociated from the stream.
+//
+// You can use the DescribeStreamSummary operation to check the state of the stream, which is returned
+// in StreamStatus .
+//
+// DeleteStreamhas a limit of five transactions per second per account.
 func (c *Client) DeleteStream(ctx context.Context, params *DeleteStreamInput, optFns ...func(*Options)) (*DeleteStreamOutput, error) {
 	if params == nil {
 		params = &DeleteStreamInput{}
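A minimal deletion sketch (not from the commit). The ARN is a placeholder, and setting EnforceConsumerDeletion is an assumption about the caller's intent; without it, deleting a stream that still has registered consumers fails.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	_, err = client.DeleteStream(context.TODO(), &kinesis.DeleteStreamInput{
		StreamARN:               aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"),
		EnforceConsumerDeletion: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The stream then sits in DELETING until Kinesis Data Streams finishes;
	// DescribeStreamSummary reports the transition in StreamStatus.
}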
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DeregisterStreamConsumer.go (15 changes, generated, vendored)

@@ -15,10 +15,11 @@ import (
 // ARN of the data stream and the name you gave the consumer when you registered
 // it. You may also provide all three parameters, as long as they don't conflict
 // with each other. If you don't know the name or ARN of the consumer that you want
-// to deregister, you can use the ListStreamConsumers operation to get a list of
-// the descriptions of all the consumers that are currently registered with a given
-// data stream. The description of a consumer contains its name and ARN. This
-// operation has a limit of five transactions per second per stream.
+// to deregister, you can use the ListStreamConsumersoperation to get a list of the descriptions of
+// all the consumers that are currently registered with a given data stream. The
+// description of a consumer contains its name and ARN.
+//
+// This operation has a limit of five transactions per second per stream.
 func (c *Client) DeregisterStreamConsumer(ctx context.Context, params *DeregisterStreamConsumerInput, optFns ...func(*Options)) (*DeregisterStreamConsumerOutput, error) {
 	if params == nil {
 		params = &DeregisterStreamConsumerInput{}
@@ -47,9 +48,9 @@ type DeregisterStreamConsumerInput struct {
 	ConsumerName *string
 
 	// The ARN of the Kinesis data stream that the consumer is registered with. For
-	// more information, see Amazon Resource Names (ARNs) and Amazon Web Services
-	// Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams)
-	// .
+	// more information, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces].
+	//
+	// [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams
 	StreamARN *string
 
 	noSmithyDocumentSerde
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeLimits.go (9 changes, generated, vendored)

@@ -10,9 +10,12 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )
 
-// Describes the shard limits and usage for the account. If you update your
-// account limits, the old limits might be returned for a few minutes. This
-// operation has a limit of one transaction per second per account.
+// Describes the shard limits and usage for the account.
+//
+// If you update your account limits, the old limits might be returned for a few
+// minutes.
+//
+// This operation has a limit of one transaction per second per account.
 func (c *Client) DescribeLimits(ctx context.Context, params *DescribeLimitsInput, optFns ...func(*Options)) (*DescribeLimitsOutput, error) {
 	if params == nil {
 		params = &DescribeLimitsInput{}
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStream.go (87 changes, generated, vendored)

@@ -17,26 +17,35 @@ import (
 	"time"
 )
 
-// Describes the specified Kinesis data stream. This API has been revised. It's
-// highly recommended that you use the DescribeStreamSummary API to get a
-// summarized description of the specified Kinesis data stream and the ListShards
-// API to list the shards in a specified data stream and obtain information about
-// each shard. When invoking this API, you must use either the StreamARN or the
-// StreamName parameter, or both. It is recommended that you use the StreamARN
-// input parameter when you invoke this API. The information returned includes the
-// stream name, Amazon Resource Name (ARN), creation time, enhanced metric
-// configuration, and shard map. The shard map is an array of shard objects. For
-// each shard object, there is the hash key and sequence number ranges that the
-// shard spans, and the IDs of any earlier shards that played in a role in creating
-// the shard. Every record ingested in the stream is identified by a sequence
-// number, which is assigned when the record is put into the stream. You can limit
-// the number of shards returned by each call. For more information, see
-// Retrieving Shards from a Stream (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html)
-// in the Amazon Kinesis Data Streams Developer Guide. There are no guarantees
-// about the chronological order shards returned. To process shards in
-// chronological order, use the ID of the parent shard to track the lineage to the
-// oldest shard. This operation has a limit of 10 transactions per second per
-// account.
+// Describes the specified Kinesis data stream.
+//
+// This API has been revised. It's highly recommended that you use the DescribeStreamSummary API to get
+// a summarized description of the specified Kinesis data stream and the ListShardsAPI to
+// list the shards in a specified data stream and obtain information about each
+// shard.
+//
+// When invoking this API, you must use either the StreamARN or the StreamName
+// parameter, or both. It is recommended that you use the StreamARN input
+// parameter when you invoke this API.
+//
+// The information returned includes the stream name, Amazon Resource Name (ARN),
+// creation time, enhanced metric configuration, and shard map. The shard map is an
+// array of shard objects. For each shard object, there is the hash key and
+// sequence number ranges that the shard spans, and the IDs of any earlier shards
+// that played in a role in creating the shard. Every record ingested in the stream
+// is identified by a sequence number, which is assigned when the record is put
+// into the stream.
+//
+// You can limit the number of shards returned by each call. For more information,
+// see [Retrieving Shards from a Stream]in the Amazon Kinesis Data Streams Developer Guide.
+//
+// There are no guarantees about the chronological order shards returned. To
+// process shards in chronological order, use the ID of the parent shard to track
+// the lineage to the oldest shard.
+//
+// This operation has a limit of 10 transactions per second per account.
+//
+// [Retrieving Shards from a Stream]: https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html
 func (c *Client) DescribeStream(ctx context.Context, params *DescribeStreamInput, optFns ...func(*Options)) (*DescribeStreamOutput, error) {
 	if params == nil {
 		params = &DescribeStreamInput{}
@@ -55,11 +64,13 @@ func (c *Client) DescribeStream(ctx context.Context, params *DescribeStreamInput
 // Represents the input for DescribeStream .
 type DescribeStreamInput struct {
 
-	// The shard ID of the shard to start with. Specify this parameter to indicate
-	// that you want to describe the stream starting with the shard whose ID
-	// immediately follows ExclusiveStartShardId . If you don't specify this parameter,
-	// the default behavior for DescribeStream is to describe the stream starting with
-	// the first shard in the stream.
+	// The shard ID of the shard to start with.
+	//
+	// Specify this parameter to indicate that you want to describe the stream
+	// starting with the shard whose ID immediately follows ExclusiveStartShardId .
+	//
+	// If you don't specify this parameter, the default behavior for DescribeStream is
+	// to describe the stream starting with the first shard in the stream.
 	ExclusiveStartShardId *string
 
 	// The maximum number of shards to return in a single call. The default value is
@@ -212,12 +223,13 @@ type StreamExistsWaiterOptions struct {
 
 	// Retryable is function that can be used to override the service defined
 	// waiter-behavior based on operation output, or returned error. This function is
-	// used by the waiter to decide if a state is retryable or a terminal state. By
-	// default service-modeled logic will populate this option. This option can thus be
-	// used to define a custom waiter state with fall-back to service-modeled waiter
-	// state mutators.The function returns an error in case of a failure state. In case
-	// of retry state, this function returns a bool value of true and nil error, while
-	// in case of success it returns a bool value of false and nil error.
+	// used by the waiter to decide if a state is retryable or a terminal state.
+	//
+	// By default service-modeled logic will populate this option. This option can
+	// thus be used to define a custom waiter state with fall-back to service-modeled
+	// waiter state mutators.The function returns an error in case of a failure state.
+	// In case of retry state, this function returns a bool value of true and nil
+	// error, while in case of success it returns a bool value of false and nil error.
 	Retryable func(context.Context, *DescribeStreamInput, *DescribeStreamOutput, error) (bool, error)
 }
 
@@ -383,12 +395,13 @@ type StreamNotExistsWaiterOptions struct {
 
 	// Retryable is function that can be used to override the service defined
 	// waiter-behavior based on operation output, or returned error. This function is
-	// used by the waiter to decide if a state is retryable or a terminal state. By
-	// default service-modeled logic will populate this option. This option can thus be
-	// used to define a custom waiter state with fall-back to service-modeled waiter
-	// state mutators.The function returns an error in case of a failure state. In case
-	// of retry state, this function returns a bool value of true and nil error, while
-	// in case of success it returns a bool value of false and nil error.
+	// used by the waiter to decide if a state is retryable or a terminal state.
+	//
+	// By default service-modeled logic will populate this option. This option can
+	// thus be used to define a custom waiter state with fall-back to service-modeled
+	// waiter state mutators.The function returns an error in case of a failure state.
+	// In case of retry state, this function returns a bool value of true and nil
+	// error, while in case of success it returns a bool value of false and nil error.
 	Retryable func(context.Context, *DescribeStreamInput, *DescribeStreamOutput, error) (bool, error)
 }
 
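The waiter options reworded in the hunks above belong to the generated StreamExistsWaiter and StreamNotExistsWaiter. A minimal usage sketch (not from the commit; the stream name and timeout are placeholders):

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// The waiter polls DescribeStream until the stream exists or the
	// deadline passes; Retryable can be overridden via option functions.
	waiter := kinesis.NewStreamExistsWaiter(client)
	err = waiter.Wait(context.TODO(), &kinesis.DescribeStreamInput{
		StreamName: aws.String("example-stream"),
	}, 2*time.Minute)
	if err != nil {
		log.Fatal(err)
	}
}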
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStreamConsumer.go (20 changes, generated, vendored)

@@ -16,12 +16,14 @@ import (
 // consumer. Alternatively, you can provide the ARN of the data stream and the name
 // you gave the consumer when you registered it. You may also provide all three
 // parameters, as long as they don't conflict with each other. If you don't know
-// the name or ARN of the consumer that you want to describe, you can use the
-// ListStreamConsumers operation to get a list of the descriptions of all the
-// consumers that are currently registered with a given data stream. This operation
-// has a limit of 20 transactions per second per stream. When making a
-// cross-account call with DescribeStreamConsumer , make sure to provide the ARN of
-// the consumer.
+// the name or ARN of the consumer that you want to describe, you can use the ListStreamConsumers
+// operation to get a list of the descriptions of all the consumers that are
+// currently registered with a given data stream.
+//
+// This operation has a limit of 20 transactions per second per stream.
+//
+// When making a cross-account call with DescribeStreamConsumer , make sure to
+// provide the ARN of the consumer.
 func (c *Client) DescribeStreamConsumer(ctx context.Context, params *DescribeStreamConsumerInput, optFns ...func(*Options)) (*DescribeStreamConsumerOutput, error) {
 	if params == nil {
 		params = &DescribeStreamConsumerInput{}
@@ -46,9 +48,9 @@ type DescribeStreamConsumerInput struct {
 	ConsumerName *string
 
 	// The ARN of the Kinesis data stream that the consumer is registered with. For
-	// more information, see Amazon Resource Names (ARNs) and Amazon Web Services
-	// Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams)
-	// .
+	// more information, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces].
+	//
+	// [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams
 	StreamARN *string
 
 	noSmithyDocumentSerde
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DescribeStreamSummary.go (15 changes, generated, vendored)

@@ -13,11 +13,16 @@ import (
 )
 
 // Provides a summarized description of the specified Kinesis data stream without
-// the shard list. When invoking this API, you must use either the StreamARN or
-// the StreamName parameter, or both. It is recommended that you use the StreamARN
-// input parameter when you invoke this API. The information returned includes the
-// stream name, Amazon Resource Name (ARN), status, record retention period,
-// approximate creation time, monitoring, encryption details, and open shard count.
+// the shard list.
+//
+// When invoking this API, you must use either the StreamARN or the StreamName
+// parameter, or both. It is recommended that you use the StreamARN input
+// parameter when you invoke this API.
+//
+// The information returned includes the stream name, Amazon Resource Name (ARN),
+// status, record retention period, approximate creation time, monitoring,
+// encryption details, and open shard count.
+//
+// DescribeStreamSummaryhas a limit of 20 transactions per second per account.
 func (c *Client) DescribeStreamSummary(ctx context.Context, params *DescribeStreamSummaryInput, optFns ...func(*Options)) (*DescribeStreamSummaryOutput, error) {
 	if params == nil {
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_DisableEnhancedMonitoring.go (32 changes, generated, vendored)

@@ -12,9 +12,11 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )
 
-// Disables enhanced monitoring. When invoking this API, you must use either the
-// StreamARN or the StreamName parameter, or both. It is recommended that you use
-// the StreamARN input parameter when you invoke this API.
+// Disables enhanced monitoring.
+//
+// When invoking this API, you must use either the StreamARN or the StreamName
+// parameter, or both. It is recommended that you use the StreamARN input
+// parameter when you invoke this API.
 func (c *Client) DisableEnhancedMonitoring(ctx context.Context, params *DisableEnhancedMonitoringInput, optFns ...func(*Options)) (*DisableEnhancedMonitoringOutput, error) {
 	if params == nil {
 		params = &DisableEnhancedMonitoringInput{}
@@ -33,19 +35,30 @@ func (c *Client) DisableEnhancedMonitoring(ctx context.Context, params *DisableE
 // Represents the input for DisableEnhancedMonitoring.
 type DisableEnhancedMonitoringInput struct {
 
-	// List of shard-level metrics to disable. The following are the valid shard-level
-	// metrics. The value " ALL " disables every metric.
+	// List of shard-level metrics to disable.
+	//
+	// The following are the valid shard-level metrics. The value " ALL " disables
+	// every metric.
	//
 	//   - IncomingBytes
+	//
 	//   - IncomingRecords
+	//
 	//   - OutgoingBytes
+	//
 	//   - OutgoingRecords
+	//
 	//   - WriteProvisionedThroughputExceeded
+	//
 	//   - ReadProvisionedThroughputExceeded
+	//
 	//   - IteratorAgeMilliseconds
+	//
 	//   - ALL
-	// For more information, see Monitoring the Amazon Kinesis Data Streams Service
-	// with Amazon CloudWatch (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html)
-	// in the Amazon Kinesis Data Streams Developer Guide.
+	//
+	// For more information, see [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch] in the Amazon Kinesis Data Streams Developer Guide.
+	//
+	// [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch]: https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
+	//
 	// This member is required.
 	ShardLevelMetrics []types.MetricsName
 
@@ -64,8 +77,7 @@ func (in *DisableEnhancedMonitoringInput) bindEndpointParams(p *EndpointParamete
 	p.OperationType = ptr.String("control")
 }
 
-// Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring
-// .
+// Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring.
 type DisableEnhancedMonitoringOutput struct {
 
 	// Represents the current state of the metrics that are in the enhanced state
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_EnableEnhancedMonitoring.go (29 changes, generated, vendored)

@@ -12,8 +12,9 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )
 
-// Enables enhanced Kinesis data stream monitoring for shard-level metrics. When
-// invoking this API, you must use either the StreamARN or the StreamName
+// Enables enhanced Kinesis data stream monitoring for shard-level metrics.
+//
+// When invoking this API, you must use either the StreamARN or the StreamName
 // parameter, or both. It is recommended that you use the StreamARN input
 // parameter when you invoke this API.
 func (c *Client) EnableEnhancedMonitoring(ctx context.Context, params *EnableEnhancedMonitoringInput, optFns ...func(*Options)) (*EnableEnhancedMonitoringOutput, error) {
@@ -34,19 +35,30 @@ func (c *Client) EnableEnhancedMonitoring(ctx context.Context, params *EnableEnh
 // Represents the input for EnableEnhancedMonitoring.
 type EnableEnhancedMonitoringInput struct {

-	// List of shard-level metrics to enable. The following are the valid shard-level
-	// metrics. The value " ALL " enables every metric.
+	// List of shard-level metrics to enable.
+	//
+	// The following are the valid shard-level metrics. The value " ALL " enables every
+	// metric.
	//
 	//   - IncomingBytes
+	//
 	//   - IncomingRecords
+	//
 	//   - OutgoingBytes
+	//
 	//   - OutgoingRecords
+	//
 	//   - WriteProvisionedThroughputExceeded
+	//
 	//   - ReadProvisionedThroughputExceeded
+	//
 	//   - IteratorAgeMilliseconds
+	//
 	//   - ALL
-	// For more information, see Monitoring the Amazon Kinesis Data Streams Service
-	// with Amazon CloudWatch (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html)
-	// in the Amazon Kinesis Data Streams Developer Guide.
+	//
+	// For more information, see [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch] in the Amazon Kinesis Data Streams Developer Guide.
+	//
+	// [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch]: https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
+	//
 	// This member is required.
 	ShardLevelMetrics []types.MetricsName
 
@@ -65,8 +77,7 @@ func (in *EnableEnhancedMonitoringInput) bindEndpointParams(p *EndpointParameter
 	p.OperationType = ptr.String("control")
 }
 
-// Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring
-// .
+// Represents the output for EnableEnhancedMonitoring and DisableEnhancedMonitoring.
 type EnableEnhancedMonitoringOutput struct {
 
 	// Represents the current state of the metrics that are in the enhanced state
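A minimal sketch (not from the commit) that enables two of the shard-level metrics listed above; the stream ARN is a placeholder, and the MetricsName constants come from the SDK's types package.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// types.MetricsNameAll ("ALL") would enable every metric instead.
	out, err := client.EnableEnhancedMonitoring(context.TODO(), &kinesis.EnableEnhancedMonitoringInput{
		StreamARN: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"),
		ShardLevelMetrics: []types.MetricsName{
			types.MetricsNameIncomingBytes,
			types.MetricsNameIteratorAgeMilliseconds,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("desired enhanced metrics: %v", out.DesiredShardLevelMetrics)
}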
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetRecords.go (110 changes, generated, vendored)

@@ -14,57 +14,69 @@ import (
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )
 
-// Gets data records from a Kinesis data stream's shard. When invoking this API,
-// you must use either the StreamARN or the StreamName parameter, or both. It is
-// recommended that you use the StreamARN input parameter when you invoke this
-// API. Specify a shard iterator using the ShardIterator parameter. The shard
-// iterator specifies the position in the shard from which you want to start
-// reading data records sequentially. If there are no records available in the
-// portion of the shard that the iterator points to, GetRecords returns an empty
-// list. It might take multiple calls to get to a portion of the shard that
-// contains records. You can scale by provisioning multiple shards per stream while
-// considering service limits (for more information, see Amazon Kinesis Data
-// Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
-// in the Amazon Kinesis Data Streams Developer Guide). Your application should
-// have one thread per shard, each reading continuously from its stream. To read
-// from a stream continually, call GetRecords in a loop. Use GetShardIterator to
-// get the shard iterator to specify in the first GetRecords call. GetRecords
+// Gets data records from a Kinesis data stream's shard.
+//
+// When invoking this API, you must use either the StreamARN or the StreamName
+// parameter, or both. It is recommended that you use the StreamARN input
+// parameter when you invoke this API.
+//
+// Specify a shard iterator using the ShardIterator parameter. The shard iterator
+// specifies the position in the shard from which you want to start reading data
+// records sequentially. If there are no records available in the portion of the
+// shard that the iterator points to, GetRecordsreturns an empty list. It might take
+// multiple calls to get to a portion of the shard that contains records.
+//
+// You can scale by provisioning multiple shards per stream while considering
+// service limits (for more information, see [Amazon Kinesis Data Streams Limits]in the Amazon Kinesis Data Streams
+// Developer Guide). Your application should have one thread per shard, each
+// reading continuously from its stream. To read from a stream continually, call GetRecords
+// in a loop. Use GetShardIteratorto get the shard iterator to specify in the first GetRecords call. GetRecords
 // returns a new shard iterator in NextShardIterator . Specify the shard iterator
-// returned in NextShardIterator in subsequent calls to GetRecords . If the shard
-// has been closed, the shard iterator can't return more data and GetRecords
-// returns null in NextShardIterator . You can terminate the loop when the shard is
-// closed, or when the shard iterator reaches the record with the sequence number
-// or other attribute that marks it as the last record to process. Each data record
-// can be up to 1 MiB in size, and each shard can read up to 2 MiB per second. You
-// can ensure that your calls don't exceed the maximum supported size or throughput
-// by using the Limit parameter to specify the maximum number of records that
-// GetRecords can return. Consider your average record size when determining this
-// limit. The maximum number of records that can be returned per call is 10,000.
-// The size of the data returned by GetRecords varies depending on the utilization
-// of the shard. It is recommended that consumer applications retrieve records via
-// the GetRecords command using the 5 TPS limit to remain caught up. Retrieving
-// records less frequently can lead to consumer applications falling behind. The
-// maximum size of data that GetRecords can return is 10 MiB. If a call returns
-// this amount of data, subsequent calls made within the next 5 seconds throw
+// returned in NextShardIterator in subsequent calls to GetRecords. If the shard has been
+// closed, the shard iterator can't return more data and GetRecordsreturns null in
+// NextShardIterator . You can terminate the loop when the shard is closed, or when
+// the shard iterator reaches the record with the sequence number or other
+// attribute that marks it as the last record to process.
+//
+// Each data record can be up to 1 MiB in size, and each shard can read up to 2
+// MiB per second. You can ensure that your calls don't exceed the maximum
+// supported size or throughput by using the Limit parameter to specify the
+// maximum number of records that GetRecordscan return. Consider your average record size
+// when determining this limit. The maximum number of records that can be returned
+// per call is 10,000.
+//
+// The size of the data returned by GetRecords varies depending on the utilization of the
+// shard. It is recommended that consumer applications retrieve records via the
+// GetRecords command using the 5 TPS limit to remain caught up. Retrieving records
+// less frequently can lead to consumer applications falling behind. The maximum
+// size of data that GetRecordscan return is 10 MiB. If a call returns this amount of data,
+// subsequent calls made within the next 5 seconds throw
 // ProvisionedThroughputExceededException . If there is insufficient provisioned
 // throughput on the stream, subsequent calls made within the next 1 second throw
-// ProvisionedThroughputExceededException . GetRecords doesn't return any data
-// when it throws an exception. For this reason, we recommend that you wait 1
-// second between calls to GetRecords . However, it's possible that the application
-// will get exceptions for longer than 1 second. To detect whether the application
-// is falling behind in processing, you can use the MillisBehindLatest response
-// attribute. You can also monitor the stream using CloudWatch metrics and other
-// mechanisms (see Monitoring (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html)
-// in the Amazon Kinesis Data Streams Developer Guide). Each Amazon Kinesis record
-// includes a value, ApproximateArrivalTimestamp , that is set when a stream
-// successfully receives and stores a record. This is commonly referred to as a
-// server-side time stamp, whereas a client-side time stamp is set when a data
-// producer creates or sends the record to a stream (a data producer is any data
-// source putting data records into a stream, for example with PutRecords ). The
+// ProvisionedThroughputExceededException . GetRecords doesn't return any data when it
+// throws an exception. For this reason, we recommend that you wait 1 second
+// between calls to GetRecords. However, it's possible that the application will get
+// exceptions for longer than 1 second.
+//
+// To detect whether the application is falling behind in processing, you can use
+// the MillisBehindLatest response attribute. You can also monitor the stream
+// using CloudWatch metrics and other mechanisms (see [Monitoring]in the Amazon Kinesis Data
+// Streams Developer Guide).
+//
+// Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp , that
+// is set when a stream successfully receives and stores a record. This is commonly
+// referred to as a server-side time stamp, whereas a client-side time stamp is set
+// when a data producer creates or sends the record to a stream (a data producer is
+// any data source putting data records into a stream, for example with PutRecords). The
 // time stamp has millisecond precision. There are no guarantees about the time
 // stamp accuracy, or that the time stamp is always increasing. For example,
 // records in a shard or across a stream might have time stamps that are out of
-// order. This operation has a limit of five transactions per second per shard.
+// order.
+//
+// This operation has a limit of five transactions per second per shard.
+//
+// [Amazon Kinesis Data Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
+// [Monitoring]: https://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html
 func (c *Client) GetRecords(ctx context.Context, params *GetRecordsInput, optFns ...func(*Options)) (*GetRecordsOutput, error) {
 	if params == nil {
 		params = &GetRecordsInput{}
@@ -91,8 +103,8 @@ type GetRecordsInput struct {
 	ShardIterator *string
 
 	// The maximum number of records to return. Specify a value of up to 10,000. If
-	// you specify a value that is greater than 10,000, GetRecords throws
-	// InvalidArgumentException . The default value is 10,000.
+	// you specify a value that is greater than 10,000, GetRecordsthrows InvalidArgumentException
+	// . The default value is 10,000.
 	Limit *int32
 
 	// The ARN of the stream.
@@ -118,8 +130,8 @@ type GetRecordsOutput struct {
 	// response only when the end of the current shard is reached.
 	ChildShards []types.ChildShard
 
-	// The number of milliseconds the GetRecords response is from the tip of the
-	// stream, indicating how far behind current time the consumer is. A value of zero
+	// The number of milliseconds the GetRecords response is from the tip of the stream,
+	// indicating how far behind current time the consumer is. A value of zero
 	// indicates that record processing is caught up, and there are no new records to
 	// process at this moment.
 	MillisBehindLatest *int64
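The read loop that the doc comment above describes, sketched minimally (not from the commit): one TRIM_HORIZON iterator, then GetRecords fed its own NextShardIterator until the shard closes. The stream ARN, shard ID, record limit, and sleep interval are placeholder assumptions.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)
	streamARN := aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream")

	// First iterator: the oldest untrimmed record in the shard (TRIM_HORIZON).
	itOut, err := client.GetShardIterator(context.TODO(), &kinesis.GetShardIteratorInput{
		StreamARN:         streamARN,
		ShardId:           aws.String("shardId-000000000000"),
		ShardIteratorType: types.ShardIteratorTypeTrimHorizon,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read in a loop, feeding NextShardIterator back in. A nil iterator means
	// the shard is closed and fully consumed.
	iterator := itOut.ShardIterator
	for iterator != nil {
		out, err := client.GetRecords(context.TODO(), &kinesis.GetRecordsInput{
			ShardIterator: iterator,
			StreamARN:     streamARN,
			Limit:         aws.Int32(1000),
		})
		if err != nil {
			log.Fatal(err) // production code would back off and retry
		}
		for _, rec := range out.Records {
			log.Printf("seq %s: %d bytes", *rec.SequenceNumber, len(rec.Data))
		}
		iterator = out.NextShardIterator
		time.Sleep(time.Second) // stay under the 5 TPS per-shard limit
	}
}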
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetResourcePolicy.go (2 changes, generated, vendored)

@@ -13,7 +13,9 @@ import (
 
 // Returns a policy attached to the specified data stream or consumer. Request
 // patterns can be one of the following:
+//
 //   - Data stream pattern: arn:aws.*:kinesis:.*:\d{12}:.*stream/\S+
+//
 //   - Consumer pattern:
 //     ^(arn):aws.*:kinesis:.*:\d{12}:.*stream\/[a-zA-Z0-9_.-]+\/consumer\/[a-zA-Z0-9_.-]+:[0-9]+
 func (c *Client) GetResourcePolicy(ctx context.Context, params *GetResourcePolicyInput, optFns ...func(*Options)) (*GetResourcePolicyOutput, error) {
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_GetShardIterator.go (77 changes, generated, vendored)

@@ -14,37 +14,45 @@ import (
 )
 
 // Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after
-// it is returned to the requester. When invoking this API, you must use either the
-// StreamARN or the StreamName parameter, or both. It is recommended that you use
-// the StreamARN input parameter when you invoke this API. A shard iterator
-// specifies the shard position from which to start reading data records
-// sequentially. The position is specified using the sequence number of a data
-// record in a shard. A sequence number is the identifier associated with every
-// record ingested in the stream, and is assigned when a record is put into the
-// stream. Each stream has one or more shards. You must specify the shard iterator
-// type. For example, you can set the ShardIteratorType parameter to read exactly
-// from the position denoted by a specific sequence number by using the
-// AT_SEQUENCE_NUMBER shard iterator type. Alternatively, the parameter can read
-// right after the sequence number by using the AFTER_SEQUENCE_NUMBER shard
-// iterator type, using sequence numbers returned by earlier calls to PutRecord ,
-// PutRecords , GetRecords , or DescribeStream . In the request, you can specify
-// the shard iterator type AT_TIMESTAMP to read records from an arbitrary point in
-// time, TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record
-// in the shard in the system (the oldest data record in the shard), or LATEST so
-// that you always read the most recent data in the shard. When you read repeatedly
-// from a stream, use a GetShardIterator request to get the first shard iterator
-// for use in your first GetRecords request and for subsequent reads use the shard
-// iterator returned by the GetRecords request in NextShardIterator . A new shard
-// iterator is returned by every GetRecords request in NextShardIterator , which
-// you use in the ShardIterator parameter of the next GetRecords request. If a
-// GetShardIterator request is made too often, you receive a
+// it is returned to the requester.
+//
+// When invoking this API, you must use either the StreamARN or the StreamName
+// parameter, or both. It is recommended that you use the StreamARN input
+// parameter when you invoke this API.
+//
+// A shard iterator specifies the shard position from which to start reading data
+// records sequentially. The position is specified using the sequence number of a
+// data record in a shard. A sequence number is the identifier associated with
+// every record ingested in the stream, and is assigned when a record is put into
+// the stream. Each stream has one or more shards.
+//
+// You must specify the shard iterator type. For example, you can set the
+// ShardIteratorType parameter to read exactly from the position denoted by a
+// specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type.
+// Alternatively, the parameter can read right after the sequence number by using
+// the AFTER_SEQUENCE_NUMBER shard iterator type, using sequence numbers returned
+// by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify the shard
+// iterator type AT_TIMESTAMP to read records from an arbitrary point in time,
+// TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in
+// the shard in the system (the oldest data record in the shard), or LATEST so
+// that you always read the most recent data in the shard.
+//
+// When you read repeatedly from a stream, use a GetShardIterator request to get the first shard
+// iterator for use in your first GetRecordsrequest and for subsequent reads use the shard
+// iterator returned by the GetRecordsrequest in NextShardIterator . A new shard iterator is
+// returned by every GetRecordsrequest in NextShardIterator , which you use in the
+// ShardIterator parameter of the next GetRecords request.
+//
+// If a GetShardIterator request is made too often, you receive a
 // ProvisionedThroughputExceededException . For more information about throughput
-// limits, see GetRecords , and Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
-// in the Amazon Kinesis Data Streams Developer Guide. If the shard is closed,
-// GetShardIterator returns a valid iterator for the last sequence number of the
-// shard. A shard can be closed as a result of using SplitShard or MergeShards .
-// GetShardIterator has a limit of five transactions per second per account per
-// open shard.
+// limits, see GetRecords, and [Streams Limits] in the Amazon Kinesis Data Streams Developer Guide.
+//
+// If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number
+// of the shard. A shard can be closed as a result of using SplitShardor MergeShards.
+//
+// GetShardIteratorhas a limit of five transactions per second per account per open shard.
+//
+// [Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
 func (c *Client) GetShardIterator(ctx context.Context, params *GetShardIteratorInput, optFns ...func(*Options)) (*GetShardIteratorOutput, error) {
 	if params == nil {
 		params = &GetShardIteratorInput{}
@@ -69,15 +77,22 @@ type GetShardIteratorInput struct {
 	ShardId *string
 
 	// Determines how the shard iterator is used to start reading data records from
-	// the shard. The following are the valid Amazon Kinesis shard iterator types:
+	// the shard.
+	//
+	// The following are the valid Amazon Kinesis shard iterator types:
+	//
 	//   - AT_SEQUENCE_NUMBER - Start reading from the position denoted by a specific
 	//   sequence number, provided in the value StartingSequenceNumber .
+	//
 	//   - AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a
 	//   specific sequence number, provided in the value StartingSequenceNumber .
+	//
 	//   - AT_TIMESTAMP - Start reading from the position denoted by a specific time
 	//   stamp, provided in the value Timestamp .
+	//
 	//   - TRIM_HORIZON - Start reading at the last untrimmed record in the shard in
 	//   the system, which is the oldest data record in the shard.
+	//
 	//   - LATEST - Start reading just after the most recent record in the shard, so
 	//   that you always read the most recent data in the shard.
 	//
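A short sketch (not from the commit) choosing the AT_TIMESTAMP iterator type from the list above; AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER would take StartingSequenceNumber instead of Timestamp. The ARN, shard ID, and one-hour lookback are placeholders.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := kinesis.NewFromConfig(cfg)

	// Position the iterator at records from roughly one hour ago.
	out, err := client.GetShardIterator(context.TODO(), &kinesis.GetShardIteratorInput{
		StreamARN:         aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"),
		ShardId:           aws.String("shardId-000000000000"),
		ShardIteratorType: types.ShardIteratorTypeAtTimestamp,
		Timestamp:         aws.Time(time.Now().Add(-1 * time.Hour)),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("iterator:", *out.ShardIterator) // expires 5 minutes after issue
}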
|
|
@ -13,16 +13,19 @@ import (
|
|||
|
||||
// Increases the Kinesis data stream's retention period, which is the length of
|
||||
// time data records are accessible after they are added to the stream. The maximum
|
||||
// value of a stream's retention period is 8760 hours (365 days). When invoking
|
||||
// this API, you must use either the StreamARN or the StreamName parameter, or
|
||||
// both. It is recommended that you use the StreamARN input parameter when you
|
||||
// invoke this API. If you choose a longer stream retention period, this operation
|
||||
// increases the time period during which records that have not yet expired are
|
||||
// accessible. However, it does not make previous, expired data (older than the
|
||||
// stream's previous retention period) accessible after the operation has been
|
||||
// called. For example, if a stream's retention period is set to 24 hours and is
|
||||
// increased to 168 hours, any data that is older than 24 hours remains
|
||||
// inaccessible to consumer applications.
|
||||
// value of a stream's retention period is 8760 hours (365 days).
|
||||
//
|
||||
// When invoking this API, you must use either the StreamARN or the StreamName
|
||||
// parameter, or both. It is recommended that you use the StreamARN input
|
||||
// parameter when you invoke this API.
|
||||
//
|
||||
// If you choose a longer stream retention period, this operation increases the
|
||||
// time period during which records that have not yet expired are accessible.
|
||||
// However, it does not make previous, expired data (older than the stream's
|
||||
// previous retention period) accessible after the operation has been called. For
|
||||
// example, if a stream's retention period is set to 24 hours and is increased to
|
||||
// 168 hours, any data that is older than 24 hours remains inaccessible to consumer
|
||||
// applications.
|
||||
func (c *Client) IncreaseStreamRetentionPeriod(ctx context.Context, params *IncreaseStreamRetentionPeriodInput, optFns ...func(*Options)) (*IncreaseStreamRetentionPeriodOutput, error) {
|
||||
if params == nil {
|
||||
params = &IncreaseStreamRetentionPeriodInput{}
|
||||
|
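A sketch of the corresponding call, reusing ctx and client from the GetShardIterator example above; streamARN is a placeholder for a real stream ARN:

// Raise retention from the 24-hour default to 7 days (168 hours);
// already-expired records do not become readable again, per the doc above.
_, err := client.IncreaseStreamRetentionPeriod(ctx, &kinesis.IncreaseStreamRetentionPeriodInput{
	StreamARN:            aws.String(streamARN), // the recommended identifier per the doc above
	RetentionPeriodHours: aws.Int32(168),
})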
110
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListShards.go
generated
vendored
@@ -14,18 +14,22 @@ import (
)

// Lists the shards in a stream and provides information about each shard. This
// operation has a limit of 1000 transactions per second per data stream. When
// invoking this API, you must use either the StreamARN or the StreamName
// operation has a limit of 1000 transactions per second per data stream.
//
// When invoking this API, you must use either the StreamARN or the StreamName
// parameter, or both. It is recommended that you use the StreamARN input
// parameter when you invoke this API. This action does not list expired shards.
// For information about expired shards, see Data Routing, Data Persistence, and
// Shard State after a Reshard (https://docs.aws.amazon.com/streams/latest/dev/kinesis-using-sdk-java-after-resharding.html#kinesis-using-sdk-java-resharding-data-routing)
// . This API is a new operation that is used by the Amazon Kinesis Client Library
// parameter when you invoke this API.
//
// This action does not list expired shards. For information about expired shards,
// see [Data Routing, Data Persistence, and Shard State after a Reshard].
//
// This API is a new operation that is used by the Amazon Kinesis Client Library
// (KCL). If you have a fine-grained IAM policy that only allows specific
// operations, you must update your policy to allow calls to this API. For more
// information, see Controlling Access to Amazon Kinesis Data Streams Resources
// Using IAM (https://docs.aws.amazon.com/streams/latest/dev/controlling-access.html)
// .
// information, see [Controlling Access to Amazon Kinesis Data Streams Resources Using IAM].
//
// [Data Routing, Data Persistence, and Shard State after a Reshard]: https://docs.aws.amazon.com/streams/latest/dev/kinesis-using-sdk-java-after-resharding.html#kinesis-using-sdk-java-resharding-data-routing
// [Controlling Access to Amazon Kinesis Data Streams Resources Using IAM]: https://docs.aws.amazon.com/streams/latest/dev/controlling-access.html
func (c *Client) ListShards(ctx context.Context, params *ListShardsInput, optFns ...func(*Options)) (*ListShardsOutput, error) {
if params == nil {
params = &ListShardsInput{}
@@ -44,15 +48,18 @@ func (c *Client) ListShards(ctx context.Context, params *ListShardsInput, optFns
type ListShardsInput struct {

// Specify this parameter to indicate that you want to list the shards starting
// with the shard whose ID immediately follows ExclusiveStartShardId . If you don't
// specify this parameter, the default behavior is for ListShards to list the
// shards starting with the first one in the stream. You cannot specify this
// parameter if you specify NextToken .
// with the shard whose ID immediately follows ExclusiveStartShardId .
//
// If you don't specify this parameter, the default behavior is for ListShards to
// list the shards starting with the first one in the stream.
//
// You cannot specify this parameter if you specify NextToken .
ExclusiveStartShardId *string

// The maximum number of shards to return in a single call to ListShards . The
// maximum number of shards to return in a single call. The default value is 1000.
// If you specify a value greater than 1000, at most 1000 results are returned.
//
// When the number of shards to be listed is greater than the value of MaxResults ,
// the response contains a NextToken value that you can use in a subsequent call
// to ListShards to list the next set of shards.
@@ -63,33 +70,42 @@ type ListShardsInput struct {
// MaxResults that is less than the number of shards in the data stream, the
// response includes a pagination token named NextToken . You can specify this
// NextToken value in a subsequent call to ListShards to list the next set of
// shards. Don't specify StreamName or StreamCreationTimestamp if you specify
// NextToken because the latter unambiguously identifies the stream. You can
// optionally specify a value for the MaxResults parameter when you specify
// NextToken . If you specify a MaxResults value that is less than the number of
// shards that the operation returns if you don't specify MaxResults , the response
// will contain a new NextToken value. You can use the new NextToken value in a
// subsequent call to the ListShards operation. Tokens expire after 300 seconds.
// When you obtain a value for NextToken in the response to a call to ListShards ,
// you have 300 seconds to use that value. If you specify an expired token in a
// call to ListShards , you get ExpiredNextTokenException .
// shards.
//
// Don't specify StreamName or StreamCreationTimestamp if you specify NextToken
// because the latter unambiguously identifies the stream.
//
// You can optionally specify a value for the MaxResults parameter when you
// specify NextToken . If you specify a MaxResults value that is less than the
// number of shards that the operation returns if you don't specify MaxResults ,
// the response will contain a new NextToken value. You can use the new NextToken
// value in a subsequent call to the ListShards operation.
//
// Tokens expire after 300 seconds. When you obtain a value for NextToken in the
// response to a call to ListShards , you have 300 seconds to use that value. If
// you specify an expired token in a call to ListShards , you get
// ExpiredNextTokenException .
NextToken *string

// Enables you to filter out the response of the ListShards API. You can only
// specify one filter at a time. If you use the ShardFilter parameter when
// invoking the ListShards API, the Type is the required property and must be
// specified. If you specify the AT_TRIM_HORIZON , FROM_TRIM_HORIZON , or AT_LATEST
// types, you do not need to specify either the ShardId or the Timestamp optional
// properties. If you specify the AFTER_SHARD_ID type, you must also provide the
// value for the optional ShardId property. The ShardId property is identical in
// functionality to the ExclusiveStartShardId parameter of the ListShards API. When
// ShardId property is specified, the response includes the shards starting with
// the shard whose ID immediately follows the ShardId that you provided. If you
// specify the AT_TIMESTAMP or FROM_TIMESTAMP_ID type, you must also provide the
// value for the optional Timestamp property. If you specify the AT_TIMESTAMP
// type, then all shards that were open at the provided timestamp are returned. If
// you specify the FROM_TIMESTAMP type, then all shards starting from the provided
// timestamp to TIP are returned.
// specify one filter at a time.
//
// If you use the ShardFilter parameter when invoking the ListShards API, the Type
// is the required property and must be specified. If you specify the
// AT_TRIM_HORIZON , FROM_TRIM_HORIZON , or AT_LATEST types, you do not need to
// specify either the ShardId or the Timestamp optional properties.
//
// If you specify the AFTER_SHARD_ID type, you must also provide the value for the
// optional ShardId property. The ShardId property is identical in functionality to
// the ExclusiveStartShardId parameter of the ListShards API. When ShardId
// property is specified, the response includes the shards starting with the shard
// whose ID immediately follows the ShardId that you provided.
//
// If you specify the AT_TIMESTAMP or FROM_TIMESTAMP_ID type, you must also
// provide the value for the optional Timestamp property. If you specify the
// AT_TIMESTAMP type, then all shards that were open at the provided timestamp are
// returned. If you specify the FROM_TIMESTAMP type, then all shards starting from
// the provided timestamp to TIP are returned.
ShardFilter *types.ShardFilter

// The ARN of the stream.
@@ -98,12 +114,14 @@ type ListShardsInput struct {
// Specify this input parameter to distinguish data streams that have the same
// name. For example, if you create a data stream and then delete it, and you later
// create another data stream with the same name, you can use this input parameter
// to specify which of the two streams you want to list the shards for. You cannot
// specify this parameter if you specify the NextToken parameter.
// to specify which of the two streams you want to list the shards for.
//
// You cannot specify this parameter if you specify the NextToken parameter.
StreamCreationTimestamp *time.Time

// The name of the data stream whose shards you want to list. You cannot specify
// this parameter if you specify the NextToken parameter.
// The name of the data stream whose shards you want to list.
//
// You cannot specify this parameter if you specify the NextToken parameter.
StreamName *string

noSmithyDocumentSerde
@@ -122,10 +140,12 @@ type ListShardsOutput struct {
// response includes a pagination token named NextToken . You can specify this
// NextToken value in a subsequent call to ListShards to list the next set of
// shards. For more information about the use of this pagination token when calling
// the ListShards operation, see ListShardsInput$NextToken . Tokens expire after
// 300 seconds. When you obtain a value for NextToken in the response to a call to
// ListShards , you have 300 seconds to use that value. If you specify an expired
// token in a call to ListShards , you get ExpiredNextTokenException .
// the ListShards operation, see ListShardsInput$NextToken.
//
// Tokens expire after 300 seconds. When you obtain a value for NextToken in the
// response to a call to ListShards , you have 300 seconds to use that value. If
// you specify an expired token in a call to ListShards , you get
// ExpiredNextTokenException .
NextToken *string

// An array of JSON objects. Each object represents one shard and specifies the
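A sketch of calling ListShards with a ShardFilter, under the same assumptions (imports and client) as the earlier GetShardIterator example; the AT_LATEST filter type is one of the values described above:

func openShards(ctx context.Context, client *kinesis.Client) ([]types.Shard, error) {
	out, err := client.ListShards(ctx, &kinesis.ListShardsInput{
		StreamName: aws.String("example-stream"), // hypothetical
		// AT_LATEST restricts the response to the currently open shards.
		ShardFilter: &types.ShardFilter{Type: types.ShardFilterTypeAtLatest},
	})
	if err != nil {
		return nil, err
	}
	return out.Shards, nil
}

For result sets larger than MaxResults, loop on NextToken as the comments above describe, keeping the 300-second token lifetime in mind.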
52
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListStreamConsumers.go
generated
vendored
@@ -14,8 +14,9 @@ import (
)

// Lists the consumers registered to receive data from a stream using enhanced
// fan-out, and provides information about each consumer. This operation has a
// limit of 5 transactions per second per stream.
// fan-out, and provides information about each consumer.
//
// This operation has a limit of 5 transactions per second per stream.
func (c *Client) ListStreamConsumers(ctx context.Context, params *ListStreamConsumersInput, optFns ...func(*Options)) (*ListStreamConsumersOutput, error) {
if params == nil {
params = &ListStreamConsumersInput{}
@@ -34,9 +35,9 @@ func (c *Client) ListStreamConsumers(ctx context.Context, params *ListStreamCons
type ListStreamConsumersInput struct {

// The ARN of the Kinesis data stream for which you want to list the registered
// consumers. For more information, see Amazon Resource Names (ARNs) and Amazon
// Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams)
// .
// consumers. For more information, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces].
//
// [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams
//
// This member is required.
StreamARN *string
@@ -52,24 +53,30 @@ type ListStreamConsumersInput struct {
// consumers that are registered with the data stream, the response includes a
// pagination token named NextToken . You can specify this NextToken value in a
// subsequent call to ListStreamConsumers to list the next set of registered
// consumers. Don't specify StreamName or StreamCreationTimestamp if you specify
// NextToken because the latter unambiguously identifies the stream. You can
// optionally specify a value for the MaxResults parameter when you specify
// NextToken . If you specify a MaxResults value that is less than the number of
// consumers that the operation returns if you don't specify MaxResults , the
// response will contain a new NextToken value. You can use the new NextToken
// consumers.
//
// Don't specify StreamName or StreamCreationTimestamp if you specify NextToken
// because the latter unambiguously identifies the stream.
//
// You can optionally specify a value for the MaxResults parameter when you
// specify NextToken . If you specify a MaxResults value that is less than the
// number of consumers that the operation returns if you don't specify MaxResults ,
// the response will contain a new NextToken value. You can use the new NextToken
// value in a subsequent call to the ListStreamConsumers operation to list the
// next set of consumers. Tokens expire after 300 seconds. When you obtain a value
// for NextToken in the response to a call to ListStreamConsumers , you have 300
// seconds to use that value. If you specify an expired token in a call to
// ListStreamConsumers , you get ExpiredNextTokenException .
// next set of consumers.
//
// Tokens expire after 300 seconds. When you obtain a value for NextToken in the
// response to a call to ListStreamConsumers , you have 300 seconds to use that
// value. If you specify an expired token in a call to ListStreamConsumers , you
// get ExpiredNextTokenException .
NextToken *string

// Specify this input parameter to distinguish data streams that have the same
// name. For example, if you create a data stream and then delete it, and you later
// create another data stream with the same name, you can use this input parameter
// to specify which of the two streams you want to list the consumers for. You
// can't specify this parameter if you specify the NextToken parameter.
// to specify which of the two streams you want to list the consumers for.
//
// You can't specify this parameter if you specify the NextToken parameter.
StreamCreationTimestamp *time.Time

noSmithyDocumentSerde
@@ -91,11 +98,12 @@ type ListStreamConsumersOutput struct {
// registered consumers, the response includes a pagination token named NextToken .
// You can specify this NextToken value in a subsequent call to ListStreamConsumers
// to list the next set of registered consumers. For more information about the use
// of this pagination token when calling the ListStreamConsumers operation, see
// ListStreamConsumersInput$NextToken . Tokens expire after 300 seconds. When you
// obtain a value for NextToken in the response to a call to ListStreamConsumers ,
// you have 300 seconds to use that value. If you specify an expired token in a
// call to ListStreamConsumers , you get ExpiredNextTokenException .
// of this pagination token when calling the ListStreamConsumers operation, see ListStreamConsumersInput$NextToken.
//
// Tokens expire after 300 seconds. When you obtain a value for NextToken in the
// response to a call to ListStreamConsumers , you have 300 seconds to use that
// value. If you specify an expired token in a call to ListStreamConsumers , you
// get ExpiredNextTokenException .
NextToken *string

// Metadata pertaining to the operation's result.
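A sketch of draining the paginated result with the 300-second token rule in mind, same assumptions as the earlier examples:

func allConsumers(ctx context.Context, client *kinesis.Client, streamARN string) ([]types.Consumer, error) {
	var consumers []types.Consumer
	in := &kinesis.ListStreamConsumersInput{StreamARN: aws.String(streamARN)}
	for {
		out, err := client.ListStreamConsumers(ctx, in)
		if err != nil {
			return nil, err
		}
		consumers = append(consumers, out.Consumers...)
		if out.NextToken == nil {
			return consumers, nil
		}
		// Tokens expire after 300 seconds, so they should be used promptly.
		in.NextToken = out.NextToken
	}
}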
16
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListStreams.go
generated
vendored
@@ -11,18 +11,22 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)

// Lists your Kinesis data streams. The number of streams may be too large to
// return from a single call to ListStreams . You can limit the number of returned
// streams using the Limit parameter. If you do not specify a value for the Limit
// parameter, Kinesis Data Streams uses the default limit, which is currently 100.
// Lists your Kinesis data streams.
//
// The number of streams may be too large to return from a single call to
// ListStreams . You can limit the number of returned streams using the Limit
// parameter. If you do not specify a value for the Limit parameter, Kinesis Data
// Streams uses the default limit, which is currently 100.
//
// You can detect if there are more streams available to list by using the
// HasMoreStreams flag from the returned output. If there are more streams
// available, you can request more streams by using the name of the last stream
// returned by the ListStreams request in the ExclusiveStartStreamName parameter
// in a subsequent request to ListStreams . The group of stream names returned by
// the subsequent request is then added to the list. You can continue this process
// until all the stream names have been collected in the list. ListStreams has a
// limit of five transactions per second per account.
// until all the stream names have been collected in the list.
//
// ListStreams has a limit of five transactions per second per account.
func (c *Client) ListStreams(ctx context.Context, params *ListStreamsInput, optFns ...func(*Options)) (*ListStreamsOutput, error) {
if params == nil {
params = &ListStreamsInput{}
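A sketch of the HasMoreStreams/ExclusiveStartStreamName loop the comment describes, same assumptions as the earlier examples:

func streamNames(ctx context.Context, client *kinesis.Client) ([]string, error) {
	var names []string
	in := &kinesis.ListStreamsInput{Limit: aws.Int32(100)}
	for {
		out, err := client.ListStreams(ctx, in)
		if err != nil {
			return nil, err
		}
		names = append(names, out.StreamNames...)
		if out.HasMoreStreams == nil || !*out.HasMoreStreams {
			return names, nil
		}
		// Resume after the last name returned by the previous call.
		in.ExclusiveStartStreamName = aws.String(names[len(names)-1])
	}
}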
8
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_ListTagsForStream.go
generated
vendored
@@ -13,9 +13,11 @@ import (
)

// Lists the tags for the specified Kinesis data stream. This operation has a
// limit of five transactions per second per account. When invoking this API, you
// must use either the StreamARN or the StreamName parameter, or both. It is
// recommended that you use the StreamARN input parameter when you invoke this API.
// limit of five transactions per second per account.
//
// When invoking this API, you must use either the StreamARN or the StreamName
// parameter, or both. It is recommended that you use the StreamARN input
// parameter when you invoke this API.
func (c *Client) ListTagsForStream(ctx context.Context, params *ListTagsForStreamInput, optFns ...func(*Options)) (*ListTagsForStreamOutput, error) {
if params == nil {
params = &ListTagsForStreamInput{}
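A sketch of reading a stream's tags back, reusing ctx, client, and streamARN from the earlier examples (plus the standard log package):

out, err := client.ListTagsForStream(ctx, &kinesis.ListTagsForStreamInput{
	StreamARN: aws.String(streamARN), // recommended over StreamName, per the doc above
})
if err != nil {
	return err
}
for _, t := range out.Tags {
	log.Printf("%s=%s", aws.ToString(t.Key), aws.ToString(t.Value))
}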
51
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_MergeShards.go
generated
vendored
@@ -20,28 +20,39 @@ import (
// range of 382...454, then you could merge these two shards into a single shard
// that would have a hash key range of 276...454. After the merge, the single child
// shard receives data for all hash key values covered by the two parent shards.
//
// When invoking this API, you must use either the StreamARN or the StreamName
// parameter, or both. It is recommended that you use the StreamARN input
// parameter when you invoke this API. MergeShards is called when there is a need
// to reduce the overall capacity of a stream because of excess capacity that is
// not being used. You must specify the shard to be merged and the adjacent shard
// for a stream. For more information about merging shards, see Merge Two Shards (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html)
// in the Amazon Kinesis Data Streams Developer Guide. If the stream is in the
// ACTIVE state, you can call MergeShards . If a stream is in the CREATING ,
// UPDATING , or DELETING state, MergeShards returns a ResourceInUseException . If
// the specified stream does not exist, MergeShards returns a
// ResourceNotFoundException . You can use DescribeStreamSummary to check the
// state of the stream, which is returned in StreamStatus . MergeShards is an
// asynchronous operation. Upon receiving a MergeShards request, Amazon Kinesis
// Data Streams immediately returns a response and sets the StreamStatus to
// UPDATING . After the operation is completed, Kinesis Data Streams sets the
// StreamStatus to ACTIVE . Read and write operations continue to work while the
// stream is in the UPDATING state. You use DescribeStreamSummary and the
// ListShards APIs to determine the shard IDs that are specified in the MergeShards
// request. If you try to operate on too many streams in parallel using
// CreateStream , DeleteStream , MergeShards , or SplitShard , you receive a
// LimitExceededException . MergeShards has a limit of five transactions per
// second per account.
// parameter when you invoke this API.
//
// MergeShards is called when there is a need to reduce the overall capacity of a
// stream because of excess capacity that is not being used. You must specify the
// shard to be merged and the adjacent shard for a stream. For more information
// about merging shards, see [Merge Two Shards] in the Amazon Kinesis Data Streams Developer Guide.
//
// If the stream is in the ACTIVE state, you can call MergeShards . If a stream is
// in the CREATING , UPDATING , or DELETING state, MergeShards returns a
// ResourceInUseException . If the specified stream does not exist, MergeShards
// returns a ResourceNotFoundException .
//
// You can use DescribeStreamSummary to check the state of the stream, which is returned in StreamStatus .
//
// MergeShards is an asynchronous operation. Upon receiving a MergeShards request,
// Amazon Kinesis Data Streams immediately returns a response and sets the
// StreamStatus to UPDATING . After the operation is completed, Kinesis Data
// Streams sets the StreamStatus to ACTIVE . Read and write operations continue to
// work while the stream is in the UPDATING state.
//
// You use DescribeStreamSummary and the ListShards APIs to determine the shard IDs that are specified in the
// MergeShards request.
//
// If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards, or SplitShard,
// you receive a LimitExceededException .
//
// MergeShards has a limit of five transactions per second per account.
//
// [Merge Two Shards]: https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html
func (c *Client) MergeShards(ctx context.Context, params *MergeShardsInput, optFns ...func(*Options)) (*MergeShardsOutput, error) {
if params == nil {
params = &MergeShardsInput{}
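A sketch of the call itself, same assumptions as the earlier examples; the shard IDs are placeholders that would normally come from DescribeStreamSummary or ListShards:

// The two shards must be adjacent in hash key range, as described above.
_, err := client.MergeShards(ctx, &kinesis.MergeShardsInput{
	StreamName:           aws.String("example-stream"),       // hypothetical
	ShardToMerge:         aws.String("shardId-000000000000"), // hypothetical
	AdjacentShardToMerge: aws.String("shardId-000000000001"), // hypothetical
})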
77
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutRecord.go
generated
vendored
@@ -15,38 +15,51 @@ import (
// Writes a single data record into an Amazon Kinesis data stream. Call PutRecord
// to send data into the stream for real-time ingestion and subsequent processing,
// one record at a time. Each shard can support writes up to 1,000 records per
// second, up to a maximum data write total of 1 MiB per second. When invoking this
// API, you must use either the StreamARN or the StreamName parameter, or both. It
// is recommended that you use the StreamARN input parameter when you invoke this
// API. You must specify the name of the stream that captures, stores, and
// transports the data; a partition key; and the data blob itself. The data blob
// can be any type of data; for example, a segment from a log file,
// geographic/location data, website clickstream data, and so on. The partition key
// is used by Kinesis Data Streams to distribute data across shards. Kinesis Data
// Streams segregates the data records that belong to a stream into multiple
// shards, using the partition key associated with each data record to determine
// the shard to which a given data record belongs. Partition keys are Unicode
// strings, with a maximum length limit of 256 characters for each key. An MD5 hash
// function is used to map partition keys to 128-bit integer values and to map
// associated data records to shards using the hash key ranges of the shards. You
// can override hashing the partition key to determine the shard by explicitly
// specifying a hash value using the ExplicitHashKey parameter. For more
// information, see Adding Data to a Stream (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
// in the Amazon Kinesis Data Streams Developer Guide. PutRecord returns the shard
// ID of where the data record was placed and the sequence number that was assigned
// to the data record. Sequence numbers increase over time and are specific to a
// shard within a stream, not across all shards within a stream. To guarantee
// strictly increasing ordering, write serially to a shard and use the
// SequenceNumberForOrdering parameter. For more information, see Adding Data to a
// Stream (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
// in the Amazon Kinesis Data Streams Developer Guide. After you write a record to
// a stream, you cannot modify that record or its order within the stream. If a
// PutRecord request cannot be processed because of insufficient provisioned
// second, up to a maximum data write total of 1 MiB per second.
//
// When invoking this API, you must use either the StreamARN or the StreamName
// parameter, or both. It is recommended that you use the StreamARN input
// parameter when you invoke this API.
//
// You must specify the name of the stream that captures, stores, and transports
// the data; a partition key; and the data blob itself.
//
// The data blob can be any type of data; for example, a segment from a log file,
// geographic/location data, website clickstream data, and so on.
//
// The partition key is used by Kinesis Data Streams to distribute data across
// shards. Kinesis Data Streams segregates the data records that belong to a stream
// into multiple shards, using the partition key associated with each data record
// to determine the shard to which a given data record belongs.
//
// Partition keys are Unicode strings, with a maximum length limit of 256
// characters for each key. An MD5 hash function is used to map partition keys to
// 128-bit integer values and to map associated data records to shards using the
// hash key ranges of the shards. You can override hashing the partition key to
// determine the shard by explicitly specifying a hash value using the
// ExplicitHashKey parameter. For more information, see [Adding Data to a Stream] in the Amazon Kinesis
// Data Streams Developer Guide.
//
// PutRecord returns the shard ID of where the data record was placed and the
// sequence number that was assigned to the data record.
//
// Sequence numbers increase over time and are specific to a shard within a
// stream, not across all shards within a stream. To guarantee strictly increasing
// ordering, write serially to a shard and use the SequenceNumberForOrdering
// parameter. For more information, see [Adding Data to a Stream] in the Amazon Kinesis Data Streams
// Developer Guide.
//
// After you write a record to a stream, you cannot modify that record or its
// order within the stream.
//
// If a PutRecord request cannot be processed because of insufficient provisioned
// throughput on the shard involved in the request, PutRecord throws
// ProvisionedThroughputExceededException . By default, data records are accessible
// for 24 hours from the time that they are added to a stream. You can use
// IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this
// retention period.
// ProvisionedThroughputExceededException .
//
// By default, data records are accessible for 24 hours from the time that they
// are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
//
// [Adding Data to a Stream]: https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream
func (c *Client) PutRecord(ctx context.Context, params *PutRecordInput, optFns ...func(*Options)) (*PutRecordOutput, error) {
if params == nil {
params = &PutRecordInput{}
@@ -128,7 +141,9 @@ type PutRecordOutput struct {

// The encryption type to use on the record. This parameter can be one of the
// following values:
//
// - NONE : Do not encrypt the records in the stream.
//
// - KMS : Use server-side encryption on the records in the stream using a
// customer-managed Amazon Web Services KMS key.
EncryptionType types.EncryptionType
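A sketch of a single write, same assumptions as the earlier examples (plus the standard log package); the names and payload are illustrative:

func putOne(ctx context.Context, client *kinesis.Client) error {
	out, err := client.PutRecord(ctx, &kinesis.PutRecordInput{
		StreamName:   aws.String("example-stream"), // hypothetical
		PartitionKey: aws.String("device-42"),      // hashed with MD5 to pick the shard
		Data:         []byte(`{"reading":7}`),
	})
	if err != nil {
		return err
	}
	log.Printf("stored in %s at %s", aws.ToString(out.ShardId), aws.ToString(out.SequenceNumber))
	return nil
}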
106
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutRecords.go
generated
vendored
@@ -14,56 +14,74 @@ import (

// Writes multiple data records into a Kinesis data stream in a single call (also
// referred to as a PutRecords request). Use this operation to send data into the
// stream for data ingestion and processing. When invoking this API, you must use
// either the StreamARN or the StreamName parameter, or both. It is recommended
// that you use the StreamARN input parameter when you invoke this API. Each
// PutRecords request can support up to 500 records. Each record in the request can
// be as large as 1 MiB, up to a limit of 5 MiB for the entire request, including
// partition keys. Each shard can support writes up to 1,000 records per second, up
// to a maximum data write total of 1 MiB per second. You must specify the name of
// the stream that captures, stores, and transports the data; and an array of
// request Records , with each record in the array requiring a partition key and
// data blob. The record size limit applies to the total size of the partition key
// and data blob. The data blob can be any type of data; for example, a segment
// from a log file, geographic/location data, website clickstream data, and so on.
// stream for data ingestion and processing.
//
// When invoking this API, you must use either the StreamARN or the StreamName
// parameter, or both. It is recommended that you use the StreamARN input
// parameter when you invoke this API.
//
// Each PutRecords request can support up to 500 records. Each record in the
// request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request,
// including partition keys. Each shard can support writes up to 1,000 records per
// second, up to a maximum data write total of 1 MiB per second.
//
// You must specify the name of the stream that captures, stores, and transports
// the data; and an array of request Records , with each record in the array
// requiring a partition key and data blob. The record size limit applies to the
// total size of the partition key and data blob.
//
// The data blob can be any type of data; for example, a segment from a log file,
// geographic/location data, website clickstream data, and so on.
//
// The partition key is used by Kinesis Data Streams as input to a hash function
// that maps the partition key and associated data to a specific shard. An MD5 hash
// function is used to map partition keys to 128-bit integer values and to map
// associated data records to shards. As a result of this hashing mechanism, all
// data records with the same partition key map to the same shard within the
// stream. For more information, see Adding Data to a Stream (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream)
// in the Amazon Kinesis Data Streams Developer Guide. Each record in the Records
// array may include an optional parameter, ExplicitHashKey , which overrides the
// partition key to shard mapping. This parameter allows a data producer to
// determine explicitly the shard where the record is stored. For more information,
// see Adding Multiple Records with PutRecords (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords)
// in the Amazon Kinesis Data Streams Developer Guide. The PutRecords response
// includes an array of response Records . Each record in the response array
// directly correlates with a record in the request array using natural ordering,
// from the top to the bottom of the request and response. The response Records
// array always includes the same number of records as the request array. The
// response Records array includes both successfully and unsuccessfully processed
// records. Kinesis Data Streams attempts to process all records in each PutRecords
// request. A single record failure does not stop the processing of subsequent
// records. As a result, PutRecords doesn't guarantee the ordering of records. If
// you need to read records in the same order they are written to the stream, use
// PutRecord instead of PutRecords , and write to the same shard. A successfully
// processed record includes ShardId and SequenceNumber values. The ShardId
// parameter identifies the shard in the stream where the record is stored. The
// SequenceNumber parameter is an identifier assigned to the put record, unique to
// all records in the stream. An unsuccessfully processed record includes ErrorCode
// and ErrorMessage values. ErrorCode reflects the type of error and can be one of
// the following values: ProvisionedThroughputExceededException or InternalFailure
// . ErrorMessage provides more detailed information about the
// stream. For more information, see [Adding Data to a Stream] in the Amazon Kinesis Data Streams Developer
// Guide.
//
// Each record in the Records array may include an optional parameter,
// ExplicitHashKey , which overrides the partition key to shard mapping. This
// parameter allows a data producer to determine explicitly the shard where the
// record is stored. For more information, see [Adding Multiple Records with PutRecords] in the Amazon Kinesis Data Streams
// Developer Guide.
//
// The PutRecords response includes an array of response Records . Each record in
// the response array directly correlates with a record in the request array using
// natural ordering, from the top to the bottom of the request and response. The
// response Records array always includes the same number of records as the
// request array.
//
// The response Records array includes both successfully and unsuccessfully
// processed records. Kinesis Data Streams attempts to process all records in each
// PutRecords request. A single record failure does not stop the processing of
// subsequent records. As a result, PutRecords doesn't guarantee the ordering of
// records. If you need to read records in the same order they are written to the
// stream, use PutRecord instead of PutRecords , and write to the same shard.
//
// A successfully processed record includes ShardId and SequenceNumber values. The
// ShardId parameter identifies the shard in the stream where the record is stored.
// The SequenceNumber parameter is an identifier assigned to the put record,
// unique to all records in the stream.
//
// An unsuccessfully processed record includes ErrorCode and ErrorMessage values.
// ErrorCode reflects the type of error and can be one of the following values:
// ProvisionedThroughputExceededException or InternalFailure . ErrorMessage
// provides more detailed information about the
// ProvisionedThroughputExceededException exception including the account ID,
// stream name, and shard ID of the record that was throttled. For more information
// about partially successful responses, see Adding Multiple Records with
// PutRecords (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords)
// in the Amazon Kinesis Data Streams Developer Guide. After you write a record to
// a stream, you cannot modify that record or its order within the stream. By
// default, data records are accessible for 24 hours from the time that they are
// added to a stream. You can use IncreaseStreamRetentionPeriod or
// DecreaseStreamRetentionPeriod to modify this retention period.
// about partially successful responses, see [Adding Multiple Records with PutRecords] in the Amazon Kinesis Data Streams
// Developer Guide.
//
// After you write a record to a stream, you cannot modify that record or its
// order within the stream.
//
// By default, data records are accessible for 24 hours from the time that they
// are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
//
// [Adding Multiple Records with PutRecords]: https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords
// [Adding Data to a Stream]: https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream
func (c *Client) PutRecords(ctx context.Context, params *PutRecordsInput, optFns ...func(*Options)) (*PutRecordsOutput, error) {
if params == nil {
params = &PutRecordsInput{}
@@ -114,7 +132,9 @@ type PutRecordsOutput struct {

// The encryption type used on the records. This parameter can be one of the
// following values:
//
// - NONE : Do not encrypt the records.
//
// - KMS : Use server-side encryption on the records using a customer-managed
// Amazon Web Services KMS key.
EncryptionType types.EncryptionType
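A sketch of a batched write with the partial-failure handling the comments call out, same assumptions as the earlier examples:

func putBatch(ctx context.Context, client *kinesis.Client, entries []types.PutRecordsRequestEntry) error {
	out, err := client.PutRecords(ctx, &kinesis.PutRecordsInput{
		StreamName: aws.String("example-stream"), // hypothetical
		Records:    entries,                      // up to 500 entries, 5 MiB total
	})
	if err != nil {
		return err
	}
	// A non-zero FailedRecordCount means some entries need to be retried;
	// the per-entry ErrorCode/ErrorMessage identify which ones failed.
	if out.FailedRecordCount != nil && *out.FailedRecordCount > 0 {
		for _, r := range out.Records {
			if r.ErrorCode != nil {
				log.Printf("retry needed: %s", aws.ToString(r.ErrorMessage))
			}
		}
	}
	return nil
}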
13
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_PutResourcePolicy.go
generated
vendored
@@ -18,15 +18,18 @@ import (
// belong to the owner's account in order to use this operation. If you don't have
// PutResourcePolicy permissions, Amazon Kinesis Data Streams returns a 403 Access
// Denied error . If you receive a ResourceNotFoundException , check to see if you
// passed a valid stream or consumer resource. Request patterns can be one of the
// following:
// passed a valid stream or consumer resource.
//
// Request patterns can be one of the following:
//
// - Data stream pattern: arn:aws.*:kinesis:.*:\d{12}:.*stream/\S+
//
// - Consumer pattern:
// ^(arn):aws.*:kinesis:.*:\d{12}:.*stream\/[a-zA-Z0-9_.-]+\/consumer\/[a-zA-Z0-9_.-]+:[0-9]+
//
// For more information, see Controlling Access to Amazon Kinesis Data Streams
// Resources Using IAM (https://docs.aws.amazon.com/streams/latest/dev/controlling-access.html)
// .
// For more information, see [Controlling Access to Amazon Kinesis Data Streams Resources Using IAM].
//
// [Controlling Access to Amazon Kinesis Data Streams Resources Using IAM]: https://docs.aws.amazon.com/streams/latest/dev/controlling-access.html
func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolicyInput, optFns ...func(*Options)) (*PutResourcePolicyOutput, error) {
if params == nil {
params = &PutResourcePolicyInput{}
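A sketch of attaching a policy, same assumptions as the earlier examples; the field names follow this file's input type, and policyJSON stands in for a real IAM resource-policy document:

_, err := client.PutResourcePolicy(ctx, &kinesis.PutResourcePolicyInput{
	ResourceARN: aws.String(streamARN),  // must match one of the request patterns above
	Policy:      aws.String(policyJSON), // placeholder JSON policy document
})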
32
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_RegisterStreamConsumer.go
generated
vendored
@@ -13,17 +13,21 @@ import (
)

// Registers a consumer with a Kinesis data stream. When you use this operation,
// the consumer you register can then call SubscribeToShard to receive data from
// the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every
// shard you subscribe to. This rate is unaffected by the total number of consumers
// that read from the same stream. You can register up to 20 consumers per stream.
// A given consumer can only be registered with one stream at a time. For an
// example of how to use this operation, see Enhanced Fan-Out Using the Kinesis
// Data Streams API . The use of this operation has a limit of five transactions
// per second per account. Also, only 5 consumers can be created simultaneously. In
// other words, you cannot have more than 5 consumers in a CREATING status at the
// same time. Registering a 6th consumer while there are 5 in a CREATING status
// results in a LimitExceededException .
// the consumer you register can then call SubscribeToShard to receive data from the stream using
// enhanced fan-out, at a rate of up to 2 MiB per second for every shard you
// subscribe to. This rate is unaffected by the total number of consumers that read
// from the same stream.
//
// You can register up to 20 consumers per stream. A given consumer can only be
// registered with one stream at a time.
//
// For an example of how to use this operation, see Enhanced Fan-Out Using the Kinesis Data Streams API.
//
// The use of this operation has a limit of five transactions per second per
// account. Also, only 5 consumers can be created simultaneously. In other words,
// you cannot have more than 5 consumers in a CREATING status at the same time.
// Registering a 6th consumer while there are 5 in a CREATING status results in a
// LimitExceededException .
func (c *Client) RegisterStreamConsumer(ctx context.Context, params *RegisterStreamConsumerInput, optFns ...func(*Options)) (*RegisterStreamConsumerOutput, error) {
if params == nil {
params = &RegisterStreamConsumerInput{}
@@ -48,9 +52,9 @@ type RegisterStreamConsumerInput struct {
ConsumerName *string

// The ARN of the Kinesis data stream that you want to register the consumer with.
// For more info, see Amazon Resource Names (ARNs) and Amazon Web Services Service
// Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams)
// .
// For more info, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces].
//
// [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kinesis-streams
//
// This member is required.
StreamARN *string
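A sketch of registering an enhanced fan-out consumer, same assumptions as the earlier examples; the consumer name is illustrative:

out, err := client.RegisterStreamConsumer(ctx, &kinesis.RegisterStreamConsumerInput{
	StreamARN:    aws.String(streamARN),
	ConsumerName: aws.String("analytics-app"), // hypothetical
})
if err != nil {
	return err
}
// The returned Consumer carries the ConsumerARN that SubscribeToShard requires.
log.Println(aws.ToString(out.Consumer.ConsumerARN))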
13
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_RemoveTagsFromStream.go
generated
vendored
@@ -12,12 +12,15 @@ import (
)

// Removes tags from the specified Kinesis data stream. Removed tags are deleted
// and cannot be recovered after this operation successfully completes. When
// invoking this API, you must use either the StreamARN or the StreamName
// and cannot be recovered after this operation successfully completes.
//
// When invoking this API, you must use either the StreamARN or the StreamName
// parameter, or both. It is recommended that you use the StreamARN input
// parameter when you invoke this API. If you specify a tag that does not exist, it
// is ignored. RemoveTagsFromStream has a limit of five transactions per second
// per account.
// parameter when you invoke this API.
//
// If you specify a tag that does not exist, it is ignored.
//
// RemoveTagsFromStream has a limit of five transactions per second per account.
func (c *Client) RemoveTagsFromStream(ctx context.Context, params *RemoveTagsFromStreamInput, optFns ...func(*Options)) (*RemoveTagsFromStreamOutput, error) {
if params == nil {
params = &RemoveTagsFromStreamInput{}
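A sketch of the call, same assumptions as the earlier examples; the tag key is illustrative:

_, err := client.RemoveTagsFromStream(ctx, &kinesis.RemoveTagsFromStreamInput{
	StreamARN: aws.String(streamARN),   // recommended over StreamName
	TagKeys:   []string{"environment"}, // keys that don't exist are ignored
})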
80
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_SplitShard.go
generated
vendored
@@ -15,40 +15,52 @@ import (
// stream's capacity to ingest and transport data. SplitShard is called when there
// is a need to increase the overall capacity of a stream because of an expected
// increase in the volume of data records being ingested. This API is only
// supported for the data streams with the provisioned capacity mode. When invoking
// this API, you must use either the StreamARN or the StreamName parameter, or
// both. It is recommended that you use the StreamARN input parameter when you
// invoke this API. You can also use SplitShard when a shard appears to be
// approaching its maximum utilization; for example, the producers sending data
// into the specific shard are suddenly sending more than previously anticipated.
// You can also call SplitShard to increase stream capacity, so that more Kinesis
// Data Streams applications can simultaneously read data from the stream for
// real-time processing. You must specify the shard to be split and the new hash
// key, which is the position in the shard where the shard gets split in two. In
// many cases, the new hash key might be the average of the beginning and ending
// hash key, but it can be any hash key value in the range being mapped into the
// shard. For more information, see Split a Shard (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html)
// in the Amazon Kinesis Data Streams Developer Guide. You can use
// DescribeStreamSummary and the ListShards APIs to determine the shard ID and
// hash key values for the ShardToSplit and NewStartingHashKey parameters that are
// specified in the SplitShard request. SplitShard is an asynchronous operation.
// Upon receiving a SplitShard request, Kinesis Data Streams immediately returns a
// response and sets the stream status to UPDATING . After the operation is
// completed, Kinesis Data Streams sets the stream status to ACTIVE . Read and
// write operations continue to work while the stream is in the UPDATING state.
// You can use DescribeStreamSummary to check the status of the stream, which is
// returned in StreamStatus . If the stream is in the ACTIVE state, you can call
// SplitShard . If the specified stream does not exist, DescribeStreamSummary
// returns a ResourceNotFoundException . If you try to create more shards than are
// authorized for your account, you receive a LimitExceededException . For the
// default shard limit for an Amazon Web Services account, see Kinesis Data
// Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
// in the Amazon Kinesis Data Streams Developer Guide. To increase this limit,
// contact Amazon Web Services Support (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html)
// . If you try to operate on too many streams simultaneously using CreateStream ,
// DeleteStream , MergeShards , and/or SplitShard , you receive a
// LimitExceededException . SplitShard has a limit of five transactions per second
// per account.
// supported for the data streams with the provisioned capacity mode.
//
// When invoking this API, you must use either the StreamARN or the StreamName
// parameter, or both. It is recommended that you use the StreamARN input
// parameter when you invoke this API.
//
// You can also use SplitShard when a shard appears to be approaching its maximum
// utilization; for example, the producers sending data into the specific shard are
// suddenly sending more than previously anticipated. You can also call SplitShard
// to increase stream capacity, so that more Kinesis Data Streams applications can
// simultaneously read data from the stream for real-time processing.
//
// You must specify the shard to be split and the new hash key, which is the
// position in the shard where the shard gets split in two. In many cases, the new
// hash key might be the average of the beginning and ending hash key, but it can
// be any hash key value in the range being mapped into the shard. For more
// information, see [Split a Shard] in the Amazon Kinesis Data Streams Developer Guide.
//
// You can use DescribeStreamSummary and the ListShards APIs to determine the shard ID and hash key values for
// the ShardToSplit and NewStartingHashKey parameters that are specified in the
// SplitShard request.
//
// SplitShard is an asynchronous operation. Upon receiving a SplitShard request,
// Kinesis Data Streams immediately returns a response and sets the stream status
// to UPDATING . After the operation is completed, Kinesis Data Streams sets the
// stream status to ACTIVE . Read and write operations continue to work while the
// stream is in the UPDATING state.
//
// You can use DescribeStreamSummary to check the status of the stream, which is returned in
// StreamStatus . If the stream is in the ACTIVE state, you can call SplitShard .
//
// If the specified stream does not exist, DescribeStreamSummary returns a ResourceNotFoundException .
// If you try to create more shards than are authorized for your account, you
// receive a LimitExceededException .
//
// For the default shard limit for an Amazon Web Services account, see [Kinesis Data Streams Limits] in the
// Amazon Kinesis Data Streams Developer Guide. To increase this limit, [contact Amazon Web Services Support].
//
// If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard,
// you receive a LimitExceededException .
//
// SplitShard has a limit of five transactions per second per account.
//
// [Kinesis Data Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
// [contact Amazon Web Services Support]: https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html
// [Split a Shard]: https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html
func (c *Client) SplitShard(ctx context.Context, params *SplitShardInput, optFns ...func(*Options)) (*SplitShardOutput, error) {
if params == nil {
params = &SplitShardInput{}
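A sketch of a split at the midpoint of the full hash key space, same assumptions as the earlier examples; real code would derive NewStartingHashKey from the parent shard's HashKeyRange:

_, err := client.SplitShard(ctx, &kinesis.SplitShardInput{
	StreamName:   aws.String("example-stream"),       // hypothetical
	ShardToSplit: aws.String("shardId-000000000000"), // hypothetical
	// 2^127, the midpoint of the 128-bit hash key range.
	NewStartingHashKey: aws.String("170141183460469231731687303715884105728"),
})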
42
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_StartStreamEncryption.go
generated
vendored
@@ -13,22 +13,27 @@ import (
)

// Enables or updates server-side encryption using an Amazon Web Services KMS key
// for a specified stream. When invoking this API, you must use either the
// StreamARN or the StreamName parameter, or both. It is recommended that you use
// the StreamARN input parameter when you invoke this API. Starting encryption is
// an asynchronous operation. Upon receiving the request, Kinesis Data Streams
// returns immediately and sets the status of the stream to UPDATING . After the
// update is complete, Kinesis Data Streams sets the status of the stream back to
// ACTIVE . Updating or applying encryption normally takes a few seconds to
// complete, but it can take minutes. You can continue to read and write data to
// your stream while its status is UPDATING . Once the status of the stream is
// ACTIVE , encryption begins for records written to the stream. API Limits: You
// can successfully apply a new Amazon Web Services KMS key for server-side
// encryption 25 times in a rolling 24-hour period. Note: It can take up to 5
// seconds after the stream is in an ACTIVE status before all records written to
// the stream are encrypted. After you enable encryption, you can verify that
// encryption is applied by inspecting the API response from PutRecord or
// PutRecords .
// for a specified stream.
//
// When invoking this API, you must use either the StreamARN or the StreamName
// parameter, or both. It is recommended that you use the StreamARN input
// parameter when you invoke this API.
//
// Starting encryption is an asynchronous operation. Upon receiving the request,
// Kinesis Data Streams returns immediately and sets the status of the stream to
// UPDATING . After the update is complete, Kinesis Data Streams sets the status of
// the stream back to ACTIVE . Updating or applying encryption normally takes a few
// seconds to complete, but it can take minutes. You can continue to read and write
// data to your stream while its status is UPDATING . Once the status of the stream
// is ACTIVE , encryption begins for records written to the stream.
//
// API Limits: You can successfully apply a new Amazon Web Services KMS key for
// server-side encryption 25 times in a rolling 24-hour period.
//
// Note: It can take up to 5 seconds after the stream is in an ACTIVE status
// before all records written to the stream are encrypted. After you enable
// encryption, you can verify that encryption is applied by inspecting the API
// response from PutRecord or PutRecords .
func (c *Client) StartStreamEncryption(ctx context.Context, params *StartStreamEncryptionInput, optFns ...func(*Options)) (*StartStreamEncryptionOutput, error) {
if params == nil {
params = &StartStreamEncryptionInput{}
@ -56,11 +61,16 @@ type StartStreamEncryptionInput struct {
|
|||
// Amazon Resource Name (ARN) to either an alias or a key, or an alias name
|
||||
// prefixed by "alias/".You can also use a master key owned by Kinesis Data Streams
|
||||
// by specifying the alias aws/kinesis .
|
||||
//
|
||||
// - Key ARN example:
|
||||
// arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
|
||||
//
|
||||
// - Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
|
||||
//
|
||||
// - Globally unique key ID example: 12345678-1234-1234-1234-123456789012
|
||||
//
|
||||
// - Alias name example: alias/MyAliasName
|
||||
//
|
||||
// - Master key owned by Kinesis Data Streams: alias/aws/kinesis
|
||||
//
|
||||
// This member is required.
|
||||
|
|
|
|||
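
A rough sketch of enabling encryption with the Kinesis-owned master key; the stream ARN is a hypothetical placeholder and the client is assumed to come from kinesis.NewFromConfig.

package sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

// enableEncryption starts server-side encryption and leaves the stream in
// UPDATING until Kinesis Data Streams flips it back to ACTIVE.
func enableEncryption(ctx context.Context, client *kinesis.Client, streamARN string) error {
	_, err := client.StartStreamEncryption(ctx, &kinesis.StartStreamEncryptionInput{
		StreamARN:      aws.String(streamARN), // placeholder ARN supplied by the caller
		EncryptionType: types.EncryptionTypeKms,
		KeyId:          aws.String("alias/aws/kinesis"), // master key owned by Kinesis Data Streams
	})
	return err
}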
43
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_StopStreamEncryption.go
generated
vendored

@@ -12,22 +12,28 @@ import (
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// Disables server-side encryption for a specified stream. When invoking this API,
// you must use either the StreamARN or the StreamName parameter, or both. It is
// recommended that you use the StreamARN input parameter when you invoke this
// API. Stopping encryption is an asynchronous operation. Upon receiving the
// request, Kinesis Data Streams returns immediately and sets the status of the
// stream to UPDATING . After the update is complete, Kinesis Data Streams sets the
// status of the stream back to ACTIVE . Stopping encryption normally takes a few
// seconds to complete, but it can take minutes. You can continue to read and write
// data to your stream while its status is UPDATING . Once the status of the stream
// is ACTIVE , records written to the stream are no longer encrypted by Kinesis
// Data Streams. API Limits: You can successfully disable server-side encryption 25
// times in a rolling 24-hour period. Note: It can take up to 5 seconds after the
// stream is in an ACTIVE status before all records written to the stream are no
// longer subject to encryption. After you disable encryption, you can verify that
// encryption is not applied by inspecting the API response from PutRecord or
// PutRecords .
// Disables server-side encryption for a specified stream.
//
// When invoking this API, you must use either the StreamARN or the StreamName
// parameter, or both. It is recommended that you use the StreamARN input
// parameter when you invoke this API.
//
// Stopping encryption is an asynchronous operation. Upon receiving the request,
// Kinesis Data Streams returns immediately and sets the status of the stream to
// UPDATING . After the update is complete, Kinesis Data Streams sets the status of
// the stream back to ACTIVE . Stopping encryption normally takes a few seconds to
// complete, but it can take minutes. You can continue to read and write data to
// your stream while its status is UPDATING . Once the status of the stream is
// ACTIVE , records written to the stream are no longer encrypted by Kinesis Data
// Streams.
//
// API Limits: You can successfully disable server-side encryption 25 times in a
// rolling 24-hour period.
//
// Note: It can take up to 5 seconds after the stream is in an ACTIVE status
// before all records written to the stream are no longer subject to encryption.
// After you disable encryption, you can verify that encryption is not applied by
// inspecting the API response from PutRecord or PutRecords .
func (c *Client) StopStreamEncryption(ctx context.Context, params *StopStreamEncryptionInput, optFns ...func(*Options)) (*StopStreamEncryptionOutput, error) {
	if params == nil {
		params = &StopStreamEncryptionInput{}

@@ -55,11 +61,16 @@ type StopStreamEncryptionInput struct {
	// Amazon Resource Name (ARN) to either an alias or a key, or an alias name
	// prefixed by "alias/". You can also use a master key owned by Kinesis Data Streams
	// by specifying the alias aws/kinesis .
	//
	// - Key ARN example:
	// arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
	//
	// - Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
	//
	// - Globally unique key ID example: 12345678-1234-1234-1234-123456789012
	//
	// - Alias name example: alias/MyAliasName
	//
	// - Master key owned by Kinesis Data Streams: alias/aws/kinesis
	//
	// This member is required.
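
The mirror-image sketch for turning encryption off; EncryptionType and KeyId are required here too, and the alias below assumes the stream was encrypted with the Kinesis-owned key.

package sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

// disableEncryption stops server-side encryption; the key ID must match the
// key the stream is currently encrypted with (alias/aws/kinesis is assumed).
func disableEncryption(ctx context.Context, client *kinesis.Client, streamARN string) error {
	_, err := client.StopStreamEncryption(ctx, &kinesis.StopStreamEncryptionInput{
		StreamARN:      aws.String(streamARN),
		EncryptionType: types.EncryptionTypeKms,
		KeyId:          aws.String("alias/aws/kinesis"),
	})
	return err
}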
40
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_SubscribeToShard.go
generated
vendored

@@ -17,22 +17,27 @@ import (
// specify in the ConsumerARN parameter and the shard you specify in the ShardId
// parameter. After the connection is successfully established, Kinesis Data
// Streams pushes records from the shard to the consumer over this connection.
// Before you call this operation, call RegisterStreamConsumer to register the
// consumer with Kinesis Data Streams. When the SubscribeToShard call succeeds,
// your consumer starts receiving events of type SubscribeToShardEvent over the
// HTTP/2 connection for up to 5 minutes, after which time you need to call
// SubscribeToShard again to renew the subscription if you want to continue to
// receive records. You can make one call to SubscribeToShard per second per
// registered consumer per shard. For example, if you have a 4000 shard stream and
// two registered stream consumers, you can make one SubscribeToShard request per
// second for each combination of shard and registered consumer, allowing you to
// subscribe both consumers to all 4000 shards in one second. If you call
// SubscribeToShard again with the same ConsumerARN and ShardId within 5 seconds
// of a successful call, you'll get a ResourceInUseException . If you call
// SubscribeToShard 5 seconds or more after a successful call, the second call
// takes over the subscription and the previous connection expires or fails with a
// ResourceInUseException . For an example of how to use this operation, see
// Enhanced Fan-Out Using the Kinesis Data Streams API .
// Before you call this operation, call RegisterStreamConsumer to register the consumer with Kinesis Data
// Streams.
//
// When the SubscribeToShard call succeeds, your consumer starts receiving events
// of type SubscribeToShardEvent over the HTTP/2 connection for up to 5 minutes, after which time you
// need to call SubscribeToShard again to renew the subscription if you want to
// continue to receive records.
//
// You can make one call to SubscribeToShard per second per registered consumer
// per shard. For example, if you have a 4000 shard stream and two registered
// stream consumers, you can make one SubscribeToShard request per second for each
// combination of shard and registered consumer, allowing you to subscribe both
// consumers to all 4000 shards in one second.
//
// If you call SubscribeToShard again with the same ConsumerARN and ShardId within
// 5 seconds of a successful call, you'll get a ResourceInUseException . If you
// call SubscribeToShard 5 seconds or more after a successful call, the second
// call takes over the subscription and the previous connection expires or fails
// with a ResourceInUseException .
//
// For an example of how to use this operation, see Enhanced Fan-Out Using the Kinesis Data Streams API.
func (c *Client) SubscribeToShard(ctx context.Context, params *SubscribeToShardInput, optFns ...func(*Options)) (*SubscribeToShardOutput, error) {
	if params == nil {
		params = &SubscribeToShardInput{}

@@ -50,8 +55,7 @@ func (c *Client) SubscribeToShard(ctx context.Context, params *SubscribeToShardI

type SubscribeToShardInput struct {

	// For this parameter, use the value you obtained when you called
	// RegisterStreamConsumer .
	// For this parameter, use the value you obtained when you called RegisterStreamConsumer.
	//
	// This member is required.
	ConsumerARN *string
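
One possible shape of a consumer loop over the event stream this operation returns; the consumer ARN and shard ID are placeholders obtained from RegisterStreamConsumer and ListShards.

package sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

// readShard subscribes at LATEST and drains events until the subscription
// ends (roughly 5 minutes, per the doc comment above); callers re-subscribe
// to keep receiving records.
func readShard(ctx context.Context, client *kinesis.Client, consumerARN, shardID string) error {
	out, err := client.SubscribeToShard(ctx, &kinesis.SubscribeToShardInput{
		ConsumerARN:      aws.String(consumerARN),
		ShardId:          aws.String(shardID),
		StartingPosition: &types.StartingPosition{Type: types.ShardIteratorTypeLatest},
	})
	if err != nil {
		return err
	}
	stream := out.GetStream()
	defer stream.Close()

	for event := range stream.Events() {
		// The stream delivers a union type; records arrive in this member.
		if e, ok := event.(*types.SubscribeToShardEventStreamMemberSubscribeToShardEvent); ok {
			for _, rec := range e.Value.Records {
				fmt.Printf("seq %s: %d bytes\n", aws.ToString(rec.SequenceNumber), len(rec.Data))
			}
		}
	}
	return stream.Err()
}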
64
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_UpdateShardCount.go
generated
vendored

@@ -14,37 +14,53 @@ import (

// Updates the shard count of the specified stream to the specified number of
// shards. This API is only supported for the data streams with the provisioned
// capacity mode. When invoking this API, you must use either the StreamARN or the
// StreamName parameter, or both. It is recommended that you use the StreamARN
// input parameter when you invoke this API. Updating the shard count is an
// asynchronous operation. Upon receiving the request, Kinesis Data Streams returns
// immediately and sets the status of the stream to UPDATING . After the update is
// complete, Kinesis Data Streams sets the status of the stream back to ACTIVE .
// Depending on the size of the stream, the scaling action could take a few minutes
// to complete. You can continue to read and write data to your stream while its
// status is UPDATING . To update the shard count, Kinesis Data Streams performs
// splits or merges on individual shards. This can cause short-lived shards to be
// created, in addition to the final shards. These short-lived shards count towards
// your total shard limit for your account in the Region. When using this
// operation, we recommend that you specify a target shard count that is a multiple
// of 25% (25%, 50%, 75%, 100%). You can specify any target value within your shard
// limit. However, if you specify a target that isn't a multiple of 25%, the
// scaling action might take longer to complete. This operation has the following
// default limits. By default, you cannot do the following:
// capacity mode.
//
// When invoking this API, you must use either the StreamARN or the StreamName
// parameter, or both. It is recommended that you use the StreamARN input
// parameter when you invoke this API.
//
// Updating the shard count is an asynchronous operation. Upon receiving the
// request, Kinesis Data Streams returns immediately and sets the status of the
// stream to UPDATING . After the update is complete, Kinesis Data Streams sets the
// status of the stream back to ACTIVE . Depending on the size of the stream, the
// scaling action could take a few minutes to complete. You can continue to read
// and write data to your stream while its status is UPDATING .
//
// To update the shard count, Kinesis Data Streams performs splits or merges on
// individual shards. This can cause short-lived shards to be created, in addition
// to the final shards. These short-lived shards count towards your total shard
// limit for your account in the Region.
//
// When using this operation, we recommend that you specify a target shard count
// that is a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target
// value within your shard limit. However, if you specify a target that isn't a
// multiple of 25%, the scaling action might take longer to complete.
//
// This operation has the following default limits. By default, you cannot do the
// following:
//
// - Scale more than ten times per rolling 24-hour period per stream
//
// - Scale up to more than double your current shard count for a stream
//
// - Scale down below half your current shard count for a stream
//
// - Scale up to more than 10000 shards in a stream
//
// - Scale a stream with more than 10000 shards down unless the result is less
// than 10000 shards
//
// - Scale up to more than the shard limit for your account
//
// - Make over 10 TPS. TPS over 10 will trigger the LimitExceededException
//
// For the default limits for an Amazon Web Services account, see Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
// in the Amazon Kinesis Data Streams Developer Guide. To request an increase in
// the call rate limit, the shard limit for this API, or your overall shard limit,
// use the limits form (https://console.aws.amazon.com/support/v1#/case/create?issueType=service-limit-increase&limitType=service-code-kinesis)
// .
// For the default limits for an Amazon Web Services account, see [Streams Limits] in the Amazon
// Kinesis Data Streams Developer Guide. To request an increase in the call rate
// limit, the shard limit for this API, or your overall shard limit, use the [limits form].
//
// [limits form]: https://console.aws.amazon.com/support/v1#/case/create?issueType=service-limit-increase&limitType=service-code-kinesis
// [Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
func (c *Client) UpdateShardCount(ctx context.Context, params *UpdateShardCountInput, optFns ...func(*Options)) (*UpdateShardCountOutput, error) {
	if params == nil {
		params = &UpdateShardCountInput{}

@@ -69,11 +85,15 @@ type UpdateShardCountInput struct {

	// The new number of shards. This value has the following default limits. By
	// default, you cannot do the following:
	//
	// - Set this value to more than double your current shard count for a stream.
	//
	// - Set this value below half your current shard count for a stream.
	//
	// - Set this value to more than 10000 shards in a stream (the default limit for
	// shard count per stream is 10000 per account per region), unless you request a
	// limit increase.
	//
	// - Scale a stream with more than 10000 shards down unless you set this value
	// to less than 10000 shards.
	//
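
A sketch of a scaling call that stays within the guidance above: doubling is a 100% step, i.e. a multiple of 25%, so the scaling action completes fastest. The stream name and current count are assumed to be known to the caller.

package sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

// doubleShards scales a provisioned-mode stream to twice its current count.
func doubleShards(ctx context.Context, client *kinesis.Client, streamName string, current int32) error {
	_, err := client.UpdateShardCount(ctx, &kinesis.UpdateShardCountInput{
		StreamName:       aws.String(streamName),
		TargetShardCount: aws.Int32(current * 2), // a 25%-multiple step per the docs
		ScalingType:      types.ScalingTypeUniformScaling,
	})
	return err
}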
1
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/api_op_UpdateStreamMode.go
generated
vendored

@@ -13,6 +13,7 @@ import (
)

// Updates the capacity mode of the data stream. Currently, in Kinesis Data
//
// Streams, you can choose between an on-demand capacity mode and a provisioned
// capacity mode for your data stream.
func (c *Client) UpdateStreamMode(ctx context.Context, params *UpdateStreamModeInput, optFns ...func(*Options)) (*UpdateStreamModeOutput, error) {
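
A small sketch of switching a stream to on-demand mode; this operation identifies the stream by ARN, and the ARN below is a placeholder.

package sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

// switchToOnDemand flips the stream's capacity mode to ON_DEMAND.
func switchToOnDemand(ctx context.Context, client *kinesis.Client, streamARN string) error {
	_, err := client.UpdateStreamMode(ctx, &kinesis.UpdateStreamModeInput{
		StreamARN:         aws.String(streamARN),
		StreamModeDetails: &types.StreamModeDetails{StreamMode: types.StreamModeOnDemand},
	})
	return err
}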
9
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/deserializers.go
generated
vendored

@@ -21,8 +21,17 @@ import (
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func deserializeS3Expires(v string) (*time.Time, error) {
|
||||
t, err := smithytime.ParseHTTPDate(v)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
return &t, nil
|
||||
}
|
||||
|
||||
type awsAwsjson11_deserializeOpAddTagsToStream struct {
|
||||
}
|
||||
|
||||
|
|
|
|||
7
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/doc.go
generated
vendored

@@ -3,7 +3,8 @@
// Package kinesis provides the API client, operations, and parameter types for
// Amazon Kinesis.
//
// Amazon Kinesis Data Streams Service API Reference Amazon Kinesis Data Streams
// is a managed service that scales elastically for real-time processing of
// streaming big data.
// # Amazon Kinesis Data Streams Service API Reference
//
// Amazon Kinesis Data Streams is a managed service that scales elastically for
// real-time processing of streaming big data.
package kinesis
2
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/go_module_metadata.go
generated
vendored

@@ -3,4 +3,4 @@
package kinesis

// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.27.4"
const goModuleVersion = "1.27.8"
31
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/options.go
generated
vendored

@@ -50,8 +50,10 @@ type Options struct {
	// Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
	// value for this field will likely prevent you from using any endpoint-related
	// service features released after the introduction of EndpointResolverV2 and
	// BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom
	// endpoint, set the client option BaseEndpoint instead.
	// BaseEndpoint.
	//
	// To migrate an EndpointResolver implementation that uses a custom endpoint, set
	// the client option BaseEndpoint instead.
	EndpointResolver EndpointResolver

	// Resolves the endpoint used for a particular service operation. This should be

@@ -70,17 +72,20 @@ type Options struct {
	// RetryMaxAttempts specifies the maximum number of attempts an API client will call
	// an operation that fails with a retryable error. A value of 0 is ignored, and
	// will not be used to configure the API client created default retryer, or modify
	// per operation call's retry max attempts. If specified in an operation call's
	// functional options with a value that is different than the constructed client's
	// Options, the Client's Retryer will be wrapped to use the operation's specific
	// RetryMaxAttempts value.
	// per operation call's retry max attempts.
	//
	// If specified in an operation call's functional options with a value that is
	// different than the constructed client's Options, the Client's Retryer will be
	// wrapped to use the operation's specific RetryMaxAttempts value.
	RetryMaxAttempts int

	// RetryMode specifies the retry mode the API client will be created with, if
	// Retryer option is not also specified. When creating new API clients, this
	// member will only be used if the Retryer Options member is nil. This value will
	// be ignored if Retryer is not nil. Currently does not support per operation call
	// overrides, may in the future.
	// Retryer option is not also specified.
	//
	// When creating new API clients, this member will only be used if the Retryer
	// Options member is nil. This value will be ignored if Retryer is not nil.
	//
	// Currently does not support per operation call overrides, may in the future.
	RetryMode aws.RetryMode

	// Retryer guides how HTTP requests should be retried in case of recoverable

@@ -97,8 +102,9 @@ type Options struct {

	// The initial DefaultsMode used when the client options were constructed. If the
	// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
	// value was at that point in time. Currently does not support per operation call
	// overrides, may in the future.
	// value was at that point in time.
	//
	// Currently does not support per operation call overrides, may in the future.
	resolvedDefaultsMode aws.DefaultsMode

	// The HTTP client to invoke API calls with. Defaults to client's default HTTP

@@ -143,6 +149,7 @@ func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
// this field will likely prevent you from using any endpoint-related service
// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
//
// To migrate an EndpointResolver implementation that uses a custom endpoint, set
// the client option BaseEndpoint instead.
func WithEndpointResolver(v EndpointResolver) func(*Options) {
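
These options are typically set through the functional-options argument of NewFromConfig; a minimal sketch, assuming default credential resolution:

package sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
)

// newClient builds a kinesis client with a larger retry budget and the
// adaptive retry mode described in the Options fields above.
func newClient(ctx context.Context) (*kinesis.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	return kinesis.NewFromConfig(cfg, func(o *kinesis.Options) {
		o.RetryMaxAttempts = 5
		o.RetryMode = aws.RetryModeAdaptive
	}), nil
}

Per the doc comments above, RetryMaxAttempts can also be overridden per operation call, in which case the client's Retryer is wrapped for that call only.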
40
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/enums.go
generated
vendored

@@ -12,8 +12,9 @@ const (
)

// Values returns all known values for ConsumerStatus. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
// expanded in the future, and so it is only as up to date as the client.
//
// The ordering of this slice is not guaranteed to be stable across updates.
func (ConsumerStatus) Values() []ConsumerStatus {
	return []ConsumerStatus{
		"CREATING",

@@ -31,8 +32,9 @@ const (
)

// Values returns all known values for EncryptionType. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
// expanded in the future, and so it is only as up to date as the client.
//
// The ordering of this slice is not guaranteed to be stable across updates.
func (EncryptionType) Values() []EncryptionType {
	return []EncryptionType{
		"NONE",

@@ -55,8 +57,9 @@ const (
)

// Values returns all known values for MetricsName. Note that this can be expanded
// in the future, and so it is only as up to date as the client. The ordering of
// this slice is not guaranteed to be stable across updates.
// in the future, and so it is only as up to date as the client.
//
// The ordering of this slice is not guaranteed to be stable across updates.
func (MetricsName) Values() []MetricsName {
	return []MetricsName{
		"IncomingBytes",

@@ -78,8 +81,9 @@ const (
)

// Values returns all known values for ScalingType. Note that this can be expanded
// in the future, and so it is only as up to date as the client. The ordering of
// this slice is not guaranteed to be stable across updates.
// in the future, and so it is only as up to date as the client.
//
// The ordering of this slice is not guaranteed to be stable across updates.
func (ScalingType) Values() []ScalingType {
	return []ScalingType{
		"UNIFORM_SCALING",

@@ -99,8 +103,9 @@ const (
)

// Values returns all known values for ShardFilterType. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
// expanded in the future, and so it is only as up to date as the client.
//
// The ordering of this slice is not guaranteed to be stable across updates.
func (ShardFilterType) Values() []ShardFilterType {
	return []ShardFilterType{
		"AFTER_SHARD_ID",

@@ -124,8 +129,9 @@ const (
)

// Values returns all known values for ShardIteratorType. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
// expanded in the future, and so it is only as up to date as the client.
//
// The ordering of this slice is not guaranteed to be stable across updates.
func (ShardIteratorType) Values() []ShardIteratorType {
	return []ShardIteratorType{
		"AT_SEQUENCE_NUMBER",

@@ -145,8 +151,9 @@ const (
)

// Values returns all known values for StreamMode. Note that this can be expanded
// in the future, and so it is only as up to date as the client. The ordering of
// this slice is not guaranteed to be stable across updates.
// in the future, and so it is only as up to date as the client.
//
// The ordering of this slice is not guaranteed to be stable across updates.
func (StreamMode) Values() []StreamMode {
	return []StreamMode{
		"PROVISIONED",

@@ -165,8 +172,9 @@ const (
)

// Values returns all known values for StreamStatus. Note that this can be
// expanded in the future, and so it is only as up to date as the client. The
// ordering of this slice is not guaranteed to be stable across updates.
// expanded in the future, and so it is only as up to date as the client.
//
// The ordering of this slice is not guaranteed to be stable across updates.
func (StreamStatus) Values() []StreamStatus {
	return []StreamStatus{
		"CREATING",
20
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/errors.go
generated
vendored

@@ -195,9 +195,10 @@ func (e *KMSDisabledException) ErrorCode() string {
func (e *KMSDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }

// The request was rejected because the state of the specified resource isn't
// valid for this request. For more information, see How Key State Affects Use of
// a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html)
// in the Amazon Web Services Key Management Service Developer Guide.
// valid for this request. For more information, see [How Key State Affects Use of a Customer Master Key] in the Amazon Web Services
// Key Management Service Developer Guide.
//
// [How Key State Affects Use of a Customer Master Key]: https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html
type KMSInvalidStateException struct {
	Message *string

@@ -277,8 +278,10 @@ func (e *KMSOptInRequired) ErrorCode() string {
func (e *KMSOptInRequired) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }

// The request was denied due to request throttling. For more information about
// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second)
// in the Amazon Web Services Key Management Service Developer Guide.
// throttling, see [Limits] in the Amazon Web Services Key Management Service Developer
// Guide.
//
// [Limits]: https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second
type KMSThrottlingException struct {
	Message *string

@@ -333,10 +336,11 @@ func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.

// The request rate for the stream is too high, or the requested data is too large
// for the available throughput. Reduce the frequency or size of your requests. For
// more information, see Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html)
// in the Amazon Kinesis Data Streams Developer Guide, and Error Retries and
// Exponential Backoff in Amazon Web Services (https://docs.aws.amazon.com/general/latest/gr/api-retries.html)
// more information, see [Streams Limits] in the Amazon Kinesis Data Streams Developer Guide, and [Error Retries and Exponential Backoff in Amazon Web Services]
// in the Amazon Web Services General Reference.
//
// [Error Retries and Exponential Backoff in Amazon Web Services]: https://docs.aws.amazon.com/general/latest/gr/api-retries.html
// [Streams Limits]: https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html
type ProvisionedThroughputExceededException struct {
	Message *string
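
These typed errors surface through errors.As; a sketch of detecting the throttling case after the client's built-in retryer has given up (the PutRecordInput is assumed to be prepared by the caller):

package sketch

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

// putRecord logs a hint when the stream's provisioned throughput is exceeded,
// per the guidance in the error's doc comment above.
func putRecord(ctx context.Context, client *kinesis.Client, input *kinesis.PutRecordInput) error {
	_, err := client.PutRecord(ctx, input)
	var pte *types.ProvisionedThroughputExceededException
	if errors.As(err, &pte) {
		log.Printf("throttled: %s; reduce request frequency or size", pte.ErrorMessage())
	}
	return err
}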
115
vendor/github.com/aws/aws-sdk-go-v2/service/kinesis/types/types.go
generated
vendored

@@ -35,10 +35,12 @@ type ChildShard struct {
type Consumer struct {

	// When you register a consumer, Kinesis Data Streams generates an ARN for it. You
	// need this ARN to be able to call SubscribeToShard . If you delete a consumer and
	// then create a new one with the same name, it won't have the same ARN. That's
	// because consumer ARNs contain the creation timestamp. This is important to keep
	// in mind if you have IAM policies that reference consumer ARNs.
	// need this ARN to be able to call SubscribeToShard.
	//
	// If you delete a consumer and then create a new one with the same name, it won't
	// have the same ARN. That's because consumer ARNs contain the creation timestamp.
	// This is important to keep in mind if you have IAM policies that reference
	// consumer ARNs.
	//
	// This member is required.
	ConsumerARN *string

@@ -66,10 +68,12 @@ type Consumer struct {
type ConsumerDescription struct {

	// When you register a consumer, Kinesis Data Streams generates an ARN for it. You
	// need this ARN to be able to call SubscribeToShard . If you delete a consumer and
	// then create a new one with the same name, it won't have the same ARN. That's
	// because consumer ARNs contain the creation timestamp. This is important to keep
	// in mind if you have IAM policies that reference consumer ARNs.
	// need this ARN to be able to call SubscribeToShard.
	//
	// If you delete a consumer and then create a new one with the same name, it won't
	// have the same ARN. That's because consumer ARNs contain the creation timestamp.
	// This is important to keep in mind if you have IAM policies that reference
	// consumer ARNs.
	//
	// This member is required.
	ConsumerARN *string

@@ -100,19 +104,30 @@ type ConsumerDescription struct {
// Represents enhanced metrics types.
type EnhancedMetrics struct {

	// List of shard-level metrics. The following are the valid shard-level metrics.
	// The value " ALL " enhances every metric.
	// List of shard-level metrics.
	//
	// The following are the valid shard-level metrics. The value " ALL " enhances
	// every metric.
	//
	// - IncomingBytes
	//
	// - IncomingRecords
	//
	// - OutgoingBytes
	//
	// - OutgoingRecords
	//
	// - WriteProvisionedThroughputExceeded
	//
	// - ReadProvisionedThroughputExceeded
	//
	// - IteratorAgeMilliseconds
	//
	// - ALL
	// For more information, see Monitoring the Amazon Kinesis Data Streams Service
	// with Amazon CloudWatch (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html)
	// in the Amazon Kinesis Data Streams Developer Guide.
	//
	// For more information, see [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch] in the Amazon Kinesis Data Streams Developer Guide.
	//
	// [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch]: https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html
	ShardLevelMetrics []MetricsName

	noSmithyDocumentSerde
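
A sketch of enabling every shard-level metric listed above via EnableEnhancedMonitoring; the stream name is a placeholder.

package sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

// enableAllMetrics turns on the "ALL" shard-level metric, which enhances
// every metric in the list documented on EnhancedMetrics.
func enableAllMetrics(ctx context.Context, client *kinesis.Client, streamName string) error {
	_, err := client.EnableEnhancedMonitoring(ctx, &kinesis.EnableEnhancedMonitoringInput{
		StreamName:        aws.String(streamName),
		ShardLevelMetrics: []types.MetricsName{types.MetricsNameAll},
	})
	return err
}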
@@ -218,7 +233,9 @@ type Record struct {

	// The encryption type used on the record. This parameter can be one of the
	// following values:
	//
	// - NONE : Do not encrypt the records in the stream.
	//
	// - KMS : Use server-side encryption on the records in the stream using a
	// customer-managed Amazon Web Services KMS key.
	EncryptionType EncryptionType

@@ -273,19 +290,26 @@ type Shard struct {
type ShardFilter struct {

	// The shard type specified in the ShardFilter parameter. This is a required
	// property of the ShardFilter parameter. You can specify the following valid
	// values:
	// property of the ShardFilter parameter.
	//
	// You can specify the following valid values:
	//
	// - AFTER_SHARD_ID - the response includes all the shards, starting with the
	// shard whose ID immediately follows the ShardId that you provided.
	//
	// - AT_TRIM_HORIZON - the response includes all the shards that were open at
	// TRIM_HORIZON .
	//
	// - FROM_TRIM_HORIZON - (default), the response includes all the shards within
	// the retention period of the data stream (trim to tip).
	//
	// - AT_LATEST - the response includes only the currently open shards of the data
	// stream.
	//
	// - AT_TIMESTAMP - the response includes all shards whose start timestamp is
	// less than or equal to the given timestamp and end timestamp is greater than or
	// equal to the given timestamp or still open.
	//
	// - FROM_TIMESTAMP - the response includes all closed shards whose end timestamp
	// is greater than or equal to the given timestamp and also all open shards.
	// Corrected to TRIM_HORIZON of the data stream if FROM_TIMESTAMP is less than

@@ -311,14 +335,21 @@ type ShardFilter struct {
type StartingPosition struct {

	// You can set the starting position to one of the following values:
	//
	// AT_SEQUENCE_NUMBER : Start streaming from the position denoted by the sequence
	// number specified in the SequenceNumber field. AFTER_SEQUENCE_NUMBER : Start
	// streaming right after the position denoted by the sequence number specified in
	// the SequenceNumber field. AT_TIMESTAMP : Start streaming from the position
	// denoted by the time stamp specified in the Timestamp field. TRIM_HORIZON : Start
	// streaming at the last untrimmed record in the shard, which is the oldest data
	// record in the shard. LATEST : Start streaming just after the most recent record
	// in the shard, so that you always read the most recent data in the shard.
	// number specified in the SequenceNumber field.
	//
	// AFTER_SEQUENCE_NUMBER : Start streaming right after the position denoted by the
	// sequence number specified in the SequenceNumber field.
	//
	// AT_TIMESTAMP : Start streaming from the position denoted by the time stamp
	// specified in the Timestamp field.
	//
	// TRIM_HORIZON : Start streaming at the last untrimmed record in the shard, which
	// is the oldest data record in the shard.
	//
	// LATEST : Start streaming just after the most recent record in the shard, so that
	// you always read the most recent data in the shard.
	//
	// This member is required.
	Type ShardIteratorType
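
A sketch combining the two types above: listing only the currently open shards with an AT_LATEST ShardFilter, the typical prelude to choosing a StartingPosition for a subscription. The stream name is a placeholder.

package sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/kinesis"
	"github.com/aws/aws-sdk-go-v2/service/kinesis/types"
)

// openShards returns the shards that are open right now.
func openShards(ctx context.Context, client *kinesis.Client, streamName string) ([]types.Shard, error) {
	out, err := client.ListShards(ctx, &kinesis.ListShardsInput{
		StreamName:  aws.String(streamName),
		ShardFilter: &types.ShardFilter{Type: types.ShardFilterTypeAtLatest},
	})
	if err != nil {
		return nil, err
	}
	return out.Shards, nil
}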
@@ -381,13 +412,17 @@ type StreamDescription struct {

	// The current status of the stream being described. The stream status is one of
	// the following states:
	//
	// - CREATING - The stream is being created. Kinesis Data Streams immediately
	// returns and sets StreamStatus to CREATING .
	//
	// - DELETING - The stream is being deleted. The specified stream is in the
	// DELETING state until Kinesis Data Streams completes the deletion.
	//
	// - ACTIVE - The stream exists and is ready for read and write operations or
	// deletion. You should perform read and write operations only on an ACTIVE
	// stream.
	//
	// - UPDATING - Shards in the stream are being merged or split. Read and write
	// operations continue to work while the stream is in the UPDATING state.
	//

@@ -396,7 +431,9 @@ type StreamDescription struct {

	// The server-side encryption type used on the stream. This parameter can be one
	// of the following values:
	//
	// - NONE : Do not encrypt the records in the stream.
	//
	// - KMS : Use server-side encryption on the records in the stream using a
	// customer-managed Amazon Web Services KMS key.
	EncryptionType EncryptionType

@@ -406,11 +443,16 @@ type StreamDescription struct {
	// ARN to either an alias or a key, or an alias name prefixed by "alias/". You can
	// also use a master key owned by Kinesis Data Streams by specifying the alias
	// aws/kinesis .
	//
	// - Key ARN example:
	// arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
	//
	// - Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
	//
	// - Globally unique key ID example: 12345678-1234-1234-1234-123456789012
	//
	// - Alias name example: alias/MyAliasName
	//
	// - Master key owned by Kinesis Data Streams: alias/aws/kinesis
	KeyId *string

@@ -457,13 +499,17 @@ type StreamDescriptionSummary struct {

	// The current status of the stream being described. The stream status is one of
	// the following states:
	//
	// - CREATING - The stream is being created. Kinesis Data Streams immediately
	// returns and sets StreamStatus to CREATING .
	//
	// - DELETING - The stream is being deleted. The specified stream is in the
	// DELETING state until Kinesis Data Streams completes the deletion.
	//
	// - ACTIVE - The stream exists and is ready for read and write operations or
	// deletion. You should perform read and write operations only on an ACTIVE
	// stream.
	//
	// - UPDATING - Shards in the stream are being merged or split. Read and write
	// operations continue to work while the stream is in the UPDATING state.
	//

@@ -474,7 +520,9 @@ type StreamDescriptionSummary struct {
	ConsumerCount *int32

	// The encryption type used. This value is one of the following:
	//
	// - KMS
	//
	// - NONE
	EncryptionType EncryptionType

@@ -483,11 +531,16 @@ type StreamDescriptionSummary struct {
	// ARN to either an alias or a key, or an alias name prefixed by "alias/". You can
	// also use a master key owned by Kinesis Data Streams by specifying the alias
	// aws/kinesis .
	//
	// - Key ARN example:
	// arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
	//
	// - Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
	//
	// - Globally unique key ID example: 12345678-1234-1234-1234-123456789012
	//
	// - Alias name example: alias/MyAliasName
	//
	// - Master key owned by Kinesis Data Streams: alias/aws/kinesis
	KeyId *string

@@ -500,6 +553,7 @@ type StreamDescriptionSummary struct {
}

// Specifies the capacity mode to which you want to set your data stream.
//
// Currently, in Kinesis Data Streams, you can choose between an on-demand capacity
// mode and a provisioned capacity mode for your data streams.
type StreamModeDetails struct {

@@ -543,14 +597,14 @@ type StreamSummary struct {
	noSmithyDocumentSerde
}

// After you call SubscribeToShard , Kinesis Data Streams sends events of this type
// over an HTTP/2 connection to your consumer.
// After you call SubscribeToShard, Kinesis Data Streams sends events of this type over an HTTP/2
// connection to your consumer.
type SubscribeToShardEvent struct {

	// Use this as SequenceNumber in the next call to SubscribeToShard , with
	// StartingPosition set to AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER . Use
	// ContinuationSequenceNumber for checkpointing because it captures your shard
	// progress even when no data is written to the shard.
	// Use this as SequenceNumber in the next call to SubscribeToShard, with StartingPosition set to
	// AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER . Use ContinuationSequenceNumber
	// for checkpointing because it captures your shard progress even when no data is
	// written to the shard.
	//
	// This member is required.
	ContinuationSequenceNumber *string

@@ -585,9 +639,8 @@ type SubscribeToShardEventStream interface {
	isSubscribeToShardEventStream()
}

// After you call SubscribeToShard , Kinesis Data Streams sends events of this type
// to your consumer. For an example of how to handle these events, see Enhanced
// Fan-Out Using the Kinesis Data Streams API .
// After you call SubscribeToShard, Kinesis Data Streams sends events of this type to your
// consumer. For an example of how to handle these events, see Enhanced Fan-Out Using the Kinesis Data Streams API.
type SubscribeToShardEventStreamMemberSubscribeToShardEvent struct {
	Value SubscribeToShardEvent
2
vendor/modules.txt
vendored

@@ -92,7 +92,7 @@ github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery
# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.9
## explicit; go 1.20
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
# github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.4
# github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.8
## explicit; go 1.20
github.com/aws/aws-sdk-go-v2/service/kinesis
github.com/aws/aws-sdk-go-v2/service/kinesis/internal/customizations