diff --git a/build/charts/Makefile b/build/charts/Makefile index 809f2c416da..19f094582ec 100644 --- a/build/charts/Makefile +++ b/build/charts/Makefile @@ -6,7 +6,7 @@ VERSION := $(shell head -n 1 ../../VERSION | cut -c 2-) .PHONY: helm-docs helm-docs: docker run --rm --volume "$(CURDIR):/helm-docs" --user=$(USERID):$(GRPID) jnorwood/helm-docs:v1.7.0 - sed -i.bak "s/0.0.0/$(VERSION)/g" antrea/README.md # replace version placeholder + sed -i.bak "s/0\.0\.0/$(VERSION)/g" antrea/README.md # replace version placeholder sed -i.bak "s/-dev-informational/--dev-informational/g" antrea/README.md # fix img.shields.io badge URLs - sed -i.bak "s/0.0.0/$(VERSION)/g" flow-aggregator/README.md # replace version placeholder + sed -i.bak "s/0\.0\.0/$(VERSION)/g" flow-aggregator/README.md # replace version placeholder sed -i.bak "s/-dev-informational/--dev-informational/g" flow-aggregator/README.md # fix img.shields.io badge URLs diff --git a/build/charts/flow-aggregator/README.md b/build/charts/flow-aggregator/README.md index cecb423d416..f88dd13fcb3 100644 --- a/build/charts/flow-aggregator/README.md +++ b/build/charts/flow-aggregator/README.md @@ -26,7 +26,7 @@ Kubernetes: `>= 1.16.0-0` | clickHouse.commitInterval | string | `"8s"` | CommitInterval is the periodical interval between batch commit of flow records to DB. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | | clickHouse.compress | bool | `true` | Compress enables lz4 compression when committing flow records. | | clickHouse.connectionSecret | object | `{"password":"clickhouse_operator_password","username":"clickhouse_operator"}` | Credentials to connect to ClickHouse. They will be stored in a Secret. | -| clickHouse.databaseURL | string | `"tcp://clickhouse-clickhouse.flow-visibility.svc:9000"` | | +| clickHouse.databaseURL | string | `"tcp://clickhouse-clickhouse.flow-visibility.svc:9000"` | DatabaseURL is the url to the database. TCP protocol is required. 
| | clickHouse.debug | bool | `false` | Debug enables debug logs from ClickHouse sql driver. | | clickHouse.enable | bool | `false` | Determine whether to enable exporting flow records to ClickHouse. | | flowAggregatorAddress | string | `"flow-aggregator.flow-aggregator.svc"` | Provide DNS name or IP address of flow aggregator for generating TLS certificate. It must match the flowCollectorAddr parameter in the antrea-agent config. | @@ -38,6 +38,15 @@ Kubernetes: `>= 1.16.0-0` | inactiveFlowRecordTimeout | string | `"90s"` | Provide the inactive flow record timeout as a duration string. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". | | logVerbosity | int | `0` | | | recordContents.podLabels | bool | `false` | Determine whether source and destination Pod labels will be included in the flow records. | +| s3Uploader.awsCredentials | object | `{"aws_access_key_id":"changeme","aws_secret_access_key":"changeme","aws_session_token":""}` | Credentials to authenticate to AWS. They will be stored in a Secret and injected into the Pod as environment variables. | +| s3Uploader.bucketName | string | `""` | BucketName is the name of the S3 bucket to which flow records will be uploaded. It is required. | +| s3Uploader.bucketPrefix | string | `""` | BucketPrefix is the prefix ("folder") under which flow records will be uploaded. | +| s3Uploader.compress | bool | `true` | Compress enables gzip compression when uploading files to S3. | +| s3Uploader.enable | bool | `false` | Determine whether to enable exporting flow records to AWS S3. | +| s3Uploader.maxRecordsPerFile | int | `1000000` | MaxRecordsPerFile is the maximum number of records per file uploaded. It is not recommended to change this value. | +| s3Uploader.recordFormat | string | `"CSV"` | RecordFormat defines the format of the flow records uploaded to S3. Only "CSV" is supported at the moment. 
| +| s3Uploader.region | string | `"us-west-2"` | Region is used as a "hint" to get the region in which the provided bucket is located. An error will occur if the bucket does not exist in the AWS partition the region hint belongs to. | +| s3Uploader.uploadInterval | string | `"60s"` | UploadInterval is the duration between each file upload to S3. | | testing.coverage | bool | `false` | | ---------------------------------------------- diff --git a/build/charts/flow-aggregator/conf/flow-aggregator.conf b/build/charts/flow-aggregator/conf/flow-aggregator.conf index 015ea9e2297..7a43a1e6fb1 100644 --- a/build/charts/flow-aggregator/conf/flow-aggregator.conf +++ b/build/charts/flow-aggregator/conf/flow-aggregator.conf @@ -86,3 +86,40 @@ clickHouse: # Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". # The minimum interval is 1s based on ClickHouse documentation for best performance. commitInterval: {{ .Values.clickHouse.commitInterval | quote }} + +# s3Uploader contains configuration options for uploading flow records to AWS S3. +s3Uploader: + # Enable is the switch to enable exporting flow records to AWS S3. + # At the moment, the flow aggregator will look for the "standard" environment variables to + # authenticate to AWS. These can be static credentials (AWS_ACCESS_KEY_ID, + # AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN) or a Web Identity Token + # (AWS_WEB_IDENTITY_TOKEN_FILE). + enable: {{ .Values.s3Uploader.enable }} + + # BucketName is the name of the S3 bucket to which flow records will be uploaded. If this + # field is empty, initialization will fail. + bucketName: {{ .Values.s3Uploader.bucketName | quote }} + + # BucketPrefix is the prefix ("folder") under which flow records will be uploaded. If this + # is omitted, flow records will be uploaded to the root of the bucket. + bucketPrefix: {{ .Values.s3Uploader.bucketPrefix | quote }} + + # Region is used as a "hint" to get the region in which the provided bucket is located. 
+ # An error will occur if the bucket does not exist in the AWS partition the region hint + # belongs to. If region is omitted, the value of the AWS_REGION environment variable will + # be used, and if it is missing, we will default to "us-west-2". + region: {{ .Values.s3Uploader.region | quote }} + + # RecordFormat defines the format of the flow records uploaded to S3. Only "CSV" is + # supported at the moment. + recordFormat: {{ .Values.s3Uploader.recordFormat | quote }} + + # Compress enables gzip compression when uploading files to S3. Defaults to true. + compress: {{ .Values.s3Uploader.compress }} + + # MaxRecordsPerFile is the maximum number of records per file uploaded. It is not recommended + # to change this value. + maxRecordsPerFile: {{ .Values.s3Uploader.maxRecordsPerFile | int }} + + # UploadInterval is the duration between each file upload to S3. + uploadInterval: {{ .Values.s3Uploader.uploadInterval | quote }} diff --git a/build/charts/flow-aggregator/templates/deployment.yaml b/build/charts/flow-aggregator/templates/deployment.yaml index 725b3d2f230..fd4f438de5a 100644 --- a/build/charts/flow-aggregator/templates/deployment.yaml +++ b/build/charts/flow-aggregator/templates/deployment.yaml @@ -56,6 +56,21 @@ spec: key: password - name: FA_CONFIG_MAP_NAME value: flow-aggregator-configmap + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: flow-aggregator-aws-credentials + key: aws_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: flow-aggregator-aws-credentials + key: aws_secret_access_key + - name: AWS_SESSION_TOKEN + valueFrom: + secretKeyRef: + name: flow-aggregator-aws-credentials + key: aws_session_token ports: - containerPort: 4739 volumeMounts: diff --git a/build/charts/flow-aggregator/templates/secret.yaml b/build/charts/flow-aggregator/templates/secret.yaml deleted file mode 100644 index cd5da4267fe..00000000000 --- a/build/charts/flow-aggregator/templates/secret.yaml +++ /dev/null @@ -1,11 +0,0 @@
-apiVersion: v1 -kind: Secret -metadata: - labels: - app: flow-aggregator - name: clickhouse-secret - namespace: {{ .Release.Namespace }} -type: Opaque -stringData: - username: {{ .Values.clickHouse.connectionSecret.username }} - password: {{ .Values.clickHouse.connectionSecret.password }} diff --git a/build/charts/flow-aggregator/templates/secrets.yaml b/build/charts/flow-aggregator/templates/secrets.yaml new file mode 100644 index 00000000000..e579e1db326 --- /dev/null +++ b/build/charts/flow-aggregator/templates/secrets.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Secret +metadata: + labels: + app: flow-aggregator + name: clickhouse-secret + namespace: {{ .Release.Namespace }} +type: Opaque +stringData: + username: {{ .Values.clickHouse.connectionSecret.username }} + password: {{ .Values.clickHouse.connectionSecret.password }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + app: flow-aggregator + name: flow-aggregator-aws-credentials + namespace: {{ .Release.Namespace }} +type: Opaque +stringData: + aws_access_key_id: {{ .Values.s3Uploader.awsCredentials.aws_access_key_id | quote }} + aws_secret_access_key: {{ .Values.s3Uploader.awsCredentials.aws_secret_access_key | quote }} + aws_session_token: {{ .Values.s3Uploader.awsCredentials.aws_session_token | quote }} diff --git a/build/charts/flow-aggregator/values.yaml b/build/charts/flow-aggregator/values.yaml index 6a983c3c597..5caa3fbea5e 100644 --- a/build/charts/flow-aggregator/values.yaml +++ b/build/charts/flow-aggregator/values.yaml @@ -46,7 +46,7 @@ flowCollector: clickHouse: # -- Determine whether to enable exporting flow records to ClickHouse. enable: false - # DatabaseURL is the url to the database. TCP protocol is required. + # -- DatabaseURL is the url to the database. TCP protocol is required. databaseURL: "tcp://clickhouse-clickhouse.flow-visibility.svc:9000" # -- Debug enables debug logs from ClickHouse sql driver. 
debug: false @@ -59,6 +59,32 @@ clickHouse: connectionSecret: username : "clickhouse_operator" password: "clickhouse_operator_password" +# s3Uploader contains configuration options for uploading flow records to AWS S3. +s3Uploader: + # -- Determine whether to enable exporting flow records to AWS S3. + enable: false + # -- BucketName is the name of the S3 bucket to which flow records will be uploaded. It is required. + bucketName: "" + # -- BucketPrefix is the prefix ("folder") under which flow records will be uploaded. + bucketPrefix: "" + # -- Region is used as a "hint" to get the region in which the provided bucket is located. + # An error will occur if the bucket does not exist in the AWS partition the region hint belongs to. + region: "us-west-2" + # -- RecordFormat defines the format of the flow records uploaded to S3. Only "CSV" is supported at the moment. + recordFormat: "CSV" + # -- Compress enables gzip compression when uploading files to S3. + compress: true + # -- MaxRecordsPerFile is the maximum number of records per file uploaded. It is not recommended + # to change this value. + maxRecordsPerFile: 1000000 + # -- UploadInterval is the duration between each file upload to S3. + uploadInterval: "60s" + # -- Credentials to authenticate to AWS. They will be stored in a Secret and injected into the Pod + # as environment variables. + awsCredentials: + aws_access_key_id: "changeme" + aws_secret_access_key: "changeme" + aws_session_token: "" testing: ## -- Enable code coverage measurement (used when testing Flow Aggregator only). 
coverage: false diff --git a/build/images/flow-aggregator/Dockerfile b/build/images/flow-aggregator/Dockerfile index 28c3c404b83..72b84f0ac35 100644 --- a/build/images/flow-aggregator/Dockerfile +++ b/build/images/flow-aggregator/Dockerfile @@ -31,4 +31,9 @@ LABEL description="The docker image for the flow aggregator" COPY --from=flow-aggregator-build /antrea/bin/flow-aggregator / COPY --from=flow-aggregator-build /antrea/bin/antctl /usr/local/bin/ +# install ca-certificates +RUN apt-get update \ + && apt-get install -y --no-install-recommends ca-certificates \ + && rm -rf /var/lib/apt/lists/* + ENTRYPOINT ["/flow-aggregator"] diff --git a/build/images/flow-aggregator/Dockerfile.coverage b/build/images/flow-aggregator/Dockerfile.coverage index db55b369357..924fff21f37 100644 --- a/build/images/flow-aggregator/Dockerfile.coverage +++ b/build/images/flow-aggregator/Dockerfile.coverage @@ -32,3 +32,7 @@ USER root COPY --from=flow-aggregator-build /antrea/bin/flow-aggregator* /usr/local/bin/ COPY --from=flow-aggregator-build /antrea/test/e2e/coverage/flow-aggregator-arg-file / COPY --from=flow-aggregator-build /antrea/bin/antctl* /usr/local/bin/ + +RUN apt-get update \ + && apt-get install -y --no-install-recommends ca-certificates \ + && rm -rf /var/lib/apt/lists/* diff --git a/build/yamls/flow-aggregator.yml b/build/yamls/flow-aggregator.yml index 81a6be051be..6b5909687ab 100644 --- a/build/yamls/flow-aggregator.yml +++ b/build/yamls/flow-aggregator.yml @@ -238,6 +238,43 @@ data: # Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". # The minimum interval is 1s based on ClickHouse documentation for best performance. commitInterval: "8s" + + # s3Uploader contains configuration options for uploading flow records to AWS S3. + s3Uploader: + # Enable is the switch to enable exporting flow records to AWS S3. + # At the moment, the flow aggregator will look for the "standard" environment variables to + # authenticate to AWS. 
These can be static credentials (AWS_ACCESS_KEY_ID, + # AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN) or a Web Identity Token + # (AWS_WEB_IDENTITY_TOKEN_FILE). + enable: false + + # BucketName is the name of the S3 bucket to which flow records will be uploaded. If this + # field is empty, initialization will fail. + bucketName: "" + + # BucketPrefix is the prefix ("folder") under which flow records will be uploaded. If this + # is omitted, flow records will be uploaded to the root of the bucket. + bucketPrefix: "" + + # Region is used as a "hint" to get the region in which the provided bucket is located. + # An error will occur if the bucket does not exist in the AWS partition the region hint + # belongs to. If region is omitted, the value of the AWS_REGION environment variable will + # be used, and if it is missing, we will default to "us-west-2". + region: "us-west-2" + + # RecordFormat defines the format of the flow records uploaded to S3. Only "CSV" is + # supported at the moment. + recordFormat: "CSV" + + # Compress enables gzip compression when uploading files to S3. Defaults to true. + compress: true + + # MaxRecordsPerFile is the maximum number of records per file uploaded. It is not recommended + # to change this value. + maxRecordsPerFile: 1000000 + + # UploadInterval is the duration between each file upload to S3.
+ uploadInterval: "60s" kind: ConfigMap metadata: labels: @@ -258,6 +295,19 @@ stringData: type: Opaque --- apiVersion: v1 +kind: Secret +metadata: + labels: + app: flow-aggregator + name: flow-aggregator-aws-credentials + namespace: flow-aggregator +stringData: + aws_access_key_id: changeme + aws_secret_access_key: changeme + aws_session_token: "" +type: Opaque +--- +apiVersion: v1 kind: Service metadata: labels: @@ -324,6 +374,21 @@ spec: name: clickhouse-secret - name: FA_CONFIG_MAP_NAME value: flow-aggregator-configmap + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + key: aws_access_key_id + name: flow-aggregator-aws-credentials + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + key: aws_secret_access_key + name: flow-aggregator-aws-credentials + - name: AWS_SESSION_TOKEN + valueFrom: + secretKeyRef: + key: aws_session_token + name: flow-aggregator-aws-credentials image: projects.registry.vmware.com/antrea/flow-aggregator:latest imagePullPolicy: IfNotPresent name: flow-aggregator diff --git a/go.mod b/go.mod index 050431a8f37..573978bcd5b 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,10 @@ require ( github.com/Microsoft/hcsshim v0.8.9 github.com/TomCodeLV/OVSDB-golang-lib v0.0.0-20200116135253-9bbdfadcd881 github.com/awalterschulze/gographviz v2.0.1+incompatible + github.com/aws/aws-sdk-go-v2 v1.16.10 + github.com/aws/aws-sdk-go-v2/config v1.16.0 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.23 + github.com/aws/aws-sdk-go-v2/service/s3 v1.27.4 github.com/blang/semver v3.5.1+incompatible github.com/cheggaaa/pb/v3 v3.1.0 github.com/confluentinc/bincover v0.1.0 @@ -92,6 +96,20 @@ require ( github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/VividCortex/ewma v1.1.1 // indirect github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.4 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.12.12 // indirect + 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.12 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.11 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.15 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.16.12 // indirect + github.com/aws/smithy-go v1.12.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenk/hub v1.0.1 // indirect @@ -115,7 +133,7 @@ require ( github.com/go-openapi/swag v0.19.14 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.5 // indirect + github.com/google/go-cmp v0.5.8 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect @@ -127,6 +145,7 @@ require ( github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 // indirect github.com/json-iterator/go v1.1.12 // indirect diff --git a/go.sum b/go.sum index 419aaa41dc9..08f1b5311df 100644 --- a/go.sum +++ b/go.sum @@ -128,6 +128,43 @@ github.com/asaskevich/govalidator 
v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/awalterschulze/gographviz v2.0.1+incompatible h1:XIECBRq9VPEQqkQL5pw2OtjCAdrtIgFKoJU8eT98AS8= github.com/awalterschulze/gographviz v2.0.1+incompatible/go.mod h1:GEV5wmg4YquNw7v1kkyoX9etIk8yVmXj+AkDHuuETHs= +github.com/aws/aws-sdk-go-v2 v1.16.10 h1:+yDD0tcuHRQZgqONkpDwzepqmElQaSlFPymHRHR9mrc= +github.com/aws/aws-sdk-go-v2 v1.16.10/go.mod h1:WTACcleLz6VZTp7fak4EO5b9Q4foxbn+8PIz3PmyKlo= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.4 h1:zfT11pa7ifu/VlLDpmc5OY2W4nYmnKkFDGeMVnmqAI0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.4/go.mod h1:ES0I1GBs+YYgcDS1ek47Erbn4TOL811JKqBXtgzqyZ8= +github.com/aws/aws-sdk-go-v2/config v1.15.17/go.mod h1:eatrtwIm5WdvASoYCy5oPkinfiwiYFg2jLG9tJoKzkE= +github.com/aws/aws-sdk-go-v2/config v1.16.0 h1:LxHC50cwOLxYo67NEpwpNUiOi6ngXfDpEETphSZ6bAw= +github.com/aws/aws-sdk-go-v2/config v1.16.0/go.mod h1:eatrtwIm5WdvASoYCy5oPkinfiwiYFg2jLG9tJoKzkE= +github.com/aws/aws-sdk-go-v2/credentials v1.12.12 h1:iShu6VaWZZZfUZvlGtRjl+g1lWk44g1QmiCTD4KS0jI= +github.com/aws/aws-sdk-go-v2/credentials v1.12.12/go.mod h1:vFHC2HifIWHebmoVsfpqliKuqbAY2LaVlvy03JzF4c4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11 h1:zZHPdM2x09/0F8D7XyVvQnP2/jaW7bEMmtcSCPYq/iI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.11/go.mod h1:38Asv/UyQbDNpSXCurZRlDMjzIl6J+wUe8vY3TtUuzA= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.23 h1:lzS1GSHBzvBMlCA030/ecL5tF2ip8RLr/LBq5fBpv/4= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.23/go.mod h1:yGuKwoNVv2eGUHlp7ciCQLHmFNeESebnHucZfRL9EkA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17 h1:U8DZvyFFesBmK62dYC6BRXm4Cd/wPP3aPcecu3xv/F4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.17/go.mod h1:6qtGip7sJEyvgsLjphRZWF9qPe3xJf1mL/MM01E35Wc= 
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11 h1:GMp98usVW5tzQhxd26KWhoNQPlR2noIlfbzqjVGBhLU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.11/go.mod h1:cYAfnB+9ZkmZWpQWmPDsuIGm4EA+6k2ZVtxKjw/XJBY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18 h1:/spg6h3tG4pefphbvhpgdMtFMegSajPPSEJd1t8lnpc= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.18/go.mod h1:hTHq8hL4bAxJyng364s9d4IUGXZOs7Y5LSqAhIiIQ2A= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.8 h1:9PY5a+kHQzC6d9eR+KLNSJP3DHDLYmPFA5/+eSDBo9o= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.8/go.mod h1:pcQfUOFVK4lMnSzgX3dCA81UsA9YCilRUSYgkjSU2i8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.4 h1:akfcyqM9SvrBKWZOkBcXAGDrHfKaEP4Aca8H/bCiLW8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.4/go.mod h1:oehQLbMQkppKLXvpx/1Eo0X47Fe+0971DXC9UjGnKcI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.12 h1:eNQYkKjDSLDjIbBQ85rIkjpBGgnavrl/U3YKDdxAz14= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.12/go.mod h1:k2HaF2yfT082M+kKo3Xdf4rd5HGKvDmrPC5Kwzc2KUw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11 h1:GkYtp4gi4wdWUV+pPetjk5y2aDxbr0t8n5OjVBwZdII= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.11/go.mod h1:OEofCUKF7Hri4ShOCokF6k6hGq9PCB2sywt/9rLSXjY= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.11 h1:ZBLEKweAzBBtJa8H+MTFfVyvo+eHdM8xec5oTm9IlqI= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.11/go.mod h1:mNS1VHxYXPNqxIdCTxf87j9ROfTMa4fNpIkA+iAfz0g= +github.com/aws/aws-sdk-go-v2/service/s3 v1.27.4 h1:0RPAahwT63znFepvhfS+/WYtT+gEuAwaeNcCrzTQMH0= +github.com/aws/aws-sdk-go-v2/service/s3 v1.27.4/go.mod h1:wcpDmROpK5W7oWI6JcJIYGrVpHbF/Pu+FHxyBXyoa1E= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.15 h1:HaIE5/TtKr66qZTJpvMifDxH4lRt2JZawbkLYOo1F+Y= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.15/go.mod h1:dDVD4ElJRTQXx7dOQ59EkqGyNU9tnwy1RKln+oLIOTU= 
+github.com/aws/aws-sdk-go-v2/service/sts v1.16.12 h1:YU9UHPukkCCnETHEExOptF/BxPvGJKXO/NBx+RMQ/2A= +github.com/aws/aws-sdk-go-v2/service/sts v1.16.12/go.mod h1:b53qpmhHk7mTL2J/tfG6f38neZiyBQSiNXGCuNKq4+4= +github.com/aws/smithy-go v1.12.1 h1:yQRC55aXN/y1W10HgwHle01DRuV9Dpf31iGkotjt3Ag= +github.com/aws/smithy-go v1.12.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -438,8 +475,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -535,6 +573,10 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= 
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= diff --git a/pkg/config/flowaggregator/config.go b/pkg/config/flowaggregator/config.go index 668b1509dc6..4fba73791b4 100644 --- a/pkg/config/flowaggregator/config.go +++ b/pkg/config/flowaggregator/config.go @@ -53,6 +53,8 @@ type FlowAggregatorConfig struct { FlowCollector FlowCollectorConfig `yaml:"flowCollector,omitempty"` // clickHouse contains ClickHouse related configuration options. ClickHouse ClickHouseConfig `yaml:"clickHouse,omitempty"` + // s3Uploader contains configuration options for uploading flow records to AWS S3. + S3Uploader S3UploaderConfig `yaml:"s3Uploader,omitempty"` } type RecordContentsConfig struct { @@ -88,7 +90,7 @@ type FlowCollectorConfig struct { } type ClickHouseConfig struct { - // Enable is the switch of enabling exporting flow records to ClickHouse. + // Enable is the switch to enable exporting flow records to ClickHouse. Enable bool `yaml:"enable,omitempty"` // Database is the name of database where Antrea "flows" table is created. Database string `yaml:"database,omitempty"` @@ -104,3 +106,33 @@ type ClickHouseConfig struct { // Min value allowed is "1s". CommitInterval string `yaml:"commitInterval,omitempty"` } + +type S3UploaderConfig struct { + // Enable is the switch to enable exporting flow records to AWS S3. 
+ // At the moment, the flow aggregator will look for the "standard" environment variables to + // authenticate to AWS. These can be static credentials (AWS_ACCESS_KEY_ID, + // AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN) or a Web Identity Token + // (AWS_WEB_IDENTITY_TOKEN_FILE). + Enable bool `yaml:"enable,omitempty"` + // BucketName is the name of the S3 bucket to which flow records will be uploaded. If this + // field is empty, initialization will fail. + BucketName string `yaml:"bucketName"` + // BucketPrefix is the prefix ("folder") under which flow records will be uploaded. If this + // is omitted, flow records will be uploaded to the root of the bucket. + BucketPrefix string `yaml:"bucketPrefix,omitempty"` + // Region is used as a "hint" to get the region in which the provided bucket is located. + // An error will occur if the bucket does not exist in the AWS partition the region hint + // belongs to. If region is omitted, the value of the AWS_REGION environment variable will + // be used, and if it is missing, we will default to "us-west-2". + Region string `yaml:"region,omitempty"` + // RecordFormat defines the format of the flow records uploaded to S3. Only "CSV" is + // supported at the moment. + RecordFormat string `yaml:"recordFormat,omitempty"` + // Compress enables gzip compression when uploading files to S3. Defaults to true. + Compress *bool `yaml:"compress,omitempty"` + // MaxRecordsPerFile is the maximum number of records per file uploaded. It is not recommended + // to change this value. Defaults to 1,000,000. + MaxRecordsPerFile int32 `yaml:"maxRecordsPerFile,omitempty"` + // UploadInterval is the duration between each file upload to S3. 
+ UploadInterval string `yaml:"uploadInterval,omitempty"` +} diff --git a/pkg/config/flowaggregator/default.go b/pkg/config/flowaggregator/default.go index fe4fbc490b5..43f49147e40 100644 --- a/pkg/config/flowaggregator/default.go +++ b/pkg/config/flowaggregator/default.go @@ -32,6 +32,11 @@ const ( DefaultClickHouseCommitInterval = "8s" MinClickHouseCommitInterval = 1 * time.Second DefaultClickHouseDatabaseUrl = "tcp://clickhouse-clickhouse.flow-visibility.svc:9000" + DefaultS3Region = "us-west-2" + DefaultS3RecordFormat = "CSV" + DefaultS3MaxRecordsPerFile = 1000000 + DefaultS3UploadInterval = "60s" + MinS3CommitInterval = 1 * time.Second ) func SetConfigDefaults(flowAggregatorConf *FlowAggregatorConfig) { @@ -66,4 +71,17 @@ func SetConfigDefaults(flowAggregatorConf *FlowAggregatorConfig) { if flowAggregatorConf.ClickHouse.CommitInterval == "" { flowAggregatorConf.ClickHouse.CommitInterval = DefaultClickHouseCommitInterval } + if flowAggregatorConf.S3Uploader.Compress == nil { + flowAggregatorConf.S3Uploader.Compress = new(bool) + *flowAggregatorConf.S3Uploader.Compress = true + } + if flowAggregatorConf.S3Uploader.MaxRecordsPerFile == 0 { + flowAggregatorConf.S3Uploader.MaxRecordsPerFile = DefaultS3MaxRecordsPerFile + } + if flowAggregatorConf.S3Uploader.RecordFormat == "" { + flowAggregatorConf.S3Uploader.RecordFormat = DefaultS3RecordFormat + } + if flowAggregatorConf.S3Uploader.UploadInterval == "" { + flowAggregatorConf.S3Uploader.UploadInterval = DefaultS3UploadInterval + } } diff --git a/pkg/flowaggregator/clickhouseclient/clickhouseclient.go b/pkg/flowaggregator/clickhouseclient/clickhouseclient.go index 7a3c9bd9b79..f6edeec48f1 100644 --- a/pkg/flowaggregator/clickhouseclient/clickhouseclient.go +++ b/pkg/flowaggregator/clickhouseclient/clickhouseclient.go @@ -27,6 +27,8 @@ import ( ipfixentities "github.com/vmware/go-ipfix/pkg/entities" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" + + "antrea.io/antrea/pkg/flowaggregator/flowrecord" ) const ( @@ 
-147,56 +149,6 @@ func (ci *ClickHouseInput) getDataSourceName() (string, error) { return sb.String(), nil } -type ClickHouseFlowRow struct { - flowStartSeconds time.Time - flowEndSeconds time.Time - flowEndSecondsFromSourceNode time.Time - flowEndSecondsFromDestinationNode time.Time - flowEndReason uint8 - sourceIP string - destinationIP string - sourceTransportPort uint16 - destinationTransportPort uint16 - protocolIdentifier uint8 - packetTotalCount uint64 - octetTotalCount uint64 - packetDeltaCount uint64 - octetDeltaCount uint64 - reversePacketTotalCount uint64 - reverseOctetTotalCount uint64 - reversePacketDeltaCount uint64 - reverseOctetDeltaCount uint64 - sourcePodName string - sourcePodNamespace string - sourceNodeName string - destinationPodName string - destinationPodNamespace string - destinationNodeName string - destinationClusterIP string - destinationServicePort uint16 - destinationServicePortName string - ingressNetworkPolicyName string - ingressNetworkPolicyNamespace string - ingressNetworkPolicyRuleName string - ingressNetworkPolicyRuleAction uint8 - ingressNetworkPolicyType uint8 - egressNetworkPolicyName string - egressNetworkPolicyNamespace string - egressNetworkPolicyRuleName string - egressNetworkPolicyRuleAction uint8 - egressNetworkPolicyType uint8 - tcpState string - flowType uint8 - sourcePodLabels string - destinationPodLabels string - throughput uint64 - reverseThroughput uint64 - throughputFromSourceNode uint64 - throughputFromDestinationNode uint64 - reverseThroughputFromSourceNode uint64 - reverseThroughputFromDestinationNode uint64 -} - func NewClickHouseClient(input ClickHouseInput) (*ClickHouseExportProcess, error) { dsn, connect, err := PrepareConnection(input) if err != nil { @@ -214,7 +166,7 @@ func NewClickHouseClient(input ClickHouseInput) (*ClickHouseExportProcess, error } func (ch *ClickHouseExportProcess) CacheRecord(record ipfixentities.Record) { - chRow := ch.getClickHouseFlowRow(record) + chRow := 
flowrecord.GetFlowRecord(record) ch.dequeMutex.Lock() defer ch.dequeMutex.Unlock() @@ -262,158 +214,6 @@ func (ch *ClickHouseExportProcess) stopExportProcess(flushQueue bool) { ch.exportWg.Wait() } -func (ch *ClickHouseExportProcess) getClickHouseFlowRow(record ipfixentities.Record) *ClickHouseFlowRow { - chFlowRow := ClickHouseFlowRow{} - if flowStartSeconds, _, ok := record.GetInfoElementWithValue("flowStartSeconds"); ok { - chFlowRow.flowStartSeconds = time.Unix(int64(flowStartSeconds.GetUnsigned32Value()), 0) - } - if flowEndSeconds, _, ok := record.GetInfoElementWithValue("flowEndSeconds"); ok { - chFlowRow.flowEndSeconds = time.Unix(int64(flowEndSeconds.GetUnsigned32Value()), 0) - } - if flowEndSecFromSrcNode, _, ok := record.GetInfoElementWithValue("flowEndSecondsFromSourceNode"); ok { - chFlowRow.flowEndSecondsFromSourceNode = time.Unix(int64(flowEndSecFromSrcNode.GetUnsigned32Value()), 0) - } - if flowEndSecFromDstNode, _, ok := record.GetInfoElementWithValue("flowEndSecondsFromDestinationNode"); ok { - chFlowRow.flowEndSecondsFromDestinationNode = time.Unix(int64(flowEndSecFromDstNode.GetUnsigned32Value()), 0) - } - if flowEndReason, _, ok := record.GetInfoElementWithValue("flowEndReason"); ok { - chFlowRow.flowEndReason = flowEndReason.GetUnsigned8Value() - } - if sourceIPv4, _, ok := record.GetInfoElementWithValue("sourceIPv4Address"); ok { - chFlowRow.sourceIP = sourceIPv4.GetIPAddressValue().String() - } else if sourceIPv6, _, ok := record.GetInfoElementWithValue("sourceIPv6Address"); ok { - chFlowRow.sourceIP = sourceIPv6.GetIPAddressValue().String() - } - if destinationIPv4, _, ok := record.GetInfoElementWithValue("destinationIPv4Address"); ok { - chFlowRow.destinationIP = destinationIPv4.GetIPAddressValue().String() - } else if destinationIPv6, _, ok := record.GetInfoElementWithValue("destinationIPv6Address"); ok { - chFlowRow.destinationIP = destinationIPv6.GetIPAddressValue().String() - } - if sourcePort, _, ok := 
record.GetInfoElementWithValue("sourceTransportPort"); ok { - chFlowRow.sourceTransportPort = sourcePort.GetUnsigned16Value() - } - if destinationPort, _, ok := record.GetInfoElementWithValue("destinationTransportPort"); ok { - chFlowRow.destinationTransportPort = destinationPort.GetUnsigned16Value() - } - if protocolIdentifier, _, ok := record.GetInfoElementWithValue("protocolIdentifier"); ok { - chFlowRow.protocolIdentifier = protocolIdentifier.GetUnsigned8Value() - } - if packetTotalCount, _, ok := record.GetInfoElementWithValue("packetTotalCount"); ok { - chFlowRow.packetTotalCount = packetTotalCount.GetUnsigned64Value() - } - if octetTotalCount, _, ok := record.GetInfoElementWithValue("octetTotalCount"); ok { - chFlowRow.octetTotalCount = octetTotalCount.GetUnsigned64Value() - } - if packetDeltaCount, _, ok := record.GetInfoElementWithValue("packetDeltaCount"); ok { - chFlowRow.packetDeltaCount = packetDeltaCount.GetUnsigned64Value() - } - if octetDeltaCount, _, ok := record.GetInfoElementWithValue("octetDeltaCount"); ok { - chFlowRow.octetDeltaCount = octetDeltaCount.GetUnsigned64Value() - } - if reversePacketTotalCount, _, ok := record.GetInfoElementWithValue("reversePacketTotalCount"); ok { - chFlowRow.reversePacketTotalCount = reversePacketTotalCount.GetUnsigned64Value() - } - if reverseOctetTotalCount, _, ok := record.GetInfoElementWithValue("reverseOctetTotalCount"); ok { - chFlowRow.reverseOctetTotalCount = reverseOctetTotalCount.GetUnsigned64Value() - } - if reversePacketDeltaCount, _, ok := record.GetInfoElementWithValue("reversePacketDeltaCount"); ok { - chFlowRow.reversePacketDeltaCount = reversePacketDeltaCount.GetUnsigned64Value() - } - if reverseOctetDeltaCount, _, ok := record.GetInfoElementWithValue("reverseOctetDeltaCount"); ok { - chFlowRow.reverseOctetDeltaCount = reverseOctetDeltaCount.GetUnsigned64Value() - } - if sourcePodName, _, ok := record.GetInfoElementWithValue("sourcePodName"); ok { - chFlowRow.sourcePodName = 
sourcePodName.GetStringValue() - } - if sourcePodNamespace, _, ok := record.GetInfoElementWithValue("sourcePodNamespace"); ok { - chFlowRow.sourcePodNamespace = sourcePodNamespace.GetStringValue() - } - if sourceNodeName, _, ok := record.GetInfoElementWithValue("sourceNodeName"); ok { - chFlowRow.sourceNodeName = sourceNodeName.GetStringValue() - } - if destinationPodName, _, ok := record.GetInfoElementWithValue("destinationPodName"); ok { - chFlowRow.destinationPodName = destinationPodName.GetStringValue() - } - if destinationPodNamespace, _, ok := record.GetInfoElementWithValue("destinationPodNamespace"); ok { - chFlowRow.destinationPodNamespace = destinationPodNamespace.GetStringValue() - } - if destinationNodeName, _, ok := record.GetInfoElementWithValue("destinationNodeName"); ok { - chFlowRow.destinationNodeName = destinationNodeName.GetStringValue() - } - if destinationClusterIPv4, _, ok := record.GetInfoElementWithValue("destinationClusterIPv4"); ok { - chFlowRow.destinationClusterIP = destinationClusterIPv4.GetIPAddressValue().String() - } else if destinationClusterIPv6, _, ok := record.GetInfoElementWithValue("destinationClusterIPv6"); ok { - chFlowRow.destinationClusterIP = destinationClusterIPv6.GetIPAddressValue().String() - } - if destinationServicePort, _, ok := record.GetInfoElementWithValue("destinationServicePort"); ok { - chFlowRow.destinationServicePort = destinationServicePort.GetUnsigned16Value() - } - if destinationServicePortName, _, ok := record.GetInfoElementWithValue("destinationServicePortName"); ok { - chFlowRow.destinationServicePortName = destinationServicePortName.GetStringValue() - } - if ingressNPName, _, ok := record.GetInfoElementWithValue("ingressNetworkPolicyName"); ok { - chFlowRow.ingressNetworkPolicyName = ingressNPName.GetStringValue() - } - if ingressNPNamespace, _, ok := record.GetInfoElementWithValue("ingressNetworkPolicyNamespace"); ok { - chFlowRow.ingressNetworkPolicyNamespace = ingressNPNamespace.GetStringValue() - } 
- if ingressNPRuleName, _, ok := record.GetInfoElementWithValue("ingressNetworkPolicyRuleName"); ok { - chFlowRow.ingressNetworkPolicyRuleName = ingressNPRuleName.GetStringValue() - } - if ingressNPType, _, ok := record.GetInfoElementWithValue("ingressNetworkPolicyType"); ok { - chFlowRow.ingressNetworkPolicyType = ingressNPType.GetUnsigned8Value() - } - if ingressNPRuleAction, _, ok := record.GetInfoElementWithValue("ingressNetworkPolicyRuleAction"); ok { - chFlowRow.ingressNetworkPolicyRuleAction = ingressNPRuleAction.GetUnsigned8Value() - } - if egressNPName, _, ok := record.GetInfoElementWithValue("egressNetworkPolicyName"); ok { - chFlowRow.egressNetworkPolicyName = egressNPName.GetStringValue() - } - if egressNPNamespace, _, ok := record.GetInfoElementWithValue("egressNetworkPolicyNamespace"); ok { - chFlowRow.egressNetworkPolicyNamespace = egressNPNamespace.GetStringValue() - } - if egressNPRuleName, _, ok := record.GetInfoElementWithValue("egressNetworkPolicyRuleName"); ok { - chFlowRow.egressNetworkPolicyRuleName = egressNPRuleName.GetStringValue() - } - if egressNPType, _, ok := record.GetInfoElementWithValue("egressNetworkPolicyType"); ok { - chFlowRow.egressNetworkPolicyType = egressNPType.GetUnsigned8Value() - } - if egressNPRuleAction, _, ok := record.GetInfoElementWithValue("egressNetworkPolicyRuleAction"); ok { - chFlowRow.egressNetworkPolicyRuleAction = egressNPRuleAction.GetUnsigned8Value() - } - if tcpState, _, ok := record.GetInfoElementWithValue("tcpState"); ok { - chFlowRow.tcpState = tcpState.GetStringValue() - } - if flowType, _, ok := record.GetInfoElementWithValue("flowType"); ok { - chFlowRow.flowType = flowType.GetUnsigned8Value() - } - if sourcePodLabels, _, ok := record.GetInfoElementWithValue("sourcePodLabels"); ok { - chFlowRow.sourcePodLabels = sourcePodLabels.GetStringValue() - } - if destinationPodLabels, _, ok := record.GetInfoElementWithValue("destinationPodLabels"); ok { - chFlowRow.destinationPodLabels = 
destinationPodLabels.GetStringValue() - } - if throughput, _, ok := record.GetInfoElementWithValue("throughput"); ok { - chFlowRow.throughput = throughput.GetUnsigned64Value() - } - if reverseThroughput, _, ok := record.GetInfoElementWithValue("reverseThroughput"); ok { - chFlowRow.reverseThroughput = reverseThroughput.GetUnsigned64Value() - } - if throughputFromSrcNode, _, ok := record.GetInfoElementWithValue("throughputFromSourceNode"); ok { - chFlowRow.throughputFromSourceNode = throughputFromSrcNode.GetUnsigned64Value() - } - if throughputFromDstNode, _, ok := record.GetInfoElementWithValue("throughputFromDestinationNode"); ok { - chFlowRow.throughputFromDestinationNode = throughputFromDstNode.GetUnsigned64Value() - } - if revTputFromSrcNode, _, ok := record.GetInfoElementWithValue("reverseThroughputFromSourceNode"); ok { - chFlowRow.reverseThroughputFromSourceNode = revTputFromSrcNode.GetUnsigned64Value() - } - if revTputFromDstNode, _, ok := record.GetInfoElementWithValue("reverseThroughputFromDestinationNode"); ok { - chFlowRow.reverseThroughputFromDestinationNode = revTputFromDstNode.GetUnsigned64Value() - } - return &chFlowRow -} - func (ch *ClickHouseExportProcess) flowRecordPeriodicCommit() { klog.InfoS("Starting ClickHouse exporting process") ctx := context.Background() @@ -477,9 +277,9 @@ func (ch *ClickHouseExportProcess) batchCommitAll(ctx context.Context) (int, err ch.dequeMutex.Lock() // currSize could have increased due to CacheRecord being called in between. 
currSize = ch.deque.Len() - recordsToExport := make([]*ClickHouseFlowRow, 0, currSize) + recordsToExport := make([]*flowrecord.FlowRecord, 0, currSize) for i := 0; i < currSize; i++ { - record, ok := ch.deque.PopFront().(*ClickHouseFlowRow) + record, ok := ch.deque.PopFront().(*flowrecord.FlowRecord) if !ok { continue } @@ -490,53 +290,53 @@ func (ch *ClickHouseExportProcess) batchCommitAll(ctx context.Context) (int, err for _, record := range recordsToExport { _, err := stmt.ExecContext( ctx, - record.flowStartSeconds, - record.flowEndSeconds, - record.flowEndSecondsFromSourceNode, - record.flowEndSecondsFromDestinationNode, - record.flowEndReason, - record.sourceIP, - record.destinationIP, - record.sourceTransportPort, - record.destinationTransportPort, - record.protocolIdentifier, - record.packetTotalCount, - record.octetTotalCount, - record.packetDeltaCount, - record.octetDeltaCount, - record.reversePacketTotalCount, - record.reverseOctetTotalCount, - record.reversePacketDeltaCount, - record.reverseOctetDeltaCount, - record.sourcePodName, - record.sourcePodNamespace, - record.sourceNodeName, - record.destinationPodName, - record.destinationPodNamespace, - record.destinationNodeName, - record.destinationClusterIP, - record.destinationServicePort, - record.destinationServicePortName, - record.ingressNetworkPolicyName, - record.ingressNetworkPolicyNamespace, - record.ingressNetworkPolicyRuleName, - record.ingressNetworkPolicyRuleAction, - record.ingressNetworkPolicyType, - record.egressNetworkPolicyName, - record.egressNetworkPolicyNamespace, - record.egressNetworkPolicyRuleName, - record.egressNetworkPolicyRuleAction, - record.egressNetworkPolicyType, - record.tcpState, - record.flowType, - record.sourcePodLabels, - record.destinationPodLabels, - record.throughput, - record.reverseThroughput, - record.throughputFromSourceNode, - record.throughputFromDestinationNode, - record.reverseThroughputFromSourceNode, - record.reverseThroughputFromDestinationNode) + 
record.FlowStartSeconds, + record.FlowEndSeconds, + record.FlowEndSecondsFromSourceNode, + record.FlowEndSecondsFromDestinationNode, + record.FlowEndReason, + record.SourceIP, + record.DestinationIP, + record.SourceTransportPort, + record.DestinationTransportPort, + record.ProtocolIdentifier, + record.PacketTotalCount, + record.OctetTotalCount, + record.PacketDeltaCount, + record.OctetDeltaCount, + record.ReversePacketTotalCount, + record.ReverseOctetTotalCount, + record.ReversePacketDeltaCount, + record.ReverseOctetDeltaCount, + record.SourcePodName, + record.SourcePodNamespace, + record.SourceNodeName, + record.DestinationPodName, + record.DestinationPodNamespace, + record.DestinationNodeName, + record.DestinationClusterIP, + record.DestinationServicePort, + record.DestinationServicePortName, + record.IngressNetworkPolicyName, + record.IngressNetworkPolicyNamespace, + record.IngressNetworkPolicyRuleName, + record.IngressNetworkPolicyRuleAction, + record.IngressNetworkPolicyType, + record.EgressNetworkPolicyName, + record.EgressNetworkPolicyNamespace, + record.EgressNetworkPolicyRuleName, + record.EgressNetworkPolicyRuleAction, + record.EgressNetworkPolicyType, + record.TcpState, + record.FlowType, + record.SourcePodLabels, + record.DestinationPodLabels, + record.Throughput, + record.ReverseThroughput, + record.ThroughputFromSourceNode, + record.ThroughputFromDestinationNode, + record.ReverseThroughputFromSourceNode, + record.ReverseThroughputFromDestinationNode) if err != nil { klog.ErrorS(err, "Error when adding record") @@ -557,7 +357,7 @@ func (ch *ClickHouseExportProcess) batchCommitAll(ctx context.Context) (int, err // pushRecordsToFrontOfQueue pushes records to the front of deque without exceeding its capacity. // Items with lower index (older records) will be dropped first if deque is to be filled. 
-func (ch *ClickHouseExportProcess) pushRecordsToFrontOfQueue(records []*ClickHouseFlowRow) { +func (ch *ClickHouseExportProcess) pushRecordsToFrontOfQueue(records []*flowrecord.FlowRecord) { ch.dequeMutex.Lock() defer ch.dequeMutex.Unlock() diff --git a/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go b/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go index 7b3705a776b..aeb0000d6e5 100644 --- a/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go +++ b/pkg/flowaggregator/clickhouseclient/clickhouseclient_test.go @@ -18,7 +18,6 @@ import ( "context" "database/sql/driver" "fmt" - "net" "reflect" "testing" "time" @@ -28,11 +27,13 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - ipfixentities "github.com/vmware/go-ipfix/pkg/entities" ipfixentitiestesting "github.com/vmware/go-ipfix/pkg/entities/testing" "github.com/vmware/go-ipfix/pkg/registry" - ipfixregistry "github.com/vmware/go-ipfix/pkg/registry" "k8s.io/apimachinery/pkg/util/wait" + + "antrea.io/antrea/pkg/flowaggregator/flowrecord" + flowrecordtesting "antrea.io/antrea/pkg/flowaggregator/flowrecord/testing" + flowaggregatortesting "antrea.io/antrea/pkg/flowaggregator/testing" ) func init() { @@ -72,296 +73,6 @@ func TestGetDataSourceName(t *testing.T) { } } -func TestGetClickHouseFlowRow(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - testcases := []struct { - isIPv4 bool - }{ - {true}, - {false}, - } - - for _, tc := range testcases { - mockRecord := ipfixentitiestesting.NewMockRecord(ctrl) - prepareMockRecord(mockRecord, tc.isIPv4) - - chClient := &ClickHouseExportProcess{} - flowRow := chClient.getClickHouseFlowRow(mockRecord) - assert.Equal(t, time.Unix(int64(1637706961), 0), flowRow.flowStartSeconds) - assert.Equal(t, time.Unix(int64(1637706973), 0), flowRow.flowEndSeconds) - assert.Equal(t, time.Unix(int64(1637706974), 0), flowRow.flowEndSecondsFromSourceNode) - 
assert.Equal(t, time.Unix(int64(1637706975), 0), flowRow.flowEndSecondsFromDestinationNode) - assert.Equal(t, uint8(3), flowRow.flowEndReason) - assert.Equal(t, uint16(44752), flowRow.sourceTransportPort) - assert.Equal(t, uint16(5201), flowRow.destinationTransportPort) - assert.Equal(t, uint8(6), flowRow.protocolIdentifier) - assert.Equal(t, uint64(823188), flowRow.packetTotalCount) - assert.Equal(t, uint64(30472817041), flowRow.octetTotalCount) - assert.Equal(t, uint64(241333), flowRow.packetDeltaCount) - assert.Equal(t, uint64(8982624938), flowRow.octetDeltaCount) - assert.Equal(t, uint64(471111), flowRow.reversePacketTotalCount) - assert.Equal(t, uint64(24500996), flowRow.reverseOctetTotalCount) - assert.Equal(t, uint64(136211), flowRow.reversePacketDeltaCount) - assert.Equal(t, uint64(7083284), flowRow.reverseOctetDeltaCount) - assert.Equal(t, "perftest-a", flowRow.sourcePodName) - assert.Equal(t, "antrea-test", flowRow.sourcePodNamespace) - assert.Equal(t, "k8s-node-control-plane", flowRow.sourceNodeName) - assert.Equal(t, "perftest-b", flowRow.destinationPodName) - assert.Equal(t, "antrea-test-b", flowRow.destinationPodNamespace) - assert.Equal(t, "k8s-node-control-plane-b", flowRow.destinationNodeName) - assert.Equal(t, uint16(5202), flowRow.destinationServicePort) - assert.Equal(t, "perftest", flowRow.destinationServicePortName) - assert.Equal(t, "test-flow-aggregator-networkpolicy-ingress-allow", flowRow.ingressNetworkPolicyName) - assert.Equal(t, "antrea-test-ns", flowRow.ingressNetworkPolicyNamespace) - assert.Equal(t, "test-flow-aggregator-networkpolicy-rule", flowRow.ingressNetworkPolicyRuleName) - assert.Equal(t, uint8(1), flowRow.ingressNetworkPolicyType) - assert.Equal(t, uint8(2), flowRow.ingressNetworkPolicyRuleAction) - assert.Equal(t, "test-flow-aggregator-networkpolicy-egress-allow", flowRow.egressNetworkPolicyName) - assert.Equal(t, "antrea-test-ns-e", flowRow.egressNetworkPolicyNamespace) - assert.Equal(t, 
"test-flow-aggregator-networkpolicy-rule-e", flowRow.egressNetworkPolicyRuleName) - assert.Equal(t, uint8(4), flowRow.egressNetworkPolicyType) - assert.Equal(t, uint8(5), flowRow.egressNetworkPolicyRuleAction) - assert.Equal(t, "TIME_WAIT", flowRow.tcpState) - assert.Equal(t, uint8(11), flowRow.flowType) - assert.Equal(t, "{\"antrea-e2e\":\"perftest-a\",\"app\":\"perftool\"}", flowRow.sourcePodLabels) - assert.Equal(t, "{\"antrea-e2e\":\"perftest-b\",\"app\":\"perftool\"}", flowRow.destinationPodLabels) - assert.Equal(t, uint64(15902813472), flowRow.throughput) - assert.Equal(t, uint64(12381344), flowRow.reverseThroughput) - assert.Equal(t, uint64(15902813473), flowRow.throughputFromSourceNode) - assert.Equal(t, uint64(15902813474), flowRow.throughputFromDestinationNode) - assert.Equal(t, uint64(12381345), flowRow.reverseThroughputFromSourceNode) - assert.Equal(t, uint64(12381346), flowRow.reverseThroughputFromDestinationNode) - - if tc.isIPv4 { - assert.Equal(t, "10.10.0.79", flowRow.sourceIP) - assert.Equal(t, "10.10.0.80", flowRow.destinationIP) - assert.Equal(t, "10.10.1.10", flowRow.destinationClusterIP) - } else { - assert.Equal(t, "2001:0:3238:dfe1:63::fefb", flowRow.sourceIP) - assert.Equal(t, "2001:0:3238:dfe1:63::fefc", flowRow.destinationIP) - assert.Equal(t, "2001:0:3238:dfe1:64::a", flowRow.destinationClusterIP) - } - } -} - -func createElement(name string, enterpriseID uint32) ipfixentities.InfoElementWithValue { - element, _ := ipfixregistry.GetInfoElement(name, enterpriseID) - ieWithValue, _ := ipfixentities.DecodeAndCreateInfoElementWithValue(element, nil) - return ieWithValue -} - -func prepareMockRecord(mockRecord *ipfixentitiestesting.MockRecord, isIPv4 bool) { - flowStartSecElem := createElement("flowStartSeconds", ipfixregistry.IANAEnterpriseID) - flowStartSecElem.SetUnsigned32Value(uint32(1637706961)) - mockRecord.EXPECT().GetInfoElementWithValue("flowStartSeconds").Return(flowStartSecElem, 0, true) - - flowEndSecElem := 
createElement("flowEndSeconds", ipfixregistry.IANAEnterpriseID) - flowEndSecElem.SetUnsigned32Value(uint32(1637706973)) - mockRecord.EXPECT().GetInfoElementWithValue("flowEndSeconds").Return(flowEndSecElem, 0, true) - - flowEndSecSrcNodeElem := createElement("flowEndSecondsFromSourceNode", ipfixregistry.AntreaEnterpriseID) - flowEndSecSrcNodeElem.SetUnsigned32Value(uint32(1637706974)) - mockRecord.EXPECT().GetInfoElementWithValue("flowEndSecondsFromSourceNode").Return(flowEndSecSrcNodeElem, 0, true) - - flowEndSecDstNodeElem := createElement("flowEndSecondsFromDestinationNode", ipfixregistry.AntreaEnterpriseID) - flowEndSecDstNodeElem.SetUnsigned32Value(uint32(1637706975)) - mockRecord.EXPECT().GetInfoElementWithValue("flowEndSecondsFromDestinationNode").Return(flowEndSecDstNodeElem, 0, true) - - flowEndReasonElem := createElement("flowEndReason", ipfixregistry.IANAEnterpriseID) - flowEndReasonElem.SetUnsigned8Value(uint8(3)) - mockRecord.EXPECT().GetInfoElementWithValue("flowEndReason").Return(flowEndReasonElem, 0, true) - - srcPortElem := createElement("sourceTransportPort", ipfixregistry.IANAEnterpriseID) - srcPortElem.SetUnsigned16Value(uint16(44752)) - mockRecord.EXPECT().GetInfoElementWithValue("sourceTransportPort").Return(srcPortElem, 0, true) - - dstPortElem := createElement("destinationTransportPort", ipfixregistry.IANAEnterpriseID) - dstPortElem.SetUnsigned16Value(uint16(5201)) - mockRecord.EXPECT().GetInfoElementWithValue("destinationTransportPort").Return(dstPortElem, 0, true) - - protoIdentifierElem := createElement("protocolIdentifier", ipfixregistry.IANAEnterpriseID) - protoIdentifierElem.SetUnsigned8Value(uint8(6)) - mockRecord.EXPECT().GetInfoElementWithValue("protocolIdentifier").Return(protoIdentifierElem, 0, true) - - packetTotalCountElem := createElement("packetTotalCount", ipfixregistry.IANAEnterpriseID) - packetTotalCountElem.SetUnsigned64Value(uint64(823188)) - 
mockRecord.EXPECT().GetInfoElementWithValue("packetTotalCount").Return(packetTotalCountElem, 0, true) - - octetTotalCountElem := createElement("octetTotalCount", ipfixregistry.IANAEnterpriseID) - octetTotalCountElem.SetUnsigned64Value(uint64(30472817041)) - mockRecord.EXPECT().GetInfoElementWithValue("octetTotalCount").Return(octetTotalCountElem, 0, true) - - packetDeltaCountElem := createElement("packetDeltaCount", ipfixregistry.IANAEnterpriseID) - packetDeltaCountElem.SetUnsigned64Value(uint64(241333)) - mockRecord.EXPECT().GetInfoElementWithValue("packetDeltaCount").Return(packetDeltaCountElem, 0, true) - - octetDeltaCountElem := createElement("octetDeltaCount", ipfixregistry.IANAEnterpriseID) - octetDeltaCountElem.SetUnsigned64Value(uint64(8982624938)) - mockRecord.EXPECT().GetInfoElementWithValue("octetDeltaCount").Return(octetDeltaCountElem, 0, true) - - reversePacketTotalCountElem := createElement("reversePacketTotalCount", ipfixregistry.IANAReversedEnterpriseID) - reversePacketTotalCountElem.SetUnsigned64Value(uint64(471111)) - mockRecord.EXPECT().GetInfoElementWithValue("reversePacketTotalCount").Return(reversePacketTotalCountElem, 0, true) - - reverseOctetTotalCountElem := createElement("reverseOctetTotalCount", ipfixregistry.IANAReversedEnterpriseID) - reverseOctetTotalCountElem.SetUnsigned64Value(uint64(24500996)) - mockRecord.EXPECT().GetInfoElementWithValue("reverseOctetTotalCount").Return(reverseOctetTotalCountElem, 0, true) - - reversePacketDeltaCountElem := createElement("reversePacketDeltaCount", ipfixregistry.IANAReversedEnterpriseID) - reversePacketDeltaCountElem.SetUnsigned64Value(uint64(136211)) - mockRecord.EXPECT().GetInfoElementWithValue("reversePacketDeltaCount").Return(reversePacketDeltaCountElem, 0, true) - - reverseOctetDeltaCountElem := createElement("reverseOctetDeltaCount", ipfixregistry.IANAReversedEnterpriseID) - reverseOctetDeltaCountElem.SetUnsigned64Value(uint64(7083284)) - 
mockRecord.EXPECT().GetInfoElementWithValue("reverseOctetDeltaCount").Return(reverseOctetDeltaCountElem, 0, true) - - sourcePodNameElem := createElement("sourcePodName", ipfixregistry.AntreaEnterpriseID) - sourcePodNameElem.SetStringValue("perftest-a") - mockRecord.EXPECT().GetInfoElementWithValue("sourcePodName").Return(sourcePodNameElem, 0, true) - - sourcePodNamespaceElem := createElement("sourcePodNamespace", ipfixregistry.AntreaEnterpriseID) - sourcePodNamespaceElem.SetStringValue("antrea-test") - mockRecord.EXPECT().GetInfoElementWithValue("sourcePodNamespace").Return(sourcePodNamespaceElem, 0, true) - - sourceNodeNameElem := createElement("sourceNodeName", ipfixregistry.AntreaEnterpriseID) - sourceNodeNameElem.SetStringValue("k8s-node-control-plane") - mockRecord.EXPECT().GetInfoElementWithValue("sourceNodeName").Return(sourceNodeNameElem, 0, true) - - destinationPodNameElem := createElement("destinationPodName", ipfixregistry.AntreaEnterpriseID) - destinationPodNameElem.SetStringValue("perftest-b") - mockRecord.EXPECT().GetInfoElementWithValue("destinationPodName").Return(destinationPodNameElem, 0, true) - - destinationPodNamespaceElem := createElement("destinationPodNamespace", ipfixregistry.AntreaEnterpriseID) - destinationPodNamespaceElem.SetStringValue("antrea-test-b") - mockRecord.EXPECT().GetInfoElementWithValue("destinationPodNamespace").Return(destinationPodNamespaceElem, 0, true) - - destinationNodeNameElem := createElement("destinationNodeName", ipfixregistry.AntreaEnterpriseID) - destinationNodeNameElem.SetStringValue("k8s-node-control-plane-b") - mockRecord.EXPECT().GetInfoElementWithValue("destinationNodeName").Return(destinationNodeNameElem, 0, true) - - destinationServicePortElem := createElement("destinationServicePort", ipfixregistry.AntreaEnterpriseID) - destinationServicePortElem.SetUnsigned16Value(uint16(5202)) - mockRecord.EXPECT().GetInfoElementWithValue("destinationServicePort").Return(destinationServicePortElem, 0, true) - - 
destinationServicePortNameElem := createElement("destinationServicePortName", ipfixregistry.AntreaEnterpriseID) - destinationServicePortNameElem.SetStringValue("perftest") - mockRecord.EXPECT().GetInfoElementWithValue("destinationServicePortName").Return(destinationServicePortNameElem, 0, true) - - ingressNetworkPolicyNameElem := createElement("ingressNetworkPolicyName", ipfixregistry.AntreaEnterpriseID) - ingressNetworkPolicyNameElem.SetStringValue("test-flow-aggregator-networkpolicy-ingress-allow") - mockRecord.EXPECT().GetInfoElementWithValue("ingressNetworkPolicyName").Return(ingressNetworkPolicyNameElem, 0, true) - - ingressNetworkPolicyNamespaceElem := createElement("ingressNetworkPolicyNamespace", ipfixregistry.AntreaEnterpriseID) - ingressNetworkPolicyNamespaceElem.SetStringValue("antrea-test-ns") - mockRecord.EXPECT().GetInfoElementWithValue("ingressNetworkPolicyNamespace").Return(ingressNetworkPolicyNamespaceElem, 0, true) - - ingressNetworkPolicyRuleNameElem := createElement("ingressNetworkPolicyRuleName", ipfixregistry.AntreaEnterpriseID) - ingressNetworkPolicyRuleNameElem.SetStringValue("test-flow-aggregator-networkpolicy-rule") - mockRecord.EXPECT().GetInfoElementWithValue("ingressNetworkPolicyRuleName").Return(ingressNetworkPolicyRuleNameElem, 0, true) - - ingressNetworkPolicyTypeElem := createElement("ingressNetworkPolicyType", ipfixregistry.AntreaEnterpriseID) - ingressNetworkPolicyTypeElem.SetUnsigned8Value(uint8(1)) - mockRecord.EXPECT().GetInfoElementWithValue("ingressNetworkPolicyType").Return(ingressNetworkPolicyTypeElem, 0, true) - - ingressNetworkPolicyRuleActionElem := createElement("ingressNetworkPolicyRuleAction", ipfixregistry.AntreaEnterpriseID) - ingressNetworkPolicyRuleActionElem.SetUnsigned8Value(uint8(2)) - mockRecord.EXPECT().GetInfoElementWithValue("ingressNetworkPolicyRuleAction").Return(ingressNetworkPolicyRuleActionElem, 0, true) - - egressNetworkPolicyNameElem := createElement("egressNetworkPolicyName", 
ipfixregistry.AntreaEnterpriseID) - egressNetworkPolicyNameElem.SetStringValue("test-flow-aggregator-networkpolicy-egress-allow") - mockRecord.EXPECT().GetInfoElementWithValue("egressNetworkPolicyName").Return(egressNetworkPolicyNameElem, 0, true) - - egressNetworkPolicyNamespaceElem := createElement("egressNetworkPolicyNamespace", ipfixregistry.AntreaEnterpriseID) - egressNetworkPolicyNamespaceElem.SetStringValue("antrea-test-ns-e") - mockRecord.EXPECT().GetInfoElementWithValue("egressNetworkPolicyNamespace").Return(egressNetworkPolicyNamespaceElem, 0, true) - - egressNetworkPolicyRuleNameElem := createElement("egressNetworkPolicyRuleName", ipfixregistry.AntreaEnterpriseID) - egressNetworkPolicyRuleNameElem.SetStringValue("test-flow-aggregator-networkpolicy-rule-e") - mockRecord.EXPECT().GetInfoElementWithValue("egressNetworkPolicyRuleName").Return(egressNetworkPolicyRuleNameElem, 0, true) - - egressNetworkPolicyTypeElem := createElement("egressNetworkPolicyType", ipfixregistry.AntreaEnterpriseID) - egressNetworkPolicyTypeElem.SetUnsigned8Value(uint8(4)) - mockRecord.EXPECT().GetInfoElementWithValue("egressNetworkPolicyType").Return(egressNetworkPolicyTypeElem, 0, true) - - egressNetworkPolicyRuleActionElem := createElement("egressNetworkPolicyRuleAction", ipfixregistry.AntreaEnterpriseID) - egressNetworkPolicyRuleActionElem.SetUnsigned8Value(uint8(5)) - mockRecord.EXPECT().GetInfoElementWithValue("egressNetworkPolicyRuleAction").Return(egressNetworkPolicyRuleActionElem, 0, true) - - tcpStateElem := createElement("tcpState", ipfixregistry.AntreaEnterpriseID) - tcpStateElem.SetStringValue("TIME_WAIT") - mockRecord.EXPECT().GetInfoElementWithValue("tcpState").Return(tcpStateElem, 0, true) - - flowTypeElem := createElement("flowType", ipfixregistry.AntreaEnterpriseID) - flowTypeElem.SetUnsigned8Value(uint8(11)) - mockRecord.EXPECT().GetInfoElementWithValue("flowType").Return(flowTypeElem, 0, true) - - sourcePodLabelsElem := createElement("sourcePodLabels", 
ipfixregistry.AntreaEnterpriseID) - sourcePodLabelsElem.SetStringValue("{\"antrea-e2e\":\"perftest-a\",\"app\":\"perftool\"}") - mockRecord.EXPECT().GetInfoElementWithValue("sourcePodLabels").Return(sourcePodLabelsElem, 0, true) - - destinationPodLabelsElem := createElement("destinationPodLabels", ipfixregistry.AntreaEnterpriseID) - destinationPodLabelsElem.SetStringValue("{\"antrea-e2e\":\"perftest-b\",\"app\":\"perftool\"}") - mockRecord.EXPECT().GetInfoElementWithValue("destinationPodLabels").Return(destinationPodLabelsElem, 0, true) - - throughputElem := createElement("throughput", ipfixregistry.AntreaEnterpriseID) - throughputElem.SetUnsigned64Value(uint64(15902813472)) - mockRecord.EXPECT().GetInfoElementWithValue("throughput").Return(throughputElem, 0, true) - - reverseThroughputElem := createElement("reverseThroughput", ipfixregistry.AntreaEnterpriseID) - reverseThroughputElem.SetUnsigned64Value(uint64(12381344)) - mockRecord.EXPECT().GetInfoElementWithValue("reverseThroughput").Return(reverseThroughputElem, 0, true) - - throughputFromSourceNodeElem := createElement("throughputFromSourceNode", ipfixregistry.AntreaEnterpriseID) - throughputFromSourceNodeElem.SetUnsigned64Value(uint64(15902813473)) - mockRecord.EXPECT().GetInfoElementWithValue("throughputFromSourceNode").Return(throughputFromSourceNodeElem, 0, true) - - throughputFromDestinationNodeElem := createElement("throughputFromDestinationNode", ipfixregistry.AntreaEnterpriseID) - throughputFromDestinationNodeElem.SetUnsigned64Value(uint64(15902813474)) - mockRecord.EXPECT().GetInfoElementWithValue("throughputFromDestinationNode").Return(throughputFromDestinationNodeElem, 0, true) - - reverseThroughputFromSourceNodeElem := createElement("reverseThroughputFromSourceNode", ipfixregistry.AntreaEnterpriseID) - reverseThroughputFromSourceNodeElem.SetUnsigned64Value(uint64(12381345)) - mockRecord.EXPECT().GetInfoElementWithValue("reverseThroughputFromSourceNode").Return(reverseThroughputFromSourceNodeElem, 
0, true) - - reverseThroughputFromDestinationNodeElem := createElement("reverseThroughputFromDestinationNode", ipfixregistry.AntreaEnterpriseID) - reverseThroughputFromDestinationNodeElem.SetUnsigned64Value(uint64(12381346)) - mockRecord.EXPECT().GetInfoElementWithValue("reverseThroughputFromDestinationNode").Return(reverseThroughputFromDestinationNodeElem, 0, true) - - if isIPv4 { - sourceIPv4Elem := createElement("sourceIPv4Address", ipfixregistry.IANAEnterpriseID) - sourceIPv4Elem.SetIPAddressValue(net.ParseIP("10.10.0.79")) - mockRecord.EXPECT().GetInfoElementWithValue("sourceIPv4Address").Return(sourceIPv4Elem, 0, true).AnyTimes() - mockRecord.EXPECT().GetInfoElementWithValue("sourceIPv6Address").Return(nil, 0, false).AnyTimes() - - destinationIPv4Elem := createElement("destinationIPv4Address", ipfixregistry.IANAEnterpriseID) - destinationIPv4Elem.SetIPAddressValue(net.ParseIP("10.10.0.80")) - mockRecord.EXPECT().GetInfoElementWithValue("destinationIPv4Address").Return(destinationIPv4Elem, 0, true).AnyTimes() - mockRecord.EXPECT().GetInfoElementWithValue("destinationIPv6Address").Return(nil, 0, false).AnyTimes() - - destinationClusterIPv4Elem := createElement("destinationClusterIPv4", ipfixregistry.AntreaEnterpriseID) - destinationClusterIPv4Elem.SetIPAddressValue(net.ParseIP("10.10.1.10")) - mockRecord.EXPECT().GetInfoElementWithValue("destinationClusterIPv4").Return(destinationClusterIPv4Elem, 0, true).AnyTimes() - mockRecord.EXPECT().GetInfoElementWithValue("destinationClusterIPv6").Return(nil, 0, false).AnyTimes() - } else { - sourceIPv6Elem := createElement("sourceIPv6Address", ipfixregistry.IANAEnterpriseID) - sourceIPv6Elem.SetIPAddressValue(net.ParseIP("2001:0:3238:dfe1:63::fefb")) - mockRecord.EXPECT().GetInfoElementWithValue("sourceIPv6Address").Return(sourceIPv6Elem, 0, true).AnyTimes() - mockRecord.EXPECT().GetInfoElementWithValue("sourceIPv4Address").Return(nil, 0, false).AnyTimes() - - destinationIPv6Elem := 
createElement("destinationIPv6Address", ipfixregistry.IANAEnterpriseID) - destinationIPv6Elem.SetIPAddressValue(net.ParseIP("2001:0:3238:dfe1:63::fefc")) - mockRecord.EXPECT().GetInfoElementWithValue("destinationIPv6Address").Return(destinationIPv6Elem, 0, true).AnyTimes() - mockRecord.EXPECT().GetInfoElementWithValue("destinationIPv4Address").Return(nil, 0, false).AnyTimes() - - destinationClusterIPv6Elem := createElement("destinationClusterIPv6", ipfixregistry.AntreaEnterpriseID) - destinationClusterIPv6Elem.SetIPAddressValue(net.ParseIP("2001:0:3238:dfe1:64::a")) - mockRecord.EXPECT().GetInfoElementWithValue("destinationClusterIPv6").Return(destinationClusterIPv6Elem, 0, true).AnyTimes() - mockRecord.EXPECT().GetInfoElementWithValue("destinationClusterIPv4").Return(nil, 0, false).AnyTimes() - } -} - func TestCacheRecord(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -373,69 +84,17 @@ func TestCacheRecord(t *testing.T) { chExportProc.queueSize = 1 // First call. only populate row. mockRecord := ipfixentitiestesting.NewMockRecord(ctrl) - prepareMockRecord(mockRecord, true) + flowaggregatortesting.PrepareMockIpfixRecord(mockRecord, true) chExportProc.CacheRecord(mockRecord) assert.Equal(t, 1, chExportProc.deque.Len()) - assert.Equal(t, "10.10.0.79", chExportProc.deque.At(0).(*ClickHouseFlowRow).sourceIP) + assert.Equal(t, "10.10.0.79", chExportProc.deque.At(0).(*flowrecord.FlowRecord).SourceIP) // Second call. discard prev row and add new row. 
mockRecord = ipfixentitiestesting.NewMockRecord(ctrl) - prepareMockRecord(mockRecord, false) + flowaggregatortesting.PrepareMockIpfixRecord(mockRecord, false) chExportProc.CacheRecord(mockRecord) assert.Equal(t, 1, chExportProc.deque.Len()) - assert.Equal(t, "2001:0:3238:dfe1:63::fefb", chExportProc.deque.At(0).(*ClickHouseFlowRow).sourceIP) -} - -func getTestClickHouseFlowRow() *ClickHouseFlowRow { - return &ClickHouseFlowRow{ - flowStartSeconds: time.Unix(int64(1637706961), 0), - flowEndSeconds: time.Unix(int64(1637706973), 0), - flowEndSecondsFromSourceNode: time.Unix(int64(1637706974), 0), - flowEndSecondsFromDestinationNode: time.Unix(int64(1637706975), 0), - flowEndReason: 3, - sourceIP: "10.10.0.79", - destinationIP: "10.10.0.80", - sourceTransportPort: 44752, - destinationTransportPort: 5201, - protocolIdentifier: 6, - packetTotalCount: 823188, - octetTotalCount: 30472817041, - packetDeltaCount: 241333, - octetDeltaCount: 8982624938, - reversePacketTotalCount: 471111, - reverseOctetTotalCount: 24500996, - reversePacketDeltaCount: 136211, - reverseOctetDeltaCount: 7083284, - sourcePodName: "perftest-a", - sourcePodNamespace: "antrea-test", - sourceNodeName: "k8s-node-control-plane", - destinationPodName: "perftest-b", - destinationPodNamespace: "antrea-test-b", - destinationNodeName: "k8s-node-control-plane-b", - destinationClusterIP: "10.10.1.10", - destinationServicePort: 5202, - destinationServicePortName: "perftest", - ingressNetworkPolicyName: "test-flow-aggregator-networkpolicy-ingress-allow", - ingressNetworkPolicyNamespace: "antrea-test-ns", - ingressNetworkPolicyRuleName: "test-flow-aggregator-networkpolicy-rule", - ingressNetworkPolicyRuleAction: 2, - ingressNetworkPolicyType: 1, - egressNetworkPolicyName: "test-flow-aggregator-networkpolicy-egress-allow", - egressNetworkPolicyNamespace: "antrea-test-ns-e", - egressNetworkPolicyRuleName: "test-flow-aggregator-networkpolicy-rule-e", - egressNetworkPolicyRuleAction: 5, - egressNetworkPolicyType: 4, - 
tcpState: "TIME_WAIT", - flowType: 11, - sourcePodLabels: "{\"antrea-e2e\":\"perftest-a\",\"app\":\"perftool\"}", - destinationPodLabels: "{\"antrea-e2e\":\"perftest-b\",\"app\":\"perftool\"}", - throughput: 15902813472, - reverseThroughput: 12381344, - throughputFromSourceNode: 15902813473, - throughputFromDestinationNode: 15902813474, - reverseThroughputFromSourceNode: 12381345, - reverseThroughputFromDestinationNode: 12381346, - } + assert.Equal(t, "2001:0:3238:dfe1:63::fefb", chExportProc.deque.At(0).(*flowrecord.FlowRecord).SourceIP) } func TestBatchCommitAll(t *testing.T) { @@ -449,7 +108,7 @@ func TestBatchCommitAll(t *testing.T) { queueSize: maxQueueSize, } - recordRow := getTestClickHouseFlowRow() + recordRow := flowrecordtesting.PrepareTestFlowRecord() chExportProc.deque.PushBack(recordRow) @@ -523,7 +182,7 @@ func TestBatchCommitAllMultiRecord(t *testing.T) { deque: deque.New(), queueSize: maxQueueSize, } - recordRow := ClickHouseFlowRow{} + recordRow := flowrecord.FlowRecord{} fieldCount := reflect.TypeOf(recordRow).NumField() argList := make([]driver.Value, fieldCount) for i := 0; i < len(argList); i++ { @@ -555,7 +214,7 @@ func TestBatchCommitAllError(t *testing.T) { deque: deque.New(), queueSize: maxQueueSize, } - recordRow := ClickHouseFlowRow{} + recordRow := flowrecord.FlowRecord{} chExportProc.deque.PushBack(&recordRow) fieldCount := reflect.TypeOf(recordRow).NumField() argList := make([]driver.Value, fieldCount) @@ -582,9 +241,9 @@ func TestPushRecordsToFrontOfQueue(t *testing.T) { } // init deque [0] - records := make([]*ClickHouseFlowRow, 5) + records := make([]*flowrecord.FlowRecord, 5) for i := 0; i < 5; i++ { - records[i] = &ClickHouseFlowRow{sourceTransportPort: uint16(i)} + records[i] = &flowrecord.FlowRecord{SourceTransportPort: uint16(i)} } chExportProc.deque.PushBack(records[0]) @@ -627,7 +286,7 @@ func TestFlushCacheOnStop(t *testing.T) { commitInterval: commitInterval, } - recordRow := getTestClickHouseFlowRow() + recordRow := 
flowrecordtesting.PrepareTestFlowRecord() chExportProc.deque.PushBack(recordRow) mock.ExpectBegin() @@ -662,7 +321,7 @@ func TestUpdateCH(t *testing.T) { commitInterval: commitInterval, } - recordRow := getTestClickHouseFlowRow() + recordRow := flowrecordtesting.PrepareTestFlowRecord() func() { // commitTicker is ticking so the export process may be // accessing the queue at the same time. diff --git a/pkg/flowaggregator/exporter/s3.go b/pkg/flowaggregator/exporter/s3.go new file mode 100644 index 00000000000..32c7b888881 --- /dev/null +++ b/pkg/flowaggregator/exporter/s3.go @@ -0,0 +1,86 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package exporter + +import ( + ipfixentities "github.com/vmware/go-ipfix/pkg/entities" + "k8s.io/klog/v2" + + "antrea.io/antrea/pkg/flowaggregator/options" + "antrea.io/antrea/pkg/flowaggregator/s3uploader" +) + +type S3Exporter struct { + s3Input *s3uploader.S3Input + s3UploadProcess *s3uploader.S3UploadProcess +} + +func buildS3Input(opt *options.Options) s3uploader.S3Input { + return s3uploader.S3Input{ + Config: opt.Config.S3Uploader, + UploadInterval: opt.S3UploadInterval, + } +} + +func NewS3Exporter(opt *options.Options) (*S3Exporter, error) { + s3Input := buildS3Input(opt) + klog.InfoS("S3Uploader configuration", "bucketName", s3Input.Config.BucketName, "bucketPrefix", s3Input.Config.BucketPrefix, "region", s3Input.Config.Region, "recordFormat", s3Input.Config.RecordFormat, "compress", *s3Input.Config.Compress, "maxRecordsPerFile", s3Input.Config.MaxRecordsPerFile, "uploadInterval", s3Input.UploadInterval) + s3UploadProcess, err := s3uploader.NewS3UploadProcess(s3Input) + if err != nil { + return nil, err + } + return &S3Exporter{ + s3Input: &s3Input, + s3UploadProcess: s3UploadProcess, + }, nil +} + +func (e *S3Exporter) AddRecord(record ipfixentities.Record, isRecordIPv6 bool) error { + e.s3UploadProcess.CacheRecord(record) + return nil +} + +func (e *S3Exporter) Start() { + e.s3UploadProcess.Start() +} + +func (e *S3Exporter) Stop() { + e.s3UploadProcess.Stop() +} + +func (e *S3Exporter) UpdateOptions(opt *options.Options) { + s3Input := buildS3Input(opt) + config := s3Input.Config + if config.BucketName == e.s3UploadProcess.GetBucketName() && + config.BucketPrefix == e.s3UploadProcess.GetBucketPrefix() && + config.Region == e.s3UploadProcess.GetRegion() && + s3Input.UploadInterval == e.s3UploadProcess.GetUploadInterval() { + return + } + klog.InfoS("Updating S3Uploader") + if s3Input.UploadInterval != e.s3UploadProcess.GetUploadInterval() { + e.s3UploadProcess.SetUploadInterval(s3Input.UploadInterval) + } + if config.BucketName != 
e.s3UploadProcess.GetBucketName() || + config.BucketPrefix != e.s3UploadProcess.GetBucketPrefix() || + config.Region != e.s3UploadProcess.GetRegion() { + err := e.s3UploadProcess.UpdateS3Uploader(config.BucketName, config.BucketPrefix, config.Region) + if err != nil { + klog.ErrorS(err, "Error when updating S3Uploader config") + return + } + } + klog.InfoS("New S3Uploader configuration", "bucketName", s3Input.Config.BucketName, "bucketPrefix", s3Input.Config.BucketPrefix, "region", s3Input.Config.Region, "recordFormat", s3Input.Config.RecordFormat, "compress", *s3Input.Config.Compress, "maxRecordsPerFile", s3Input.Config.MaxRecordsPerFile, "uploadInterval", s3Input.UploadInterval) +} diff --git a/pkg/flowaggregator/flowaggregator.go b/pkg/flowaggregator/flowaggregator.go index 1bb99890c00..3d79a80b21e 100644 --- a/pkg/flowaggregator/flowaggregator.go +++ b/pkg/flowaggregator/flowaggregator.go @@ -96,6 +96,9 @@ var ( newClickHouseExporter = func(opt *options.Options) (exporter.Interface, error) { return exporter.NewClickHouseExporter(opt) } + newS3Exporter = func(opt *options.Options) (exporter.Interface, error) { + return exporter.NewS3Exporter(opt) + } ) type flowAggregator struct { @@ -118,6 +121,7 @@ type flowAggregator struct { APIServer flowaggregatorconfig.APIServerConfig ipfixExporter exporter.Interface clickHouseExporter exporter.Interface + s3Exporter exporter.Interface logTickerDuration time.Duration } @@ -184,6 +188,13 @@ func NewFlowAggregator( return nil, fmt.Errorf("error when creating ClickHouse export process: %v", err) } } + if opt.Config.S3Uploader.Enable { + var err error + fa.s3Exporter, err = newS3Exporter(opt) + if err != nil { + return nil, fmt.Errorf("error when creating S3 export process: %v", err) + } + } if opt.Config.FlowCollector.Enable { fa.ipfixExporter = newIPFIXExporter(k8sClient, opt, registry) } @@ -285,6 +296,9 @@ func (fa *flowAggregator) Run(stopCh <-chan struct{}) { if fa.clickHouseExporter != nil { 
fa.clickHouseExporter.Start() } + if fa.s3Exporter != nil { + fa.s3Exporter.Start() + } var wg sync.WaitGroup wg.Add(1) go func() { @@ -317,6 +331,9 @@ func (fa *flowAggregator) flowExportLoop(stopCh <-chan struct{}) { if fa.clickHouseExporter != nil { fa.clickHouseExporter.Stop() } + if fa.s3Exporter != nil { + fa.s3Exporter.Stop() + } }() updateCh := fa.updateCh for { @@ -373,6 +390,11 @@ func (fa *flowAggregator) sendFlowKeyRecord(key ipfixintermediate.FlowKey, recor return err } } + if fa.s3Exporter != nil { + if err := fa.s3Exporter.AddRecord(record.Record, !isRecordIPv4); err != nil { + return err + } + } if err := fa.aggregationProcess.ResetStatAndThroughputElementsInRecord(record.Record); err != nil { return err } @@ -582,4 +604,26 @@ func (fa *flowAggregator) updateFlowAggregator(opt *options.Options) { klog.InfoS("Disabled ClickHouse") } } + if opt.Config.S3Uploader.Enable { + if fa.s3Exporter == nil { + klog.InfoS("Enabling S3Uploader") + var err error + fa.s3Exporter, err = newS3Exporter(opt) + if err != nil { + klog.ErrorS(err, "Error when creating S3 export process") + return + } + fa.s3Exporter.Start() + klog.InfoS("Enabled S3Uploader") + } else { + fa.s3Exporter.UpdateOptions(opt) + } + } else { + if fa.s3Exporter != nil { + klog.InfoS("Disabling S3Uploader") + fa.s3Exporter.Stop() + fa.s3Exporter = nil + klog.InfoS("Disabled S3Uploader") + } + } } diff --git a/pkg/flowaggregator/flowaggregator_test.go b/pkg/flowaggregator/flowaggregator_test.go index 064c19e3ef5..ec3b5dcbe32 100644 --- a/pkg/flowaggregator/flowaggregator_test.go +++ b/pkg/flowaggregator/flowaggregator_test.go @@ -183,6 +183,10 @@ func TestFlowAggregator_watchConfiguration(t *testing.T) { ClickHouse: flowaggregatorconfig.ClickHouseConfig{ Enable: true, }, + S3Uploader: flowaggregatorconfig.S3UploaderConfig{ + Enable: true, + BucketName: "test-bucket-name", + }, }, } wd, err := os.Getwd() @@ -233,6 +237,8 @@ func TestFlowAggregator_watchConfiguration(t *testing.T) { assert.Equal(t, 
opt.Config.FlowCollector.Enable, msg.Config.FlowCollector.Enable) assert.Equal(t, opt.Config.FlowCollector.Address, msg.Config.FlowCollector.Address) assert.Equal(t, opt.Config.ClickHouse.Enable, msg.Config.ClickHouse.Enable) + assert.Equal(t, opt.Config.S3Uploader.Enable, msg.Config.S3Uploader.Enable) + assert.Equal(t, opt.Config.S3Uploader.BucketName, msg.Config.S3Uploader.BucketName) case <-time.After(5 * time.Second): t.Errorf("Timeout while waiting for update") } @@ -245,12 +251,15 @@ func TestFlowAggregator_updateFlowAggregator(t *testing.T) { defer ctrl.Finish() mockIPFIXExporter := exportertesting.NewMockInterface(ctrl) mockClickHouseExporter := exportertesting.NewMockInterface(ctrl) + mockS3Exporter := exportertesting.NewMockInterface(ctrl) newIPFIXExporterSaved := newIPFIXExporter newClickHouseExporterSaved := newClickHouseExporter + newS3ExporterSaved := newS3Exporter defer func() { newIPFIXExporter = newIPFIXExporterSaved newClickHouseExporter = newClickHouseExporterSaved + newS3Exporter = newS3ExporterSaved }() newIPFIXExporter = func(kubernetes.Interface, *options.Options, ipfix.IPFIXRegistry) exporter.Interface { return mockIPFIXExporter @@ -258,6 +267,9 @@ func TestFlowAggregator_updateFlowAggregator(t *testing.T) { newClickHouseExporter = func(*options.Options) (exporter.Interface, error) { return mockClickHouseExporter, nil } + newS3Exporter = func(*options.Options) (exporter.Interface, error) { + return mockS3Exporter, nil + } t.Run("updateIPFIX", func(t *testing.T) { flowAggregator := &flowAggregator{ @@ -328,6 +340,48 @@ func TestFlowAggregator_updateFlowAggregator(t *testing.T) { mockClickHouseExporter.EXPECT().UpdateOptions(opt) flowAggregator.updateFlowAggregator(opt) }) + t.Run("enableS3Uploader", func(t *testing.T) { + flowAggregator := &flowAggregator{} + opt := &options.Options{ + Config: &flowaggregatorconfig.FlowAggregatorConfig{ + S3Uploader: flowaggregatorconfig.S3UploaderConfig{ + Enable: true, + BucketName: "test-bucket-name", + }, 
+ }, + } + mockS3Exporter.EXPECT().Start() + flowAggregator.updateFlowAggregator(opt) + }) + t.Run("disableS3Uploader", func(t *testing.T) { + flowAggregator := &flowAggregator{ + s3Exporter: mockS3Exporter, + } + opt := &options.Options{ + Config: &flowaggregatorconfig.FlowAggregatorConfig{ + S3Uploader: flowaggregatorconfig.S3UploaderConfig{ + Enable: false, + }, + }, + } + mockS3Exporter.EXPECT().Stop() + flowAggregator.updateFlowAggregator(opt) + }) + t.Run("updateS3Uploader", func(t *testing.T) { + flowAggregator := &flowAggregator{ + s3Exporter: mockS3Exporter, + } + opt := &options.Options{ + Config: &flowaggregatorconfig.FlowAggregatorConfig{ + S3Uploader: flowaggregatorconfig.S3UploaderConfig{ + Enable: true, + BucketName: "test-bucket-name", + }, + }, + } + mockS3Exporter.EXPECT().UpdateOptions(opt) + flowAggregator.updateFlowAggregator(opt) + }) } func TestFlowAggregator_Run(t *testing.T) { @@ -336,14 +390,17 @@ func TestFlowAggregator_Run(t *testing.T) { mockIPFIXExporter := exportertesting.NewMockInterface(ctrl) mockClickHouseExporter := exportertesting.NewMockInterface(ctrl) + mockS3Exporter := exportertesting.NewMockInterface(ctrl) mockCollectingProcess := ipfixtesting.NewMockIPFIXCollectingProcess(ctrl) mockAggregationProcess := ipfixtesting.NewMockIPFIXAggregationProcess(ctrl) newIPFIXExporterSaved := newIPFIXExporter newClickHouseExporterSaved := newClickHouseExporter + newS3ExporterSaved := newS3Exporter defer func() { newIPFIXExporter = newIPFIXExporterSaved newClickHouseExporter = newClickHouseExporterSaved + newS3Exporter = newS3ExporterSaved }() newIPFIXExporter = func(kubernetes.Interface, *options.Options, ipfix.IPFIXRegistry) exporter.Interface { return mockIPFIXExporter @@ -351,6 +408,9 @@ func TestFlowAggregator_Run(t *testing.T) { newClickHouseExporter = func(*options.Options) (exporter.Interface, error) { return mockClickHouseExporter, nil } + newS3Exporter = func(*options.Options) (exporter.Interface, error) { + return mockS3Exporter, 
nil + } // create dummy watcher: we will not add any files or directory to it. configWatcher, err := fsnotify.NewWatcher() @@ -388,6 +448,7 @@ func TestFlowAggregator_Run(t *testing.T) { // implement updateOptions above. mockIPFIXExporter.EXPECT().UpdateOptions(gomock.Any()).AnyTimes() mockClickHouseExporter.EXPECT().UpdateOptions(gomock.Any()).AnyTimes() + mockS3Exporter.EXPECT().UpdateOptions(gomock.Any()).AnyTimes() stopCh := make(chan struct{}) var wg sync.WaitGroup @@ -425,11 +486,27 @@ func TestFlowAggregator_Run(t *testing.T) { }, }, } + enableS3UploaderOptions := &options.Options{ + Config: &flowaggregatorconfig.FlowAggregatorConfig{ + S3Uploader: flowaggregatorconfig.S3UploaderConfig{ + Enable: true, + }, + }, + } + disableS3UploaderOptions := &options.Options{ + Config: &flowaggregatorconfig.FlowAggregatorConfig{ + S3Uploader: flowaggregatorconfig.S3UploaderConfig{ + Enable: false, + }, + }, + } mockIPFIXExporter.EXPECT().Start().Times(2) mockIPFIXExporter.EXPECT().Stop().Times(2) mockClickHouseExporter.EXPECT().Start() mockClickHouseExporter.EXPECT().Stop() + mockS3Exporter.EXPECT().Start() + mockS3Exporter.EXPECT().Stop() // we do a few operations: the main purpose is to ensure that cleanup // (i.e., stopping the exporters) is done properly. This sequence of @@ -438,11 +515,15 @@ func TestFlowAggregator_Run(t *testing.T) { // 2. The IPFIXExporter is then disabled, so we expect a call to mockIPFIXExporter.Stop() // 3. The ClickHouseExporter is then enabled, so we expect a call to mockClickHouseExporter.Start() // 4. The ClickHouseExporter is then disabled, so we expect a call to mockClickHouseExporter.Stop() - // 5. The IPFIXExporter is then re-enabled, so we expect a second call to mockIPFIXExporter.Start() - // 6. Finally, when Run() is stopped, we expect a second call to mockIPFIXExporter.Stop() + // 5. The S3Uploader is then enabled, so we expect a call to mockS3Exporter.Start() + // 6. 
The S3Uploader is then disabled, so we expect a call to mockS3Exporter.Stop() + // 7. The IPFIXExporter is then re-enabled, so we expect a second call to mockIPFIXExporter.Start() + // 8. Finally, when Run() is stopped, we expect a second call to mockIPFIXExporter.Stop() updateOptions(disableIPFIXOptions) updateOptions(enableClickHouseOptions) updateOptions(disableClickHouseOptions) + updateOptions(enableS3UploaderOptions) + updateOptions(disableS3UploaderOptions) updateOptions(enableIPFIXOptions) close(stopCh) diff --git a/pkg/flowaggregator/flowrecord/record.go b/pkg/flowaggregator/flowrecord/record.go new file mode 100644 index 00000000000..59ddcd68785 --- /dev/null +++ b/pkg/flowaggregator/flowrecord/record.go @@ -0,0 +1,276 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package flowrecord + +import ( + "time" + + ipfixentities "github.com/vmware/go-ipfix/pkg/entities" +) + +type FlowRecord struct { + FlowStartSeconds time.Time + FlowEndSeconds time.Time + FlowEndSecondsFromSourceNode time.Time + FlowEndSecondsFromDestinationNode time.Time + FlowEndReason uint8 + SourceIP string + DestinationIP string + SourceTransportPort uint16 + DestinationTransportPort uint16 + ProtocolIdentifier uint8 + PacketTotalCount uint64 + OctetTotalCount uint64 + PacketDeltaCount uint64 + OctetDeltaCount uint64 + ReversePacketTotalCount uint64 + ReverseOctetTotalCount uint64 + ReversePacketDeltaCount uint64 + ReverseOctetDeltaCount uint64 + SourcePodName string + SourcePodNamespace string + SourceNodeName string + DestinationPodName string + DestinationPodNamespace string + DestinationNodeName string + DestinationClusterIP string + DestinationServicePort uint16 + DestinationServicePortName string + IngressNetworkPolicyName string + IngressNetworkPolicyNamespace string + IngressNetworkPolicyRuleName string + IngressNetworkPolicyRuleAction uint8 + IngressNetworkPolicyType uint8 + EgressNetworkPolicyName string + EgressNetworkPolicyNamespace string + EgressNetworkPolicyRuleName string + EgressNetworkPolicyRuleAction uint8 + EgressNetworkPolicyType uint8 + TcpState string + FlowType uint8 + SourcePodLabels string + DestinationPodLabels string + Throughput uint64 + ReverseThroughput uint64 + ThroughputFromSourceNode uint64 + ThroughputFromDestinationNode uint64 + ReverseThroughputFromSourceNode uint64 + ReverseThroughputFromDestinationNode uint64 +} + +// GetFlowRecord converts ipfixentities.Record to FlowRecord +func GetFlowRecord(record ipfixentities.Record) *FlowRecord { + r := &FlowRecord{} + if flowStartSeconds, _, ok := record.GetInfoElementWithValue("flowStartSeconds"); ok { + r.FlowStartSeconds = time.Unix(int64(flowStartSeconds.GetUnsigned32Value()), 0) + } + if flowEndSeconds, _, ok := record.GetInfoElementWithValue("flowEndSeconds"); ok { + 
r.FlowEndSeconds = time.Unix(int64(flowEndSeconds.GetUnsigned32Value()), 0) + } + if flowEndSecFromSrcNode, _, ok := record.GetInfoElementWithValue("flowEndSecondsFromSourceNode"); ok { + r.FlowEndSecondsFromSourceNode = time.Unix(int64(flowEndSecFromSrcNode.GetUnsigned32Value()), 0) + } + if flowEndSecFromDstNode, _, ok := record.GetInfoElementWithValue("flowEndSecondsFromDestinationNode"); ok { + r.FlowEndSecondsFromDestinationNode = time.Unix(int64(flowEndSecFromDstNode.GetUnsigned32Value()), 0) + } + if flowEndReason, _, ok := record.GetInfoElementWithValue("flowEndReason"); ok { + r.FlowEndReason = flowEndReason.GetUnsigned8Value() + } + if sourceIPv4, _, ok := record.GetInfoElementWithValue("sourceIPv4Address"); ok { + r.SourceIP = sourceIPv4.GetIPAddressValue().String() + } else if sourceIPv6, _, ok := record.GetInfoElementWithValue("sourceIPv6Address"); ok { + r.SourceIP = sourceIPv6.GetIPAddressValue().String() + } + if destinationIPv4, _, ok := record.GetInfoElementWithValue("destinationIPv4Address"); ok { + r.DestinationIP = destinationIPv4.GetIPAddressValue().String() + } else if destinationIPv6, _, ok := record.GetInfoElementWithValue("destinationIPv6Address"); ok { + r.DestinationIP = destinationIPv6.GetIPAddressValue().String() + } + if sourcePort, _, ok := record.GetInfoElementWithValue("sourceTransportPort"); ok { + r.SourceTransportPort = sourcePort.GetUnsigned16Value() + } + if destinationPort, _, ok := record.GetInfoElementWithValue("destinationTransportPort"); ok { + r.DestinationTransportPort = destinationPort.GetUnsigned16Value() + } + if protocolIdentifier, _, ok := record.GetInfoElementWithValue("protocolIdentifier"); ok { + r.ProtocolIdentifier = protocolIdentifier.GetUnsigned8Value() + } + if packetTotalCount, _, ok := record.GetInfoElementWithValue("packetTotalCount"); ok { + r.PacketTotalCount = packetTotalCount.GetUnsigned64Value() + } + if octetTotalCount, _, ok := record.GetInfoElementWithValue("octetTotalCount"); ok { + 
r.OctetTotalCount = octetTotalCount.GetUnsigned64Value() + } + if packetDeltaCount, _, ok := record.GetInfoElementWithValue("packetDeltaCount"); ok { + r.PacketDeltaCount = packetDeltaCount.GetUnsigned64Value() + } + if octetDeltaCount, _, ok := record.GetInfoElementWithValue("octetDeltaCount"); ok { + r.OctetDeltaCount = octetDeltaCount.GetUnsigned64Value() + } + if reversePacketTotalCount, _, ok := record.GetInfoElementWithValue("reversePacketTotalCount"); ok { + r.ReversePacketTotalCount = reversePacketTotalCount.GetUnsigned64Value() + } + if reverseOctetTotalCount, _, ok := record.GetInfoElementWithValue("reverseOctetTotalCount"); ok { + r.ReverseOctetTotalCount = reverseOctetTotalCount.GetUnsigned64Value() + } + if reversePacketDeltaCount, _, ok := record.GetInfoElementWithValue("reversePacketDeltaCount"); ok { + r.ReversePacketDeltaCount = reversePacketDeltaCount.GetUnsigned64Value() + } + if reverseOctetDeltaCount, _, ok := record.GetInfoElementWithValue("reverseOctetDeltaCount"); ok { + r.ReverseOctetDeltaCount = reverseOctetDeltaCount.GetUnsigned64Value() + } + if sourcePodName, _, ok := record.GetInfoElementWithValue("sourcePodName"); ok { + r.SourcePodName = sourcePodName.GetStringValue() + } + if sourcePodNamespace, _, ok := record.GetInfoElementWithValue("sourcePodNamespace"); ok { + r.SourcePodNamespace = sourcePodNamespace.GetStringValue() + } + if sourceNodeName, _, ok := record.GetInfoElementWithValue("sourceNodeName"); ok { + r.SourceNodeName = sourceNodeName.GetStringValue() + } + if destinationPodName, _, ok := record.GetInfoElementWithValue("destinationPodName"); ok { + r.DestinationPodName = destinationPodName.GetStringValue() + } + if destinationPodNamespace, _, ok := record.GetInfoElementWithValue("destinationPodNamespace"); ok { + r.DestinationPodNamespace = destinationPodNamespace.GetStringValue() + } + if destinationNodeName, _, ok := record.GetInfoElementWithValue("destinationNodeName"); ok { + r.DestinationNodeName = 
destinationNodeName.GetStringValue() + } + if destinationClusterIPv4, _, ok := record.GetInfoElementWithValue("destinationClusterIPv4"); ok { + r.DestinationClusterIP = destinationClusterIPv4.GetIPAddressValue().String() + } else if destinationClusterIPv6, _, ok := record.GetInfoElementWithValue("destinationClusterIPv6"); ok { + r.DestinationClusterIP = destinationClusterIPv6.GetIPAddressValue().String() + } + if destinationServicePort, _, ok := record.GetInfoElementWithValue("destinationServicePort"); ok { + r.DestinationServicePort = destinationServicePort.GetUnsigned16Value() + } + if destinationServicePortName, _, ok := record.GetInfoElementWithValue("destinationServicePortName"); ok { + r.DestinationServicePortName = destinationServicePortName.GetStringValue() + } + if ingressNPName, _, ok := record.GetInfoElementWithValue("ingressNetworkPolicyName"); ok { + r.IngressNetworkPolicyName = ingressNPName.GetStringValue() + } + if ingressNPNamespace, _, ok := record.GetInfoElementWithValue("ingressNetworkPolicyNamespace"); ok { + r.IngressNetworkPolicyNamespace = ingressNPNamespace.GetStringValue() + } + if ingressNPRuleName, _, ok := record.GetInfoElementWithValue("ingressNetworkPolicyRuleName"); ok { + r.IngressNetworkPolicyRuleName = ingressNPRuleName.GetStringValue() + } + if ingressNPType, _, ok := record.GetInfoElementWithValue("ingressNetworkPolicyType"); ok { + r.IngressNetworkPolicyType = ingressNPType.GetUnsigned8Value() + } + if ingressNPRuleAction, _, ok := record.GetInfoElementWithValue("ingressNetworkPolicyRuleAction"); ok { + r.IngressNetworkPolicyRuleAction = ingressNPRuleAction.GetUnsigned8Value() + } + if egressNPName, _, ok := record.GetInfoElementWithValue("egressNetworkPolicyName"); ok { + r.EgressNetworkPolicyName = egressNPName.GetStringValue() + } + if egressNPNamespace, _, ok := record.GetInfoElementWithValue("egressNetworkPolicyNamespace"); ok { + r.EgressNetworkPolicyNamespace = egressNPNamespace.GetStringValue() + } + if 
egressNPRuleName, _, ok := record.GetInfoElementWithValue("egressNetworkPolicyRuleName"); ok { + r.EgressNetworkPolicyRuleName = egressNPRuleName.GetStringValue() + } + if egressNPType, _, ok := record.GetInfoElementWithValue("egressNetworkPolicyType"); ok { + r.EgressNetworkPolicyType = egressNPType.GetUnsigned8Value() + } + if egressNPRuleAction, _, ok := record.GetInfoElementWithValue("egressNetworkPolicyRuleAction"); ok { + r.EgressNetworkPolicyRuleAction = egressNPRuleAction.GetUnsigned8Value() + } + if tcpState, _, ok := record.GetInfoElementWithValue("tcpState"); ok { + r.TcpState = tcpState.GetStringValue() + } + if flowType, _, ok := record.GetInfoElementWithValue("flowType"); ok { + r.FlowType = flowType.GetUnsigned8Value() + } + if sourcePodLabels, _, ok := record.GetInfoElementWithValue("sourcePodLabels"); ok { + r.SourcePodLabels = sourcePodLabels.GetStringValue() + } + if destinationPodLabels, _, ok := record.GetInfoElementWithValue("destinationPodLabels"); ok { + r.DestinationPodLabels = destinationPodLabels.GetStringValue() + } + if throughput, _, ok := record.GetInfoElementWithValue("throughput"); ok { + r.Throughput = throughput.GetUnsigned64Value() + } + if reverseThroughput, _, ok := record.GetInfoElementWithValue("reverseThroughput"); ok { + r.ReverseThroughput = reverseThroughput.GetUnsigned64Value() + } + if throughputFromSrcNode, _, ok := record.GetInfoElementWithValue("throughputFromSourceNode"); ok { + r.ThroughputFromSourceNode = throughputFromSrcNode.GetUnsigned64Value() + } + if throughputFromDstNode, _, ok := record.GetInfoElementWithValue("throughputFromDestinationNode"); ok { + r.ThroughputFromDestinationNode = throughputFromDstNode.GetUnsigned64Value() + } + if revTputFromSrcNode, _, ok := record.GetInfoElementWithValue("reverseThroughputFromSourceNode"); ok { + r.ReverseThroughputFromSourceNode = revTputFromSrcNode.GetUnsigned64Value() + } + if revTputFromDstNode, _, ok := 
record.GetInfoElementWithValue("reverseThroughputFromDestinationNode"); ok { + r.ReverseThroughputFromDestinationNode = revTputFromDstNode.GetUnsigned64Value() + } + return r +} + +func GetTestFlowRecord() *FlowRecord { + return &FlowRecord{ + FlowStartSeconds: time.Unix(int64(1637706961), 0), + FlowEndSeconds: time.Unix(int64(1637706973), 0), + FlowEndSecondsFromSourceNode: time.Unix(int64(1637706974), 0), + FlowEndSecondsFromDestinationNode: time.Unix(int64(1637706975), 0), + FlowEndReason: 3, + SourceIP: "10.10.0.79", + DestinationIP: "10.10.0.80", + SourceTransportPort: 44752, + DestinationTransportPort: 5201, + ProtocolIdentifier: 6, + PacketTotalCount: 823188, + OctetTotalCount: 30472817041, + PacketDeltaCount: 241333, + OctetDeltaCount: 8982624938, + ReversePacketTotalCount: 471111, + ReverseOctetTotalCount: 24500996, + ReversePacketDeltaCount: 136211, + ReverseOctetDeltaCount: 7083284, + SourcePodName: "perftest-a", + SourcePodNamespace: "antrea-test", + SourceNodeName: "k8s-node-control-plane", + DestinationPodName: "perftest-b", + DestinationPodNamespace: "antrea-test-b", + DestinationNodeName: "k8s-node-control-plane-b", + DestinationClusterIP: "10.10.1.10", + DestinationServicePort: 5202, + DestinationServicePortName: "perftest", + IngressNetworkPolicyName: "test-flow-aggregator-networkpolicy-ingress-allow", + IngressNetworkPolicyNamespace: "antrea-test-ns", + IngressNetworkPolicyRuleName: "test-flow-aggregator-networkpolicy-rule", + IngressNetworkPolicyRuleAction: 2, + IngressNetworkPolicyType: 1, + EgressNetworkPolicyName: "test-flow-aggregator-networkpolicy-egress-allow", + EgressNetworkPolicyNamespace: "antrea-test-ns-e", + EgressNetworkPolicyRuleName: "test-flow-aggregator-networkpolicy-rule-e", + EgressNetworkPolicyRuleAction: 5, + EgressNetworkPolicyType: 4, + TcpState: "TIME_WAIT", + FlowType: 11, + SourcePodLabels: "{\"antrea-e2e\":\"perftest-a\",\"app\":\"perftool\"}", + DestinationPodLabels: 
"{\"antrea-e2e\":\"perftest-b\",\"app\":\"perftool\"}", + Throughput: 15902813472, + ReverseThroughput: 12381344, + ThroughputFromSourceNode: 15902813473, + ThroughputFromDestinationNode: 15902813474, + ReverseThroughputFromSourceNode: 12381345, + ReverseThroughputFromDestinationNode: 12381346, + } +} diff --git a/pkg/flowaggregator/flowrecord/record_test.go b/pkg/flowaggregator/flowrecord/record_test.go new file mode 100644 index 00000000000..1d613ffe8d2 --- /dev/null +++ b/pkg/flowaggregator/flowrecord/record_test.go @@ -0,0 +1,104 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package flowrecord + +import ( + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + ipfixentitiestesting "github.com/vmware/go-ipfix/pkg/entities/testing" + "github.com/vmware/go-ipfix/pkg/registry" + + flowaggregatortesting "antrea.io/antrea/pkg/flowaggregator/testing" +) + +func init() { + registry.LoadRegistry() +} + +func TestGetFlowRecord(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + testcases := []struct { + isIPv4 bool + }{ + {true}, + {false}, + } + + for _, tc := range testcases { + mockRecord := ipfixentitiestesting.NewMockRecord(ctrl) + flowaggregatortesting.PrepareMockIpfixRecord(mockRecord, tc.isIPv4) + + flowRecord := GetFlowRecord(mockRecord) + assert.Equal(t, time.Unix(int64(1637706961), 0), flowRecord.FlowStartSeconds) + assert.Equal(t, time.Unix(int64(1637706973), 0), flowRecord.FlowEndSeconds) + assert.Equal(t, time.Unix(int64(1637706974), 0), flowRecord.FlowEndSecondsFromSourceNode) + assert.Equal(t, time.Unix(int64(1637706975), 0), flowRecord.FlowEndSecondsFromDestinationNode) + assert.Equal(t, uint8(3), flowRecord.FlowEndReason) + assert.Equal(t, uint16(44752), flowRecord.SourceTransportPort) + assert.Equal(t, uint16(5201), flowRecord.DestinationTransportPort) + assert.Equal(t, uint8(6), flowRecord.ProtocolIdentifier) + assert.Equal(t, uint64(823188), flowRecord.PacketTotalCount) + assert.Equal(t, uint64(30472817041), flowRecord.OctetTotalCount) + assert.Equal(t, uint64(241333), flowRecord.PacketDeltaCount) + assert.Equal(t, uint64(8982624938), flowRecord.OctetDeltaCount) + assert.Equal(t, uint64(471111), flowRecord.ReversePacketTotalCount) + assert.Equal(t, uint64(24500996), flowRecord.ReverseOctetTotalCount) + assert.Equal(t, uint64(136211), flowRecord.ReversePacketDeltaCount) + assert.Equal(t, uint64(7083284), flowRecord.ReverseOctetDeltaCount) + assert.Equal(t, "perftest-a", flowRecord.SourcePodName) + assert.Equal(t, "antrea-test", flowRecord.SourcePodNamespace) 
+ assert.Equal(t, "k8s-node-control-plane", flowRecord.SourceNodeName) + assert.Equal(t, "perftest-b", flowRecord.DestinationPodName) + assert.Equal(t, "antrea-test-b", flowRecord.DestinationPodNamespace) + assert.Equal(t, "k8s-node-control-plane-b", flowRecord.DestinationNodeName) + assert.Equal(t, uint16(5202), flowRecord.DestinationServicePort) + assert.Equal(t, "perftest", flowRecord.DestinationServicePortName) + assert.Equal(t, "test-flow-aggregator-networkpolicy-ingress-allow", flowRecord.IngressNetworkPolicyName) + assert.Equal(t, "antrea-test-ns", flowRecord.IngressNetworkPolicyNamespace) + assert.Equal(t, "test-flow-aggregator-networkpolicy-rule", flowRecord.IngressNetworkPolicyRuleName) + assert.Equal(t, uint8(1), flowRecord.IngressNetworkPolicyType) + assert.Equal(t, uint8(2), flowRecord.IngressNetworkPolicyRuleAction) + assert.Equal(t, "test-flow-aggregator-networkpolicy-egress-allow", flowRecord.EgressNetworkPolicyName) + assert.Equal(t, "antrea-test-ns-e", flowRecord.EgressNetworkPolicyNamespace) + assert.Equal(t, "test-flow-aggregator-networkpolicy-rule-e", flowRecord.EgressNetworkPolicyRuleName) + assert.Equal(t, uint8(4), flowRecord.EgressNetworkPolicyType) + assert.Equal(t, uint8(5), flowRecord.EgressNetworkPolicyRuleAction) + assert.Equal(t, "TIME_WAIT", flowRecord.TcpState) + assert.Equal(t, uint8(11), flowRecord.FlowType) + assert.Equal(t, "{\"antrea-e2e\":\"perftest-a\",\"app\":\"perftool\"}", flowRecord.SourcePodLabels) + assert.Equal(t, "{\"antrea-e2e\":\"perftest-b\",\"app\":\"perftool\"}", flowRecord.DestinationPodLabels) + assert.Equal(t, uint64(15902813472), flowRecord.Throughput) + assert.Equal(t, uint64(12381344), flowRecord.ReverseThroughput) + assert.Equal(t, uint64(15902813473), flowRecord.ThroughputFromSourceNode) + assert.Equal(t, uint64(15902813474), flowRecord.ThroughputFromDestinationNode) + assert.Equal(t, uint64(12381345), flowRecord.ReverseThroughputFromSourceNode) + assert.Equal(t, uint64(12381346), 
flowRecord.ReverseThroughputFromDestinationNode) + + if tc.isIPv4 { + assert.Equal(t, "10.10.0.79", flowRecord.SourceIP) + assert.Equal(t, "10.10.0.80", flowRecord.DestinationIP) + assert.Equal(t, "10.10.1.10", flowRecord.DestinationClusterIP) + } else { + assert.Equal(t, "2001:0:3238:dfe1:63::fefb", flowRecord.SourceIP) + assert.Equal(t, "2001:0:3238:dfe1:63::fefc", flowRecord.DestinationIP) + assert.Equal(t, "2001:0:3238:dfe1:64::a", flowRecord.DestinationClusterIP) + } + } +} diff --git a/pkg/flowaggregator/flowrecord/testing/util.go b/pkg/flowaggregator/flowrecord/testing/util.go new file mode 100644 index 00000000000..e573b955fe7 --- /dev/null +++ b/pkg/flowaggregator/flowrecord/testing/util.go @@ -0,0 +1,74 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package testing + +import ( + "time" + + "antrea.io/antrea/pkg/flowaggregator/flowrecord" +) + +// used for unit testing +func PrepareTestFlowRecord() *flowrecord.FlowRecord { + return &flowrecord.FlowRecord{ + FlowStartSeconds: time.Unix(int64(1637706961), 0), + FlowEndSeconds: time.Unix(int64(1637706973), 0), + FlowEndSecondsFromSourceNode: time.Unix(int64(1637706974), 0), + FlowEndSecondsFromDestinationNode: time.Unix(int64(1637706975), 0), + FlowEndReason: 3, + SourceIP: "10.10.0.79", + DestinationIP: "10.10.0.80", + SourceTransportPort: 44752, + DestinationTransportPort: 5201, + ProtocolIdentifier: 6, + PacketTotalCount: 823188, + OctetTotalCount: 30472817041, + PacketDeltaCount: 241333, + OctetDeltaCount: 8982624938, + ReversePacketTotalCount: 471111, + ReverseOctetTotalCount: 24500996, + ReversePacketDeltaCount: 136211, + ReverseOctetDeltaCount: 7083284, + SourcePodName: "perftest-a", + SourcePodNamespace: "antrea-test", + SourceNodeName: "k8s-node-control-plane", + DestinationPodName: "perftest-b", + DestinationPodNamespace: "antrea-test-b", + DestinationNodeName: "k8s-node-control-plane-b", + DestinationClusterIP: "10.10.1.10", + DestinationServicePort: 5202, + DestinationServicePortName: "perftest", + IngressNetworkPolicyName: "test-flow-aggregator-networkpolicy-ingress-allow", + IngressNetworkPolicyNamespace: "antrea-test-ns", + IngressNetworkPolicyRuleName: "test-flow-aggregator-networkpolicy-rule", + IngressNetworkPolicyRuleAction: 2, + IngressNetworkPolicyType: 1, + EgressNetworkPolicyName: "test-flow-aggregator-networkpolicy-egress-allow", + EgressNetworkPolicyNamespace: "antrea-test-ns-e", + EgressNetworkPolicyRuleName: "test-flow-aggregator-networkpolicy-rule-e", + EgressNetworkPolicyRuleAction: 5, + EgressNetworkPolicyType: 4, + TcpState: "TIME_WAIT", + FlowType: 11, + SourcePodLabels: "{\"antrea-e2e\":\"perftest-a\",\"app\":\"perftool\"}", + DestinationPodLabels: "{\"antrea-e2e\":\"perftest-b\",\"app\":\"perftool\"}", + Throughput: 
15902813472, + ReverseThroughput: 12381344, + ThroughputFromSourceNode: 15902813473, + ThroughputFromDestinationNode: 15902813474, + ReverseThroughputFromSourceNode: 12381345, + ReverseThroughputFromDestinationNode: 12381346, + } +} diff --git a/pkg/flowaggregator/options/options.go b/pkg/flowaggregator/options/options.go index 10f294c46f1..47d3b833928 100644 --- a/pkg/flowaggregator/options/options.go +++ b/pkg/flowaggregator/options/options.go @@ -40,6 +40,8 @@ type Options struct { ExternalFlowCollectorProto string // clickHouseCommitInterval flow records batch commit interval to clickhouse in the flow aggregator ClickHouseCommitInterval time.Duration + // Flow records batch upload interval from flow aggregator to S3 bucket + S3UploadInterval time.Duration } func LoadConfig(configBytes []byte) (*Options, error) { @@ -52,8 +54,11 @@ func LoadConfig(configBytes []byte) (*Options, error) { if opt.Config.FlowCollector.Enable && opt.Config.FlowCollector.Address == "" { return nil, fmt.Errorf("external flow collector enabled without providing address") } - if !opt.Config.FlowCollector.Enable && !opt.Config.ClickHouse.Enable { - return nil, fmt.Errorf("external flow collector or ClickHouse should be configured") + if opt.Config.S3Uploader.Enable && opt.Config.S3Uploader.BucketName == "" { + return nil, fmt.Errorf("s3Uploader enabled without specifying bucket name") + } + if !opt.Config.FlowCollector.Enable && !opt.Config.ClickHouse.Enable && !opt.Config.S3Uploader.Enable { + return nil, fmt.Errorf("external flow collector or ClickHouse or S3Uploader should be configured") } // Validate common parameters var err error @@ -91,9 +96,23 @@ func LoadConfig(configBytes []byte) (*Options, error) { return nil, err } if opt.ClickHouseCommitInterval < flowaggregatorconfig.MinClickHouseCommitInterval { - return nil, fmt.Errorf("commitInterval %s is too small: shortest supported interval is %s", + return nil, fmt.Errorf("commitInterval %s is too small: shortest supported interval 
is %v", opt.Config.ClickHouse.CommitInterval, flowaggregatorconfig.MinClickHouseCommitInterval) } } + // Validate S3Uploader specific parameters + if opt.Config.S3Uploader.Enable { + if opt.Config.S3Uploader.RecordFormat != "CSV" { + return nil, fmt.Errorf("record format %s is not supported", opt.Config.S3Uploader.RecordFormat) + } + opt.S3UploadInterval, err = time.ParseDuration(opt.Config.S3Uploader.UploadInterval) + if err != nil { + return nil, err + } + if opt.S3UploadInterval < flowaggregatorconfig.MinS3CommitInterval { + return nil, fmt.Errorf("uploadInterval %s is too small: shortest supported interval is %v", + opt.Config.S3Uploader.UploadInterval, flowaggregatorconfig.MinS3CommitInterval) + } + } return &opt, nil } diff --git a/pkg/flowaggregator/s3uploader/s3uploader.go b/pkg/flowaggregator/s3uploader/s3uploader.go new file mode 100644 index 00000000000..b239bc988d3 --- /dev/null +++ b/pkg/flowaggregator/s3uploader/s3uploader.go @@ -0,0 +1,451 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package s3uploader + +import ( + "bytes" + "compress/gzip" + "context" + "fmt" + "io" + "math/rand" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + ipfixentities "github.com/vmware/go-ipfix/pkg/entities" + "k8s.io/klog/v2" + + config "antrea.io/antrea/pkg/config/flowaggregator" + "antrea.io/antrea/pkg/flowaggregator/flowrecord" +) + +const ( + bufferFlushTimeout = 1 * time.Minute + maxNumBuffersPendingUpload = 5 +) + +type stopPayload struct { + flushQueue bool +} + +type S3UploadProcess struct { + bucketName string + bucketPrefix string + region string + compress bool + maxRecordPerFile int32 + // uploadInterval is the interval between batch uploads + uploadInterval time.Duration + // uploadTicker is a ticker, containing a channel used to trigger batchUploadAll() for every uploadInterval period + uploadTicker *time.Ticker + // stopCh is the channel to receive stop message + stopCh chan stopPayload + // exportWg is to ensure that all messages have been flushed from the queue when we stop + exportWg sync.WaitGroup + exportProcessRunning bool + // mutex protects configuration state from concurrent access + mutex sync.Mutex + // queueMutex protects currentBuffer and bufferQueue from concurrent access + queueMutex sync.Mutex + // currentBuffer caches flow record + currentBuffer *bytes.Buffer + // cachedRecordCount keeps track of the number of flow records written into currentBuffer + cachedRecordCount int + // bufferQueue caches currentBuffer when it is full + bufferQueue []*bytes.Buffer + // buffersToUpload stores all the buffers to be uploaded for the current uploadFile() call + buffersToUpload []*bytes.Buffer + gzipWriter *gzip.Writer + // awsS3Client is used to initialize awsS3Uploader + awsS3Client *s3.Client + // awsS3Uploader makes the real call to aws-sdk Upload() method to upload an object to 
S3 + awsS3Uploader *s3manager.Uploader + // s3UploaderAPI wraps the call made by awsS3Uploader + s3UploaderAPI S3UploaderAPI + nameRand *rand.Rand +} + +type S3Input struct { + Config config.S3UploaderConfig + UploadInterval time.Duration +} + +// Define a wrapper interface S3UploaderAPI to assist unit testing. +type S3UploaderAPI interface { + Upload(ctx context.Context, input *s3.PutObjectInput, awsS3Uploader *s3manager.Uploader, opts ...func(*s3manager.Uploader)) ( + *s3manager.UploadOutput, error, + ) +} + +type S3Uploader struct{} + +func (u *S3Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, awsS3Uploader *s3manager.Uploader, opts ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) { + return awsS3Uploader.Upload(ctx, input, opts...) +} + +func NewS3UploadProcess(input S3Input) (*S3UploadProcess, error) { + config := input.Config + cfg, err := awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithRegion(config.Region)) + if err != nil { + return nil, fmt.Errorf("error when loading AWS config: %w", err) + } + awsS3Client := s3.NewFromConfig(cfg) + awsS3Uploader := s3manager.NewUploader(awsS3Client) + buf := &bytes.Buffer{} + // #nosec G404: random number generator not used for security purposes + nameRand := rand.New(rand.NewSource(time.Now().UnixNano())) + + s3ExportProcess := &S3UploadProcess{ + bucketName: config.BucketName, + bucketPrefix: config.BucketPrefix, + region: config.Region, + compress: *config.Compress, + maxRecordPerFile: config.MaxRecordsPerFile, + uploadInterval: input.UploadInterval, + currentBuffer: buf, + bufferQueue: make([]*bytes.Buffer, 0), + buffersToUpload: make([]*bytes.Buffer, 0, maxNumBuffersPendingUpload), + gzipWriter: gzip.NewWriter(buf), + awsS3Client: awsS3Client, + awsS3Uploader: awsS3Uploader, + s3UploaderAPI: &S3Uploader{}, + nameRand: nameRand, + } + return s3ExportProcess, nil +} + +func (p *S3UploadProcess) GetBucketName() string { + p.mutex.Lock() + defer p.mutex.Unlock() + return 
p.bucketName +} + +func (p *S3UploadProcess) GetBucketPrefix() string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.bucketPrefix +} + +func (p *S3UploadProcess) GetRegion() string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.region +} + +func (p *S3UploadProcess) GetUploadInterval() time.Duration { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.uploadInterval +} + +func (p *S3UploadProcess) UpdateS3Uploader(bucketName, bucketPrefix, region string) error { + p.stopExportProcess(false) + defer p.startExportProcess() + p.mutex.Lock() + defer p.mutex.Unlock() + if bucketName != p.bucketName { + p.bucketName = bucketName + } + if bucketPrefix != p.bucketPrefix { + p.bucketPrefix = bucketPrefix + } + if region != p.region { + cfg, err := awsconfig.LoadDefaultConfig(context.TODO(), awsconfig.WithRegion(region)) + if err != nil { + return fmt.Errorf("error when loading AWS config: %w", err) + } + p.region = region + p.awsS3Client = s3.NewFromConfig(cfg) + p.awsS3Uploader = s3manager.NewUploader(p.awsS3Client) + } + return nil +} + +func (p *S3UploadProcess) SetUploadInterval(uploadInterval time.Duration) { + p.mutex.Lock() + defer p.mutex.Unlock() + p.uploadInterval = uploadInterval + if p.uploadTicker != nil { + p.uploadTicker.Reset(p.uploadInterval) + } +} + +func (p *S3UploadProcess) CacheRecord(record ipfixentities.Record) { + r := flowrecord.GetFlowRecord(record) + p.queueMutex.Lock() + defer p.queueMutex.Unlock() + p.writeRecordToBuffer(r) + // If the number of pending records in the buffer reaches maxRecordPerFile, + // add the buffer to bufferQueue. 
+ if int32(p.cachedRecordCount) == p.maxRecordPerFile { + p.appendBufferToQueue() + } +} + +func (p *S3UploadProcess) Start() { + p.startExportProcess() +} + +func (p *S3UploadProcess) Stop() { + p.stopExportProcess(true) +} + +func (p *S3UploadProcess) startExportProcess() { + p.mutex.Lock() + defer p.mutex.Unlock() + if p.exportProcessRunning { + return + } + p.exportProcessRunning = true + p.uploadTicker = time.NewTicker(p.uploadInterval) + p.stopCh = make(chan stopPayload, 1) + p.exportWg.Add(1) + go func() { + defer p.exportWg.Done() + p.flowRecordPeriodicCommit() + }() +} + +func (p *S3UploadProcess) stopExportProcess(flushQueue bool) { + p.mutex.Lock() + defer p.mutex.Unlock() + if !p.exportProcessRunning { + return + } + p.exportProcessRunning = false + defer p.uploadTicker.Stop() + p.stopCh <- stopPayload{ + flushQueue: flushQueue, + } + p.exportWg.Wait() +} + +func (p *S3UploadProcess) flowRecordPeriodicCommit() { + klog.InfoS("Starting S3 exporting process") + ctx := context.Background() + for { + select { + case stop := <-p.stopCh: + klog.InfoS("Stopping S3 exporting process") + if !stop.flushQueue { + return + } + ctx, cancelFn := context.WithTimeout(ctx, bufferFlushTimeout) + defer cancelFn() + err := p.batchUploadAll(ctx) + if err != nil { + klog.ErrorS(err, "Error when doing batchUploadAll on stop") + } + return + case <-p.uploadTicker.C: + err := p.batchUploadAll(ctx) + if err != nil { + klog.ErrorS(err, "Error when doing batchUploadAll on triggered timer") + } + } + } +} + +// batchUploadAll uploads all buffers cached in bufferQueue and previous fail- +// to-upload buffers stored in buffersToUpload. Returns error encountered +// during upload if any. 
+func (p *S3UploadProcess) batchUploadAll(ctx context.Context) error { + func() { + p.queueMutex.Lock() + defer p.queueMutex.Unlock() + + if p.cachedRecordCount != 0 { + p.appendBufferToQueue() + } + // dump cached buffers from bufferQueue to buffersToUpload + for _, buf := range p.bufferQueue { + p.buffersToUpload = append(p.buffersToUpload, buf) + if len(p.buffersToUpload) > maxNumBuffersPendingUpload { + p.buffersToUpload = p.buffersToUpload[1:] + } + } + p.bufferQueue = p.bufferQueue[:0] + }() + + uploaded := 0 + for _, buf := range p.buffersToUpload { + reader := bytes.NewReader(buf.Bytes()) + err := p.uploadFile(ctx, reader) + if err != nil { + p.buffersToUpload = p.buffersToUpload[uploaded:] + return err + } + uploaded += 1 + } + p.buffersToUpload = p.buffersToUpload[:0] + return nil +} + +func (p *S3UploadProcess) writeRecordToBuffer(record *flowrecord.FlowRecord) { + var writer io.Writer + writer = p.currentBuffer + if p.compress { + writer = p.gzipWriter + } + writeRecord(writer, record) + io.WriteString(writer, "\n") + p.cachedRecordCount += 1 +} + +func (p *S3UploadProcess) uploadFile(ctx context.Context, reader *bytes.Reader) error { + fileName := fmt.Sprintf("records-%s.csv", randSeq(p.nameRand, 12)) + if p.compress { + fileName += ".gz" + } + key := fileName + if p.bucketPrefix != "" { + key = fmt.Sprintf("%s/%s", p.bucketPrefix, fileName) + } + if _, err := p.s3UploaderAPI.Upload(ctx, &s3.PutObjectInput{ + Bucket: aws.String(p.bucketName), + Key: aws.String(key), + Body: reader, + }, p.awsS3Uploader); err != nil { + return fmt.Errorf("error when uploading file to S3: %v", err) + } + return nil +} + +// appendBufferToQueue appends currentBuffer to bufferQueue, and resets +// currentBuffer. Caller of this function should acquire queueMutex.
+func (p *S3UploadProcess) appendBufferToQueue() { + p.bufferQueue = append(p.bufferQueue, p.currentBuffer) + newBuffer := &bytes.Buffer{} + // avoid too many memory allocations + newBuffer.Grow(p.currentBuffer.Cap()) + p.currentBuffer = newBuffer + p.cachedRecordCount = 0 + if p.compress { + p.gzipWriter.Close() + p.gzipWriter.Reset(p.currentBuffer) + } +} + +func randSeq(randSrc *rand.Rand, n int) string { + var alphabet = []rune("abcdefghijklmnopqrstuvwxyz0123456789") + b := make([]rune, n) + for i := range b { + randIdx := randSrc.Intn(len(alphabet)) + b[i] = alphabet[randIdx] + } + return string(b) +} + +func writeRecord(w io.Writer, r *flowrecord.FlowRecord) { + io.WriteString(w, fmt.Sprintf("%d", r.FlowStartSeconds.Unix())) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.FlowEndSeconds.Unix())) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.FlowEndSecondsFromSourceNode.Unix())) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.FlowEndSecondsFromDestinationNode.Unix())) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.FlowEndReason)) + io.WriteString(w, ",") + io.WriteString(w, r.SourceIP) + io.WriteString(w, ",") + io.WriteString(w, r.DestinationIP) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.SourceTransportPort)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.DestinationTransportPort)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.ProtocolIdentifier)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.PacketTotalCount)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.OctetTotalCount)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.PacketDeltaCount)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.OctetDeltaCount)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.ReversePacketTotalCount)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", 
r.ReverseOctetTotalCount)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.ReversePacketDeltaCount)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.ReverseOctetDeltaCount)) + io.WriteString(w, ",") + io.WriteString(w, r.SourcePodName) + io.WriteString(w, ",") + io.WriteString(w, r.SourcePodNamespace) + io.WriteString(w, ",") + io.WriteString(w, r.SourceNodeName) + io.WriteString(w, ",") + io.WriteString(w, r.DestinationPodName) + io.WriteString(w, ",") + io.WriteString(w, r.DestinationPodNamespace) + io.WriteString(w, ",") + io.WriteString(w, r.DestinationNodeName) + io.WriteString(w, ",") + io.WriteString(w, r.DestinationClusterIP) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.DestinationServicePort)) + io.WriteString(w, ",") + io.WriteString(w, r.DestinationServicePortName) + io.WriteString(w, ",") + io.WriteString(w, r.IngressNetworkPolicyName) + io.WriteString(w, ",") + io.WriteString(w, r.IngressNetworkPolicyNamespace) + io.WriteString(w, ",") + io.WriteString(w, r.IngressNetworkPolicyRuleName) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.IngressNetworkPolicyRuleAction)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.IngressNetworkPolicyType)) + io.WriteString(w, ",") + io.WriteString(w, r.EgressNetworkPolicyName) + io.WriteString(w, ",") + io.WriteString(w, r.EgressNetworkPolicyNamespace) + io.WriteString(w, ",") + io.WriteString(w, r.EgressNetworkPolicyRuleName) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.EgressNetworkPolicyRuleAction)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.EgressNetworkPolicyType)) + io.WriteString(w, ",") + io.WriteString(w, r.TcpState) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.FlowType)) + io.WriteString(w, ",") + io.WriteString(w, r.SourcePodLabels) + io.WriteString(w, ",") + io.WriteString(w, r.DestinationPodLabels) + io.WriteString(w, ",") + io.WriteString(w, 
fmt.Sprintf("%d", r.Throughput)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.ReverseThroughput)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.ThroughputFromSourceNode)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.ThroughputFromDestinationNode)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.ReverseThroughputFromSourceNode)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", r.ReverseThroughputFromDestinationNode)) + io.WriteString(w, ",") + io.WriteString(w, fmt.Sprintf("%d", time.Now().Unix())) +} diff --git a/pkg/flowaggregator/s3uploader/s3uploader_test.go b/pkg/flowaggregator/s3uploader/s3uploader_test.go new file mode 100644 index 00000000000..40e3cec0e38 --- /dev/null +++ b/pkg/flowaggregator/s3uploader/s3uploader_test.go @@ -0,0 +1,223 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package s3uploader + +import ( + "bytes" + "context" + "math/rand" + "sync" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/config" + s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + ipfixentitiestesting "github.com/vmware/go-ipfix/pkg/entities/testing" + "github.com/vmware/go-ipfix/pkg/registry" + "k8s.io/apimachinery/pkg/util/wait" + + flowrecordtesting "antrea.io/antrea/pkg/flowaggregator/flowrecord/testing" + flowaggregatortesting "antrea.io/antrea/pkg/flowaggregator/testing" +) + +const ( + seed = 1 + recordStrIPv4 = "1637706961,1637706973,1637706974,1637706975,3,10.10.0.79,10.10.0.80,44752,5201,6,823188,30472817041,241333,8982624938,471111,24500996,136211,7083284,perftest-a,antrea-test,k8s-node-control-plane,perftest-b,antrea-test-b,k8s-node-control-plane-b,10.10.1.10,5202,perftest,test-flow-aggregator-networkpolicy-ingress-allow,antrea-test-ns,test-flow-aggregator-networkpolicy-rule,2,1,test-flow-aggregator-networkpolicy-egress-allow,antrea-test-ns-e,test-flow-aggregator-networkpolicy-rule-e,5,4,TIME_WAIT,11,{\"antrea-e2e\":\"perftest-a\",\"app\":\"perftool\"},{\"antrea-e2e\":\"perftest-b\",\"app\":\"perftool\"},15902813472,12381344,15902813473,15902813474,12381345,12381346" + recordStrIPv6 = 
"1637706961,1637706973,1637706974,1637706975,3,2001:0:3238:dfe1:63::fefb,2001:0:3238:dfe1:63::fefc,44752,5201,6,823188,30472817041,241333,8982624938,471111,24500996,136211,7083284,perftest-a,antrea-test,k8s-node-control-plane,perftest-b,antrea-test-b,k8s-node-control-plane-b,2001:0:3238:dfe1:64::a,5202,perftest,test-flow-aggregator-networkpolicy-ingress-allow,antrea-test-ns,test-flow-aggregator-networkpolicy-rule,2,1,test-flow-aggregator-networkpolicy-egress-allow,antrea-test-ns-e,test-flow-aggregator-networkpolicy-rule-e,5,4,TIME_WAIT,11,{\"antrea-e2e\":\"perftest-a\",\"app\":\"perftool\"},{\"antrea-e2e\":\"perftest-b\",\"app\":\"perftool\"},15902813472,12381344,15902813473,15902813474,12381345,12381346" +) + +type mockS3Uploader struct { + testReader *bytes.Buffer + testReaderMutex sync.Mutex +} + +func (m *mockS3Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, awsS3Uploader *s3manager.Uploader, opts ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) { + m.testReaderMutex.Lock() + defer m.testReaderMutex.Unlock() + m.testReader.ReadFrom(input.Body) + return nil, nil +} + +func init() { + registry.LoadRegistry() +} + +func TestUpdateS3Uploader(t *testing.T) { + s3UploadProc := S3UploadProcess{ + bucketName: "test-bucket-name-old", + bucketPrefix: "test-bucket-prefix-old", + region: "us-west-2", + uploadInterval: 1 * time.Minute, + } + newBucketName := "test-bucket-name-new" + newBucketPrefix := "test-bucket-prefix-new" + newRegion := "us-west-1" + s3UploadProc.UpdateS3Uploader(newBucketName, newBucketPrefix, newRegion) + assert.Equal(t, newBucketName, s3UploadProc.bucketName) + assert.Equal(t, newBucketPrefix, s3UploadProc.bucketPrefix) + assert.Equal(t, newRegion, s3UploadProc.region) + assert.NotNil(t, s3UploadProc.awsS3Client) + assert.NotNil(t, s3UploadProc.awsS3Uploader) +} + +func TestCacheRecord(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + s3UploadProc := S3UploadProcess{ + compress: false, + 
maxRecordPerFile: 2, + currentBuffer: &bytes.Buffer{}, + bufferQueue: make([]*bytes.Buffer, 0, maxNumBuffersPendingUpload), + } + + // First call, cache the record in currentBuffer. + mockRecord := ipfixentitiestesting.NewMockRecord(ctrl) + flowaggregatortesting.PrepareMockIpfixRecord(mockRecord, true) + s3UploadProc.CacheRecord(mockRecord) + assert.Equal(t, 1, s3UploadProc.cachedRecordCount) + assert.Contains(t, s3UploadProc.currentBuffer.String(), recordStrIPv4) + + // Second call, reach currentBuffer max size, add the currentBuffer to bufferQueue. + mockRecord = ipfixentitiestesting.NewMockRecord(ctrl) + flowaggregatortesting.PrepareMockIpfixRecord(mockRecord, false) + s3UploadProc.CacheRecord(mockRecord) + assert.Equal(t, 1, len(s3UploadProc.bufferQueue)) + buf := s3UploadProc.bufferQueue[0] + assert.Contains(t, buf.String(), recordStrIPv6) + assert.Equal(t, 0, s3UploadProc.cachedRecordCount) + assert.Equal(t, "", s3UploadProc.currentBuffer.String()) +} + +func TestBatchUploadAll(t *testing.T) { + mockS3Uploader := &mockS3Uploader{testReader: &bytes.Buffer{}} + // #nosec G404: random number generator not used for security purposes + nameRand := rand.New(rand.NewSource(seed)) + s3UploadProc := S3UploadProcess{ + compress: false, + maxRecordPerFile: 10, + currentBuffer: &bytes.Buffer{}, + bufferQueue: make([]*bytes.Buffer, 0), + buffersToUpload: make([]*bytes.Buffer, 0, maxNumBuffersPendingUpload), + s3UploaderAPI: mockS3Uploader, + nameRand: nameRand, + } + testRecord := flowrecordtesting.PrepareTestFlowRecord() + s3UploadProc.writeRecordToBuffer(testRecord) + s3UploadProc.cachedRecordCount = 1 + err := s3UploadProc.batchUploadAll(context.TODO()) + assert.NoError(t, err) + assert.Equal(t, 0, len(s3UploadProc.bufferQueue)) + assert.Equal(t, 0, len(s3UploadProc.buffersToUpload)) + assert.Equal(t, "", s3UploadProc.currentBuffer.String()) + assert.Equal(t, 0, s3UploadProc.cachedRecordCount) + assert.Contains(t, mockS3Uploader.testReader.String(), recordStrIPv4) +} + 
+func TestBatchUploadAllError(t *testing.T) { + s3uploader := &S3Uploader{} + // #nosec G404: random number generator not used for security purposes + nameRand := rand.New(rand.NewSource(seed)) + s3UploadProc := S3UploadProcess{ + bucketName: "test-bucket-name", + compress: false, + maxRecordPerFile: 10, + currentBuffer: &bytes.Buffer{}, + bufferQueue: make([]*bytes.Buffer, 0), + buffersToUpload: make([]*bytes.Buffer, 0, maxNumBuffersPendingUpload), + s3UploaderAPI: s3uploader, + nameRand: nameRand, + } + cfg, _ := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2")) + s3UploadProc.awsS3Client = s3.NewFromConfig(cfg) + s3UploadProc.awsS3Uploader = s3manager.NewUploader(s3UploadProc.awsS3Client) + + testRecord := flowrecordtesting.PrepareTestFlowRecord() + s3UploadProc.writeRecordToBuffer(testRecord) + s3UploadProc.cachedRecordCount = 1 + // It is expected to fail when calling uploadFile, as the correct S3 bucket + // configuration is not provided. + err := s3UploadProc.batchUploadAll(context.TODO()) + assert.Equal(t, 1, len(s3UploadProc.buffersToUpload)) + assert.Equal(t, 0, len(s3UploadProc.bufferQueue)) + assert.Equal(t, "", s3UploadProc.currentBuffer.String()) + assert.Equal(t, 0, s3UploadProc.cachedRecordCount) + expectedErrMsg := "error when uploading file to S3: operation error S3: PutObject, https response error StatusCode: 301" + assert.Contains(t, err.Error(), expectedErrMsg) +} + +func TestFlowRecordPeriodicCommit(t *testing.T) { + mockS3Uploader := &mockS3Uploader{testReader: &bytes.Buffer{}} + // #nosec G404: random number generator not used for security purposes + nameRand := rand.New(rand.NewSource(seed)) + s3UploadProc := S3UploadProcess{ + compress: false, + maxRecordPerFile: 10, + uploadInterval: 100 * time.Millisecond, + currentBuffer: &bytes.Buffer{}, + bufferQueue: make([]*bytes.Buffer, 0), + buffersToUpload: make([]*bytes.Buffer, 0, maxNumBuffersPendingUpload), + s3UploaderAPI: mockS3Uploader, + nameRand: nameRand, + } + 
testRecord := flowrecordtesting.PrepareTestFlowRecord() + s3UploadProc.writeRecordToBuffer(testRecord) + s3UploadProc.cachedRecordCount = 1 + s3UploadProc.startExportProcess() + // wait for ticker to tick + err := wait.PollImmediate(10*time.Millisecond, 1*time.Second, func() (bool, error) { + mockS3Uploader.testReaderMutex.Lock() + defer mockS3Uploader.testReaderMutex.Unlock() + if mockS3Uploader.testReader.Len() != 0 { + return true, nil + } + return false, nil + }) + assert.NoError(t, err) + s3UploadProc.stopExportProcess(false) + assert.Equal(t, 0, len(s3UploadProc.bufferQueue)) + assert.Equal(t, 0, len(s3UploadProc.buffersToUpload)) + assert.Equal(t, "", s3UploadProc.currentBuffer.String()) + assert.Equal(t, 0, s3UploadProc.cachedRecordCount) + assert.Contains(t, mockS3Uploader.testReader.String(), recordStrIPv4) +} + +func TestFlushCacheOnStop(t *testing.T) { + mockS3Uploader := &mockS3Uploader{testReader: &bytes.Buffer{}} + // #nosec G404: random number generator not used for security purposes + nameRand := rand.New(rand.NewSource(seed)) + s3UploadProc := S3UploadProcess{ + compress: false, + maxRecordPerFile: 10, + uploadInterval: 100 * time.Second, + currentBuffer: &bytes.Buffer{}, + bufferQueue: make([]*bytes.Buffer, 0), + buffersToUpload: make([]*bytes.Buffer, 0, maxNumBuffersPendingUpload), + s3UploaderAPI: mockS3Uploader, + nameRand: nameRand, + } + testRecord := flowrecordtesting.PrepareTestFlowRecord() + s3UploadProc.writeRecordToBuffer(testRecord) + s3UploadProc.cachedRecordCount = 1 + s3UploadProc.startExportProcess() + s3UploadProc.stopExportProcess(true) + assert.Equal(t, 0, len(s3UploadProc.bufferQueue)) + assert.Equal(t, 0, len(s3UploadProc.buffersToUpload)) + assert.Equal(t, "", s3UploadProc.currentBuffer.String()) + assert.Equal(t, 0, s3UploadProc.cachedRecordCount) + assert.Contains(t, mockS3Uploader.testReader.String(), recordStrIPv4) +} diff --git a/pkg/flowaggregator/testing/util.go b/pkg/flowaggregator/testing/util.go new file mode 100644 
index 00000000000..6d169d5f3da --- /dev/null +++ b/pkg/flowaggregator/testing/util.go @@ -0,0 +1,241 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testing + +import ( + "net" + + ipfixentities "github.com/vmware/go-ipfix/pkg/entities" + ipfixentitiestesting "github.com/vmware/go-ipfix/pkg/entities/testing" + ipfixregistry "github.com/vmware/go-ipfix/pkg/registry" +) + +// used for unit testing +func createElement(name string, enterpriseID uint32) ipfixentities.InfoElementWithValue { + element, _ := ipfixregistry.GetInfoElement(name, enterpriseID) + ieWithValue, _ := ipfixentities.DecodeAndCreateInfoElementWithValue(element, nil) + return ieWithValue +} + +// used for unit testing +func PrepareMockIpfixRecord(mockRecord *ipfixentitiestesting.MockRecord, isIPv4 bool) { + flowStartSecElem := createElement("flowStartSeconds", ipfixregistry.IANAEnterpriseID) + flowStartSecElem.SetUnsigned32Value(uint32(1637706961)) + mockRecord.EXPECT().GetInfoElementWithValue("flowStartSeconds").Return(flowStartSecElem, 0, true) + + flowEndSecElem := createElement("flowEndSeconds", ipfixregistry.IANAEnterpriseID) + flowEndSecElem.SetUnsigned32Value(uint32(1637706973)) + mockRecord.EXPECT().GetInfoElementWithValue("flowEndSeconds").Return(flowEndSecElem, 0, true) + + flowEndSecSrcNodeElem := createElement("flowEndSecondsFromSourceNode", ipfixregistry.AntreaEnterpriseID) + 
flowEndSecSrcNodeElem.SetUnsigned32Value(uint32(1637706974)) + mockRecord.EXPECT().GetInfoElementWithValue("flowEndSecondsFromSourceNode").Return(flowEndSecSrcNodeElem, 0, true) + + flowEndSecDstNodeElem := createElement("flowEndSecondsFromDestinationNode", ipfixregistry.AntreaEnterpriseID) + flowEndSecDstNodeElem.SetUnsigned32Value(uint32(1637706975)) + mockRecord.EXPECT().GetInfoElementWithValue("flowEndSecondsFromDestinationNode").Return(flowEndSecDstNodeElem, 0, true) + + flowEndReasonElem := createElement("flowEndReason", ipfixregistry.IANAEnterpriseID) + flowEndReasonElem.SetUnsigned8Value(uint8(3)) + mockRecord.EXPECT().GetInfoElementWithValue("flowEndReason").Return(flowEndReasonElem, 0, true) + + srcPortElem := createElement("sourceTransportPort", ipfixregistry.IANAEnterpriseID) + srcPortElem.SetUnsigned16Value(uint16(44752)) + mockRecord.EXPECT().GetInfoElementWithValue("sourceTransportPort").Return(srcPortElem, 0, true) + + dstPortElem := createElement("destinationTransportPort", ipfixregistry.IANAEnterpriseID) + dstPortElem.SetUnsigned16Value(uint16(5201)) + mockRecord.EXPECT().GetInfoElementWithValue("destinationTransportPort").Return(dstPortElem, 0, true) + + protoIdentifierElem := createElement("protocolIdentifier", ipfixregistry.IANAEnterpriseID) + protoIdentifierElem.SetUnsigned8Value(uint8(6)) + mockRecord.EXPECT().GetInfoElementWithValue("protocolIdentifier").Return(protoIdentifierElem, 0, true) + + packetTotalCountElem := createElement("packetTotalCount", ipfixregistry.IANAEnterpriseID) + packetTotalCountElem.SetUnsigned64Value(uint64(823188)) + mockRecord.EXPECT().GetInfoElementWithValue("packetTotalCount").Return(packetTotalCountElem, 0, true) + + octetTotalCountElem := createElement("octetTotalCount", ipfixregistry.IANAEnterpriseID) + octetTotalCountElem.SetUnsigned64Value(uint64(30472817041)) + mockRecord.EXPECT().GetInfoElementWithValue("octetTotalCount").Return(octetTotalCountElem, 0, true) + + packetDeltaCountElem := 
createElement("packetDeltaCount", ipfixregistry.IANAEnterpriseID) + packetDeltaCountElem.SetUnsigned64Value(uint64(241333)) + mockRecord.EXPECT().GetInfoElementWithValue("packetDeltaCount").Return(packetDeltaCountElem, 0, true) + + octetDeltaCountElem := createElement("octetDeltaCount", ipfixregistry.IANAEnterpriseID) + octetDeltaCountElem.SetUnsigned64Value(uint64(8982624938)) + mockRecord.EXPECT().GetInfoElementWithValue("octetDeltaCount").Return(octetDeltaCountElem, 0, true) + + reversePacketTotalCountElem := createElement("reversePacketTotalCount", ipfixregistry.IANAReversedEnterpriseID) + reversePacketTotalCountElem.SetUnsigned64Value(uint64(471111)) + mockRecord.EXPECT().GetInfoElementWithValue("reversePacketTotalCount").Return(reversePacketTotalCountElem, 0, true) + + reverseOctetTotalCountElem := createElement("reverseOctetTotalCount", ipfixregistry.IANAReversedEnterpriseID) + reverseOctetTotalCountElem.SetUnsigned64Value(uint64(24500996)) + mockRecord.EXPECT().GetInfoElementWithValue("reverseOctetTotalCount").Return(reverseOctetTotalCountElem, 0, true) + + reversePacketDeltaCountElem := createElement("reversePacketDeltaCount", ipfixregistry.IANAReversedEnterpriseID) + reversePacketDeltaCountElem.SetUnsigned64Value(uint64(136211)) + mockRecord.EXPECT().GetInfoElementWithValue("reversePacketDeltaCount").Return(reversePacketDeltaCountElem, 0, true) + + reverseOctetDeltaCountElem := createElement("reverseOctetDeltaCount", ipfixregistry.IANAReversedEnterpriseID) + reverseOctetDeltaCountElem.SetUnsigned64Value(uint64(7083284)) + mockRecord.EXPECT().GetInfoElementWithValue("reverseOctetDeltaCount").Return(reverseOctetDeltaCountElem, 0, true) + + sourcePodNameElem := createElement("sourcePodName", ipfixregistry.AntreaEnterpriseID) + sourcePodNameElem.SetStringValue("perftest-a") + mockRecord.EXPECT().GetInfoElementWithValue("sourcePodName").Return(sourcePodNameElem, 0, true) + + sourcePodNamespaceElem := createElement("sourcePodNamespace", 
ipfixregistry.AntreaEnterpriseID) + sourcePodNamespaceElem.SetStringValue("antrea-test") + mockRecord.EXPECT().GetInfoElementWithValue("sourcePodNamespace").Return(sourcePodNamespaceElem, 0, true) + + sourceNodeNameElem := createElement("sourceNodeName", ipfixregistry.AntreaEnterpriseID) + sourceNodeNameElem.SetStringValue("k8s-node-control-plane") + mockRecord.EXPECT().GetInfoElementWithValue("sourceNodeName").Return(sourceNodeNameElem, 0, true) + + destinationPodNameElem := createElement("destinationPodName", ipfixregistry.AntreaEnterpriseID) + destinationPodNameElem.SetStringValue("perftest-b") + mockRecord.EXPECT().GetInfoElementWithValue("destinationPodName").Return(destinationPodNameElem, 0, true) + + destinationPodNamespaceElem := createElement("destinationPodNamespace", ipfixregistry.AntreaEnterpriseID) + destinationPodNamespaceElem.SetStringValue("antrea-test-b") + mockRecord.EXPECT().GetInfoElementWithValue("destinationPodNamespace").Return(destinationPodNamespaceElem, 0, true) + + destinationNodeNameElem := createElement("destinationNodeName", ipfixregistry.AntreaEnterpriseID) + destinationNodeNameElem.SetStringValue("k8s-node-control-plane-b") + mockRecord.EXPECT().GetInfoElementWithValue("destinationNodeName").Return(destinationNodeNameElem, 0, true) + + destinationServicePortElem := createElement("destinationServicePort", ipfixregistry.AntreaEnterpriseID) + destinationServicePortElem.SetUnsigned16Value(uint16(5202)) + mockRecord.EXPECT().GetInfoElementWithValue("destinationServicePort").Return(destinationServicePortElem, 0, true) + + destinationServicePortNameElem := createElement("destinationServicePortName", ipfixregistry.AntreaEnterpriseID) + destinationServicePortNameElem.SetStringValue("perftest") + mockRecord.EXPECT().GetInfoElementWithValue("destinationServicePortName").Return(destinationServicePortNameElem, 0, true) + + ingressNetworkPolicyNameElem := createElement("ingressNetworkPolicyName", ipfixregistry.AntreaEnterpriseID) + 
ingressNetworkPolicyNameElem.SetStringValue("test-flow-aggregator-networkpolicy-ingress-allow") + mockRecord.EXPECT().GetInfoElementWithValue("ingressNetworkPolicyName").Return(ingressNetworkPolicyNameElem, 0, true) + + ingressNetworkPolicyNamespaceElem := createElement("ingressNetworkPolicyNamespace", ipfixregistry.AntreaEnterpriseID) + ingressNetworkPolicyNamespaceElem.SetStringValue("antrea-test-ns") + mockRecord.EXPECT().GetInfoElementWithValue("ingressNetworkPolicyNamespace").Return(ingressNetworkPolicyNamespaceElem, 0, true) + + ingressNetworkPolicyRuleNameElem := createElement("ingressNetworkPolicyRuleName", ipfixregistry.AntreaEnterpriseID) + ingressNetworkPolicyRuleNameElem.SetStringValue("test-flow-aggregator-networkpolicy-rule") + mockRecord.EXPECT().GetInfoElementWithValue("ingressNetworkPolicyRuleName").Return(ingressNetworkPolicyRuleNameElem, 0, true) + + ingressNetworkPolicyTypeElem := createElement("ingressNetworkPolicyType", ipfixregistry.AntreaEnterpriseID) + ingressNetworkPolicyTypeElem.SetUnsigned8Value(uint8(1)) + mockRecord.EXPECT().GetInfoElementWithValue("ingressNetworkPolicyType").Return(ingressNetworkPolicyTypeElem, 0, true) + + ingressNetworkPolicyRuleActionElem := createElement("ingressNetworkPolicyRuleAction", ipfixregistry.AntreaEnterpriseID) + ingressNetworkPolicyRuleActionElem.SetUnsigned8Value(uint8(2)) + mockRecord.EXPECT().GetInfoElementWithValue("ingressNetworkPolicyRuleAction").Return(ingressNetworkPolicyRuleActionElem, 0, true) + + egressNetworkPolicyNameElem := createElement("egressNetworkPolicyName", ipfixregistry.AntreaEnterpriseID) + egressNetworkPolicyNameElem.SetStringValue("test-flow-aggregator-networkpolicy-egress-allow") + mockRecord.EXPECT().GetInfoElementWithValue("egressNetworkPolicyName").Return(egressNetworkPolicyNameElem, 0, true) + + egressNetworkPolicyNamespaceElem := createElement("egressNetworkPolicyNamespace", ipfixregistry.AntreaEnterpriseID) + 
egressNetworkPolicyNamespaceElem.SetStringValue("antrea-test-ns-e") + mockRecord.EXPECT().GetInfoElementWithValue("egressNetworkPolicyNamespace").Return(egressNetworkPolicyNamespaceElem, 0, true) + + egressNetworkPolicyRuleNameElem := createElement("egressNetworkPolicyRuleName", ipfixregistry.AntreaEnterpriseID) + egressNetworkPolicyRuleNameElem.SetStringValue("test-flow-aggregator-networkpolicy-rule-e") + mockRecord.EXPECT().GetInfoElementWithValue("egressNetworkPolicyRuleName").Return(egressNetworkPolicyRuleNameElem, 0, true) + + egressNetworkPolicyTypeElem := createElement("egressNetworkPolicyType", ipfixregistry.AntreaEnterpriseID) + egressNetworkPolicyTypeElem.SetUnsigned8Value(uint8(4)) + mockRecord.EXPECT().GetInfoElementWithValue("egressNetworkPolicyType").Return(egressNetworkPolicyTypeElem, 0, true) + + egressNetworkPolicyRuleActionElem := createElement("egressNetworkPolicyRuleAction", ipfixregistry.AntreaEnterpriseID) + egressNetworkPolicyRuleActionElem.SetUnsigned8Value(uint8(5)) + mockRecord.EXPECT().GetInfoElementWithValue("egressNetworkPolicyRuleAction").Return(egressNetworkPolicyRuleActionElem, 0, true) + + tcpStateElem := createElement("tcpState", ipfixregistry.AntreaEnterpriseID) + tcpStateElem.SetStringValue("TIME_WAIT") + mockRecord.EXPECT().GetInfoElementWithValue("tcpState").Return(tcpStateElem, 0, true) + + flowTypeElem := createElement("flowType", ipfixregistry.AntreaEnterpriseID) + flowTypeElem.SetUnsigned8Value(uint8(11)) + mockRecord.EXPECT().GetInfoElementWithValue("flowType").Return(flowTypeElem, 0, true) + + sourcePodLabelsElem := createElement("sourcePodLabels", ipfixregistry.AntreaEnterpriseID) + sourcePodLabelsElem.SetStringValue("{\"antrea-e2e\":\"perftest-a\",\"app\":\"perftool\"}") + mockRecord.EXPECT().GetInfoElementWithValue("sourcePodLabels").Return(sourcePodLabelsElem, 0, true) + + destinationPodLabelsElem := createElement("destinationPodLabels", ipfixregistry.AntreaEnterpriseID) + 
destinationPodLabelsElem.SetStringValue("{\"antrea-e2e\":\"perftest-b\",\"app\":\"perftool\"}") + mockRecord.EXPECT().GetInfoElementWithValue("destinationPodLabels").Return(destinationPodLabelsElem, 0, true) + + throughputElem := createElement("throughput", ipfixregistry.AntreaEnterpriseID) + throughputElem.SetUnsigned64Value(uint64(15902813472)) + mockRecord.EXPECT().GetInfoElementWithValue("throughput").Return(throughputElem, 0, true) + + reverseThroughputElem := createElement("reverseThroughput", ipfixregistry.AntreaEnterpriseID) + reverseThroughputElem.SetUnsigned64Value(uint64(12381344)) + mockRecord.EXPECT().GetInfoElementWithValue("reverseThroughput").Return(reverseThroughputElem, 0, true) + + throughputFromSourceNodeElem := createElement("throughputFromSourceNode", ipfixregistry.AntreaEnterpriseID) + throughputFromSourceNodeElem.SetUnsigned64Value(uint64(15902813473)) + mockRecord.EXPECT().GetInfoElementWithValue("throughputFromSourceNode").Return(throughputFromSourceNodeElem, 0, true) + + throughputFromDestinationNodeElem := createElement("throughputFromDestinationNode", ipfixregistry.AntreaEnterpriseID) + throughputFromDestinationNodeElem.SetUnsigned64Value(uint64(15902813474)) + mockRecord.EXPECT().GetInfoElementWithValue("throughputFromDestinationNode").Return(throughputFromDestinationNodeElem, 0, true) + + reverseThroughputFromSourceNodeElem := createElement("reverseThroughputFromSourceNode", ipfixregistry.AntreaEnterpriseID) + reverseThroughputFromSourceNodeElem.SetUnsigned64Value(uint64(12381345)) + mockRecord.EXPECT().GetInfoElementWithValue("reverseThroughputFromSourceNode").Return(reverseThroughputFromSourceNodeElem, 0, true) + + reverseThroughputFromDestinationNodeElem := createElement("reverseThroughputFromDestinationNode", ipfixregistry.AntreaEnterpriseID) + reverseThroughputFromDestinationNodeElem.SetUnsigned64Value(uint64(12381346)) + 
mockRecord.EXPECT().GetInfoElementWithValue("reverseThroughputFromDestinationNode").Return(reverseThroughputFromDestinationNodeElem, 0, true) + + if isIPv4 { + sourceIPv4Elem := createElement("sourceIPv4Address", ipfixregistry.IANAEnterpriseID) + sourceIPv4Elem.SetIPAddressValue(net.ParseIP("10.10.0.79")) + mockRecord.EXPECT().GetInfoElementWithValue("sourceIPv4Address").Return(sourceIPv4Elem, 0, true).AnyTimes() + mockRecord.EXPECT().GetInfoElementWithValue("sourceIPv6Address").Return(nil, 0, false).AnyTimes() + + destinationIPv4Elem := createElement("destinationIPv4Address", ipfixregistry.IANAEnterpriseID) + destinationIPv4Elem.SetIPAddressValue(net.ParseIP("10.10.0.80")) + mockRecord.EXPECT().GetInfoElementWithValue("destinationIPv4Address").Return(destinationIPv4Elem, 0, true).AnyTimes() + mockRecord.EXPECT().GetInfoElementWithValue("destinationIPv6Address").Return(nil, 0, false).AnyTimes() + + destinationClusterIPv4Elem := createElement("destinationClusterIPv4", ipfixregistry.AntreaEnterpriseID) + destinationClusterIPv4Elem.SetIPAddressValue(net.ParseIP("10.10.1.10")) + mockRecord.EXPECT().GetInfoElementWithValue("destinationClusterIPv4").Return(destinationClusterIPv4Elem, 0, true).AnyTimes() + mockRecord.EXPECT().GetInfoElementWithValue("destinationClusterIPv6").Return(nil, 0, false).AnyTimes() + } else { + sourceIPv6Elem := createElement("sourceIPv6Address", ipfixregistry.IANAEnterpriseID) + sourceIPv6Elem.SetIPAddressValue(net.ParseIP("2001:0:3238:dfe1:63::fefb")) + mockRecord.EXPECT().GetInfoElementWithValue("sourceIPv6Address").Return(sourceIPv6Elem, 0, true).AnyTimes() + mockRecord.EXPECT().GetInfoElementWithValue("sourceIPv4Address").Return(nil, 0, false).AnyTimes() + + destinationIPv6Elem := createElement("destinationIPv6Address", ipfixregistry.IANAEnterpriseID) + destinationIPv6Elem.SetIPAddressValue(net.ParseIP("2001:0:3238:dfe1:63::fefc")) + mockRecord.EXPECT().GetInfoElementWithValue("destinationIPv6Address").Return(destinationIPv6Elem, 0, 
true).AnyTimes() + mockRecord.EXPECT().GetInfoElementWithValue("destinationIPv4Address").Return(nil, 0, false).AnyTimes() + + destinationClusterIPv6Elem := createElement("destinationClusterIPv6", ipfixregistry.AntreaEnterpriseID) + destinationClusterIPv6Elem.SetIPAddressValue(net.ParseIP("2001:0:3238:dfe1:64::a")) + mockRecord.EXPECT().GetInfoElementWithValue("destinationClusterIPv6").Return(destinationClusterIPv6Elem, 0, true).AnyTimes() + mockRecord.EXPECT().GetInfoElementWithValue("destinationClusterIPv4").Return(nil, 0, false).AnyTimes() + } +} diff --git a/plugins/octant/go.mod b/plugins/octant/go.mod index 18c29be730c..c43b99735ad 100644 --- a/plugins/octant/go.mod +++ b/plugins/octant/go.mod @@ -43,7 +43,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.3 // indirect - github.com/google/go-cmp v0.5.5 // indirect + github.com/google/go-cmp v0.5.8 // indirect github.com/google/go-intervals v0.0.2 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.0 // indirect diff --git a/plugins/octant/go.sum b/plugins/octant/go.sum index d71188629af..8a7aa3cf66e 100644 --- a/plugins/octant/go.sum +++ b/plugins/octant/go.sum @@ -426,8 +426,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -1226,7 +1227,6 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=