diff --git a/ci/jenkins/Nightly2.groovy b/ci/jenkins/Nightly2.groovy index 4da45da11c..6846e9a398 100644 --- a/ci/jenkins/Nightly2.groovy +++ b/ci/jenkins/Nightly2.groovy @@ -1,4 +1,4 @@ -@Library('jenkins-shared-library@v0.34.0') _ +@Library('jenkins-shared-library@v0.40.0') _ def pod = libraryResource 'io/milvus/pod/tekton-4am.yaml' @@ -57,10 +57,11 @@ pipeline { gitBaseRef: gitBaseRef, pullRequestNumber: "$env.CHANGE_ID", suppress_suffix_of_image_tag: true, - test_client_type: '["pytest"]' + images: '["milvus","pytest","helm"]' milvus_image_tag = tekton.query_result job_name, 'milvus-image-tag' pytest_image = tekton.query_result job_name, 'pytest-image-fqdn' + helm_image = tekton.query_result job_name, 'helm-image-fqdn' } } } @@ -100,7 +101,9 @@ pipeline { ciMode: 'nightly', milvus_image_tag: milvus_image_tag, pytest_image: pytest_image, - milvus_deployment_option: milvus_deployment_option + helm_image: helm_image, + milvus_deployment_option: milvus_deployment_option, + verbose: 'false' } } } diff --git a/ci/jenkins/PR-for-go-sdk.groovy b/ci/jenkins/PR-for-go-sdk.groovy index 3542980212..b6e42716e9 100644 --- a/ci/jenkins/PR-for-go-sdk.groovy +++ b/ci/jenkins/PR-for-go-sdk.groovy @@ -1,4 +1,4 @@ -@Library('jenkins-shared-library@v0.34.0') _ +@Library('jenkins-shared-library@v0.40.0') _ def pod = libraryResource 'io/milvus/pod/tekton-4am.yaml' @@ -45,10 +45,12 @@ pipeline { gitMode: gitMode , gitBaseRef: gitBaseRef, pullRequestNumber: "$env.CHANGE_ID", - suppress_suffix_of_image_tag: true + suppress_suffix_of_image_tag: true, + images: '["milvus","gotestsum","helm"]' milvus_image_tag = tekton.query_result job_name, 'milvus-image-tag' milvus_sdk_go_image = tekton.query_result job_name, 'gotestsum-image-fqdn' + helm_image = tekton.query_result job_name, 'helm-image-fqdn' } } } @@ -88,7 +90,9 @@ pipeline { ciMode: 'e2e', milvus_image_tag: milvus_image_tag, milvus_sdk_go_image: milvus_sdk_go_image, - milvus_deployment_option: milvus_deployment_option + helm_image: helm_image, + milvus_deployment_option: milvus_deployment_option, + verbose: 'false' } } } diff --git a/ci/jenkins/PR2.groovy b/ci/jenkins/PR2.groovy index 392d55f15c..7e3d1f79ff 100644 --- a/ci/jenkins/PR2.groovy +++ b/ci/jenkins/PR2.groovy @@ -1,4 +1,4 @@ -@Library('jenkins-shared-library@v0.34.0') _ +@Library('jenkins-shared-library@v0.40.0') _ def pod = libraryResource 'io/milvus/pod/tekton-4am.yaml' def milvus_helm_chart_version = '4.2.8' @@ -43,10 +43,11 @@ pipeline { gitBaseRef: gitBaseRef, pullRequestNumber: "$env.CHANGE_ID", suppress_suffix_of_image_tag: true, - test_client_type: '["pytest"]' + images: '["milvus","pytest","helm"]' milvus_image_tag = tekton.query_result job_name, 'milvus-image-tag' pytest_image = tekton.query_result job_name, 'pytest-image-fqdn' + helm_image = tekton.query_result job_name, 'helm-image-fqdn' } } } @@ -88,7 +89,9 @@ pipeline { ciMode: 'e2e', milvus_image_tag: milvus_image_tag, pytest_image: pytest_image, - milvus_deployment_option: milvus_deployment_option + helm_image: helm_image, + milvus_deployment_option: milvus_deployment_option, + verbose: 'false' } catch (Exception e) { println e } @@ -98,7 +101,9 @@ pipeline { ciMode: 'e2e', milvus_image_tag: milvus_image_tag, pytest_image: pytest_image, - milvus_deployment_option: milvus_deployment_option + helm_image: helm_image, + milvus_deployment_option: milvus_deployment_option, + verbose: 'false' } } } diff --git a/tests/_helm/Dockerfile b/tests/_helm/Dockerfile new file mode 100644 index 0000000000..ecc98bd6ac --- /dev/null +++ 
b/tests/_helm/Dockerfile @@ -0,0 +1,5 @@ +FROM alpine/helm:3.15.3 + +WORKDIR /app + +COPY tests/_helm/values values diff --git a/tests/_helm/values/e2e/distributed b/tests/_helm/values/e2e/distributed new file mode 100644 index 0000000000..2419662438 --- /dev/null +++ b/tests/_helm/values/e2e/distributed @@ -0,0 +1,267 @@ +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 +cluster: + enabled: true +dataCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.1" + memory: 50Mi +dataNode: + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +etcd: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + metrics: + enabled: true + podMonitor: + enabled: true + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 100Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +image: + all: + pullPolicy: Always + repository: harbor.milvus.io/milvus/milvus + tag: PR-35426-20240812-46dadb120 +indexCoordinator: + gc: + interval: 1 + resources: + limits: + cpu: "1" + requests: + cpu: "0.1" + memory: 50Mi +indexNode: + disk: + enabled: true + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +kafka: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + resources: + requests: + cpu: "0.5" + memory: 1Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + zookeeper: + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 512Mi +log: + level: debug +indexCoord: + scheduler: + interval: 100 +metrics: + serviceMonitor: + enabled: true +minio: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + mode: standalone + resources: + requests: + cpu: "0.3" + memory: 512Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +proxy: + resources: + limits: + cpu: "1" + requests: + cpu: "0.3" + memory: 256Mi +pulsar: + bookkeeper: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails + PULSAR_MEM: | + -Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m + nettyMaxFrameSizeBytes: "104867840" + resources: + requests: + cpu: "0.5" + memory: 4Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + broker: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + 
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError + PULSAR_MEM: | + -Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m + backlogQuotaDefaultLimitGB: "8" + backlogQuotaDefaultRetentionPolicy: producer_exception + defaultRetentionSizeInMB: "8192" + defaultRetentionTimeInMinutes: "10080" + maxMessageSize: "104857600" + replicaCount: 2 + resources: + requests: + cpu: "0.5" + memory: 4Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + components: + autorecovery: false + proxy: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -XX:MaxDirectMemorySize=2048m + PULSAR_MEM: | + -Xms1024m -Xmx1024m + httpNumThreads: "50" + resources: + requests: + cpu: "0.5" + memory: 1Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + wsResources: + requests: + cpu: "0.1" + memory: 100Mi + zookeeper: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no + PULSAR_MEM: | + -Xms1024m -Xmx1024m + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 512Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +queryCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.2" + memory: 100Mi +queryNode: + disk: + enabled: true + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +rootCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.2" + memory: 256Mi +service: + type: ClusterIP +standalone: + disk: + enabled: true + resources: + limits: + cpu: "4" + requests: + cpu: "1" + memory: 3.5Gi +tolerations: +- effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists diff --git a/tests/_helm/values/e2e/distributed-streaming-service b/tests/_helm/values/e2e/distributed-streaming-service new file mode 100644 index 0000000000..b145d8899b --- /dev/null +++ b/tests/_helm/values/e2e/distributed-streaming-service @@ -0,0 +1,269 @@ +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 +cluster: + enabled: true +streaming: + enabled: true +dataCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.1" + memory: 50Mi +dataNode: + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +etcd: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + metrics: + enabled: true + podMonitor: + enabled: true + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 100Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +image: + all: + 
pullPolicy: Always + repository: harbor.milvus.io/milvus/milvus + tag: PR-35426-20240812-46dadb120 +indexCoordinator: + gc: + interval: 1 + resources: + limits: + cpu: "1" + requests: + cpu: "0.1" + memory: 50Mi +indexNode: + disk: + enabled: true + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +kafka: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + resources: + requests: + cpu: "0.5" + memory: 1Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + zookeeper: + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 512Mi +log: + level: debug +indexCoord: + scheduler: + interval: 100 +metrics: + serviceMonitor: + enabled: true +minio: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + mode: standalone + resources: + requests: + cpu: "0.3" + memory: 512Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +proxy: + resources: + limits: + cpu: "1" + requests: + cpu: "0.3" + memory: 256Mi +pulsar: + bookkeeper: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails + PULSAR_MEM: | + -Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m + nettyMaxFrameSizeBytes: "104867840" + resources: + requests: + cpu: "0.5" + memory: 4Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + broker: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError + PULSAR_MEM: | + -Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m + backlogQuotaDefaultLimitGB: "8" + backlogQuotaDefaultRetentionPolicy: producer_exception + defaultRetentionSizeInMB: "8192" + defaultRetentionTimeInMinutes: "10080" + maxMessageSize: "104857600" + replicaCount: 2 + resources: + requests: + cpu: "0.5" + memory: 4Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + components: + autorecovery: false + proxy: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -XX:MaxDirectMemorySize=2048m + PULSAR_MEM: | + -Xms1024m -Xmx1024m + httpNumThreads: "50" + resources: + requests: + cpu: "0.5" + memory: 1Gi + tolerations: + - effect: NoSchedule + key: 
node-role.kubernetes.io/e2e + operator: Exists + wsResources: + requests: + cpu: "0.1" + memory: 100Mi + zookeeper: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no + PULSAR_MEM: | + -Xms1024m -Xmx1024m + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 512Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +queryCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.2" + memory: 100Mi +queryNode: + disk: + enabled: true + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +rootCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.2" + memory: 256Mi +service: + type: ClusterIP +standalone: + disk: + enabled: true + resources: + limits: + cpu: "4" + requests: + cpu: "1" + memory: 3.5Gi +tolerations: +- effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists diff --git a/tests/_helm/values/e2e/standalone b/tests/_helm/values/e2e/standalone new file mode 100644 index 0000000000..0d815a303e --- /dev/null +++ b/tests/_helm/values/e2e/standalone @@ -0,0 +1,267 @@ +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 +cluster: + enabled: false +dataCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.1" + memory: 50Mi +dataNode: + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +etcd: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + metrics: + enabled: true + podMonitor: + enabled: true + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 100Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +image: + all: + pullPolicy: Always + repository: harbor.milvus.io/milvus/milvus + tag: PR-35402-20240812-402f716b5 +indexCoordinator: + gc: + interval: 1 + resources: + limits: + cpu: "1" + requests: + cpu: "0.1" + memory: 50Mi +indexNode: + disk: + enabled: true + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +kafka: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + resources: + requests: + cpu: "0.5" + memory: 1Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + zookeeper: + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 512Mi +log: + level: debug +indexCoord: + scheduler: + interval: 100 +metrics: + serviceMonitor: + enabled: true +minio: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + mode: standalone + resources: + requests: + cpu: "0.3" + memory: 512Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +proxy: + resources: + limits: + cpu: "1" + requests: + 
cpu: "0.3" + memory: 256Mi +pulsar: + bookkeeper: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails + PULSAR_MEM: | + -Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m + nettyMaxFrameSizeBytes: "104867840" + resources: + requests: + cpu: "0.5" + memory: 4Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + broker: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError + PULSAR_MEM: | + -Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m + backlogQuotaDefaultLimitGB: "8" + backlogQuotaDefaultRetentionPolicy: producer_exception + defaultRetentionSizeInMB: "8192" + defaultRetentionTimeInMinutes: "10080" + maxMessageSize: "104857600" + resources: + requests: + cpu: "0.5" + memory: 4Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + components: + autorecovery: false + enabled: false + proxy: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -XX:MaxDirectMemorySize=2048m + PULSAR_MEM: | + -Xms1024m -Xmx1024m + httpNumThreads: "50" + resources: + requests: + cpu: "0.5" + memory: 1Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + wsResources: + requests: + cpu: "0.1" + memory: 100Mi + zookeeper: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no + PULSAR_MEM: | + -Xms1024m -Xmx1024m + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 512Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +queryCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.2" + memory: 100Mi +queryNode: + disk: + enabled: true + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +rootCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.2" + memory: 256Mi +service: + type: ClusterIP +standalone: + disk: + enabled: true + resources: + limits: + cpu: "4" + requests: + cpu: "1" + memory: 3.5Gi +tolerations: +- effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: 
Exists diff --git a/tests/_helm/values/e2e/standalone-kafka b/tests/_helm/values/e2e/standalone-kafka new file mode 100644 index 0000000000..e959875a2c --- /dev/null +++ b/tests/_helm/values/e2e/standalone-kafka @@ -0,0 +1,275 @@ +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 +cluster: + enabled: false +dataCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.1" + memory: 50Mi +dataNode: + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +etcd: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + metrics: + enabled: true + podMonitor: + enabled: true + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 100Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +image: + all: + pullPolicy: Always + repository: harbor.milvus.io/milvus/milvus + tag: PR-35426-20240812-46dadb120 +indexCoordinator: + gc: + interval: 1 + resources: + limits: + cpu: "1" + requests: + cpu: "0.1" + memory: 50Mi +indexNode: + disk: + enabled: true + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +kafka: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + enabled: true + metrics: + jmx: + enabled: true + kafka: + enabled: true + serviceMonitor: + enabled: true + resources: + requests: + cpu: "0.5" + memory: 1Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + zookeeper: + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 512Mi +log: + level: debug +indexCoord: + scheduler: + interval: 100 +metrics: + serviceMonitor: + enabled: true +minio: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + mode: standalone + resources: + requests: + cpu: "0.3" + memory: 512Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +proxy: + resources: + limits: + cpu: "1" + requests: + cpu: "0.3" + memory: 256Mi +pulsar: + bookkeeper: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails + PULSAR_MEM: | + -Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m + nettyMaxFrameSizeBytes: "104867840" + resources: + requests: + cpu: "0.5" + memory: 4Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + broker: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + 
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError + PULSAR_MEM: | + -Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m + backlogQuotaDefaultLimitGB: "8" + backlogQuotaDefaultRetentionPolicy: producer_exception + defaultRetentionSizeInMB: "8192" + defaultRetentionTimeInMinutes: "10080" + maxMessageSize: "104857600" + resources: + requests: + cpu: "0.5" + memory: 4Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + components: + autorecovery: false + enabled: false + proxy: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -XX:MaxDirectMemorySize=2048m + PULSAR_MEM: | + -Xms1024m -Xmx1024m + httpNumThreads: "50" + resources: + requests: + cpu: "0.5" + memory: 1Gi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists + wsResources: + requests: + cpu: "0.1" + memory: 100Mi + zookeeper: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 + configData: + PULSAR_GC: | + -Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no + PULSAR_MEM: | + -Xms1024m -Xmx1024m + replicaCount: 1 + resources: + requests: + cpu: "0.3" + memory: 512Mi + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists +queryCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.2" + memory: 100Mi +queryNode: + disk: + enabled: true + resources: + limits: + cpu: "2" + requests: + cpu: "0.5" + memory: 500Mi +rootCoordinator: + resources: + limits: + cpu: "1" + requests: + cpu: "0.2" + memory: 256Mi +service: + type: ClusterIP +standalone: + disk: + enabled: true + resources: + limits: + cpu: "4" + requests: + cpu: "1" + memory: 3.5Gi +tolerations: +- effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists diff --git a/tests/_helm/values/e2e/standalone-one-pod b/tests/_helm/values/e2e/standalone-one-pod new file mode 100644 index 0000000000..bcf78fd1dd --- /dev/null +++ b/tests/_helm/values/e2e/standalone-one-pod @@ -0,0 +1,65 @@ +affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 +cluster: + enabled: false +etcd: + enabled: false + metrics: + enabled: true + podMonitor: + enabled: true + replicaCount: 1 +extraConfigFiles: + user.yaml: | + etcd: + use: + embed: true + data: + dir: /var/lib/milvus/etcd + common: + storageType: local +image: + all: + pullPolicy: Always + repository: harbor.milvus.io/milvus/milvus + tag: PR-35432-20240812-71a1562ea +indexCoordinator: + gc: + interval: 1 +indexCoord: + scheduler: + interval: 100 +indexNode: + disk: + enabled: true +metrics: + serviceMonitor: + enabled: true +minio: + enabled: false + mode: standalone + tls: + enabled: false +pulsar: + enabled: false +queryNode: + disk: + enabled: true +service: + type: ClusterIP 
+standalone: + disk: + enabled: true + extraEnv: + - name: ETCD_CONFIG_PATH + value: /milvus/configs/advanced/etcd.yaml +tolerations: +- effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists diff --git a/tests/_helm/values/nightly/distributed-kafka b/tests/_helm/values/nightly/distributed-kafka new file mode 100644 index 0000000000..fe4eb8f170 --- /dev/null +++ b/tests/_helm/values/nightly/distributed-kafka @@ -0,0 +1,75 @@ +cluster: + enabled: true +common: + security: + authorizationEnabled: false +dataCoordinator: + gc: + dropTolerance: 86400 + missingTolerance: 86400 + profiling: + enabled: true +dataNode: + profiling: + enabled: true + replicas: 2 +etcd: + metrics: + enabled: true + podMonitor: + enabled: true +image: + all: + pullPolicy: Always + repository: harbor.milvus.io/milvus/milvus + tag: nightly-20240821-ed4eaff +indexCoordinator: + gc: + interval: 1 + profiling: + enabled: true +indexNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +kafka: + defaultReplicationFactor: 2 + enabled: true + metrics: + jmx: + enabled: true + kafka: + enabled: true + serviceMonitor: + enabled: true +log: + level: debug +metrics: + serviceMonitor: + enabled: true +minio: + mode: standalone +proxy: + profiling: + enabled: true + replicas: 2 +pulsar: + broker: + replicaCount: 2 + enabled: false +queryCoordinator: + profiling: + enabled: true +queryNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +service: + type: ClusterIP +standalone: + disk: + enabled: true diff --git a/tests/_helm/values/nightly/distributed-pulsar b/tests/_helm/values/nightly/distributed-pulsar new file mode 100644 index 0000000000..8b68385291 --- /dev/null +++ b/tests/_helm/values/nightly/distributed-pulsar @@ -0,0 +1,74 @@ +cluster: + enabled: true +common: + security: + authorizationEnabled: false +dataCoordinator: + gc: + dropTolerance: 86400 + missingTolerance: 86400 + profiling: + enabled: true +dataNode: + profiling: + enabled: true + replicas: 2 +etcd: + metrics: + enabled: true + podMonitor: + enabled: true +image: + all: + pullPolicy: Always + repository: harbor.milvus.io/milvus/milvus + tag: nightly-20240821-ed4eaff +indexCoordinator: + gc: + interval: 1 + profiling: + enabled: true +indexNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +kafka: + enabled: false + metrics: + jmx: + enabled: true + kafka: + enabled: true + serviceMonitor: + enabled: true +log: + level: debug +metrics: + serviceMonitor: + enabled: true +minio: + mode: standalone +proxy: + profiling: + enabled: true + replicas: 2 +pulsar: + broker: + replicaCount: 2 + enabled: true +queryCoordinator: + profiling: + enabled: true +queryNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +service: + type: ClusterIP +standalone: + disk: + enabled: true diff --git a/tests/_helm/values/nightly/distributed-streaming-service b/tests/_helm/values/nightly/distributed-streaming-service new file mode 100644 index 0000000000..53b9495d3b --- /dev/null +++ b/tests/_helm/values/nightly/distributed-streaming-service @@ -0,0 +1,76 @@ +cluster: + enabled: true +streaming: + enabled: true +common: + security: + authorizationEnabled: false +dataCoordinator: + gc: + dropTolerance: 86400 + missingTolerance: 86400 + profiling: + enabled: true +dataNode: + profiling: + enabled: true + replicas: 2 +etcd: + metrics: + enabled: true + podMonitor: + enabled: true +image: + all: + pullPolicy: Always + repository: harbor.milvus.io/milvus/milvus + tag: nightly-20240821-ed4eaff 
+indexCoordinator: + gc: + interval: 1 + profiling: + enabled: true +indexNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +kafka: + enabled: false + metrics: + jmx: + enabled: true + kafka: + enabled: true + serviceMonitor: + enabled: true +log: + level: debug +metrics: + serviceMonitor: + enabled: true +minio: + mode: standalone +proxy: + profiling: + enabled: true + replicas: 2 +pulsar: + broker: + replicaCount: 2 + enabled: true +queryCoordinator: + profiling: + enabled: true +queryNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +service: + type: ClusterIP +standalone: + disk: + enabled: true diff --git a/tests/_helm/values/nightly/standalone b/tests/_helm/values/nightly/standalone new file mode 100644 index 0000000000..ec37b40bda --- /dev/null +++ b/tests/_helm/values/nightly/standalone @@ -0,0 +1,73 @@ +cluster: + enabled: false +common: + security: + authorizationEnabled: false +dataCoordinator: + gc: + dropTolerance: 86400 + missingTolerance: 86400 + profiling: + enabled: true +dataNode: + profiling: + enabled: true + replicas: 2 +etcd: + metrics: + enabled: true + podMonitor: + enabled: true + replicaCount: 1 +image: + all: + pullPolicy: Always + repository: harbor.milvus.io/milvus/milvus + tag: nightly-20240821-ed4eaff +indexCoordinator: + gc: + interval: 1 + profiling: + enabled: true +indexNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +kafka: + enabled: false + metrics: + jmx: + enabled: true + kafka: + enabled: true + serviceMonitor: + enabled: true +log: + level: debug +metrics: + serviceMonitor: + enabled: true +minio: + mode: standalone +proxy: + profiling: + enabled: true + replicas: 2 +pulsar: + enabled: false +queryCoordinator: + profiling: + enabled: true +queryNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +service: + type: ClusterIP +standalone: + disk: + enabled: true diff --git a/tests/_helm/values/nightly/standalone-authentication b/tests/_helm/values/nightly/standalone-authentication new file mode 100644 index 0000000000..387965aae2 --- /dev/null +++ b/tests/_helm/values/nightly/standalone-authentication @@ -0,0 +1,73 @@ +cluster: + enabled: false +common: + security: + authorizationEnabled: true +dataCoordinator: + gc: + dropTolerance: 86400 + missingTolerance: 86400 + profiling: + enabled: true +dataNode: + profiling: + enabled: true + replicas: 2 +etcd: + metrics: + enabled: true + podMonitor: + enabled: true + replicaCount: 1 +image: + all: + pullPolicy: Always + repository: harbor.milvus.io/milvus/milvus + tag: nightly-20240821-ed4eaff +indexCoordinator: + gc: + interval: 1 + profiling: + enabled: true +indexNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +kafka: + enabled: false + metrics: + jmx: + enabled: true + kafka: + enabled: true + serviceMonitor: + enabled: true +log: + level: debug +metrics: + serviceMonitor: + enabled: true +minio: + mode: standalone +proxy: + profiling: + enabled: true + replicas: 2 +pulsar: + enabled: false +queryCoordinator: + profiling: + enabled: true +queryNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +service: + type: ClusterIP +standalone: + disk: + enabled: true diff --git a/tests/_helm/values/nightly/standalone-one-pod b/tests/_helm/values/nightly/standalone-one-pod new file mode 100644 index 0000000000..0ef90b172f --- /dev/null +++ b/tests/_helm/values/nightly/standalone-one-pod @@ -0,0 +1,94 @@ +affinity: + nodeAffinity: + 
preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: node-role.kubernetes.io/e2e + operator: Exists + weight: 1 +cluster: + enabled: false +common: + security: + authorizationEnabled: false +dataCoordinator: + gc: + dropTolerance: 86400 + missingTolerance: 86400 + profiling: + enabled: true +dataNode: + profiling: + enabled: true + replicas: 2 +etcd: + enabled: false + metrics: + enabled: true + podMonitor: + enabled: true + replicaCount: 1 +extraConfigFiles: + user.yaml: | + etcd: + use: + embed: true + data: + dir: /var/lib/milvus/etcd + common: + storageType: local +image: + all: + pullPolicy: Always + repository: harbor.milvus.io/milvus/milvus + tag: nightly-20240821-ed4eaff +indexCoordinator: + gc: + interval: 1 + profiling: + enabled: true +indexNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +kafka: + enabled: false +log: + level: debug +metrics: + serviceMonitor: + enabled: true +minio: + enabled: false + mode: standalone + tls: + enabled: false +proxy: + profiling: + enabled: true + replicas: 2 +pulsar: + enabled: false +queryCoordinator: + profiling: + enabled: true +queryNode: + disk: + enabled: true + profiling: + enabled: true + replicas: 2 +service: + type: ClusterIP +standalone: + disk: + enabled: true + extraEnv: + - name: ETCD_CONFIG_PATH + value: /milvus/configs/advanced/etcd.yaml +tolerations: +- effect: NoSchedule + key: node-role.kubernetes.io/e2e + operator: Exists
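
Note on usage (not part of the diff): the new helm image built from tests/_helm/Dockerfile is a stock alpine/helm:3.15.3 with the values files above baked in at /app/values, so a CI step running that image can deploy the Milvus configuration for a given ci mode and deployment option with a plain helm invocation. A minimal sketch, assuming the Milvus chart repo has been added as `milvus` and using illustrative release/namespace names and an illustrative MILVUS_IMAGE_TAG variable; the real wiring lives in the jenkins-shared-library Tekton tasks (v0.40.0), which this diff does not include:

    # inside the helm image: values are baked in under /app/values/<ci mode>/<deployment option>
    helm upgrade --install milvus-e2e milvus/milvus \
      --version 4.2.8 \
      --namespace milvus-ci --create-namespace \
      -f /app/values/e2e/standalone \
      --set image.all.tag=${MILVUS_IMAGE_TAG}

Baking the values into the image, rather than checking them out in each test job, pins the e2e/nightly configurations to an image the pipelines already track via the helm-image-fqdn Tekton result. The image.all.tag values hard-coded in the files above (e.g. PR-35426-20240812-46dadb120) are snapshots from earlier runs that a deploy step would be expected to override, e.g. with --set as sketched here.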