test: Upgrade helm chart version and resource limit (#41881)

related issue: https://github.com/milvus-io/milvus/issues/41819
pr: #41820

---------

Signed-off-by: yanliang567 <yanliang.qiao@zilliz.com>
yanliang567 authored on 2025-05-16 13:24:23 +08:00; committed by GitHub
parent e0c79ffee8
commit bfdd6adb7b
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
14 changed files with 711 additions and 1191 deletions
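The diffs below pin the CI jobs to Milvus Helm chart 4.2.48 and give each Milvus component explicit resource requests and limits in the CI values files. As an illustration only, a condensed sketch of the new per-component pattern, assembled from the values diffs below rather than copied from any single file:

proxy:
  resources:
    requests:
      cpu: "0.3"
      memory: 256Mi
    limits:
      cpu: "1"
      memory: 4Gi
queryNode:
  resources:
    requests:
      cpu: "1"
      memory: 2Gi
    limits:
      cpu: "2"
      memory: 4Gi
mixCoordinator:
  resources:
    requests:
      cpu: "0.2"
      memory: 256Mi
    limits:
      cpu: "1"
      memory: 4Gi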


@@ -8,7 +8,7 @@ String cron_string = BRANCH_NAME == '2.5' ? '50 23 * * * ' : ''
// Make timeout 4 hours so that we can run two nightly during the ci
int total_timeout_minutes = 7 * 60
def milvus_helm_chart_version = '4.2.18'
def milvus_helm_chart_version = '4.2.48'
pipeline {
triggers {
@@ -96,7 +96,7 @@ pipeline {
axes {
axis {
name 'milvus_deployment_option'
values 'standalone', 'distributed-pulsar', 'distributed-kafka', 'standalone-authentication', 'distributed-streaming-service'
values 'standalone', 'distributed-pulsar', 'distributed-kafka', 'standalone-authentication'
}
}
stages {


@@ -2,7 +2,7 @@
def pod = libraryResource 'io/milvus/pod/tekton-4am.yaml'
def milvus_helm_chart_version = '4.2.8'
def milvus_helm_chart_version = '4.2.48'
pipeline {
options {


@@ -1,7 +1,7 @@
@Library('jenkins-shared-library@tekton') _
def pod = libraryResource 'io/milvus/pod/tekton-4am.yaml'
def milvus_helm_chart_version = '4.2.8'
def milvus_helm_chart_version = '4.2.48'
pipeline {
options {
@@ -89,7 +89,7 @@ pipeline {
axes {
axis {
name 'milvus_deployment_option'
values 'standalone', 'distributed', 'standalone-kafka-mmap', 'distributed-streaming-service'
values 'standalone', 'distributed', 'standalone-kafka-mmap'
}
}
stages {


@@ -4,7 +4,7 @@ int total_timeout_minutes = 60 * 5
int e2e_timeout_seconds = 70 * 60
def imageTag=''
int case_timeout_seconds = 10 * 60
def chart_version='4.1.8'
def chart_version='4.2.48'
pipeline {
options {
timestamps()


@@ -1,7 +1,7 @@
@Library('jenkins-shared-library@tekton') _
def pod = libraryResource 'io/milvus/pod/tekton-4am.yaml'
def milvus_helm_chart_version = '4.2.8'
def milvus_helm_chart_version = '4.2.48'
pipeline {
options {


@@ -8,20 +8,64 @@ affinity:
weight: 1
cluster:
enabled: true
dataCoordinator:
proxy:
resources:
limits:
cpu: "1"
memory: 4Gi
requests:
cpu: "0.1"
memory: 50Mi
cpu: "0.3"
memory: 256Mi
dataNode:
resources:
limits:
cpu: "1"
cpu: "2"
memory: 8Gi
requests:
cpu: "0.5"
memory: 500Mi
indexNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "0.5"
memory: 500Mi
queryNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
memory: 4Gi
requests:
cpu: "1"
memory: 2Gi
mixCoordinator:
resources:
limits:
cpu: "1"
memory: 4Gi
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
log:
level: debug
extraConfigFiles:
user.yaml: |+
dataCoord:
gc:
interval: 1800
missingTolerance: 1800
dropTolerance: 1800
metrics:
serviceMonitor:
enabled: true
etcd:
affinity:
nodeAffinity:
@@ -38,8 +82,11 @@ etcd:
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 100Mi
cpu: "0.2"
memory: 256Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
@@ -49,57 +96,6 @@ image:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35426-20240812-46dadb120
indexCoordinator:
gc:
interval: 1
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
indexNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
kafka:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
zookeeper:
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
log:
level: debug
extraConfigFiles:
user.yaml: |+
indexCoord:
scheduler:
interval: 100
metrics:
serviceMonitor:
enabled: true
minio:
affinity:
nodeAffinity:
@@ -112,20 +108,17 @@ minio:
mode: standalone
resources:
requests:
cpu: "0.3"
cpu: "0.2"
memory: 512Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
proxy:
resources:
limits:
cpu: "1"
requests:
cpu: "0.3"
memory: 256Mi
pulsar:
pulsarv3:
enabled: true
bookkeeper:
affinity:
nodeAffinity:
@@ -135,16 +128,13 @@ pulsar:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
nettyMaxFrameSizeBytes: "104867840"
resources:
requests:
cpu: "0.1"
memory: 256Mi
limits:
cpu: "0.5"
memory: 4Gi
memory: 2Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
@@ -158,19 +148,12 @@ pulsar:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
backlogQuotaDefaultLimitGB: "8"
backlogQuotaDefaultRetentionPolicy: producer_exception
defaultRetentionSizeInMB: "8192"
defaultRetentionTimeInMinutes: "10080"
maxMessageSize: "104857600"
replicaCount: 2
resources:
requests:
cpu: "0.1"
memory: 256Mi
limits:
cpu: "0.5"
memory: 4Gi
tolerations:
@@ -188,16 +171,13 @@ pulsar:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-XX:MaxDirectMemorySize=2048m
PULSAR_MEM: |
-Xms1024m -Xmx1024m
httpNumThreads: "50"
resources:
requests:
cpu: "0.1"
memory: 256Mi
limits:
cpu: "0.5"
memory: 1Gi
memory: 2Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
@@ -205,7 +185,10 @@ pulsar:
wsResources:
requests:
cpu: "0.1"
memory: 100Mi
memory: 256Mi
limits:
cpu: "0.5"
memory: 2Gi
zookeeper:
affinity:
nodeAffinity:
@@ -215,69 +198,19 @@ pulsar:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no
PULSAR_MEM: |
-Xms1024m -Xmx1024m
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
cpu: "0.1"
memory: 256Mi
limits:
cpu: "0.5"
memory: 2Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
queryCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 100Mi
queryNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 512Mi
streamingNode:
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 512Mi
mixCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 256Mi
rootCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
standalone:
disk:
enabled: true
resources:
limits:
cpu: "4"
requests:
cpu: "1"
memory: 3.5Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
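A recurring change in these values files: the old indexCoord scheduler override in extraConfigFiles is dropped and dataCoord garbage-collection tuning is injected instead. The override that ends up in user.yaml is shaped like this (the 1800 values are presumably seconds, i.e. a 30-minute GC cycle; figures copied from the diff above):

extraConfigFiles:
  user.yaml: |+
    dataCoord:
      gc:
        interval: 1800
        missingTolerance: 1800
        dropTolerance: 1800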


@@ -1,285 +1 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: true
streaming:
enabled: true
dataCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
dataNode:
resources:
limits:
cpu: "1"
requests:
cpu: "0.5"
memory: 500Mi
etcd:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 100Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35426-20240812-46dadb120
indexCoordinator:
gc:
interval: 1
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
indexNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
kafka:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
zookeeper:
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
log:
level: debug
extraConfigFiles:
user.yaml: |+
indexCoord:
scheduler:
interval: 100
metrics:
serviceMonitor:
enabled: true
minio:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
mode: standalone
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
proxy:
resources:
limits:
cpu: "1"
requests:
cpu: "0.3"
memory: 256Mi
pulsar:
bookkeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
nettyMaxFrameSizeBytes: "104867840"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
broker:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
backlogQuotaDefaultLimitGB: "8"
backlogQuotaDefaultRetentionPolicy: producer_exception
defaultRetentionSizeInMB: "8192"
defaultRetentionTimeInMinutes: "10080"
maxMessageSize: "104857600"
replicaCount: 2
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
components:
autorecovery: false
proxy:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-XX:MaxDirectMemorySize=2048m
PULSAR_MEM: |
-Xms1024m -Xmx1024m
httpNumThreads: "50"
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
wsResources:
requests:
cpu: "0.1"
memory: 100Mi
zookeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no
PULSAR_MEM: |
-Xms1024m -Xmx1024m
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
queryCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 100Mi
queryNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 512Mi
streamingNode:
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 512Mi
mixCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 256Mi
rootCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
standalone:
disk:
enabled: true
resources:
limits:
cpu: "4"
requests:
cpu: "1"
memory: 3.5Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
# to be deleted


@@ -8,20 +8,31 @@ affinity:
weight: 1
cluster:
enabled: false
dataCoordinator:
service:
type: ClusterIP
standalone:
messageQueue: rocksmq
disk:
enabled: true
resources:
limits:
cpu: "4"
memory: 16Gi
requests:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
dataNode:
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
memory: 3.5Gi
log:
level: debug
extraConfigFiles:
user.yaml: |+
dataCoord:
gc:
interval: 1800
missingTolerance: 1800
dropTolerance: 1800
metrics:
serviceMonitor:
enabled: true
etcd:
affinity:
nodeAffinity:
@@ -38,8 +49,11 @@ etcd:
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 100Mi
cpu: "0.2"
memory: 256Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
@@ -49,57 +63,6 @@ image:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35402-20240812-402f716b5
indexCoordinator:
gc:
interval: 1
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
indexNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
kafka:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
zookeeper:
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
log:
level: debug
extraConfigFiles:
user.yaml: |+
indexCoord:
scheduler:
interval: 100
metrics:
serviceMonitor:
enabled: true
minio:
affinity:
nodeAffinity:
@@ -112,164 +75,18 @@ minio:
mode: standalone
resources:
requests:
cpu: "0.3"
cpu: "0.2"
memory: 512Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
proxy:
resources:
limits:
cpu: "1"
requests:
cpu: "0.3"
memory: 256Mi
pulsar:
bookkeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
nettyMaxFrameSizeBytes: "104867840"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
broker:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
backlogQuotaDefaultLimitGB: "8"
backlogQuotaDefaultRetentionPolicy: producer_exception
defaultRetentionSizeInMB: "8192"
defaultRetentionTimeInMinutes: "10080"
maxMessageSize: "104857600"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
components:
autorecovery: false
pulsarv3:
enabled: false
proxy:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-XX:MaxDirectMemorySize=2048m
PULSAR_MEM: |
-Xms1024m -Xmx1024m
httpNumThreads: "50"
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
wsResources:
requests:
cpu: "0.1"
memory: 100Mi
zookeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no
PULSAR_MEM: |
-Xms1024m -Xmx1024m
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
queryCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 100Mi
queryNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
mmap:
vectorField: true
vectorIndex: true
scalarField: true
scalarIndex: true
growingMmapEnabled: true
rootCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
standalone:
disk:
enabled: true
resources:
limits:
cpu: "4"
requests:
cpu: "1"
memory: 3.5Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
operator: Exists


@@ -8,20 +8,38 @@ affinity:
weight: 1
cluster:
enabled: false
dataCoordinator:
service:
type: ClusterIP
standalone:
messageQueue: kafka
disk:
enabled: true
resources:
limits:
cpu: "4"
memory: 16Gi
requests:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
dataNode:
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
memory: 3.5Gi
log:
level: debug
extraConfigFiles:
user.yaml: |+
dataCoord:
gc:
interval: 1800
missingTolerance: 1800
dropTolerance: 1800
queryNode:
mmap:
vectorField: true
vectorIndex: true
scalarField: true
scalarIndex: true
growingMmapEnabled: true
metrics:
serviceMonitor:
enabled: true
etcd:
affinity:
nodeAffinity:
@@ -38,8 +56,11 @@ etcd:
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 100Mi
cpu: "0.2"
memory: 256Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
@@ -49,22 +70,6 @@ image:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35426-20240812-46dadb120
indexCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.1"
memory: 50Mi
indexNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
kafka:
affinity:
nodeAffinity:
@@ -84,8 +89,11 @@ kafka:
enabled: true
resources:
requests:
cpu: "0.2"
memory: 512Mi
limits:
cpu: "0.5"
memory: 1Gi
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
@@ -94,27 +102,11 @@ kafka:
replicaCount: 1
resources:
requests:
cpu: "0.3"
cpu: "0.2"
memory: 512Mi
log:
level: debug
extraConfigFiles:
user.yaml: |+
indexCoord:
gc:
interval: 1
scheduler:
interval: 100
queryNode:
mmap:
vectorField: true
vectorIndex: true
scalarField: true
scalarIndex: true
growingMmapEnabled: true
metrics:
serviceMonitor:
enabled: true
limits:
cpu: "0.5"
memory: 2Gi
minio:
affinity:
nodeAffinity:
@@ -127,157 +119,17 @@ minio:
mode: standalone
resources:
requests:
cpu: "0.3"
cpu: "0.2"
memory: 512Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
proxy:
resources:
limits:
cpu: "1"
requests:
cpu: "0.3"
memory: 256Mi
pulsar:
bookkeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+UseG1GC -XX:MaxGCPauseMillis=10 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError -XX:+PerfDisableSharedMem -XX:+PrintGCDetails
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
nettyMaxFrameSizeBytes: "104867840"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
broker:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dio.netty.leakDetectionLevel=disabled -Dio.netty.recycler.linkCapacity=1024 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:ParallelGCThreads=32 -XX:ConcGCThreads=32 -XX:G1NewSizePercent=50 -XX:+DisableExplicitGC -XX:-ResizePLAB -XX:+ExitOnOutOfMemoryError
PULSAR_MEM: |
-Xms4096m -Xmx4096m -XX:MaxDirectMemorySize=8192m
backlogQuotaDefaultLimitGB: "8"
backlogQuotaDefaultRetentionPolicy: producer_exception
defaultRetentionSizeInMB: "8192"
defaultRetentionTimeInMinutes: "10080"
maxMessageSize: "104857600"
resources:
requests:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
components:
autorecovery: false
pulsarv3:
enabled: false
proxy:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-XX:MaxDirectMemorySize=2048m
PULSAR_MEM: |
-Xms1024m -Xmx1024m
httpNumThreads: "50"
resources:
requests:
cpu: "0.5"
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
wsResources:
requests:
cpu: "0.1"
memory: 100Mi
zookeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
configData:
PULSAR_GC: |
-Dcom.sun.management.jmxremote -Djute.maxbuffer=10485760 -XX:+ParallelRefProcEnabled -XX:+UnlockExperimentalVMOptions -XX:+DoEscapeAnalysis -XX:+DisableExplicitGC -XX:+PerfDisableSharedMem -Dzookeeper.forceSync=no
PULSAR_MEM: |
-Xms1024m -Xmx1024m
replicaCount: 1
resources:
requests:
cpu: "0.3"
memory: 512Mi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
queryCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 100Mi
queryNode:
disk:
enabled: true
resources:
limits:
cpu: "2"
requests:
cpu: "0.5"
memory: 500Mi
rootCoordinator:
resources:
limits:
cpu: "1"
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
standalone:
disk:
enabled: true
resources:
limits:
cpu: "4"
requests:
cpu: "1"
memory: 3.5Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
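For the mmap-enabled deployment options, the queryNode mmap switches stay inside the same user.yaml override, alongside the dataCoord GC settings sketched earlier; trimmed to the mmap part:

extraConfigFiles:
  user.yaml: |+
    queryNode:
      mmap:
        vectorField: true
        vectorIndex: true
        scalarField: true
        scalarIndex: true
        growingMmapEnabled: true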


@@ -1,75 +1,155 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: true
common:
security:
authorizationEnabled: false
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
proxy:
replicas: 2
resources:
limits:
cpu: "1"
memory: 4Gi
requests:
cpu: "0.3"
memory: 256Mi
dataNode:
replicas: 2
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "0.5"
memory: 500Mi
indexNode:
replicas: 2
disk:
enabled: true
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "0.5"
memory: 500Mi
queryNode:
replicas: 2
disk:
enabled: true
resources:
limits:
cpu: "2"
memory: 4Gi
requests:
cpu: "1"
memory: 2Gi
mixCoordinator:
resources:
limits:
cpu: "1"
memory: 4Gi
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
log:
level: debug
extraConfigFiles:
user.yaml: |+
dataCoord:
gc:
interval: 1800
missingTolerance: 1800
dropTolerance: 1800
metrics:
serviceMonitor:
enabled: true
etcd:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
resources:
requests:
cpu: "0.2"
memory: 256Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
kafka:
enabled: true
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
resources:
requests:
cpu: "0.2"
memory: 512Mi
limits:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
zookeeper:
replicaCount: 1
resources:
requests:
cpu: "0.2"
memory: 512Mi
limits:
cpu: "0.5"
memory: 2Gi
minio:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
mode: standalone
resources:
requests:
cpu: "0.2"
memory: 512Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
pulsarv3:
enabled: false
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
defaultReplicationFactor: 2
enabled: true
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
mode: standalone
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
broker:
replicaCount: 2
enabled: false
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
service:
type: ClusterIP
standalone:
disk:
enabled: true
tag: nightly-20240821-ed4eaff


@@ -1,82 +1,223 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: true
common:
enabledJSONKeyStats: true
enabledGrowingSegmentJSONKeyStats: true
security:
authorizationEnabled: false
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
proxy:
replicas: 2
resources:
limits:
cpu: "1"
memory: 4Gi
requests:
cpu: "0.3"
memory: 256Mi
dataNode:
replicas: 2
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "0.5"
memory: 500Mi
indexNode:
replicas: 2
disk:
enabled: true
resources:
limits:
cpu: "2"
memory: 8Gi
requests:
cpu: "0.5"
memory: 500Mi
queryNode:
replicas: 2
disk:
enabled: true
resources:
limits:
cpu: "2"
memory: 4Gi
requests:
cpu: "1"
memory: 2Gi
mixCoordinator:
resources:
limits:
cpu: "1"
memory: 4Gi
requests:
cpu: "0.2"
memory: 256Mi
service:
type: ClusterIP
log:
level: debug
extraConfigFiles:
user.yaml: |+
dataCoord:
gc:
interval: 1800
missingTolerance: 1800
dropTolerance: 1800
queryNode:
mmap:
vectorField: true
vectorIndex: true
scalarField: true
scalarIndex: true
growingMmapEnabled: true
metrics:
serviceMonitor:
enabled: true
etcd:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
resources:
requests:
cpu: "0.2"
memory: 256Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
pulsarv3:
enabled: true
bookkeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
resources:
requests:
cpu: "0.1"
memory: 256Mi
limits:
cpu: "0.5"
memory: 2Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
broker:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
replicaCount: 2
resources:
requests:
cpu: "0.1"
memory: 256Mi
limits:
cpu: "0.5"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
components:
autorecovery: false
proxy:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
resources:
requests:
cpu: "0.1"
memory: 256Mi
limits:
cpu: "0.5"
memory: 2Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
wsResources:
requests:
cpu: "0.1"
memory: 256Mi
limits:
cpu: "0.5"
memory: 2Gi
zookeeper:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
replicaCount: 1
resources:
requests:
cpu: "0.1"
memory: 256Mi
limits:
cpu: "0.5"
memory: 2Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
minio:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
mode: standalone
resources:
requests:
cpu: "0.2"
memory: 512Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
enabled: false
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
mode: standalone
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
broker:
replicaCount: 2
enabled: true
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
mmap:
vectorField: true
vectorIndex: true
scalarField: true
scalarIndex: true
growingMmapEnabled: true
service:
type: ClusterIP
standalone:
disk:
enabled: true
tag: nightly-20240821-ed4eaff


@@ -1,76 +1 @@
cluster:
enabled: true
streaming:
enabled: true
common:
security:
authorizationEnabled: false
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
replicas: 2
etcd:
metrics:
enabled: true
podMonitor:
enabled: true
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
enabled: false
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
mode: standalone
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
broker:
replicaCount: 2
enabled: true
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
service:
type: ClusterIP
standalone:
disk:
enabled: true
# to be deleted


@@ -1,73 +1,101 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: false
common:
security:
authorizationEnabled: false
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
replicas: 2
etcd:
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
enabled: false
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
mode: standalone
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
enabled: false
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
service:
type: ClusterIP
standalone:
disk:
enabled: true
resources:
limits:
cpu: "4"
memory: 16Gi
requests:
cpu: "1"
memory: 3.5Gi
log:
level: debug
extraConfigFiles:
user.yaml: |+
dataCoord:
gc:
interval: 1800
missingTolerance: 1800
dropTolerance: 1800
queryNode:
mmap:
vectorField: true
vectorIndex: true
scalarField: true
scalarIndex: true
growingMmapEnabled: true
common:
security:
authorizationEnabled: true
metrics:
serviceMonitor:
enabled: true
etcd:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
resources:
requests:
cpu: "0.2"
memory: 256Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35402-20240812-402f716b5
minio:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
mode: standalone
resources:
requests:
cpu: "0.2"
memory: 512Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
pulsarv3:
enabled: false
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists


@@ -1,73 +1,101 @@
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
cluster:
enabled: false
common:
security:
authorizationEnabled: true
dataCoordinator:
gc:
dropTolerance: 86400
missingTolerance: 86400
profiling:
enabled: true
dataNode:
profiling:
enabled: true
replicas: 2
etcd:
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: nightly-20240821-ed4eaff
indexCoordinator:
gc:
interval: 1
profiling:
enabled: true
indexNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
kafka:
enabled: false
metrics:
jmx:
enabled: true
kafka:
enabled: true
serviceMonitor:
enabled: true
log:
level: debug
metrics:
serviceMonitor:
enabled: true
minio:
mode: standalone
proxy:
profiling:
enabled: true
replicas: 2
pulsar:
enabled: false
queryCoordinator:
profiling:
enabled: true
queryNode:
disk:
enabled: true
profiling:
enabled: true
replicas: 2
service:
type: ClusterIP
standalone:
disk:
enabled: true
resources:
limits:
cpu: "4"
memory: 16Gi
requests:
cpu: "1"
memory: 3.5Gi
log:
level: debug
extraConfigFiles:
user.yaml: |+
dataCoord:
gc:
interval: 1800
missingTolerance: 1800
dropTolerance: 1800
queryNode:
mmap:
vectorField: true
vectorIndex: true
scalarField: true
scalarIndex: true
growingMmapEnabled: true
common:
security:
authorizationEnabled: true
metrics:
serviceMonitor:
enabled: true
etcd:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
metrics:
enabled: true
podMonitor:
enabled: true
replicaCount: 1
resources:
requests:
cpu: "0.2"
memory: 256Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
image:
all:
pullPolicy: Always
repository: harbor.milvus.io/milvus/milvus
tag: PR-35402-20240812-402f716b5
minio:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/e2e
operator: Exists
weight: 1
mode: standalone
resources:
requests:
cpu: "0.2"
memory: 512Mi
limits:
cpu: "1"
memory: 4Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists
pulsarv3:
enabled: false
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/e2e
operator: Exists