issue: #43107 pr: #43108

- Add checkLoadConfigChanges() to apply load config during startup
- Call config check in startQueryCoord() after restart
- Skip auto-updates for collections with user-specified replica numbers
- Add is_user_specified_replica_mode field to preserve user settings
- Add comprehensive unit tests with mockey

Ensures existing collections use latest cluster-level config after restart.

Signed-off-by: Wei Liu <wei.liu@zilliz.com>
This commit is contained in:
parent 33e9b873de
commit 4952b8c416
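In short, the change makes QueryCoord re-apply the cluster-level load configuration to already-loaded collections when it starts up, while leaving alone any collection whose replica number was set explicitly by a user. A condensed sketch of that filtering decision, paraphrasing the checkLoadConfigChanges code that appears later in this diff (not a separate implementation; only the querypb types are real):

package example

import "github.com/milvus-io/milvus/pkg/v2/proto/querypb"

// collectionsToUpdate keeps only the collections that are NOT pinned by a
// user-specified replica number; the returned IDs are what the startup check
// hands to UpdateLoadConfig together with the cluster-level replica number
// and resource groups.
func collectionsToUpdate(infos []*querypb.CollectionLoadInfo) []int64 {
    ids := make([]int64, 0, len(infos))
    for _, info := range infos {
        if info.GetUserSpecifiedReplicaMode() {
            continue // user pinned this collection's replica number; skip it
        }
        ids = append(ids, info.GetCollectionID())
    }
    return ids
}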
go.mod (7 changed lines)
@@ -102,6 +102,7 @@ require (
    github.com/ardielle/ardielle-go v1.5.2 // indirect
    github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/bytedance/mockey v1.2.14 // indirect
    github.com/bytedance/sonic/loader v0.2.4 // indirect
    github.com/campoy/embedmd v1.0.0 // indirect
    github.com/cespare/xxhash/v2 v2.3.0 // indirect
@@ -146,6 +147,7 @@
    github.com/google/s2a-go v0.1.7 // indirect
    github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
    github.com/googleapis/gax-go/v2 v2.12.5 // indirect
    github.com/gopherjs/gopherjs v1.12.80 // indirect
    github.com/gorilla/websocket v1.4.2 // indirect
    github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
    github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
@@ -157,6 +159,7 @@
    github.com/ianlancetaylor/cgosymbolizer v0.0.0-20221217025313-27d3c9f66b6a // indirect
    github.com/jonboulle/clockwork v0.2.2 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/jtolds/gls v4.20.0+incompatible // indirect
    github.com/klauspost/asmfmt v1.3.2 // indirect
    github.com/klauspost/cpuid/v2 v2.2.8 // indirect
    github.com/kr/pretty v0.3.1 // indirect
@@ -201,6 +204,8 @@
    github.com/rs/xid v1.5.0 // indirect
    github.com/shirou/gopsutil/v3 v3.22.9 // indirect
    github.com/sirupsen/logrus v1.9.3 // indirect
    github.com/smartystreets/assertions v1.2.0 // indirect
    github.com/smartystreets/goconvey v1.7.2 // indirect
    github.com/spaolacci/murmur3 v1.1.0 // indirect
    github.com/spf13/afero v1.6.0 // indirect
    github.com/spf13/jwalterweatherman v1.1.0 // indirect
@@ -240,7 +245,7 @@
    go.opentelemetry.io/otel/sdk v1.28.0 // indirect
    go.opentelemetry.io/proto/otlp v1.0.0 // indirect
    go.uber.org/automaxprocs v1.6.0 // indirect
    golang.org/x/arch v0.3.0 // indirect
    golang.org/x/arch v0.11.0 // indirect
    golang.org/x/mod v0.17.0 // indirect
    golang.org/x/sys v0.31.0 // indirect
    golang.org/x/term v0.30.0 // indirect
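The one functional addition to go.mod is github.com/bytedance/mockey, pulled in for the new QueryCoord unit tests; the goconvey, gls, and gopherjs entries appear to be its transitive dependencies. A minimal, self-contained sketch of the patching pattern those tests rely on; the function and values here are made up for illustration:

package example

import (
    "testing"

    "github.com/bytedance/mockey"
    "github.com/stretchr/testify/assert"
)

// clusterReplicaNumber stands in for any concrete function a test wants to
// override; mockey rewrites it at runtime, no interface or code change needed.
func clusterReplicaNumber() int32 { return 1 }

func TestPatchExample(t *testing.T) {
    mockey.PatchConvey("patch a plain function", t, func() {
        mockey.Mock(clusterReplicaNumber).Return(int32(2)).Build()
        assert.Equal(t, int32(2), clusterReplicaNumber())
        // The patch is undone automatically when PatchConvey returns.
    })
}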
go.sum (22 changed lines)
@ -140,6 +140,8 @@ github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqO
|
||||
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||
github.com/bytedance/mockey v1.2.14 h1:KZaFgPdiUwW+jOWFieo3Lr7INM1P+6adO3hxZhDswY8=
|
||||
github.com/bytedance/mockey v1.2.14/go.mod h1:1BPHF9sol5R1ud/+0VEHGQq/+i2lN+GTsr3O2Q9IENY=
|
||||
github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ=
|
||||
github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
|
||||
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
@ -456,6 +458,8 @@ github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBY
|
||||
github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v1.12.80 h1:aC68NT6VK715WeUapxcPSFq/a3gZdS32HdtghdOIgAo=
|
||||
github.com/gopherjs/gopherjs v1.12.80/go.mod h1:d55Q4EjGQHeJVms+9LGtXul6ykz5Xzx1E1gaXQXdimY=
|
||||
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
@ -540,6 +544,7 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/juju/qthttptest v0.1.1/go.mod h1:aTlAv8TYaflIiTDIQYzxnl1QdPjAg8Q8qJMErpKy6A4=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
@ -683,6 +688,8 @@ github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
|
||||
github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nrwiersma/avro-benchmarks v0.0.0-20210913175520-21aec48c8f76/go.mod h1:iKyFMidsk/sVYONJRE372sJuX/QTRPacU7imPqqsu7g=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
@ -782,6 +789,7 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
|
||||
github.com/rogpeppe/clock v0.0.0-20190514195947-2896927a307a/go.mod h1:4r5QyqhjIWCcK8DO4KMclc5Iknq5qVBAlbYYzAbUScQ=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.0.1-alpha.1/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
@ -809,7 +817,10 @@ github.com/shirou/gopsutil/v3 v3.22.9 h1:yibtJhIVEMcdw+tCTbOPiF1VcsuDeTE4utJ8Dm4
|
||||
github.com/shirou/gopsutil/v3 v3.22.9/go.mod h1:bBYl1kjgEJpWpxeHmLI+dVHWtyAwfcmSBLDsp2TNT8A=
|
||||
github.com/shirou/gopsutil/v4 v4.24.10 h1:7VOzPtfw/5YDU+jLEoBwXwxJbQetULywoSV4RYY7HkM=
|
||||
github.com/shirou/gopsutil/v4 v4.24.10/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8=
|
||||
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
github.com/shurcooL/httpfs v0.0.0-20181222201310-74dc9339e414/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/shurcooL/vfsgen v0.0.0-20180915214035-33ae1944be3f/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
@ -818,7 +829,11 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
|
||||
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
|
||||
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
|
||||
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||
@ -831,6 +846,7 @@ github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
|
||||
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
|
||||
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
|
||||
@ -838,6 +854,7 @@ github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUq
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
@ -1027,6 +1044,9 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
||||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
|
||||
golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.11.0 h1:KXV8WWKCXm6tRpLirl2szsO5j/oOODwZf4hATmGVNs4=
|
||||
golang.org/x/arch v0.11.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
golang.org/x/crypto v0.0.0-20180807104621-f027049dab0a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
@ -1170,6 +1190,7 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180807162357-acbc56fc7007/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -1279,6 +1300,7 @@ golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGm
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190308142131-b40df0fb21c3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
|
||||
@ -20,36 +20,20 @@ import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"go.uber.org/zap"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/milvus-io/milvus/internal/util/mock"
|
||||
"github.com/milvus-io/milvus/pkg/v2/log"
|
||||
"github.com/milvus-io/milvus/pkg/v2/proto/querypb"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/etcd"
|
||||
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// init embed etcd
|
||||
embedetcdServer, tempDir, err := etcd.StartTestEmbedEtcdServer()
|
||||
if err != nil {
|
||||
log.Fatal("failed to start embed etcd server", zap.Error(err))
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
defer embedetcdServer.Close()
|
||||
|
||||
addrs := etcd.GetEmbedEtcdEndpoints(embedetcdServer)
|
||||
|
||||
paramtable.Init()
|
||||
paramtable.Get().Save(Params.EtcdCfg.Endpoints.Key, strings.Join(addrs, ","))
|
||||
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
@ -47,13 +47,14 @@ type LoadCollectionJob struct {
|
||||
req *querypb.LoadCollectionRequest
|
||||
undo *UndoList
|
||||
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
broker meta.Broker
|
||||
targetMgr meta.TargetManagerInterface
|
||||
targetObserver *observers.TargetObserver
|
||||
collectionObserver *observers.CollectionObserver
|
||||
nodeMgr *session.NodeManager
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
broker meta.Broker
|
||||
targetMgr meta.TargetManagerInterface
|
||||
targetObserver *observers.TargetObserver
|
||||
collectionObserver *observers.CollectionObserver
|
||||
nodeMgr *session.NodeManager
|
||||
userSpecifiedReplicaMode bool
|
||||
}
|
||||
|
||||
func NewLoadCollectionJob(
|
||||
@ -66,18 +67,20 @@ func NewLoadCollectionJob(
|
||||
targetObserver *observers.TargetObserver,
|
||||
collectionObserver *observers.CollectionObserver,
|
||||
nodeMgr *session.NodeManager,
|
||||
userSpecifiedReplicaMode bool,
|
||||
) *LoadCollectionJob {
|
||||
return &LoadCollectionJob{
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
undo: NewUndoList(ctx, meta, targetMgr, targetObserver),
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
broker: broker,
|
||||
targetMgr: targetMgr,
|
||||
targetObserver: targetObserver,
|
||||
collectionObserver: collectionObserver,
|
||||
nodeMgr: nodeMgr,
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
undo: NewUndoList(ctx, meta, targetMgr, targetObserver),
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
broker: broker,
|
||||
targetMgr: targetMgr,
|
||||
targetObserver: targetObserver,
|
||||
collectionObserver: collectionObserver,
|
||||
nodeMgr: nodeMgr,
|
||||
userSpecifiedReplicaMode: userSpecifiedReplicaMode,
|
||||
}
|
||||
}
|
||||
|
||||
@ -208,13 +211,14 @@ func (job *LoadCollectionJob) Execute() error {
|
||||
ctx, sp := otel.Tracer(typeutil.QueryCoordRole).Start(job.ctx, "LoadCollection", trace.WithNewRoot())
|
||||
collection := &meta.Collection{
|
||||
CollectionLoadInfo: &querypb.CollectionLoadInfo{
|
||||
CollectionID: req.GetCollectionID(),
|
||||
ReplicaNumber: req.GetReplicaNumber(),
|
||||
Status: querypb.LoadStatus_Loading,
|
||||
FieldIndexID: req.GetFieldIndexID(),
|
||||
LoadType: querypb.LoadType_LoadCollection,
|
||||
LoadFields: req.GetLoadFields(),
|
||||
DbID: collectionInfo.GetDbId(),
|
||||
CollectionID: req.GetCollectionID(),
|
||||
ReplicaNumber: req.GetReplicaNumber(),
|
||||
Status: querypb.LoadStatus_Loading,
|
||||
FieldIndexID: req.GetFieldIndexID(),
|
||||
LoadType: querypb.LoadType_LoadCollection,
|
||||
LoadFields: req.GetLoadFields(),
|
||||
DbID: collectionInfo.GetDbId(),
|
||||
UserSpecifiedReplicaMode: job.userSpecifiedReplicaMode,
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
LoadSpan: sp,
|
||||
@ -255,13 +259,14 @@ type LoadPartitionJob struct {
|
||||
req *querypb.LoadPartitionsRequest
|
||||
undo *UndoList
|
||||
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
broker meta.Broker
|
||||
targetMgr meta.TargetManagerInterface
|
||||
targetObserver *observers.TargetObserver
|
||||
collectionObserver *observers.CollectionObserver
|
||||
nodeMgr *session.NodeManager
|
||||
dist *meta.DistributionManager
|
||||
meta *meta.Meta
|
||||
broker meta.Broker
|
||||
targetMgr meta.TargetManagerInterface
|
||||
targetObserver *observers.TargetObserver
|
||||
collectionObserver *observers.CollectionObserver
|
||||
nodeMgr *session.NodeManager
|
||||
userSpecifiedReplicaMode bool
|
||||
}
|
||||
|
||||
func NewLoadPartitionJob(
|
||||
@ -274,18 +279,20 @@ func NewLoadPartitionJob(
|
||||
targetObserver *observers.TargetObserver,
|
||||
collectionObserver *observers.CollectionObserver,
|
||||
nodeMgr *session.NodeManager,
|
||||
userSpecifiedReplicaMode bool,
|
||||
) *LoadPartitionJob {
|
||||
return &LoadPartitionJob{
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
undo: NewUndoList(ctx, meta, targetMgr, targetObserver),
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
broker: broker,
|
||||
targetMgr: targetMgr,
|
||||
targetObserver: targetObserver,
|
||||
collectionObserver: collectionObserver,
|
||||
nodeMgr: nodeMgr,
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), req.GetCollectionID()),
|
||||
req: req,
|
||||
undo: NewUndoList(ctx, meta, targetMgr, targetObserver),
|
||||
dist: dist,
|
||||
meta: meta,
|
||||
broker: broker,
|
||||
targetMgr: targetMgr,
|
||||
targetObserver: targetObserver,
|
||||
collectionObserver: collectionObserver,
|
||||
nodeMgr: nodeMgr,
|
||||
userSpecifiedReplicaMode: userSpecifiedReplicaMode,
|
||||
}
|
||||
}
|
||||
|
||||
@ -411,13 +418,14 @@ func (job *LoadPartitionJob) Execute() error {
|
||||
|
||||
collection := &meta.Collection{
|
||||
CollectionLoadInfo: &querypb.CollectionLoadInfo{
|
||||
CollectionID: req.GetCollectionID(),
|
||||
ReplicaNumber: req.GetReplicaNumber(),
|
||||
Status: querypb.LoadStatus_Loading,
|
||||
FieldIndexID: req.GetFieldIndexID(),
|
||||
LoadType: querypb.LoadType_LoadPartition,
|
||||
LoadFields: req.GetLoadFields(),
|
||||
DbID: collectionInfo.GetDbId(),
|
||||
CollectionID: req.GetCollectionID(),
|
||||
ReplicaNumber: req.GetReplicaNumber(),
|
||||
Status: querypb.LoadStatus_Loading,
|
||||
FieldIndexID: req.GetFieldIndexID(),
|
||||
LoadType: querypb.LoadType_LoadPartition,
|
||||
LoadFields: req.GetLoadFields(),
|
||||
DbID: collectionInfo.GetDbId(),
|
||||
UserSpecifiedReplicaMode: job.userSpecifiedReplicaMode,
|
||||
},
|
||||
CreatedAt: time.Now(),
|
||||
LoadSpan: sp,
|
||||
@@ -430,6 +438,17 @@ func (job *LoadPartitionJob) Execute() error {
            return errors.Wrap(err, msg)
        }
    } else { // collection exists, put partitions only
        coll := job.meta.GetCollection(job.ctx, req.GetCollectionID())
        if job.userSpecifiedReplicaMode && !coll.CollectionLoadInfo.UserSpecifiedReplicaMode {
            coll.CollectionLoadInfo.UserSpecifiedReplicaMode = job.userSpecifiedReplicaMode
            err = job.meta.CollectionManager.PutCollection(job.ctx, coll)
            if err != nil {
                msg := "failed to store collection"
                log.Warn(msg, zap.Error(err))
                return errors.Wrap(err, msg)
            }
        }

        err = job.meta.CollectionManager.PutPartition(job.ctx, partitions...)
        if err != nil {
            msg := "failed to store partitions"
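The else branch above only rewrites the stored CollectionLoadInfo when the flag actually changes, and only ever in one direction. Pulled out of the job for clarity (a hypothetical helper, not part of the patch; only the querypb type is real):

package example

import "github.com/milvus-io/milvus/pkg/v2/proto/querypb"

// upgradeUserSpecifiedFlag mirrors the rule implemented above: once any load
// request for an existing collection carries an explicit replica number, the
// collection is switched to user-specified mode and stays there; nothing in
// this path ever switches it back automatically.
func upgradeUserSpecifiedFlag(info *querypb.CollectionLoadInfo, requestUserSpecified bool) bool {
    if requestUserSpecified && !info.GetUserSpecifiedReplicaMode() {
        info.UserSpecifiedReplicaMode = true
        return true // caller must persist the change (the job uses CollectionManager.PutCollection)
    }
    return false
}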
@ -249,6 +249,7 @@ func (suite *JobSuite) TestLoadCollection() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -276,6 +277,7 @@ func (suite *JobSuite) TestLoadCollection() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -301,6 +303,7 @@ func (suite *JobSuite) TestLoadCollection() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -328,6 +331,7 @@ func (suite *JobSuite) TestLoadCollection() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -363,6 +367,7 @@ func (suite *JobSuite) TestLoadCollection() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -384,6 +389,7 @@ func (suite *JobSuite) TestLoadCollection() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err = job.Wait()
|
||||
@ -413,6 +419,7 @@ func (suite *JobSuite) TestLoadCollectionWithReplicas() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -444,6 +451,7 @@ func (suite *JobSuite) TestLoadCollectionWithLoadFields() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -473,6 +481,7 @@ func (suite *JobSuite) TestLoadCollectionWithLoadFields() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -500,6 +509,7 @@ func (suite *JobSuite) TestLoadCollectionWithLoadFields() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -533,6 +543,7 @@ func (suite *JobSuite) TestLoadCollectionWithLoadFields() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -565,6 +576,7 @@ func (suite *JobSuite) TestLoadPartition() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -595,6 +607,7 @@ func (suite *JobSuite) TestLoadPartition() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -622,6 +635,7 @@ func (suite *JobSuite) TestLoadPartition() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -649,6 +663,7 @@ func (suite *JobSuite) TestLoadPartition() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -675,6 +690,7 @@ func (suite *JobSuite) TestLoadPartition() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -710,6 +726,7 @@ func (suite *JobSuite) TestLoadPartition() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -732,6 +749,7 @@ func (suite *JobSuite) TestLoadPartition() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err = job.Wait()
|
||||
@ -764,6 +782,7 @@ func (suite *JobSuite) TestLoadPartitionWithLoadFields() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -796,6 +815,7 @@ func (suite *JobSuite) TestLoadPartitionWithLoadFields() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -825,6 +845,7 @@ func (suite *JobSuite) TestLoadPartitionWithLoadFields() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -860,6 +881,7 @@ func (suite *JobSuite) TestLoadPartitionWithLoadFields() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -889,6 +911,7 @@ func (suite *JobSuite) TestDynamicLoad() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
return job
|
||||
}
|
||||
@ -907,6 +930,7 @@ func (suite *JobSuite) TestDynamicLoad() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
return job
|
||||
}
|
||||
@ -1006,6 +1030,7 @@ func (suite *JobSuite) TestLoadPartitionWithReplicas() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -1299,6 +1324,7 @@ func (suite *JobSuite) TestLoadCollectionStoreFailed() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
loadErr := job.Wait()
|
||||
@ -1312,7 +1338,7 @@ func (suite *JobSuite) TestLoadPartitionStoreFailed() {
|
||||
store := mocks.NewQueryCoordCatalog(suite.T())
|
||||
suite.meta = meta.NewMeta(RandomIncrementIDAllocator(), store, suite.nodeMgr)
|
||||
|
||||
store.EXPECT().SaveResourceGroup(mock.Anything, mock.Anything, mock.Anything).Return(nil)
|
||||
store.EXPECT().SaveResourceGroup(mock.Anything, mock.Anything).Return(nil)
|
||||
suite.meta.HandleNodeUp(ctx, 1000)
|
||||
suite.meta.HandleNodeUp(ctx, 2000)
|
||||
suite.meta.HandleNodeUp(ctx, 3000)
|
||||
@ -1341,6 +1367,7 @@ func (suite *JobSuite) TestLoadPartitionStoreFailed() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
loadErr := job.Wait()
|
||||
@ -1368,6 +1395,7 @@ func (suite *JobSuite) TestLoadCreateReplicaFailed() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -1457,6 +1485,7 @@ func (suite *JobSuite) loadAll() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -1481,6 +1510,7 @@ func (suite *JobSuite) loadAll() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -1628,6 +1658,156 @@ func (suite *JobSuite) updateChannelDist(ctx context.Context, collection int64,
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *JobSuite) TestLoadCollectionWithUserSpecifiedReplicaMode() {
|
||||
ctx := context.Background()
|
||||
|
||||
// Test load collection with userSpecifiedReplicaMode = true
|
||||
for _, collection := range suite.collections {
|
||||
if suite.loadTypes[collection] != querypb.LoadType_LoadCollection {
|
||||
continue
|
||||
}
|
||||
|
||||
req := &querypb.LoadCollectionRequest{
|
||||
CollectionID: collection,
|
||||
ReplicaNumber: 1,
|
||||
}
|
||||
|
||||
job := NewLoadCollectionJob(
|
||||
ctx,
|
||||
req,
|
||||
suite.dist,
|
||||
suite.meta,
|
||||
suite.broker,
|
||||
suite.targetMgr,
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
true, // userSpecifiedReplicaMode = true
|
||||
)
|
||||
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
suite.NoError(err)
|
||||
|
||||
// Verify UserSpecifiedReplicaMode is set correctly
|
||||
loadedCollection := suite.meta.GetCollection(ctx, collection)
|
||||
suite.NotNil(loadedCollection)
|
||||
suite.True(loadedCollection.GetUserSpecifiedReplicaMode())
|
||||
|
||||
suite.targetMgr.UpdateCollectionCurrentTarget(ctx, collection)
|
||||
suite.assertCollectionLoaded(collection)
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *JobSuite) TestLoadPartitionWithUserSpecifiedReplicaMode() {
|
||||
ctx := context.Background()
|
||||
|
||||
// Test load partition with userSpecifiedReplicaMode = true
|
||||
for _, collection := range suite.collections {
|
||||
if suite.loadTypes[collection] != querypb.LoadType_LoadPartition {
|
||||
continue
|
||||
}
|
||||
|
||||
req := &querypb.LoadPartitionsRequest{
|
||||
CollectionID: collection,
|
||||
PartitionIDs: suite.partitions[collection],
|
||||
ReplicaNumber: 1,
|
||||
}
|
||||
|
||||
job := NewLoadPartitionJob(
|
||||
ctx,
|
||||
req,
|
||||
suite.dist,
|
||||
suite.meta,
|
||||
suite.broker,
|
||||
suite.targetMgr,
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
true, // userSpecifiedReplicaMode = true
|
||||
)
|
||||
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
suite.NoError(err)
|
||||
|
||||
// Verify UserSpecifiedReplicaMode is set correctly
|
||||
loadedCollection := suite.meta.GetCollection(ctx, collection)
|
||||
suite.NotNil(loadedCollection)
|
||||
suite.True(loadedCollection.GetUserSpecifiedReplicaMode())
|
||||
|
||||
suite.targetMgr.UpdateCollectionCurrentTarget(ctx, collection)
|
||||
suite.assertCollectionLoaded(collection)
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *JobSuite) TestLoadPartitionUpdateUserSpecifiedReplicaMode() {
|
||||
ctx := context.Background()
|
||||
|
||||
// First load partition with userSpecifiedReplicaMode = false
|
||||
collection := suite.collections[1] // Use partition load type collection
|
||||
if suite.loadTypes[collection] != querypb.LoadType_LoadPartition {
|
||||
return
|
||||
}
|
||||
|
||||
req := &querypb.LoadPartitionsRequest{
|
||||
CollectionID: collection,
|
||||
PartitionIDs: suite.partitions[collection][:1], // Load first partition
|
||||
ReplicaNumber: 1,
|
||||
}
|
||||
|
||||
job := NewLoadPartitionJob(
|
||||
ctx,
|
||||
req,
|
||||
suite.dist,
|
||||
suite.meta,
|
||||
suite.broker,
|
||||
suite.targetMgr,
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false, // userSpecifiedReplicaMode = false
|
||||
)
|
||||
|
||||
suite.scheduler.Add(job)
|
||||
err := job.Wait()
|
||||
suite.NoError(err)
|
||||
|
||||
// Verify UserSpecifiedReplicaMode is false
|
||||
loadedCollection := suite.meta.GetCollection(ctx, collection)
|
||||
suite.NotNil(loadedCollection)
|
||||
suite.False(loadedCollection.GetUserSpecifiedReplicaMode())
|
||||
|
||||
// Load another partition with userSpecifiedReplicaMode = true
|
||||
req2 := &querypb.LoadPartitionsRequest{
|
||||
CollectionID: collection,
|
||||
PartitionIDs: suite.partitions[collection][1:2], // Load second partition
|
||||
ReplicaNumber: 1,
|
||||
}
|
||||
|
||||
job2 := NewLoadPartitionJob(
|
||||
ctx,
|
||||
req2,
|
||||
suite.dist,
|
||||
suite.meta,
|
||||
suite.broker,
|
||||
suite.targetMgr,
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
true, // userSpecifiedReplicaMode = true
|
||||
)
|
||||
|
||||
suite.scheduler.Add(job2)
|
||||
err = job2.Wait()
|
||||
suite.NoError(err)
|
||||
|
||||
// Verify UserSpecifiedReplicaMode is updated to true
|
||||
updatedCollection := suite.meta.GetCollection(ctx, collection)
|
||||
suite.NotNil(updatedCollection)
|
||||
suite.True(updatedCollection.GetUserSpecifiedReplicaMode())
|
||||
}
|
||||
|
||||
func TestJob(t *testing.T) {
|
||||
suite.Run(t, new(JobSuite))
|
||||
}
|
||||
|
||||
@ -32,13 +32,14 @@ import (
|
||||
|
||||
type UpdateLoadConfigJob struct {
|
||||
*BaseJob
|
||||
collectionID int64
|
||||
newReplicaNumber int32
|
||||
newResourceGroups []string
|
||||
meta *meta.Meta
|
||||
targetMgr meta.TargetManagerInterface
|
||||
targetObserver *observers.TargetObserver
|
||||
collectionObserver *observers.CollectionObserver
|
||||
collectionID int64
|
||||
newReplicaNumber int32
|
||||
newResourceGroups []string
|
||||
meta *meta.Meta
|
||||
targetMgr meta.TargetManagerInterface
|
||||
targetObserver *observers.TargetObserver
|
||||
collectionObserver *observers.CollectionObserver
|
||||
userSpecifiedReplicaMode bool
|
||||
}
|
||||
|
||||
func NewUpdateLoadConfigJob(ctx context.Context,
|
||||
@ -47,17 +48,19 @@ func NewUpdateLoadConfigJob(ctx context.Context,
|
||||
targetMgr meta.TargetManagerInterface,
|
||||
targetObserver *observers.TargetObserver,
|
||||
collectionObserver *observers.CollectionObserver,
|
||||
userSpecifiedReplicaMode bool,
|
||||
) *UpdateLoadConfigJob {
|
||||
collectionID := req.GetCollectionIDs()[0]
|
||||
return &UpdateLoadConfigJob{
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), collectionID),
|
||||
meta: meta,
|
||||
targetMgr: targetMgr,
|
||||
targetObserver: targetObserver,
|
||||
collectionObserver: collectionObserver,
|
||||
collectionID: collectionID,
|
||||
newReplicaNumber: req.GetReplicaNumber(),
|
||||
newResourceGroups: req.GetResourceGroups(),
|
||||
BaseJob: NewBaseJob(ctx, req.Base.GetMsgID(), collectionID),
|
||||
meta: meta,
|
||||
targetMgr: targetMgr,
|
||||
targetObserver: targetObserver,
|
||||
collectionObserver: collectionObserver,
|
||||
collectionID: collectionID,
|
||||
newReplicaNumber: req.GetReplicaNumber(),
|
||||
newResourceGroups: req.GetResourceGroups(),
|
||||
userSpecifiedReplicaMode: userSpecifiedReplicaMode,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -158,7 +161,7 @@ func (job *UpdateLoadConfigJob) Execute() error {
    utils.RecoverReplicaOfCollection(job.ctx, job.meta, job.collectionID)

    // 7. update replica number in meta
    err = job.meta.UpdateReplicaNumber(job.ctx, job.collectionID, job.newReplicaNumber)
    err = job.meta.UpdateReplicaNumber(job.ctx, job.collectionID, job.newReplicaNumber, job.userSpecifiedReplicaMode)
    if err != nil {
        msg := "failed to update replica number"
        log.Warn(msg, zap.Error(err))
@@ -619,7 +619,7 @@ func (m *CollectionManager) removePartition(ctx context.Context, collectionID ty
    return nil
}

func (m *CollectionManager) UpdateReplicaNumber(ctx context.Context, collectionID typeutil.UniqueID, replicaNumber int32) error {
func (m *CollectionManager) UpdateReplicaNumber(ctx context.Context, collectionID typeutil.UniqueID, replicaNumber int32, userSpecifiedReplicaMode bool) error {
    m.rwmutex.Lock()
    defer m.rwmutex.Unlock()

@@ -629,7 +629,7 @@ func (m *CollectionManager) UpdateReplicaNumber(ctx context.Context, collectionI
    }
    newCollection := collection.Clone()
    newCollection.ReplicaNumber = replicaNumber

    newCollection.UserSpecifiedReplicaMode = userSpecifiedReplicaMode
    partitions := m.getPartitionsByCollection(collectionID)
    newPartitions := make([]*Partition, 0, len(partitions))
    for _, partition := range partitions {
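For call sites, the new boolean records where the replica number came from. A sketch of the two intended uses, assuming ctx, collectionID, and the manager m are in scope as in the surrounding code:

// Config-driven update (startup check, config watcher): the collection stays
// eligible for future automatic load-config updates.
if err := m.UpdateReplicaNumber(ctx, collectionID, 2, false); err != nil {
    log.Warn("failed to update replica number", zap.Error(err))
}

// User-driven update: the collection is pinned and skipped by future
// automatic load-config updates.
if err := m.UpdateReplicaNumber(ctx, collectionID, 2, true); err != nil {
    log.Warn("failed to update replica number", zap.Error(err))
}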
@@ -558,6 +558,8 @@ func (s *Server) startQueryCoord() error {
    go s.handleNodeUpLoop()
    go s.watchNodes(revision)

    // check replica changes after restart
    s.checkLoadConfigChanges(s.ctx)
    // watch load config changes
    s.watchLoadConfigChanges()

@@ -951,6 +953,30 @@ func (s *Server) updateBalanceConfig() bool {
    return false
}

func (s *Server) checkLoadConfigChanges(ctx context.Context) {
    // try to check load config changes after restart, and try to update replicas
    collectionIDs := s.meta.GetAll(ctx)
    collectionIDs = lo.Filter(collectionIDs, func(collectionID int64, _ int) bool {
        collection := s.meta.GetCollection(ctx, collectionID)
        if collection.UserSpecifiedReplicaMode {
            log.Info("collection is user specified replica mode, skip update load config", zap.Int64("collectionID", collectionID))
            return false
        }
        return true
    })
    replicaNum := paramtable.Get().QueryCoordCfg.ClusterLevelLoadReplicaNumber.GetAsUint32()
    rgs := paramtable.Get().QueryCoordCfg.ClusterLevelLoadResourceGroups.GetAsStrings()
    log.Info("apply load config changes",
        zap.Int64s("collectionIDs", collectionIDs),
        zap.Int32("replicaNum", int32(replicaNum)),
        zap.Strings("resourceGroups", rgs))
    s.UpdateLoadConfig(ctx, &querypb.UpdateLoadConfigRequest{
        CollectionIDs:  collectionIDs,
        ReplicaNumber:  int32(replicaNum),
        ResourceGroups: rgs,
    })
}
func (s *Server) watchLoadConfigChanges() {
    log := log.Ctx(s.ctx)
    replicaNumHandler := config.NewHandler("watchReplicaNumberChanges", func(e *config.Event) {
@@ -961,6 +987,14 @@
        log.Warn("no collection loaded, skip to trigger update load config")
        return
    }
    collectionIDs = lo.Filter(collectionIDs, func(collectionID int64, _ int) bool {
        collection := s.meta.GetCollection(s.ctx, collectionID)
        if collection.UserSpecifiedReplicaMode {
            log.Info("collection is user specified replica mode, skip update load config", zap.Int64("collectionID", collectionID))
            return false
        }
        return true
    })

    replicaNum, err := strconv.ParseInt(e.Value, 10, 64)
    if err != nil {
@@ -988,6 +1022,14 @@
        log.Warn("no collection loaded, skip to trigger update load config")
        return
    }
    collectionIDs = lo.Filter(collectionIDs, func(collectionID int64, _ int) bool {
        collection := s.meta.GetCollection(s.ctx, collectionID)
        if collection.UserSpecifiedReplicaMode {
            log.Info("collection is user specified replica mode, skip update load config", zap.Int64("collectionID", collectionID))
            return false
        }
        return true
    })

    if len(e.Value) == 0 {
        log.Warn("invalid cluster level load config, skip it", zap.String("key", e.Key), zap.String("value", e.Value))
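The two ParamItems read above are the cluster-level defaults that drive both the startup check and the watcher. A small sketch of overriding them programmatically, for example from a test; it reuses the Save/.Key mechanism seen elsewhere in this diff, and assumes paramtable.Init() has already run and that the string-list param uses the usual comma-separated encoding:

package example

import "github.com/milvus-io/milvus/pkg/v2/util/paramtable"

// setClusterLoadDefaults overrides the cluster-level load config that
// checkLoadConfigChanges and watchLoadConfigChanges consume.
func setClusterLoadDefaults() {
    params := paramtable.Get()
    params.Save(params.QueryCoordCfg.ClusterLevelLoadReplicaNumber.Key, "2")
    params.Save(params.QueryCoordCfg.ClusterLevelLoadResourceGroups.Key, "rg1,rg2")
}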
@ -20,15 +20,15 @@ import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/bytedance/mockey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/tikv/client-go/v2/txnkv"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/commonpb"
|
||||
"github.com/milvus-io/milvus-proto/go-api/v2/milvuspb"
|
||||
@ -56,18 +56,7 @@ import (
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
// init embed etcd
|
||||
embedetcdServer, tempDir, err := etcd.StartTestEmbedEtcdServer()
|
||||
if err != nil {
|
||||
log.Fatal("failed to start embed etcd server", zap.Error(err))
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
defer embedetcdServer.Close()
|
||||
|
||||
addrs := etcd.GetEmbedEtcdEndpoints(embedetcdServer)
|
||||
|
||||
paramtable.Init()
|
||||
paramtable.Get().Save(Params.EtcdCfg.Endpoints.Key, strings.Join(addrs, ","))
|
||||
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
os.Exit(m.Run())
|
||||
@ -436,6 +425,72 @@ func (suite *ServerSuite) TestUpdateAutoBalanceConfigLoop() {
|
||||
})
|
||||
}
|
||||
|
||||
func TestCheckLoadConfigChanges(t *testing.T) {
    mockey.PatchConvey("TestCheckLoadConfigChanges", t, func() {
        ctx := context.Background()

        // Create mock server
        testServer := &Server{}
        testServer.meta = &meta.Meta{}
        testServer.ctx = ctx

        // Create mock collection with IsUserSpecifiedReplicaMode = false
        mockCollection1 := &meta.Collection{
            CollectionLoadInfo: &querypb.CollectionLoadInfo{
                CollectionID:             1001,
                UserSpecifiedReplicaMode: false,
            },
        }

        // Create mock collection with IsUserSpecifiedReplicaMode = true
        mockCollection2 := &meta.Collection{
            CollectionLoadInfo: &querypb.CollectionLoadInfo{
                CollectionID:             1002,
                UserSpecifiedReplicaMode: true,
            },
        }

        // Mock meta.CollectionManager.GetAll to return collection IDs
        mockey.Mock((*meta.CollectionManager).GetAll).Return([]int64{1001, 1002}).Build()

        // Mock meta.CollectionManager.GetCollection to return different collections
        mockey.Mock((*meta.CollectionManager).GetCollection).To(func(m *meta.CollectionManager, ctx context.Context, collectionID int64) *meta.Collection {
            if collectionID == 1001 {
                return mockCollection1
            } else if collectionID == 1002 {
                return mockCollection2
            }
            return nil
        }).Build()

        // Mock paramtable.ParamItem.GetAsUint32() for ClusterLevelLoadReplicaNumber
        mockey.Mock((*paramtable.ParamItem).GetAsUint32).Return(uint32(2)).Build()

        // Mock paramtable.ParamItem.GetAsStrings() for ClusterLevelLoadResourceGroups
        mockey.Mock((*paramtable.ParamItem).GetAsStrings).Return([]string{"default"}).Build()

        // Mock UpdateLoadConfig to capture the call
        var updateLoadConfigCalled bool
        var capturedRequest *querypb.UpdateLoadConfigRequest
        mockey.Mock((*Server).UpdateLoadConfig).To(func(s *Server, ctx context.Context, req *querypb.UpdateLoadConfigRequest) (*commonpb.Status, error) {
            updateLoadConfigCalled = true
            capturedRequest = req
            return merr.Success(), nil
        }).Build()

        // Call checkLoadConfigChanges
        testServer.checkLoadConfigChanges(ctx)

        // Verify UpdateLoadConfig was called
        assert.True(t, updateLoadConfigCalled, "UpdateLoadConfig should be called")

        // Verify that only collections with IsUserSpecifiedReplicaMode = false are included
        assert.Equal(t, []int64{1001}, capturedRequest.CollectionIDs, "Only collections with IsUserSpecifiedReplicaMode = false should be included")
        assert.Equal(t, int32(2), capturedRequest.ReplicaNumber, "ReplicaNumber should match cluster level config")
        assert.Equal(t, []string{"default"}, capturedRequest.ResourceGroups, "ResourceGroups should match cluster level config")
    })
}
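A practical note on the mockey-based test above (an assumption from mockey's general requirements, not something stated in this diff): runtime patching of concrete functions such as (*paramtable.ParamItem).GetAsUint32 only takes effect if the compiler does not inline them, so these tests are typically run with inlining and optimizations disabled, for example:

    go test -gcflags="all=-l -N" ./... -run TestCheckLoadConfigChanges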
func (suite *ServerSuite) waitNodeUp(node *mocks.MockQueryNode, timeout time.Duration) bool {
|
||||
start := time.Now()
|
||||
for time.Since(start) < timeout {
|
||||
@ -601,7 +656,7 @@ func (suite *ServerSuite) hackServer() {
|
||||
suite.server.meta,
|
||||
suite.server.targetMgr,
|
||||
suite.server.dist,
|
||||
suite.broker,
|
||||
suite.server.broker,
|
||||
suite.server.cluster,
|
||||
suite.server.nodeMgr,
|
||||
)
|
||||
|
||||
@@ -232,6 +232,8 @@ func (s *Server) LoadCollection(ctx context.Context, req *querypb.LoadCollection
        return merr.Status(err), nil
    }

    // if user specified the replica number in load request, load config changes won't be apply to the collection automatically
    userSpecifiedReplicaMode := req.GetReplicaNumber() > 0
    // to be compatible with old sdk, which set replica=1 if replica is not specified
    // so only both replica and resource groups didn't set in request, it will turn to use the configured load info
    if req.GetReplicaNumber() <= 0 && len(req.GetResourceGroups()) == 0 {
@@ -285,6 +287,7 @@ func (s *Server) LoadCollection(ctx context.Context, req *querypb.LoadCollection
            s.targetMgr,
            s.targetObserver,
            s.collectionObserver,
            userSpecifiedReplicaMode,
        )
    }
}
@@ -299,6 +302,7 @@ func (s *Server) LoadCollection(ctx context.Context, req *querypb.LoadCollection
        s.targetObserver,
        s.collectionObserver,
        s.nodeMgr,
        userSpecifiedReplicaMode,
    )
}
@@ -385,6 +389,9 @@ func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitions
        return merr.Status(err), nil
    }

    // if user specified the replica number in load request, load config changes won't be apply to the collection automatically
    userSpecifiedReplicaMode := req.GetReplicaNumber() > 0

    // to be compatible with old sdk, which set replica=1 if replica is not specified
    // so only both replica and resource groups didn't set in request, it will turn to use the configured load info
    if req.GetReplicaNumber() <= 0 && len(req.GetResourceGroups()) == 0 {
@@ -414,6 +421,7 @@ func (s *Server) LoadPartitions(ctx context.Context, req *querypb.LoadPartitions
        s.targetObserver,
        s.collectionObserver,
        s.nodeMgr,
        userSpecifiedReplicaMode,
    )
    s.jobScheduler.Add(loadJob)
    err := loadJob.Wait()
@@ -1224,6 +1232,7 @@ func (s *Server) UpdateLoadConfig(ctx context.Context, req *querypb.UpdateLoadCo
        s.targetMgr,
        s.targetObserver,
        s.collectionObserver,
        false,
    )

    jobs = append(jobs, updateJob)
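One nuance follows directly from the handler comments above: the pin is derived solely from ReplicaNumber > 0, while the fallback to the configured load info additionally requires that no resource groups were set, so an older SDK that always fills in ReplicaNumber=1 appears to end up treated as user-specified. Illustrative values only (the request type is the real querypb one):

package main

import (
    "fmt"

    "github.com/milvus-io/milvus/pkg/v2/proto/querypb"
)

func main() {
    legacy := &querypb.LoadCollectionRequest{CollectionID: 1, ReplicaNumber: 1} // old SDK default
    empty := &querypb.LoadCollectionRequest{CollectionID: 2}                    // nothing specified

    fmt.Println(legacy.GetReplicaNumber() > 0) // true  -> pinned; future config changes skip it
    fmt.Println(empty.GetReplicaNumber() > 0)  // false -> follows cluster-level load config
}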
@ -22,6 +22,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/bytedance/mockey"
|
||||
"github.com/cockroachdb/errors"
|
||||
"github.com/samber/lo"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@ -382,6 +383,58 @@ func (suite *ServiceSuite) TestLoadCollection() {
|
||||
suite.Equal(resp.GetCode(), merr.Code(merr.ErrServiceNotReady))
|
||||
}
|
||||
|
||||
func (suite *ServiceSuite) TestLoadCollectionWithUserSpecifiedReplicaMode() {
|
||||
mockey.PatchConvey("TestLoadCollectionWithUserSpecifiedReplicaMode", suite.T(), func() {
|
||||
ctx := context.Background()
|
||||
server := suite.server
|
||||
collectionID := suite.collections[0]
|
||||
|
||||
// Mock broker methods using mockey
|
||||
mockey.Mock(mockey.GetMethod(suite.broker, "DescribeCollection")).Return(nil, nil).Build()
|
||||
suite.expectGetRecoverInfo(collectionID)
|
||||
|
||||
// Test when user specifies replica number - should set IsUserSpecifiedReplicaMode to true
|
||||
req := &querypb.LoadCollectionRequest{
|
||||
CollectionID: collectionID,
|
||||
ReplicaNumber: 2, // User specified replica number
|
||||
}
|
||||
resp, err := server.LoadCollection(ctx, req)
|
||||
suite.NoError(err)
|
||||
suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode)
|
||||
|
||||
// Verify that IsUserSpecifiedReplicaMode is set to true
|
||||
collection := suite.meta.GetCollection(ctx, collectionID)
|
||||
suite.NotNil(collection)
|
||||
suite.True(collection.UserSpecifiedReplicaMode)
|
||||
})
|
||||
}
|
||||
|
||||
func (suite *ServiceSuite) TestLoadCollectionWithoutUserSpecifiedReplicaMode() {
|
||||
mockey.PatchConvey("TestLoadCollectionWithoutUserSpecifiedReplicaMode", suite.T(), func() {
|
||||
ctx := context.Background()
|
||||
server := suite.server
|
||||
collectionID := suite.collections[0]
|
||||
|
||||
// Mock broker methods using mockey
|
||||
mockey.Mock(mockey.GetMethod(suite.broker, "DescribeCollection")).Return(nil, nil).Build()
|
||||
suite.expectGetRecoverInfo(collectionID)
|
||||
|
||||
// Test when user doesn't specify replica number - should not set IsUserSpecifiedReplicaMode
|
||||
req := &querypb.LoadCollectionRequest{
|
||||
CollectionID: collectionID,
|
||||
ReplicaNumber: 0, // No user specified replica number
|
||||
}
|
||||
resp, err := server.LoadCollection(ctx, req)
|
||||
suite.NoError(err)
|
||||
suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode)
|
||||
|
||||
// Verify that IsUserSpecifiedReplicaMode is not set to true
|
||||
collection := suite.meta.GetCollection(ctx, collectionID)
|
||||
suite.NotNil(collection)
|
||||
suite.False(collection.UserSpecifiedReplicaMode)
|
||||
})
|
||||
}
|
||||
|
||||
func (suite *ServiceSuite) TestResourceGroup() {
|
||||
ctx := context.Background()
|
||||
server := suite.server
|
||||
@ -958,6 +1011,62 @@ func (suite *ServiceSuite) TestLoadPartitionFailed() {
|
||||
}
|
||||
}
|
||||
|
||||
func (suite *ServiceSuite) TestLoadPartitionsWithUserSpecifiedReplicaMode() {
|
||||
mockey.PatchConvey("TestLoadPartitionsWithUserSpecifiedReplicaMode", suite.T(), func() {
|
||||
ctx := context.Background()
|
||||
server := suite.server
|
||||
collectionID := suite.collections[0]
|
||||
partitionIDs := suite.partitions[collectionID]
|
||||
|
||||
// Mock broker methods using mockey
|
||||
mockey.Mock(mockey.GetMethod(suite.broker, "DescribeCollection")).Return(nil, nil).Build()
|
||||
suite.expectGetRecoverInfo(collectionID)
|
||||
|
||||
// Test when user specifies replica number - should set IsUserSpecifiedReplicaMode to true
|
||||
req := &querypb.LoadPartitionsRequest{
|
||||
CollectionID: collectionID,
|
||||
PartitionIDs: partitionIDs,
|
||||
ReplicaNumber: 3, // User specified replica number
|
||||
}
|
||||
resp, err := server.LoadPartitions(ctx, req)
|
||||
suite.NoError(err)
|
||||
suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode)
|
||||
|
||||
// Verify that IsUserSpecifiedReplicaMode is set to true
|
||||
collection := suite.meta.GetCollection(ctx, collectionID)
|
||||
suite.NotNil(collection)
|
||||
suite.True(collection.UserSpecifiedReplicaMode)
|
||||
})
|
||||
}
|
||||
|
||||
func (suite *ServiceSuite) TestLoadPartitionsWithoutUserSpecifiedReplicaMode() {
|
||||
mockey.PatchConvey("TestLoadPartitionsWithoutUserSpecifiedReplicaMode", suite.T(), func() {
|
||||
ctx := context.Background()
|
||||
server := suite.server
|
||||
collectionID := suite.collections[0]
|
||||
partitionIDs := suite.partitions[collectionID]
|
||||
|
||||
// Mock broker methods using mockey
|
||||
mockey.Mock(mockey.GetMethod(suite.broker, "DescribeCollection")).Return(nil, nil).Build()
|
||||
suite.expectGetRecoverInfo(collectionID)
|
||||
|
||||
// Test when user doesn't specify replica number - should not set IsUserSpecifiedReplicaMode
|
||||
req := &querypb.LoadPartitionsRequest{
|
||||
CollectionID: collectionID,
|
||||
PartitionIDs: partitionIDs,
|
||||
ReplicaNumber: 0, // No user specified replica number
|
||||
}
|
||||
resp, err := server.LoadPartitions(ctx, req)
|
||||
suite.NoError(err)
|
||||
suite.Equal(commonpb.ErrorCode_Success, resp.ErrorCode)
|
||||
|
||||
// Verify that IsUserSpecifiedReplicaMode is not set to true
|
||||
collection := suite.meta.GetCollection(ctx, collectionID)
|
||||
suite.NotNil(collection)
|
||||
suite.False(collection.UserSpecifiedReplicaMode)
|
||||
})
|
||||
}
|
||||
|
||||
func (suite *ServiceSuite) TestReleaseCollection() {
|
||||
suite.loadAll()
|
||||
ctx := context.Background()
|
||||
@ -1799,6 +1908,7 @@ func (suite *ServiceSuite) loadAll() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.jobScheduler.Add(job)
|
||||
err := job.Wait()
|
||||
@ -1823,6 +1933,7 @@ func (suite *ServiceSuite) loadAll() {
|
||||
suite.targetObserver,
|
||||
suite.collectionObserver,
|
||||
suite.nodeMgr,
|
||||
false,
|
||||
)
|
||||
suite.jobScheduler.Add(job)
|
||||
err := job.Wait()
|
||||
|
||||
@ -19,7 +19,6 @@ package task
|
||||
import (
|
||||
"context"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -92,10 +91,6 @@ type TaskSuite struct {
|
||||
|
||||
func (suite *TaskSuite) SetupSuite() {
|
||||
paramtable.Init()
|
||||
addressList, err := suite.SetupEtcd()
|
||||
suite.Require().NoError(err)
|
||||
params := paramtable.Get()
|
||||
params.Save(params.EtcdCfg.Endpoints.Key, strings.Join(addressList, ","))
|
||||
|
||||
suite.collection = 1000
|
||||
suite.replica = newReplicaDefaultRG(10)
|
||||
|
||||
@@ -680,6 +680,7 @@ message CollectionLoadInfo {
    int32 recover_times = 7;
    repeated int64 load_fields = 8;
    int64 dbID= 9;
    bool user_specified_replica_mode = 10;
}

message PartitionLoadInfo {
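Since the field is a plain proto3 bool appended to CollectionLoadInfo, existing persisted records decode it as false, which is exactly the "not pinned" default the restart check relies on. A tiny usage sketch with the generated type (getter name follows the standard protoc-gen-go mapping):

package main

import (
    "fmt"

    "github.com/milvus-io/milvus/pkg/v2/proto/querypb"
)

func main() {
    info := &querypb.CollectionLoadInfo{
        CollectionID:             1001,
        ReplicaNumber:            2,
        UserSpecifiedReplicaMode: true,
    }
    fmt.Println(info.GetUserSpecifiedReplicaMode()) // true

    var legacy querypb.CollectionLoadInfo // e.g. decoded from a record written before this change
    fmt.Println(legacy.GetUserSpecifiedReplicaMode()) // false: old records default to "not pinned"
}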
File diff suppressed because it is too large.