diff --git a/internal/datacoord/compaction_task_meta.go b/internal/datacoord/compaction_task_meta.go
index b055186dd4..6af1e13faf 100644
--- a/internal/datacoord/compaction_task_meta.go
+++ b/internal/datacoord/compaction_task_meta.go
@@ -63,7 +63,7 @@ func newCompactionTaskMeta(ctx context.Context, catalog metastore.DataCoordCatal
ctx: ctx,
catalog: catalog,
compactionTasks: make(map[int64]map[int64]*datapb.CompactionTask, 0),
- taskStats: expirable.NewLRU[UniqueID, *metricsinfo.CompactionTask](1024, nil, time.Minute*60),
+ taskStats: expirable.NewLRU[UniqueID, *metricsinfo.CompactionTask](32, nil, time.Minute*15),
}
if err := csm.reloadFromKV(); err != nil {
return nil, err
@@ -178,10 +178,6 @@ func (csm *compactionTaskMeta) DropCompactionTask(task *datapb.CompactionTask) e
func (csm *compactionTaskMeta) TaskStatsJSON() string {
tasks := csm.taskStats.Values()
- if len(tasks) == 0 {
- return ""
- }
-
ret, err := json.Marshal(tasks)
if err != nil {
return ""
diff --git a/internal/datacoord/compaction_task_meta_test.go b/internal/datacoord/compaction_task_meta_test.go
index 48050fb640..ce3cb85a7d 100644
--- a/internal/datacoord/compaction_task_meta_test.go
+++ b/internal/datacoord/compaction_task_meta_test.go
@@ -111,7 +111,7 @@ func (suite *CompactionTaskMetaSuite) TestTaskStatsJSON() {
- // testing return empty string
+ // testing return of an empty JSON array
actualJSON := suite.meta.TaskStatsJSON()
- suite.Equal("", actualJSON)
+ suite.Equal("[]", actualJSON)
err := suite.meta.SaveCompactionTask(task1)
suite.NoError(err)
diff --git a/internal/datacoord/import_meta.go b/internal/datacoord/import_meta.go
index da142157b5..b81de5950a 100644
--- a/internal/datacoord/import_meta.go
+++ b/internal/datacoord/import_meta.go
@@ -52,7 +52,7 @@ type importTasks struct {
func newImportTasks() *importTasks {
return &importTasks{
tasks: make(map[int64]ImportTask),
- taskStats: expirable.NewLRU[UniqueID, ImportTask](4096, nil, time.Minute*60),
+ taskStats: expirable.NewLRU[UniqueID, ImportTask](64, nil, time.Minute*30),
}
}
@@ -301,9 +301,6 @@ func (m *importMeta) RemoveTask(taskID int64) error {
func (m *importMeta) TaskStatsJSON() string {
tasks := m.tasks.listTaskStats()
- if len(tasks) == 0 {
- return ""
- }
ret, err := json.Marshal(tasks)
if err != nil {
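
The capacity and TTL reductions here, and the matching ones in index_meta.go, task_scheduler.go, and sync_manager.go below, all tune the same construct: expirable.NewLRU takes a maximum entry count, an optional eviction callback, and a TTL, so entries fall out on either capacity pressure or age. A hedged sketch of the constructor's semantics (key/value types and values are illustrative only):

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/golang-lru/v2/expirable"
)

func main() {
	// At most 64 entries; nil eviction callback; entries expire after 30 minutes.
	cache := expirable.NewLRU[int64, string](64, nil, 30*time.Minute)
	cache.Add(1, "import-task-1")
	fmt.Println(cache.Values()) // [import-task-1]
}
```
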
diff --git a/internal/datacoord/import_meta_test.go b/internal/datacoord/import_meta_test.go
index c61abbf69e..a9ed20f5eb 100644
--- a/internal/datacoord/import_meta_test.go
+++ b/internal/datacoord/import_meta_test.go
@@ -251,7 +251,7 @@ func TestTaskStatsJSON(t *testing.T) {
assert.NoError(t, err)
statsJSON := im.TaskStatsJSON()
- assert.Equal(t, "", statsJSON)
+ assert.Equal(t, "[]", statsJSON)
task1 := &importTask{
ImportTaskV2: &datapb.ImportTaskV2{
diff --git a/internal/datacoord/import_task.go b/internal/datacoord/import_task.go
index fb2e59422a..719ad0035b 100644
--- a/internal/datacoord/import_task.go
+++ b/internal/datacoord/import_task.go
@@ -185,7 +185,7 @@ func (p *preImportTask) MarshalJSON() ([]byte, error) {
NodeID: p.GetNodeID(),
State: p.GetState().String(),
Reason: p.GetReason(),
- TaskType: "PreImportTask",
+ TaskType: p.GetType().String(),
CreatedTime: p.GetCreatedTime(),
CompleteTime: p.GetCompleteTime(),
}
@@ -231,7 +231,7 @@ func (t *importTask) MarshalJSON() ([]byte, error) {
NodeID: t.GetNodeID(),
State: t.GetState().String(),
Reason: t.GetReason(),
- TaskType: "ImportTask",
+ TaskType: t.GetType().String(),
CreatedTime: t.GetCreatedTime(),
CompleteTime: t.GetCompleteTime(),
}
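
Both MarshalJSON implementations now take the task-type label from the enum's String() method instead of a hard-coded literal, so the serialized TaskType can no longer drift from the task's actual type. A generic sketch of the pattern with stand-in types (hypothetical, not the real milvuspb/metricsinfo structs):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// TaskType stands in for a protobuf enum with a String() method.
type TaskType int

const (
	PreImportTaskType TaskType = iota
	ImportTaskType
)

func (t TaskType) String() string {
	return [...]string{"PreImportTask", "ImportTask"}[t]
}

type task struct{ typ TaskType }

func (t *task) MarshalJSON() ([]byte, error) {
	// Derive the label from the enum instead of hard-coding it.
	return json.Marshal(struct {
		TaskType string `json:"task_type"`
	}{TaskType: t.typ.String()})
}

func main() {
	b, _ := json.Marshal(&task{typ: ImportTaskType})
	fmt.Println(string(b)) // {"task_type":"ImportTask"}
}
```
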
diff --git a/internal/datacoord/index_meta.go b/internal/datacoord/index_meta.go
index dbc60101d0..e31f12ca2f 100644
--- a/internal/datacoord/index_meta.go
+++ b/internal/datacoord/index_meta.go
@@ -102,7 +102,7 @@ func newSegmentIndexBuildInfo() *segmentBuildInfo {
// build ID -> segment index
buildID2SegmentIndex: make(map[UniqueID]*model.SegmentIndex),
// build ID -> task stats
- taskStats: expirable.NewLRU[UniqueID, *indexTaskStats](1024, nil, time.Minute*60),
+ taskStats: expirable.NewLRU[UniqueID, *indexTaskStats](64, nil, time.Minute*30),
}
}
@@ -1075,10 +1075,6 @@ func (m *indexMeta) HasIndex(collectionID int64) bool {
func (m *indexMeta) TaskStatsJSON() string {
tasks := m.segmentBuildInfo.GetTaskStats()
- if len(tasks) == 0 {
- return ""
- }
-
ret, err := json.Marshal(tasks)
if err != nil {
return ""
diff --git a/internal/datacoord/index_meta_test.go b/internal/datacoord/index_meta_test.go
index c5b2fa9c0c..9b4de11076 100644
--- a/internal/datacoord/index_meta_test.go
+++ b/internal/datacoord/index_meta_test.go
@@ -1543,7 +1543,7 @@ func TestBuildIndexTaskStatsJSON(t *testing.T) {
}
actualJSON := im.TaskStatsJSON()
- assert.Equal(t, "", actualJSON)
+ assert.Equal(t, "[]", actualJSON)
im.segmentBuildInfo.Add(si1)
im.segmentBuildInfo.Add(si2)
diff --git a/internal/datacoord/job_manager_test.go b/internal/datacoord/job_manager_test.go
index 03ca4cf03a..a0d95e4cd5 100644
--- a/internal/datacoord/job_manager_test.go
+++ b/internal/datacoord/job_manager_test.go
@@ -104,7 +104,7 @@ func (s *jobManagerSuite) TestJobManager_triggerStatsTaskLoop() {
allocator: alloc,
tasks: make(map[int64]Task),
meta: mt,
- taskStats: expirable.NewLRU[UniqueID, Task](1024, nil, time.Minute*5),
+ taskStats: expirable.NewLRU[UniqueID, Task](64, nil, time.Minute*5),
},
allocator: alloc,
}
diff --git a/internal/datacoord/metrics_info_test.go b/internal/datacoord/metrics_info_test.go
index fe6114c067..1151dc70b5 100644
--- a/internal/datacoord/metrics_info_test.go
+++ b/internal/datacoord/metrics_info_test.go
@@ -325,7 +325,7 @@ func TestGetSyncTaskMetrics(t *testing.T) {
mockCluster.EXPECT().GetSessions().Return([]*session.Session{session.NewSession(&session.NodeInfo{NodeID: 1}, dataNodeCreator)})
svr.cluster = mockCluster
- expectedJSON := ""
+ expectedJSON := "null"
actualJSON, err := svr.getSyncTaskJSON(ctx, req)
assert.NoError(t, err)
assert.Equal(t, expectedJSON, actualJSON)
@@ -449,7 +449,7 @@ func TestGetSegmentsJSON(t *testing.T) {
mockCluster.EXPECT().GetSessions().Return([]*session.Session{session.NewSession(&session.NodeInfo{NodeID: 1}, dataNodeCreator)})
svr.cluster = mockCluster
- expectedJSON := ""
+ expectedJSON := "null"
actualJSON, err := svr.getSegmentsJSON(ctx, req)
assert.NoError(t, err)
assert.Equal(t, expectedJSON, actualJSON)
@@ -591,7 +591,7 @@ func TestGetChannelsJSON(t *testing.T) {
svr.cluster = mockCluster
svr.meta = &meta{channelCPs: newChannelCps()}
- expectedJSON := ""
+ expectedJSON := "null"
actualJSON, err := svr.getChannelsJSON(ctx, req)
assert.NoError(t, err)
assert.Equal(t, expectedJSON, actualJSON)
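
These three expectations flip to "null" rather than "[]" because the per-node aggregation paths hand json.Marshal a nil slice when no sessions report data; as the empty-vs-nil sketch after the first hunk shows, nil slices marshal to null while non-nil empty slices marshal to [].
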
diff --git a/internal/datacoord/task_scheduler.go b/internal/datacoord/task_scheduler.go
index d176326ed0..5b26886630 100644
--- a/internal/datacoord/task_scheduler.go
+++ b/internal/datacoord/task_scheduler.go
@@ -91,7 +91,7 @@ func newTaskScheduler(
handler: handler,
indexEngineVersionManager: indexEngineVersionManager,
allocator: allocator,
- taskStats: expirable.NewLRU[UniqueID, Task](1024, nil, time.Minute*5),
+ taskStats: expirable.NewLRU[UniqueID, Task](64, nil, time.Minute*15),
}
ts.reloadFromMeta()
return ts
diff --git a/internal/distributed/proxy/httpserver/handler.go b/internal/distributed/proxy/httpserver/handler.go
index 6b781181d9..7bb8b5b20a 100644
--- a/internal/distributed/proxy/httpserver/handler.go
+++ b/internal/distributed/proxy/httpserver/handler.go
@@ -27,6 +27,9 @@ func (h *Handlers) RegisterRoutesTo(router gin.IRouter) {
router.GET("/health", wrapHandler(h.handleGetHealth))
router.POST("/dummy", wrapHandler(h.handleDummy))
+ router.GET("/databases", wrapHandler(h.handleListDatabases))
+ router.GET("/database", wrapHandler(h.handleDescribeDatabases))
+
router.POST("/collection", wrapHandler(h.handleCreateCollection))
router.DELETE("/collection", wrapHandler(h.handleDropCollection))
router.GET("/collection/existence", wrapHandler(h.handleHasCollection))
@@ -96,6 +99,24 @@ func (h *Handlers) handleDummy(c *gin.Context) (interface{}, error) {
return h.proxy.Dummy(c, &req)
}
+func (h *Handlers) handleListDatabases(c *gin.Context) (interface{}, error) {
+ req := milvuspb.ListDatabasesRequest{}
+ err := shouldBind(c, &req)
+ if err != nil {
+ return nil, fmt.Errorf("%w: parse body failed: %v", errBadRequest, err)
+ }
+ return h.proxy.ListDatabases(c, &req)
+}
+
+func (h *Handlers) handleDescribeDatabases(c *gin.Context) (interface{}, error) {
+ req := milvuspb.DescribeDatabaseRequest{}
+ err := shouldBind(c, &req)
+ if err != nil {
+ return nil, fmt.Errorf("%w: parse body failed: %v", errBadRequest, err)
+ }
+ return h.proxy.DescribeDatabase(c, &req)
+}
+
func (h *Handlers) handleCreateCollection(c *gin.Context) (interface{}, error) {
wrappedReq := WrappedCreateCollectionRequest{}
err := shouldBind(c, &wrappedReq)
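
The two new database endpoints reuse this file's existing shape: bind the request, wrap parse failures in errBadRequest, and delegate to the proxy. A self-contained gin sketch of that bind-and-delegate flow — the request type, handler name, and port are illustrative stand-ins, not the internal wrapHandler/shouldBind helpers:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gin-gonic/gin"
)

var errBadRequest = fmt.Errorf("bad request")

// listDatabasesReq stands in for milvuspb.ListDatabasesRequest.
type listDatabasesReq struct {
	DbName string `form:"db_name" json:"db_name"`
}

func handleListDatabases(c *gin.Context) (interface{}, error) {
	req := listDatabasesReq{}
	if err := c.ShouldBind(&req); err != nil {
		return nil, fmt.Errorf("%w: parse body failed: %v", errBadRequest, err)
	}
	// Delegate to the backend here; echoed back for the sketch.
	return req, nil
}

func main() {
	r := gin.Default()
	r.GET("/databases", func(c *gin.Context) {
		resp, err := handleListDatabases(c)
		if err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, resp)
	})
	r.Run(":8080")
}
```
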
diff --git a/internal/flushcommon/syncmgr/sync_manager.go b/internal/flushcommon/syncmgr/sync_manager.go
index aa52b04a1d..bed92083b5 100644
--- a/internal/flushcommon/syncmgr/sync_manager.go
+++ b/internal/flushcommon/syncmgr/sync_manager.go
@@ -70,7 +70,7 @@ func NewSyncManager(chunkManager storage.ChunkManager) SyncManager {
keyLockDispatcher: dispatcher,
chunkManager: chunkManager,
tasks: typeutil.NewConcurrentMap[string, Task](),
- taskStats: expirable.NewLRU[string, Task](512, nil, time.Minute*15),
+ taskStats: expirable.NewLRU[string, Task](16, nil, time.Minute*15),
}
// setup config update watcher
params.Watch(params.DataNodeCfg.MaxParallelSyncMgrTasks.Key, config.NewHandler("datanode.syncmgr.poolsize", syncMgr.resizeHandler))
diff --git a/internal/http/router.go b/internal/http/router.go
index 2859704f48..b0e4463f04 100644
--- a/internal/http/router.go
+++ b/internal/http/router.go
@@ -75,6 +75,8 @@ const (
ClusterDependenciesPath = "/_cluster/dependencies"
// HookConfigsPath is the path to get hook configurations.
HookConfigsPath = "/_hook/configs"
+ // SlowQueryPath is the path to get slow query metrics.
+ SlowQueryPath = "/_cluster/slow_query"
// QCDistPath is the path to get QueryCoord distribution.
QCDistPath = "/_qc/dist"
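
This hunk only declares the route constant; the handler itself is registered elsewhere. A hedged sketch of how such a constant would typically be wired onto an HTTP mux — the handler body and port are assumptions for illustration:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
)

const SlowQueryPath = "/_cluster/slow_query"

func main() {
	mux := http.NewServeMux()
	// Hypothetical handler: the real wiring lives in the management/metrics server.
	mux.HandleFunc(SlowQueryPath, func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode([]string{}) // placeholder payload
	})
	log.Fatal(http.ListenAndServe(":9091", mux))
}
```
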
diff --git a/internal/http/webui/channels.html b/internal/http/webui/channels.html
deleted file mode 100644
index 08c3b9c629..0000000000
--- a/internal/http/webui/channels.html
+++ /dev/null
@@ -1,158 +0,0 @@
- [deleted file body: HTML markup lost in extraction; the page was a static mock view]
- [it held three hard-coded tables: "Channel Checkpoints", "Watched Channels On Datanode",
-  and "Watched Channels On QueryNode", each filled with placeholder sample rows]
\ No newline at end of file
diff --git a/internal/http/webui/collections.html b/internal/http/webui/collections.html
index 83cd46ed84..e6a9904ff7 100644
--- a/internal/http/webui/collections.html
+++ b/internal/http/webui/collections.html
@@ -23,110 +23,52 @@
- [markup lost in extraction: removes the static "Database List" table and the mock
-  "Collection Metrics" table (Collection Name, isQueryable, isWritable, Query Ops/s,
-  Search Ops/s, Insert Throughput(MB/s), Delete Throughput(MB/s)) with hard-coded rows]
+ [markup lost in extraction: headings renamed to "Database" and "Collection"; adds a
+  database search input and "Base" / "Requests" tab controls for the collection view]
@@ -137,12 +79,33 @@
$('#footer').load("footer.html");
});
- fetchData(MILVUS_URI + "/", clientInfos)
+ function searchCollections() {
+ const searchTerm = document.getElementById('databaseSearch').value;
+ let dbName = 'default';
+ if (searchTerm !== '') {
+ dbName = searchTerm;
+ }
+ fetchCollections(dbName);
+ }
+ searchCollections()
+
+ // TODO - Implement the following functions and support search with db name
+ // fetchData(MILVUS_URI + "/_collection/metrics", collectionRequest)
+ // .then(data => {
+ // collectionRequestsData = data;
+ // renderCollectionRequests(startPage, paginationSize);
+ // })
+ // .catch(error => {
+ // handleError(error);
+ // });
+
+ fetchData(MILVUS_URI + "/databases", databases)
.then(data => {
- //TODO add collection render
+ databaseData = data;
+ renderDatabases(startPage, paginationSize)
})
.catch(error => {
- handleError(new Error("Unimplemented API"));
+ handleError(error);
});