milvus/internal/querycoordv2/observers/file_resource_observer.go
aoiasd ee216877bb
enhance: support compaction with file resource in ref mode (#46399)
Add support for DataNode compaction using file resources in ref mode.
SortCompaction and StatsJobs will build text indexes, which may use file
resources.
related: https://github.com/milvus-io/milvus/issues/43687

<!-- This is an auto-generated comment: release notes by coderabbit.ai
-->
- Core invariant: file resources (analyzer binaries/metadata) are only
fetched, downloaded and used when the node is configured in Ref mode
(fileresource.IsRefMode via CommonCfg.QNFileResourceMode /
DNFileResourceMode); Sync now carries a version and managers track
per-resource versions/resource IDs so newer resource sets win and older
entries are pruned (RefManager/SynchManager resource maps).
- Logic removed / simplified: component-specific FileResourceMode flags
and an indirection through a long-lived BinlogIO wrapper were
consolidated — file-resource mode moved to CommonCfg, Sync/Download APIs
became version- and context-aware, and compaction/index tasks accept a
ChunkManager directly (binlog IO wrapper creation inlined). This
eliminates duplicated config checks and wrapper indirection while
preserving the same chunk/IO semantics.
- Why no data loss or behavior regression: all file-resource code paths
are gated by the configured mode (default remains "sync"); when not in
ref-mode or when no resources exist, compaction and stats flows follow
existing code paths unchanged. Versioned Sync + resourceID maps ensure
newly synced sets replace older ones and RefManager prunes stale files;
GetFileResources returns an error if requested IDs are missing (prevents
silent use of wrong resources). Analyzer naming/parameter changes add
analyzer_extra_info but default-callers pass "" so existing analyzers
and index contents remain unchanged.
- New capability: DataNode compaction and StatsJobs can now build text
indexes using external file resources in Ref mode — DataCoord exposes
GetFileResources and populates CompactionPlan.file_resources;
SortCompaction/StatsTask download resources via fileresource.Manager,
produce an analyzer_extra_info JSON (storage + resource->id map) via
analyzer.BuildExtraResourceInfo, and propagate analyzer_extra_info into
BuildIndexInfo so the tantivy bindings can load custom analyzers during
text index creation.
<!-- end of auto-generated comment: release notes by coderabbit.ai -->

Signed-off-by: aoiasd <zhicheng.yue@zilliz.com>
2026-01-06 16:31:31 +08:00

168 lines
4.3 KiB
Go

// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package observers
import (
"context"
"sync"
"time"
"go.uber.org/zap"
"github.com/milvus-io/milvus/internal/querycoordv2/session"
"github.com/milvus-io/milvus/internal/util/fileresource"
"github.com/milvus-io/milvus/pkg/v2/log"
"github.com/milvus-io/milvus/pkg/v2/proto/internalpb"
"github.com/milvus-io/milvus/pkg/v2/util/conc"
"github.com/milvus-io/milvus/pkg/v2/util/lock"
"github.com/milvus-io/milvus/pkg/v2/util/merr"
"github.com/milvus-io/milvus/pkg/v2/util/paramtable"
)
// FileResourceObserver tracks the latest file-resource set on the query
// coordinator and pushes it to query nodes, remembering which version each
// node has acknowledged so only stale nodes are re-synced.
type FileResourceObserver struct {
	lock.RWMutex // guards resources and version

	// resources is the latest resource set to distribute; version is its
	// monotonically increasing version number (see UpdateResources).
	resources []*internalpb.FileResourceInfo
	version   uint64

	ctx context.Context

	// distribution maps nodeID -> last version successfully synced to that
	// node. Only accessed from the syncLoop goroutine (no lock needed).
	distribution map[int64]uint64
	// version distribution

	nodeManager *session.NodeManager
	cluster     session.Cluster

	// notifyCh (capacity 1) coalesces sync requests; closeCh signals shutdown.
	notifyCh chan struct{}
	closeCh  chan struct{}
	wg       sync.WaitGroup

	// sf dedupes retry scheduling after a failed sync.
	sf conc.Singleflight[any]

	once      sync.Once // ensures Start launches the loop at most once
	closeOnce sync.Once // ensures Stop closes closeCh at most once
}
// NewFileResourceObserver creates an observer that distributes file resources
// to the query nodes registered in nodeManager, using cluster for RPCs.
// Call Start to begin the background sync loop and Stop to shut it down.
func NewFileResourceObserver(ctx context.Context, nodeManager *session.NodeManager, cluster session.Cluster) *FileResourceObserver {
	observer := &FileResourceObserver{
		ctx:          ctx,
		nodeManager:  nodeManager,
		cluster:      cluster,
		distribution: make(map[int64]uint64),
		notifyCh:     make(chan struct{}, 1),
		closeCh:      make(chan struct{}),
		sf:           conc.Singleflight[any]{},
	}
	return observer
}
// getResources returns a consistent snapshot of the current resource list
// and its version, taken under the read lock.
func (m *FileResourceObserver) getResources() ([]*internalpb.FileResourceInfo, uint64) {
	m.RLock()
	resources, version := m.resources, m.version
	m.RUnlock()
	return resources, version
}
// syncLoop is the observer's background goroutine: each notification triggers
// a sync of the current resource snapshot to all nodes; on failure a retry
// notification is scheduled after 5s. The loop exits when Stop closes closeCh
// or the observer's context is cancelled.
func (m *FileResourceObserver) syncLoop() {
	defer m.wg.Done()
	for {
		select {
		case <-m.notifyCh:
			// Snapshot under lock, then sync outside the lock.
			resources, version := m.getResources()
			err := m.sync(resources, version)
			if err != nil {
				// retry if error exist
				// NOTE(review): sf.Do runs this function inline, so the
				// 5s sleep blocks this select loop (delaying shutdown and
				// new notifications). If the singleflight was meant to
				// dedupe a background retry, this likely wants `go m.sf.Do(...)`
				// — confirm intent before changing.
				m.sf.Do("retry", func() (any, error) {
					time.Sleep(5 * time.Second)
					m.Notify()
					return nil, nil
				})
			}
		case <-m.closeCh:
			log.Info("file resource observer close")
			return
		case <-m.ctx.Done():
			log.Info("file resource observer context done")
			return
		}
	}
}
// Start launches the background sync loop (at most once) and queues an
// initial sync. It is a no-op unless the query-node file-resource mode is
// configured as sync mode.
func (m *FileResourceObserver) Start() {
	mode := paramtable.Get().CommonCfg.QNFileResourceMode.GetValue()
	if !fileresource.IsSyncMode(mode) {
		return
	}
	m.once.Do(func() {
		m.wg.Add(1)
		go m.syncLoop()
		m.Notify()
	})
}
// Stop shuts down the sync loop and blocks until it has exited.
// Safe to call multiple times; only the first call takes effect.
func (m *FileResourceObserver) Stop() {
	m.closeOnce.Do(func() {
		close(m.closeCh) // signal syncLoop to return
		m.wg.Wait()      // wait for it to finish
	})
}
// Notify schedules a sync without blocking. Because notifyCh has capacity 1,
// multiple notifications while a sync is pending coalesce into one.
func (m *FileResourceObserver) Notify() {
	select {
	case m.notifyCh <- struct{}{}:
		// signal queued for syncLoop
	default:
		// a sync is already pending; drop the duplicate signal
	}
}
// sync pushes the given resource set to every node known to the node manager
// whose last acknowledged version is older than version, recording the new
// version per node on success. It returns the last error encountered, or nil
// when every node is up to date. Nodes that have left the manager are dropped
// from the tracking map. Only called from the syncLoop goroutine, so
// m.distribution is accessed without a lock.
func (m *FileResourceObserver) sync(resources []*internalpb.FileResourceInfo, version uint64) error {
	nodes := m.nodeManager.GetAll()
	var syncErr error
	newDistribution := make(map[int64]uint64, len(nodes))
	for _, node := range nodes {
		nodeID := node.ID()
		newDistribution[nodeID] = m.distribution[nodeID]
		if m.distribution[nodeID] >= version {
			// node already holds this (or a newer) version; nothing to do
			continue
		}
		status, err := m.cluster.SyncFileResource(m.ctx, nodeID, &internalpb.SyncFileResourceRequest{
			Resources: resources,
			Version:   version,
		})
		// Treat transport errors and non-OK statuses uniformly.
		if err == nil {
			err = merr.Error(status)
		}
		if err != nil {
			log.Warn("sync file resource failed", zap.Int64("nodeID", nodeID), zap.Error(err))
			syncErr = err
			continue
		}
		newDistribution[nodeID] = version
		log.Info("finish sync file resource to query node", zap.Int64("node", nodeID), zap.Uint64("version", version))
	}
	m.distribution = newDistribution
	return syncErr
}
// UpdateResources replaces the cached resource set and its version under the
// write lock, then schedules a sync so the new set is pushed to query nodes.
func (m *FileResourceObserver) UpdateResources(resources []*internalpb.FileResourceInfo, version uint64) {
	m.Lock()
	m.resources = resources
	m.version = version
	m.Unlock()
	// Notify is non-blocking, so it is safe to signal after releasing the lock.
	m.Notify()
}