1. Use blocking memory allocation to wait until memory becomes available.
2. Perform memory allocation at the file level instead of per task.
3. Limit the Parquet file reader's batch size to prevent excessive memory consumption.
4. Limit the import buffer size from 20% to 10% of total memory.

issue: https://github.com/milvus-io/milvus/issues/43387, https://github.com/milvus-io/milvus/issues/43131

---------

Signed-off-by: bigsheeper <yihao.dai@zilliz.com>
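For illustration, here is a minimal sketch of the "blocking memory allocation" idea from point 1, using a weighted semaphore sized to the import memory budget. The limiter type, its method names, and the byte counts are assumptions made for this example, not the actual Milvus implementation:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

// memoryLimiter is a hypothetical file-level memory budget. Acquire blocks
// until enough headroom is free, instead of failing fast under pressure.
type memoryLimiter struct {
	sem *semaphore.Weighted
}

func newMemoryLimiter(budgetBytes int64) *memoryLimiter {
	return &memoryLimiter{sem: semaphore.NewWeighted(budgetBytes)}
}

// Allocate blocks until fileBytes of budget is available or ctx is done.
func (m *memoryLimiter) Allocate(ctx context.Context, fileBytes int64) error {
	return m.sem.Acquire(ctx, fileBytes)
}

// Free returns a file's reservation to the budget once the import finishes.
func (m *memoryLimiter) Free(fileBytes int64) {
	m.sem.Release(fileBytes)
}

func main() {
	limiter := newMemoryLimiter(1 << 30) // assume a 1 GiB import budget
	ctx := context.Background()
	if err := limiter.Allocate(ctx, 256<<20); err != nil {
		fmt.Println("allocation canceled:", err)
		return
	}
	defer limiter.Free(256 << 20)
	fmt.Println("memory reserved for one file, safe to start reading")
}
```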
155 lines
3.9 KiB
Go
// Licensed to the LF AI & Data foundation under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package parquet

import (
	"context"
	"fmt"
	"io"

	"github.com/apache/arrow/go/v17/arrow/memory"
	"github.com/apache/arrow/go/v17/parquet"
	"github.com/apache/arrow/go/v17/parquet/file"
	"github.com/apache/arrow/go/v17/parquet/pqarrow"
	"go.uber.org/atomic"
	"go.uber.org/zap"

	"github.com/milvus-io/milvus-proto/go-api/v2/schemapb"
	"github.com/milvus-io/milvus/internal/storage"
	"github.com/milvus-io/milvus/internal/util/importutilv2/common"
	"github.com/milvus-io/milvus/pkg/v2/log"
	"github.com/milvus-io/milvus/pkg/v2/util/merr"
)

// fileReaderBufferSize caps the parquet reader's internal stream buffer to
// bound per-file memory consumption during import.
const fileReaderBufferSize = int64(32 * 1024 * 1024)

type reader struct {
	ctx    context.Context
	cm     storage.ChunkManager
	cmr    storage.FileReader
	schema *schemapb.CollectionSchema

	path string
	r    *file.Reader

	fileSize   *atomic.Int64
	bufferSize int
	count      int64

	frs map[int64]*FieldReader // fieldID -> FieldReader
}

// NewReader opens the parquet file at path through the chunk manager and
// builds one FieldReader per schema field. The per-batch row count is
// estimated from bufferSize and the schema so that a single batch stays
// within the import buffer budget.
func NewReader(ctx context.Context, cm storage.ChunkManager, schema *schemapb.CollectionSchema, path string, bufferSize int) (*reader, error) {
	cmReader, err := cm.Reader(ctx, path)
	if err != nil {
		return nil, err
	}
	r, err := file.NewParquetReader(cmReader, file.WithReadProps(&parquet.ReaderProperties{
		BufferSize:            fileReaderBufferSize,
		BufferedStreamEnabled: true,
	}))
	if err != nil {
		return nil, merr.WrapErrImportFailed(fmt.Sprintf("new parquet reader failed, err=%v", err))
	}
	log.Info("parquet file info", zap.Int("row group num", r.NumRowGroups()),
		zap.Int64("num rows", r.NumRows()))

	count, err := common.EstimateReadCountPerBatch(bufferSize, schema)
	if err != nil {
		return nil, err
	}

	readProps := pqarrow.ArrowReadProperties{
		BatchSize: count,
	}
	fileReader, err := pqarrow.NewFileReader(r, readProps, memory.DefaultAllocator)
	if err != nil {
		return nil, merr.WrapErrImportFailed(fmt.Sprintf("new parquet file reader failed, err=%v", err))
	}

	crs, err := CreateFieldReaders(ctx, fileReader, schema)
	if err != nil {
		return nil, err
	}
	return &reader{
		ctx:        ctx,
		cm:         cm,
		cmr:        cmReader,
		schema:     schema,
		fileSize:   atomic.NewInt64(0),
		path:       path,
		r:          r,
		bufferSize: bufferSize,
		count:      count,
		frs:        crs,
	}, nil
}

// Read appends column batches into a fresh InsertData until its in-memory
// size reaches bufferSize, and returns io.EOF once the file is exhausted.
func (r *reader) Read() (*storage.InsertData, error) {
	insertData, err := storage.NewInsertData(r.schema)
	if err != nil {
		return nil, err
	}
OUTER:
	for {
		for fieldID, cr := range r.frs {
			data, validData, err := cr.Next(r.count)
			if err != nil {
				return nil, err
			}
			if data == nil {
				break OUTER
			}
			err = insertData.Data[fieldID].AppendRows(data, validData)
			if err != nil {
				return nil, err
			}
		}
		if insertData.GetMemorySize() >= r.bufferSize {
			break
		}
	}
	for fieldID := range r.frs {
		if insertData.Data[fieldID].RowNum() == 0 {
			return nil, io.EOF
		}
	}
	return insertData, nil
}

// Size returns the parquet file size in bytes, caching the result after the
// first lookup.
func (r *reader) Size() (int64, error) {
	if size := r.fileSize.Load(); size != 0 {
		return size, nil
	}
	size, err := r.cm.Size(r.ctx, r.path)
	if err != nil {
		return 0, err
	}
	r.fileSize.Store(size)
	return size, nil
}

// Close releases the parquet reader and the underlying chunk manager reader.
func (r *reader) Close() {
	err := r.r.Close()
	if err != nil {
		log.Warn("close parquet reader failed", zap.Error(err))
	}
	if r.cmr != nil {
		r.cmr.Close()
	}
}
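For context, a minimal sketch of how this reader would typically be driven: read batches until io.EOF, then close. This is not from the Milvus source; the function name, the 64 MiB buffer value, and the consume callback are assumptions, while cm and schema are taken to come from the surrounding import task.

```go
// consumeParquet is a hedged usage sketch of the reader above.
func consumeParquet(ctx context.Context, cm storage.ChunkManager,
	schema *schemapb.CollectionSchema, path string,
	consume func(*storage.InsertData) error,
) error {
	r, err := NewReader(ctx, cm, schema, path, 64*1024*1024) // example buffer size
	if err != nil {
		return err
	}
	defer r.Close()
	for {
		data, err := r.Read()
		if err == io.EOF {
			return nil // all row groups consumed
		}
		if err != nil {
			return err
		}
		if err := consume(data); err != nil {
			return err
		}
	}
}
```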