diff --git a/docs/developer_guides/appendix_a_basic_components.md b/docs/developer_guides/appendix_a_basic_components.md
index dd2d8848c9..6bb1a9c6a7 100644
--- a/docs/developer_guides/appendix_a_basic_components.md
+++ b/docs/developer_guides/appendix_a_basic_components.md
@@ -2,85 +2,113 @@
## Appendix A. Basic Components
-// TODO
-#### A.1 Watchdog
+#### A.1 System Component
-``` go
-type ActiveComponent interface {
- Id() string
- Status() Status
- Clean() Status
- Restart() Status
+Milvus has 9 different components, which can be abstracted into a basic Component.
+
+```go
+type Component interface {
+ Init() error
+ Start() error
+ Stop() error
+ GetComponentStates(ctx context.Context) (*internalpb.ComponentStates, error)
+ GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error)
}
-
-type ComponentHeartbeat interface {
- Id() string
- Status() Status
- Serialize() string
-}
-
-type Watchdog struct {
- targets [] *ActiveComponent
- heartbeats ComponentHeartbeat chan
-}
-
-// register ActiveComponent
-func (dog *Watchdog) Register(target *ActiveComponent)
-
-// called by ActiveComponents
-func (dog *Watchdog) PutHeartbeat(heartbeat *ComponentHeartbeat)
-
-// dump heatbeats as log stream
-func (dog *Watchdog) dumpHeartbeat(heartbeat *ComponentHeartbeat)
```
+* *GetComponentStates*
+
+```go
+
+type StateCode = int
+
+const (
+ INITIALIZING StateCode = 0
+ HEALTHY StateCode = 1
+ ABNORMAL StateCode = 2
+)
+
+type ComponentInfo struct {
+ NodeID UniqueID
+ Role string
+ StateCode StateCode
+ ExtraInfo []*commonpb.KeyValuePair
+}
+
+type ComponentStates struct {
+ State *ComponentInfo
+ SubcomponentStates []*ComponentInfo
+ Status *commonpb.Status
+}
+
+```
+
+If a component needs to process timetick message to align timetick, it needs to implement TimeTickProvider interface.
+
+
+```go
+type TimeTickProvider interface {
+ GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error)
+}
+```
+
+
#### A.2 Global Parameter Table
``` go
-type GlobalParamsTable struct {
- params memoryKV
+type BaseTable struct {
+ params *memkv.MemoryKV
}
-func (gparams *GlobalParamsTable) Save(key, value string) error
-func (gparams *GlobalParamsTable) Load(key string) (string, error)
-func (gparams *GlobalParamsTable) LoadRange(key, endKey string, limit int) ([]string, []string, error)
-func (gparams *GlobalParamsTable) Remove(key string) error
-func (gparams *GlobalParamsTable) LoadYaml(filePath string) error
+func (gp *BaseTable) Init()
+func (gp *BaseTable) LoadFromKVPair(kvPairs []*commonpb.KeyValuePair) error
+func (gp *BaseTable) Load(key string) (string, error)
+func (gp *BaseTable) LoadRange(key, endKey string, limit int) ([]string, []string, error)
+func (gp *BaseTable) LoadYaml(fileName string) error
+func (gp *BaseTable) ParseFloat(key string) float64
+func (gp *BaseTable) ParseInt64(key string) int64
+func (gp *BaseTable) ParseInt32(key string) int32
+func (gp *BaseTable) ParseInt(key string) int
+func (gp *BaseTable) WriteNodeIDList() []UniqueID
+func (gp *BaseTable) DataNodeIDList() []UniqueID
+func (gp *BaseTable) ProxyIDList() []UniqueID
+func (gp *BaseTable) QueryNodeIDList() []UniqueID
```
-
* *LoadYaml(filePath string)* turns a YAML file into multiple key-value pairs. For example, given the following YAML
```yaml
etcd:
address: localhost
- port: 12379
+ port: 2379
rootpath: milvus/etcd
```
-*GlobalParamsTable.LoadYaml* will insert three key-value pairs into *params*
+*BaseTable.LoadYaml* will insert three key-value pairs into *params*
```go
-"etcd.address" -> "localhost"
-"etcd.port" -> "12379"
-"etcd.rootpath" -> "milvus/etcd"
+ "etcd.address" -> "localhost"
+ "etcd.port" -> "2379"
+ "etcd.rootpath" -> "milvus/etcd"
```
#### A.4 Time Ticked Flow Graph
-//TODO
+//TODO remove?
###### A.4.1 Flow Graph States
```go
type flowGraphStates struct {
- startTick Timestamp
- numActiveTasks map[string]int32
- numCompletedTasks map[string]int64
+ startTick Timestamp
+ numActiveTasks map[string]int32
+ numCompletedTasks map[string]int64
}
```
@@ -88,7 +116,7 @@ type flowGraphStates struct {
```go
type Msg interface {
- TimeTick() Timestamp
+ TimeTick() Timestamp
}
```
@@ -96,40 +124,34 @@ type Msg interface {
```go
type Node interface {
- Name() string
- MaxQueueLength() int32
- MaxParallelism() int32
- Operate(ctx context.Context, in []Msg) ([]Msg, context.Context)
- IsInputNode() bool
+ Name() string
+ MaxQueueLength() int32
+ MaxParallelism() int32
+ Operate(ctx context.Context, in []Msg) ([]Msg, context.Context)
+ IsInputNode() bool
+ Close()
}
```
-
-
```go
-type baseNode struct {
- maxQueueLength int32
- maxParallelism int32
+type BaseNode struct {
+ maxQueueLength int32
+ maxParallelism int32
}
-func (node *baseNode) MaxQueueLength() int32
-func (node *baseNode) MaxParallelism() int32
-func (node *baseNode) SetMaxQueueLength(n int32)
-func (node *baseNode) SetMaxParallelism(n int32)
-func (node *BaseNode) IsInputNode() bool
```
###### A.4.4 Flow Graph
```go
type nodeCtx struct {
- node Node
- inputChannels []chan *MsgWithCtx
- inputMessages []Msg
- downstream []*nodeCtx
- downstreamInputChanIdx map[string]int
-
- NumActiveTasks int64
- NumCompletedTasks int64
+ node Node
+ inputChannels []chan Msg
+ inputMessages []Msg
+ downstream []*nodeCtx
+ downstreamInputChanIdx map[string]int
+
+ NumActiveTasks int64
+ NumCompletedTasks int64
}
func (nodeCtx *nodeCtx) Start(ctx context.Context) error
@@ -139,8 +161,8 @@ func (nodeCtx *nodeCtx) Start(ctx context.Context) error
```go
type TimeTickedFlowGraph struct {
- ctx context.Context
- nodeCtx map[NodeName]*nodeCtx
+ ctx context.Context
+ nodeCtx map[NodeName]*nodeCtx
}
func (*pipeline TimeTickedFlowGraph) AddNode(node Node)
@@ -151,24 +173,52 @@ func (*pipeline TimeTickedFlowGraph) Close() error
func NewTimeTickedFlowGraph(ctx context.Context) *TimeTickedFlowGraph
```
+#### A.5 Allocator
+
+```go
+type Allocator struct {
+ Ctx context.Context
+ CancelFunc context.CancelFunc
+
+ wg sync.WaitGroup
+
+ Reqs chan Request
+ ToDoReqs []Request
+ CanDoReqs []Request
+ SyncReqs []Request
+
+ TChan TickerChan
+ ForceSyncChan chan Request
+
+ SyncFunc func() bool
+ ProcessFunc func(req Request) error
+
+ CheckSyncFunc func(timeout bool) bool
+ PickCanDoFunc func()
+}
+func (ta *Allocator) Start() error
+func (ta *Allocator) Init() error
+func (ta *Allocator) Close() error
+func (ta *Allocator) CleanCache() error
+
+```
-#### A.5 ID Allocator
+#### A.6 ID Allocator
```go
type IDAllocator struct {
- Allocator
-
- masterAddress string
- masterConn *grpc.ClientConn
- masterClient masterpb.MasterServiceClient
-
- countPerRPC uint32
-
- idStart UniqueID
- idEnd UniqueID
-
- PeerID UniqueID
+ Allocator
+
+ masterAddress string
+ master types.MasterService
+
+ countPerRPC uint32
+
+ idStart UniqueID
+ idEnd UniqueID
+
+ PeerID UniqueID
}
func (ia *IDAllocator) Start() error
@@ -195,7 +245,7 @@ Let's take a brief review of Hybrid Logical Clock (HLC). HLC uses 64bits timesta
-HLC's logical part is advanced on each request. The phsical part can be increased in two cases:
+HLC's logical part is advanced on each request. The physical part can be increased in two cases:
A. when the local wall time is greater than HLC's physical part,
@@ -210,14 +260,14 @@ Milvus does not support transaction, but it should gurantee the deterministic ex
- have its physical part close to wall time (has an acceptable bounded error, a.k.a. uncertainty interval in transaction senarios),
- and be globally unique.
-HLC leverages on physical clocks at nodes that are synchronized using the NTP. NTP usually maintain time to within tens of milliseconds over local networks in datacenter. Asymmetric routes and network congestion occasionally cause errors of hundreds of milliseconds. Both the normal time error and the spike are acceptable for Milvus use cases.
+HLC leverages physical clocks at nodes that are synchronized using NTP. NTP usually maintains time to within tens of milliseconds over local networks in a datacenter. Asymmetric routes and network congestion occasionally cause errors of hundreds of milliseconds. Both the normal time error and the spike are acceptable for Milvus use cases.
The interface of Timestamp is as follows.
```
type timestamp struct {
- physical uint64 // 18-63 bits
- logical uint64 // 0-17 bits
+ physical uint64 // 18-63 bits
+ logical uint64 // 0-17 bits
}
type Timestamp uint64
@@ -229,20 +279,18 @@ type Timestamp uint64
```go
type timestampOracle struct {
- key string
- kvBase kv.TxnBase
-
- saveInterval time.Duration
- maxResetTSGap func() time.Duration
-
- TSO unsafe.Pointer
- lastSavedTime atomic.Value
+ key string
+ txnkv kv.TxnBase
+
+ saveInterval time.Duration
+ maxResetTSGap func() time.Duration
+
+ TSO unsafe.Pointer
+ lastSavedTime atomic.Value
}
func (t *timestampOracle) InitTimestamp() error
func (t *timestampOracle) ResetUserTimestamp(tso uint64) error
-func (t *timestampOracle) saveTimestamp(ts time.time) error
-func (t *timestampOracle) loadTimestamp() (time.time, error)
func (t *timestampOracle) UpdateTimestamp() error
func (t *timestampOracle) ResetTimestamp()
```
@@ -253,24 +301,18 @@ func (t *timestampOracle) ResetTimestamp()
```go
type TimestampAllocator struct {
- Allocator
-
- masterAddress string
- masterConn *grpc.ClientConn
- masterClient masterpb.MasterServiceClient
-
- countPerRPC uint32
- lastTsBegin Timestamp
- lastTsEnd Timestamp
- PeerID UniqueID
+ Allocator
+
+ masterAddress string
+ masterClient types.MasterService
+
+ countPerRPC uint32
+ lastTsBegin Timestamp
+ lastTsEnd Timestamp
+ PeerID UniqueID
}
func (ta *TimestampAllocator) Start() error
-func (ta *TimestampAllocator) connectMaster() error
-func (ta *TimestampAllocator) syncID() bool
-func (ta *TimestampAllocator) checkSyncFunc(timeout bool) bool
-func (ta *TimestampAllocator) pickCanDoFunc()
-func (ta *TimestampAllocator) processFunc(req Request) error
func (ta *TimestampAllocator) AllocOne() (UniqueID, error)
func (ta *TimestampAllocator) Alloc(count uint32) (UniqueID, UniqueID, error)
func (ta *TimestampAllocator) ClearCache()
@@ -293,25 +335,28 @@ func NewTimestampAllocator(ctx context.Context, masterAddr string) (*TimestampAl
###### A.7.1 KV Base
```go
-type Base interface {
- Load(key string) (string, error)
- MultiLoad(keys []string) ([]string, error)
- LoadWithPrefix(key string) ([]string, []string, error)
- Save(key, value string) error
- MultiSave(kvs map[string]string) error
- Remove(key string) error
- MultiRemove(keys []string) error
-
- Close()
+type BaseKV interface {
+ Load(key string) (string, error)
+ MultiLoad(keys []string) ([]string, error)
+ LoadWithPrefix(key string) ([]string, []string, error)
+ Save(key, value string) error
+ MultiSave(kvs map[string]string) error
+ Remove(key string) error
+ MultiRemove(keys []string) error
+
+ Close()
}
```
###### A.7.2 Txn Base
```go
-type TxnBase interface {
- Base
- MultiSaveAndRemove(saves map[string]string, removals []string) error
+type TxnKV interface {
+ BaseKV
+
+ MultiSaveAndRemove(saves map[string]string, removals []string) error
+ MultiRemoveWithPrefix(keys []string) error
+ MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string) error
}
```
@@ -343,5 +388,80 @@ func (kv *EtcdKV) WatchWithPrefix(key string) clientv3.WatchChan
func NewEtcdKV(etcdAddr string, rootPath string) *EtcdKV
```
-EtcdKV implements all *TxnBase* interfaces.
+EtcdKV implements all *TxnKV* interfaces.
+
+###### A.7.4 Memory KV
+
+```go
+type MemoryKV struct {
+ sync.RWMutex
+ tree *btree.BTree
+}
+
+func (s memoryKVItem) Less(than btree.Item) bool
+func (kv *MemoryKV) Load(key string) (string, error)
+func (kv *MemoryKV) LoadRange(key, endKey string, limit int) ([]string, []string, error)
+func (kv *MemoryKV) Save(key, value string) error
+func (kv *MemoryKV) Remove(key string) error
+func (kv *MemoryKV) MultiLoad(keys []string) ([]string, error)
+func (kv *MemoryKV) MultiSave(kvs map[string]string) error
+func (kv *MemoryKV) MultiRemove(keys []string) error
+func (kv *MemoryKV) MultiSaveAndRemove(saves map[string]string, removals []string) error
+func (kv *MemoryKV) LoadWithPrefix(key string) ([]string, []string, error)
+func (kv *MemoryKV) Close()
+func (kv *MemoryKV) MultiRemoveWithPrefix(keys []string) error
+func (kv *MemoryKV) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string) error
+```
+
+MemoryKV implements all *TxnKV* interfaces.
+
+###### A.7.5 MinIO KV
+
+```go
+type MinIOKV struct {
+ ctx context.Context
+ minioClient *minio.Client
+ bucketName string
+}
+
+func (kv *MinIOKV) LoadWithPrefix(key string) ([]string, []string, error)
+func (kv *MinIOKV) Load(key string) (string, error)
+func (kv *MinIOKV) MultiLoad(keys []string) ([]string, error)
+func (kv *MinIOKV) Save(key, value string) error
+func (kv *MinIOKV) MultiSave(kvs map[string]string) error
+func (kv *MinIOKV) RemoveWithPrefix(key string) error
+func (kv *MinIOKV) Remove(key string) error
+func (kv *MinIOKV) MultiRemove(keys []string) error
+func (kv *MinIOKV) Close()
+```
+
+MinIOKV implements all *KV* interfaces.
+
+###### A.7.6 RocksdbKV KV
+
+```go
+type RocksdbKV struct {
+ opts *gorocksdb.Options
+ db *gorocksdb.DB
+ writeOptions *gorocksdb.WriteOptions
+ readOptions *gorocksdb.ReadOptions
+ name string
+}
+
+func (kv *RocksdbKV) Close()
+func (kv *RocksdbKV) GetName() string
+func (kv *RocksdbKV) Load(key string) (string, error)
+func (kv *RocksdbKV) LoadWithPrefix(key string) ([]string, []string, error)
+func (kv *RocksdbKV) MultiLoad(keys []string) ([]string, error)
+func (kv *RocksdbKV) Save(key, value string) error
+func (kv *RocksdbKV) MultiSave(kvs map[string]string) error
+func (kv *RocksdbKV) RemoveWithPrefix(key string) error
+func (kv *RocksdbKV) Remove(key string) error
+func (kv *RocksdbKV) MultiRemove(keys []string) error
+func (kv *RocksdbKV) MultiSaveAndRemove(saves map[string]string, removals []string) error
+func (kv *RocksdbKV) MultiRemoveWithPrefix(keys []string) error
+func (kv *RocksdbKV) MultiSaveAndRemoveWithPrefix(saves map[string]string, removals []string) error
+```
+
+RocksdbKV implements all *TxnKV* interfaces.
diff --git a/docs/developer_guides/appendix_b_api_reference.md b/docs/developer_guides/appendix_b_api_reference.md
index fae84cb7ee..ebb4cab20c 100644
--- a/docs/developer_guides/appendix_b_api_reference.md
+++ b/docs/developer_guides/appendix_b_api_reference.md
@@ -6,18 +6,41 @@ In this section, we introduce the RPCs of milvus service. A brief description of
| RPC | description |
| :----------------- | ------------------------------------------------------------ |
-| CreateCollection | create a collection base on schema statement |
-| DropCollection | drop a collection |
-| HasCollection | whether or not a collection exists |
-| DescribeCollection | show a collection's schema and its descriptive statistics |
-| ShowCollections | list all collections |
-| CreatePartition | create a partition |
-| DropPartition | drop a partition |
-| HasPartition | whether or not a partition exists |
-| DescribePartition | show a partition's name and its descriptive statistics |
-| ShowPartitions | list a collection's all partitions |
-| Insert | insert a batch of rows into a collection or a partition |
-| Search | query the columns of a collection or a partition with ANNS statements and boolean expressions |
+| CreateCollection | create a collection base on schema statement |
+| DropCollection | drop a collection |
+| HasCollection | whether or not a collection exists |
+| LoadCollection | load collection to memory for future search |
+| ReleaseCollection | release the collection from memory |
+| DescribeCollection | show a collection's schema and its descriptive statistics |
+| GetCollectionStatistics | show a collection's statistics |
+| ShowCollections | list all collections |
+| CreatePartition | create a partition |
+| DropPartition | drop a partition |
+| HasPartition | whether or not a partition exists |
+| LoadPartition | load a partition to memory for future search |
+| ReleasePartition | release the partition from memory |
+| GetPartitionStatistics | show a partition's statistics |
+| ShowPartitions | list a collection's all partitions |
+| CreateIndex | create index for a field in collection |
+| DescribeIndex | get index details for a field in a collection |
+| GetIndexStates | get build index state |
+| DropIndex | drop a specific index for a field in a collection |
+| Insert | insert a batch of rows into a collection or a partition |
+| Search | query the columns of a collection or a partition with ANNS statements and boolean expressions |
+| Flush | Perform persistent storage of data in memory |
+
+**MsgBase** is a base struct in each request.
+
+```protobuf
+message MsgBase {
+ MsgType msg_type = 1;
+ int64 msgID = 2;
+ uint64 timestamp = 3;
+ int64 sourceID = 4;
+}
+```
+
+**MsgType** is an enum to distinguish different message types in the message queue, such as insert msg, search msg, etc. **msgID** is a unique identifier of a message. **timestamp** is the time when this message was generated. **sourceID** is a unique identifier of the source.
@@ -28,60 +51,72 @@ In this section, we introduce the RPCs of milvus service. A brief description of
**Interface:**
```
-rpc CreateCollection(schema.CollectionSchema) returns (common.Status){}
+rpc CreateCollection(CreateCollectionRequest) returns (common.Status){}
```
**Description:**
-Create a collection through collection schema.
+Create a collection through CreateCollectionRequest.
-**Parameters:**
+**Parameters:**
-- **schema.CollectionSchema**
+- **CreateCollectionRequest**
-CollectionSchema struct is shown as follows:
+CreateCollectionRequest struct is shown as follows:
```protobuf
+message CreateCollectionRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ // `schema` is the serialized `schema.CollectionSchema`
+ bytes schema = 4;
+}
+
message CollectionSchema {
- string name = 1;
- string description = 2;
- bool autoID = 3;
- repeated FieldSchema fields = 4;
+ string name = 1;
+ string description = 2;
+ bool autoID = 3;
+ repeated FieldSchema fields = 4;
}
```
-Collection schema contains all the base information of a collection including **collection name**, **description**, **autoID** and **fields**. Collection description is defined by database manager to describe the collection. **autoID** determines whether the ID of each row of data is user-defined. If **autoID** is true, our system will generate a unique ID for each data. If **autoID** is false, user need to give each entity a ID when insert.
+CreateCollectionRequest contains **MsgBase**, **db_name**, **collection_name** and the serialized collection schema **schema**. A collection with the name **collection_name** is going to be created.
+
+Collection schema contains all the base information of a collection including **collection name**, **description**, **autoID** and **fields**. Collection description is defined by database manager to describe the collection. **autoID** determines whether the ID of each row of data is user-defined. If **autoID** is true, our system will generate a unique ID for each data. If **autoID** is false, user need to give each entity a ID when insert.
**Fields** is a list of **FieldSchema**. Each schema should include Field **name**, **description**, **dataType**, **type_params** and **index_params**.
-FieldSchema struct is shown as follows:
+FieldSchema struct is shown as follows:
```protobuf
message FieldSchema {
- string name = 1;
- string description = 2;
- DataType data_type = 3;
- repeated common.KeyValuePair type_params = 4;
- repeated common.KeyValuePair index_params = 5;
+ int64 fieldID = 1;
+ string name = 2;
+ bool is_primary_key = 3;
+ string description = 4;
+ DataType data_type = 5;
+ repeated common.KeyValuePair type_params = 6;
+ repeated common.KeyValuePair index_params = 7;
}
```
-**Field schema** contains all the base information of a field including field **name**, **description**, **data_type**, **type_params** and **index_params**. **data_type** is a enum type to distingush different data type.Total enum is shown in the last of this doc
+**Field schema** contains all the base information of a field including **fieldID**, **name**, **description**, **data_type**, **type_params** and **index_params**. **data_type** is an enum type to distinguish different data types. The total enum is shown at the end of this doc.
-**type_params** contains the detailed information of data_type. For example, vector data type should include dimension information. You can give a pair of to let the field store 8-dimension vector.
+**type_params** contains the detailed information of data_type. For example, vector data type should include dimension information. You can give a pair of to let the field store 8-dimension vector.
**index_params**:For fast search, you build index for field. You specify detailed index information for a field. Detailed information about index can be seen in chapter 2.2.3
-**Returns:**
+**Returns:**
- **common.Status**
```protobuf
message Status {
- ErrorCode error_code = 1;
- string reason = 2;
+ErrorCode error_code = 1;
+ string reason = 2;
}
```
@@ -94,35 +129,37 @@ message Status {
**Interface:**
```
-rpc DropCollection(CollectionName) returns (common.Status) {}
+rpc DropCollection(DropCollectionRequest) returns (common.Status) {}
```
**Description:**
This method is used to delete collection.
-**Parameters:**
+**Parameters:**
-- **CollectionName**
+- **DropCollectionRequest**
-CollectionName struct is shown as follows:
+DropCollectionRequest struct is shown as follows:
```protobuf
-message CollectionName {
- string collection_name = 1;
+message DropCollectionRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
}
```
-**CollectionName** contains only a string named **collection_name**. Collection with the same collection_name is going to be deleted.
+Collection with the same **collection_name** is going to be deleted.
-**Returns:**
+**Returns:**
- **common.Status**
```protobuf
message Status {
- ErrorCode error_code = 1;
- string reason = 2;
+ ErrorCode error_code = 1;
+ string reason = 2;
}
```
@@ -135,35 +172,37 @@ message Status {
**Interface:**
```
-rpc HasCollection(CollectionName) returns (BoolResponse) {}
+rpc HasCollection(HasCollectionRequest) returns (BoolResponse) {}
```
**Description:**
This method is used to test collection existence.
-**Parameters:**
+**Parameters:**
-- **CollectionName**
+- **HasCollectionRequest**
-CollectionName struct is shown as follows:
+HasCollectionRequest struct is shown as follows:
```protobuf
-message CollectionName {
- string collection_name = 1;
+message HasCollectionRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
}
```
-**CollectionName** contains only a string named **collection_name**. The server finds the collection through collection_name and judge whether the collection exists.
+The server finds the collection through **collection_name** and judge whether the collection exists.
-**Returns:**
+**Returns:**
- **BoolResponse**
```protobuf
message BoolResponse {
- common.Status status = 1;
- bool value = 2;
+ common.Status status = 1;
+ bool value = 2;
}
```
@@ -173,41 +212,128 @@ message BoolResponse {
-###### 3.1.4 DescribeCollection
+###### 3.1.4 LoadCollection
**Interface:**
```
- rpc DescribeCollection(CollectionName) returns (CollectionDescription) {}
+rpc LoadCollection(LoadCollectionRequest) returns (common.Status) {}
+```
+
+**Description:**
+
+This method is used to load collection.
+
+**Parameters:**
+
+- **LoadCollectionRequest**
+
+LoadCollectionRequest struct is shown as follows:
+
+```protobuf
+message LoadCollectionRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+}
+```
+
+Collection with the same **collection_name** is going to be loaded to memory.
+
+**Returns:**
+
+- **common.Status**
+
+```protobuf
+message Status {
+ ErrorCode error_code = 1;
+ string reason = 2;
+}
+```
+
+**Status** represents the server error code. It doesn't contain the gRPC error but the server error code. We can get the executing result in the common status. **error_code** is an enum type to distinguish the executing error type. The total ErrorCode list is shown at the end of this doc. And the **reason** field is a string that describes the detailed error.
+
+
+
+###### 3.1.5 ReleaseCollection
+
+**Interface:**
+
+```
+rpc ReleaseCollection(ReleaseCollectionRequest) returns (common.Status) {}
+```
+
+**Description:**
+
+This method is used to release collection.
+
+**Parameters:**
+
+- **ReleaseCollectionRequest**
+
+ReleaseCollectionRequest struct is shown as follows:
+
+```protobuf
+message ReleaseCollectionRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+}
+```
+
+Collection with the same **collection_name** is going to be released from memory.
+
+**Returns:**
+
+- **common.Status**
+
+```protobuf
+message Status {
+ ErrorCode error_code = 1;
+ string reason = 2;
+}
+```
+
+**Status** represents the server error code. It doesn't contain the gRPC error but the server error code. We can get the executing result in the common status. **error_code** is an enum type to distinguish the executing error type. The total ErrorCode list is shown at the end of this doc. And the **reason** field is a string that describes the detailed error.
+
+###### 3.1.6 DescribeCollection
+
+**Interface:**
+
+```
+ rpc DescribeCollection(DescribeCollectionRequest) returns (CollectionDescription) {}
```
**Description:**
This method is used to get collection schema.
-**Parameters:**
+**Parameters:**
-- **CollectionName**
+- **DescribeCollectionRequest**
-CollectionName struct is shown as follows:
+DescribeCollectionRequest struct is shown as follows:
```protobuf
-message CollectionName {
- string collection_name = 1;
+message DescribeCollectionRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ int64 collectionID = 4;
}
```
-**CollectionName** contains only a string named **collection_name**. The server finds the collection through collection_name and get detailed collection information
+The server finds the collection through **collection_name** and gets detailed collection information. And **collectionID** is for internal components to get collection details.
-**Returns:**
+**Returns:**
-- **CollectionDescription**
+- **DescribeCollectionResponse**
```protobuf
-message CollectionDescription {
- common.Status status = 1;
- schema.CollectionSchema schema = 2;
- repeated common.KeyValuePair statistics = 3;
+message DescribeCollectionResponse {
+ common.Status status = 1;
+ schema.CollectionSchema schema = 2;
+ int64 collectionID = 3;
}
```
@@ -215,16 +341,57 @@ message CollectionDescription {
**schema** is collection schema same as the collection schema in [CreateCollection](#311-createcollection).
-**statitistics** is a statistic used to count various information, such as the number of segments, how many rows there are, the number of visits in the last hour, etc.
-
-
-
-###### 3.1.5 ShowCollections
+###### 3.1.7 GetCollectionStatistics
**Interface:**
```
-rpc ShowCollections(common.Empty) returns (StringListResponse) {}
+ rpc GetCollectionStatistics(GetCollectionStatisticsRequest) returns (GetCollectionStatisticsResponse) {}
+```
+
+**Description:**
+
+This method is used to get collection statistics.
+
+**Parameters:**
+
+- **GetCollectionStatisticsRequest**
+
+GetCollectionStatisticsRequest struct is shown as follows:
+
+```protobuf
+message GetCollectionStatisticsRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+}
+```
+
+The server finds the collection through **collection_name** and get detailed collection statistics.
+
+**Returns:**
+
+- **GetCollectionStatisticsResponse**
+
+```protobuf
+message GetCollectionStatisticsResponse {
+ common.Status status = 1;
+ repeated common.KeyValuePair stats = 2;
+}
+```
+
+**status** represents the server error code. It doesn't contain the gRPC error but the server error code. We can get the executing result in the common status. **error_code** is an enum type to distinguish the executing error type. The total ErrorCode list is shown at the end of this doc. And the **reason** field is a string that describes the detailed error.
+
+**stats** is a map saving different statistics. For example, you can get the row_count of a collection with key 'row_count'.
+
+
+
+###### 3.1.8 ShowCollections
+
+**Interface:**
+
+```
+rpc ShowCollections(ShowCollectionsRequest) returns (ShowCollectionsResponse) {}
```
**Description:**
@@ -233,57 +400,60 @@ This method is used to get collection schema.
**Parameters:** None
-**Returns:**
+**Returns:**
-- **StringListResponse**
+- **ShowCollectionsResponse**
```protobuf
-message StringListResponse {
- common.Status status = 1;
- repeated string values = 2;
+message ShowCollectionsResponse {
+ common.Status status = 1;
+ repeated string collection_names = 2;
}
```
**status** represents the server error code. It doesn't contains grpc error but contains the server error code. We can get the executing result in common status. **error_code** is a enum type to distingush the executing error type. The total Errorcode is shown in the last of this code. And the **reason** field is a string to describes the detailed error.
-**values** is a list contains all collections' name.
+**collection_names** is a list contains all collections' name.
-###### 3.1.6 CreatePartition
+###### 3.1.9 CreatePartition
**Interface:**
```
-rpc CreatePartition(PartitionName) returns (common.Status) {}
+rpc CreatePartition(CreatePartitionRequest) returns (common.Status) {}
```
**Description:**
This method is used to create partition
-**Parameters:**
+**Parameters:**
-- **PartitionName**
+- **CreatePartitionRequest**
-PartitionName struct is shown as follows:
+CreatePartitionRequest struct is shown as follows:
```protobuf
-message PartitionName {
- string partition_name = 1;
+message CreatePartitionRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ string partition_name = 4;
}
```
-**PartitionName** contains only a string named **partition_name**. The server creates partition with the partition_name
+The server creates partition with the **partition_name** in collection with name of **collection_name**
-- **Returns:**
+- **Returns:**
- **common.Status**
```protobuf
message Status {
- ErrorCode error_code = 1;
- string reason = 2;
+ ErrorCode error_code = 1;
+ string reason = 2;
}
```
@@ -291,40 +461,43 @@ message Status {
-###### 3.1.7 DropPartition
+###### 3.1.10 DropPartition
**Interface:**
```
-rpc DropPartition(PartitionName) returns (common.Status) {}
+rpc DropPartition(DropPartitionRequest) returns (common.Status) {}
```
**Description:**
This method is used to drop partition.
-**Parameters:**
+**Parameters:**
-- **PartitionName**
+- **DropPartitionRequest**
-PartitionName struct is shown as follows:
+DropPartitionRequest struct is shown as follows:
```protobuf
-message PartitionName {
- string partition_name = 1;
+message DropPartitionRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ string partition_name = 4;
}
```
-**PartitionName** contains only a string named **partition_name**. Partition with the same partition_name is going to be deleted.
+The partition with the name **partition_name** in the collection **collection_name** is going to be deleted.
-**Returns:**
+**Returns:**
- **common.Status**
```protobuf
message Status {
- ErrorCode error_code = 1;
- string reason = 2;
+ ErrorCode error_code = 1;
+ string reason = 2;
}
```
@@ -332,40 +505,43 @@ message Status {
-###### 3.1.8 HasPartition
+###### 3.1.11 HasPartition
**Interface:**
```
-rpc HasPartition(PartitionName) returns (BoolResponse) {}
+rpc HasPartition(HasPartitionRequest) returns (BoolResponse) {}
```
**Description:**
This method is used to test partition existence.
-**Parameters:**
+**Parameters:**
-- **PartitionName**
+- **HasPartitionRequest**
-PartitionName struct is shown as follows:
+HasPartitionRequest struct is shown as follows:
```protobuf
-message PartitionName {
- string partition_name = 1;
+message HasPartitionRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ string partition_name = 4;
}
```
-**PartitionName** contains only a string named **partition_name**. Partition with the same partition_name is going to be tested.
+The partition with **partition_name** is going to be tested for existence in the collection with **collection_name**.
-**Returns:**
+**Returns:**
- **BoolResponse**
```protobuf
message BoolResponse {
- common.Status status = 1;
- bool value = 2;
+ common.Status status = 1;
+ bool value = 2;
}
```
@@ -374,101 +550,234 @@ message BoolResponse {
**value** represents whether the partition exists. If partition exists, value will be true. If partition doesn't exist, value will be false.
-
-###### 3.1.9 DescribePartition
+###### 3.1.12 LoadPartitions
**Interface:**
```
-rpc DescribePartition(PartitionName) returns (PartitionDescription) {}
+rpc LoadPartitions(LoadPartitionsRequest) returns (common.Status) {}
```
**Description:**
-This method is used to show partition information
+This method is used to load partitions.
-**Parameters:**
+**Parameters:**
-- **PartitionName**
+- **LoadPartitionsRequest**
-PartitionName struct is shown as follows:
+LoadPartitionsRequest struct is shown as follows:
```protobuf
-message PartitionName {
- string partition_name = 1;
+message LoadPartitionsRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ repeated string partition_names = 4;
}
```
-**PartitionName** contains only a string named **partition_name**. The server finds the partition through partition_name and get detailed partition information
+**partition_names** is a list of partition_name. These partitions in the collection with **collection_name** are going to be loaded into memory.
-**Returns:**
+**Returns:**
-- **PartitionDescription**
+- **common.Status**
```protobuf
-message PartitionDescription {
- common.Status status = 1;
- PartitionName name = 2;
- repeated common.KeyValuePair statistics = 3;
+message Status {
+ ErrorCode error_code = 1;
+ string reason = 2;
+}
+```
+
+**Status** represents the server error code. It doesn't contain gRPC errors but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the executing error type. The complete error codes are shown at the end of this document. And the **reason** field is a string that describes the detailed error.
+
+###### 3.1.13 ReleasePartitions
+
+**Interface:**
+
+```
+rpc ReleasePartitions(ReleasePartitionsRequest) returns (common.Status) {}
+```
+
+**Description:**
+
+This method is used to release partition.
+
+**Parameters:**
+
+- **ReleasePartitionsRequest**
+
+ReleasePartitionsRequest struct is shown as follows:
+
+```protobuf
+message ReleasePartitionsRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ repeated string partition_names = 4;
+}
+```
+
+**partition_names** is a list of partition_name. These partitions in the collection with **collection_name** are going to be released from memory.
+
+**Returns:**
+
+- **common.Status**
+
+```protobuf
+message Status {
+ ErrorCode error_code = 1;
+ string reason = 2;
+}
+```
+
+**Status** represents the server error code. It doesn't contain gRPC errors but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the executing error type. The complete error codes are shown at the end of this document. And the **reason** field is a string that describes the detailed error.
+
+###### 3.1.14 GetPartitionStatistics
+
+**Interface:**
+
+```
+ rpc GetPartitionStatistics(GetPartitionStatisticsRequest) returns (GetPartitionStatisticsResponse) {}
+```
+
+**Description:**
+
+This method is used to get partition statistics.
+
+**Parameters:**
+
+- **GetPartitionStatisticsRequest**
+
+GetPartitionStatisticsRequest struct is shown as follows:
+
+```protobuf
+message GetPartitionStatisticsRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ string partition_name = 4;
+}
+```
+
+The server finds the partition through **partition_name** in collection with **collection_name** and get detailed partition statistics.
+
+**Returns:**
+
+- **GetPartitionStatisticsResponse**
+
+```protobuf
+message GetPartitionStatisticsResponse {
+ common.Status status = 1;
+ repeated common.KeyValuePair stats = 2;
}
```
**status** represents the server error code. It doesn't contains grpc error but contains the server error code. We can get the executing result in common status. **error_code** is a enum type to distingush the executing error type. The total Errorcode is shown in the last of this code. And the **reason** field is a string to describes the detailed error.
-**name** is partition_name same as the PartitionName in [CreatePartition](#316-createpartition).
+**stats** is a map saving different statistics. For example, you can get the row_count of a partition with the key 'row_count'.
-**statitistics** is a statistic used to count various information, such as the number of segments, how many rows there are, the number of visits in the last hour, etc.
-
-
-
-###### 3.1.10 ShowPartitions
+###### 3.1.15 ShowPartitions
**Interface:**
```
-rpc ShowPartitions(CollectionName) returns (StringListResponse) {}
+rpc ShowPartitions(ShowPartitionsRequest) returns (ShowPartitionsResponse) {}
```
**Description:**
This method is used to get partition description.
-**Parameters:**
+**Parameters:**
-- **CollectionName**
+- **ShowPartitionsRequest**
-CollectionName struct is shown as follows:
+ShowPartitionsRequest struct is shown as follows:
```protobuf
-message CollectionName {
- string collection_name = 1;
+message ShowPartitionsRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ int64 collectionID = 4;
}
```
-**CollectionName** contains only a string named **collection_name**. Partition with the same collection_name is going to be listed.
+Partitions in the collection with **collection_name** are going to be listed.
-**Returns:**
+**Returns:**
- **StringListResponse**
```protobuf
-message StringListResponse {
- common.Status status = 1;
- repeated string values = 2;
+message ShowPartitionsResponse {
+ common.Status status = 1;
+ repeated string partition_names = 2;
+ repeated int64 partitionIDs = 3;
}
```
**status** represents the server error code. It doesn't contains grpc error but contains the server error code. We can get the executing result in common status. **error_code** is a enum type to distingush the executing error type. The total Errorcode is shown in the last of this code. And the **reason** field is a string to describes the detailed error.
-**values** is a list contains all partitions' name.
-
+**partition_names** is a list containing all partitions' names.
+**partitionIDs** is a list containing all partitions' IDs. The index of a partition in **partition_names** and **partitionIDs** is the same.
#### 3.2 Manipulation Requsts
###### 3.2.1 Insert
-* Insert
+
+**Interface:**
+
+```
+rpc Insert(InsertRequest) returns (InsertResponse){}
+```
+
+**Description:**
+
+Insert a batch of rows into a collection or a partition
+
+**Parameters:**
+
+- **InsertRequest**
+
+InsertRequest struct is shown as follows:
+
+```protobuf
+message InsertRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ string partition_name = 4;
+ repeated common.Blob row_data = 5;
+ repeated uint32 hash_keys = 6;
+}
+
+message Blob {
+ bytes value = 1;
+}
+```
+
+Insert a batch of **row_data** into collection with **collection_name** and partition with **partition_name**. Blob contains bytes of value.
+
+**Returns:**
+
+- **InsertResponse**
+
+```protobuf
+message InsertResponse {
+ common.Status status = 1;
+ int64 rowID_begin = 2;
+ int64 rowID_end = 3;
+}
+```
+
+**Status** represents the server error code. It doesn't contain gRPC errors but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the executing error type. The complete error codes are shown at the end of this document. And the **reason** field is a string that describes the detailed error.
+
+**rowID_begin** and **rowID_end** are the ID of inserted values.
###### 3.2.2 Delete
@@ -479,4 +788,208 @@ message StringListResponse {
#### 3.3 Query
+#### 3.3 Index
+###### 3.3.1 CreateIndex
+
+**Interface:**
+
+```
+rpc CreateIndex(CreateIndexRequest) returns (common.Status){}
+```
+
+**Description:**
+
+Create an index for a collection.
+
+**Parameters:**
+
+- **CreateIndexRequest**
+
+CreateIndexRequest struct is shown as follows:
+
+```protobuf
+message CreateIndexRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ string field_name = 4;
+ repeated common.KeyValuePair extra_params = 5;
+}
+```
+
+CreateIndex for the field with **field_name** in collection with **collection_name**.
+
+**extra_params**: For fast search, you build an index for a field. You specify detailed index information for a field. Detailed information about indexes can be seen in chapter 2.2.3.
+
+
+
+**Returns:**
+
+- **common.Status**
+
+```protobuf
+message Status {
+ ErrorCode error_code = 1;
+ string reason = 2;
+}
+```
+
+**Status** represents the server error code. It doesn't contain gRPC errors but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the executing error type. The complete error codes are shown at the end of this document. And the **reason** field is a string that describes the detailed error.
+
+###### 3.3.2 DescribeIndex
+
+**Interface:**
+
+```
+rpc DescribeIndex(DescribeIndexRequest) returns (DescribeIndexResponse){}
+```
+
+**Description:**
+
+Get detailed index info.
+
+**Parameters:**
+
+- **DescribeIndexRequest**
+
+DescribeIndexRequest struct is shown as follows:
+
+```protobuf
+message DescribeIndexRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ string field_name = 4;
+ string index_name = 5;
+}
+```
+
+Get index details for the field with **field_name** in collection with **collection_name**.
+
+**index_name**: A field can create multiple indexes. And you can drop specific index through index_name.
+
+**Returns:**
+
+- **DescribeIndexResponse**
+
+```protobuf
+message DescribeIndexResponse {
+ common.Status status = 1;
+ repeated IndexDescription index_descriptions = 2;
+}
+
+message IndexDescription {
+ string index_name = 1;
+ int64 indexID = 2;
+ repeated common.KeyValuePair params = 3;
+}
+```
+
+**Status** represents the server error code. It doesn't contain gRPC errors but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the executing error type. The complete error codes are shown at the end of this document. And the **reason** field is a string that describes the detailed error.
+
+**index_descriptions** is a list of index descriptions. If **index_name** is specified in the request, the list length will be 0. Otherwise, if **index_name** is empty, the response will return all indexes in the field of a collection.
+
+**params**: For fast search, you build an index for a field. You specify detailed index information for a field. Detailed information about indexes can be seen in chapter 2.2.3.
+
+###### 3.3.3 GetIndexStates
+
+**Interface:**
+
+```
+rpc GetIndexStates(GetIndexStatesRequest) returns (GetIndexStatesResponse){}
+```
+
+**Description:**
+
+Get index build progress info.
+
+**Parameters:**
+
+- **GetIndexStatesRequest**
+
+GetIndexStatesRequest struct is shown as follows:
+
+```protobuf
+message GetIndexStatesRequest {
+ common.MsgBase base = 1;
+ string db_name = 2 ;
+ string collection_name = 3;
+ string field_name = 4;
+ string index_name = 5;
+}
+```
+
+Get index build progress info for the field with **field_name** in collection with **collection_name**.
+
+**index_name**: A field can create multiple indexes. And you can get specific index state through index_name.
+
+**Returns:**
+
+- **GetIndexStatesResponse**
+
+```protobuf
+message GetIndexStatesResponse {
+ common.Status status = 1;
+ common.IndexState state = 2;
+}
+
+enum IndexState {
+ IndexStateNone = 0;
+ Unissued = 1;
+ InProgress = 2;
+ Finished = 3;
+ Failed = 4;
+ Deleted = 5;
+}
+```
+
+**Status** represents the server error code. It doesn't contain gRPC errors but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the executing error type. The complete error codes are shown at the end of this document. And the **reason** field is a string that describes the detailed error.
+
+**index state** is an enum type to distinguish the different stages of the index building process.
+
+
+###### 3.3.4 DropIndex
+
+**Interface:**
+
+```
+rpc DropIndex(DropIndexRequest) returns (common.Status){}
+```
+
+**Description:**
+
+Drop an index for a collection.
+
+**Parameters:**
+
+- **DropIndexRequest**
+
+DropIndexRequest struct is shown as follows:
+
+```protobuf
+message DropIndexRequest {
+ common.MsgBase base = 1;
+ string db_name = 2;
+ string collection_name = 3;
+ string field_name = 4;
+ string index_name = 5;
+}
+```
+
+DropIndex for the field with **field_name** in collection with **collection_name**.
+
+**index_name**: A field can create multiple indexes. And you can drop specific index through index_name.
+
+**Returns:**
+
+- **common.Status**
+
+```protobuf
+message Status {
+ ErrorCode error_code = 1;
+ string reason = 2;
+}
+```
+
+**Status** represents the server error code. It doesn't contain gRPC errors but contains the server error code. We can get the execution result from the common status. **error_code** is an enum type to distinguish the executing error type. The complete error codes are shown at the end of this document. And the **reason** field is a string that describes the detailed error.
diff --git a/docs/developer_guides/appendix_d_error_code.md b/docs/developer_guides/appendix_d_error_code.md
index 647af7723d..b8e25d65b2 100644
--- a/docs/developer_guides/appendix_d_error_code.md
+++ b/docs/developer_guides/appendix_d_error_code.md
@@ -4,33 +4,33 @@
```protobuf
enum ErrorCode {
- SUCCESS = 0;
- UNEXPECTED_ERROR = 1;
- CONNECT_FAILED = 2;
- PERMISSION_DENIED = 3;
- COLLECTION_NOT_EXISTS = 4;
- ILLEGAL_ARGUMENT = 5;
- ILLEGAL_DIMENSION = 7;
- ILLEGAL_INDEX_TYPE = 8;
- ILLEGAL_COLLECTION_NAME = 9;
- ILLEGAL_TOPK = 10;
- ILLEGAL_ROWRECORD = 11;
- ILLEGAL_VECTOR_ID = 12;
- ILLEGAL_SEARCH_RESULT = 13;
- FILE_NOT_FOUND = 14;
- META_FAILED = 15;
- CACHE_FAILED = 16;
- CANNOT_CREATE_FOLDER = 17;
- CANNOT_CREATE_FILE = 18;
- CANNOT_DELETE_FOLDER = 19;
- CANNOT_DELETE_FILE = 20;
- BUILD_INDEX_ERROR = 21;
- ILLEGAL_NLIST = 22;
- ILLEGAL_METRIC_TYPE = 23;
- OUT_OF_MEMORY = 24;
+ SUCCESS = 0;
+ UNEXPECTED_ERROR = 1;
+ CONNECT_FAILED = 2;
+ PERMISSION_DENIED = 3;
+ COLLECTION_NOT_EXISTS = 4;
+ ILLEGAL_ARGUMENT = 5;
+ ILLEGAL_DIMENSION = 7;
+ ILLEGAL_INDEX_TYPE = 8;
+ ILLEGAL_COLLECTION_NAME = 9;
+ ILLEGAL_TOPK = 10;
+ ILLEGAL_ROWRECORD = 11;
+ ILLEGAL_VECTOR_ID = 12;
+ ILLEGAL_SEARCH_RESULT = 13;
+ FILE_NOT_FOUND = 14;
+ META_FAILED = 15;
+ CACHE_FAILED = 16;
+ CANNOT_CREATE_FOLDER = 17;
+ CANNOT_CREATE_FILE = 18;
+ CANNOT_DELETE_FOLDER = 19;
+ CANNOT_DELETE_FILE = 20;
+ BUILD_INDEX_ERROR = 21;
+ ILLEGAL_NLIST = 22;
+ ILLEGAL_METRIC_TYPE = 23;
+ OUT_OF_MEMORY = 24;
- // internal error code.
- DD_REQUEST_RACE = 1000;
+ // internal error code.
+ DD_REQUEST_RACE = 1000;
}
```
diff --git a/docs/developer_guides/chap01_system_overview.md b/docs/developer_guides/chap01_system_overview.md
index 67adfe45f7..4151169646 100644
--- a/docs/developer_guides/chap01_system_overview.md
+++ b/docs/developer_guides/chap01_system_overview.md
@@ -72,51 +72,3 @@ For better throughput, Milvus allows asynchronous state synchronization between
In order to boost throughput, we model Milvus as a stream-driven system.
-
-
-#### 1.6 System Model
-
-```go
-type Service interface {
- Init() error
- Start() error
- Stop() error
-}
-```
-
-```go
-
-type Component interface {
- GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
- GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error)
- GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error)
-}
-```
-
-* *GetComponentStates*
-
-```go
-
-type StateCode = int
-
-const (
- INITIALIZING StateCode = 0
- HEALTHY StateCode = 1
- ABNORMAL StateCode = 2
-)
-
-type ComponentInfo struct {
- NodeID UniqueID
- Role string
- StateCode StateCode
- ExtraInfo []*commonpb.KeyValuePair
-}
-
-type ComponentStates struct {
- State *ComponentInfo
- SubcomponentStates []*ComponentInfo
- Status *commonpb.Status
-}
-
-```
-
diff --git a/docs/developer_guides/chap02_schema.md b/docs/developer_guides/chap02_schema.md
index 65fd8511d7..34c1ed2640 100644
--- a/docs/developer_guides/chap02_schema.md
+++ b/docs/developer_guides/chap02_schema.md
@@ -6,10 +6,10 @@
``` go
type CollectionSchema struct {
- Name string
- Description string
- AutoId bool
- Fields []*FieldSchema
+ Name string
+ Description string
+ AutoId bool
+ Fields []*FieldSchema
}
```
@@ -17,13 +17,13 @@ type CollectionSchema struct {
``` go
type FieldSchema struct {
- FieldID int64
- Name string
- IsPrimaryKey bool
- Description string
- DataType DataType
- TypeParams []*commonpb.KeyValuePair
- IndexParams []*commonpb.KeyValuePair
+ FieldID int64
+ Name string
+ IsPrimaryKey bool
+ Description string
+ DataType DataType
+ TypeParams []*commonpb.KeyValuePair
+ IndexParams []*commonpb.KeyValuePair
}
```
@@ -33,20 +33,20 @@ type FieldSchema struct {
```protobuf
enum DataType {
- NONE = 0;
- BOOL = 1;
- INT8 = 2;
- INT16 = 3;
- INT32 = 4;
- INT64 = 5;
+ NONE = 0;
+ BOOL = 1;
+ INT8 = 2;
+ INT16 = 3;
+ INT32 = 4;
+ INT64 = 5;
- FLOAT = 10;
- DOUBLE = 11;
+ FLOAT = 10;
+ DOUBLE = 11;
- STRING = 20;
+ STRING = 20;
- VECTOR_BINARY = 100;
- VECTOR_FLOAT = 101;
+ VECTOR_BINARY = 100;
+ VECTOR_FLOAT = 101;
}
```
@@ -83,10 +83,7 @@ Different index types use different index params in construction and query. All
## IVF_FLAT
-**IVF** (*Inverted File*) is an index type based on quantization. It divides the points in space into `nlist`
-units by clustering method. During searching vectors, it compares the distances between the target vector
-and the center of all the units, and then select the `nprobe` nearest unit. Then, it compares all the vectors
-in these selected cells to get the final result.
+**IVF** (*Inverted File*) is an index type based on quantization. It divides the points in space into `nlist` units by clustering method. During searching vectors, it compares the distances between the target vector and the center of all the units, and then select the `nprobe` nearest unit. Then, it compares all the vectors in these selected cells to get the final result.
IVF_FLAT is the most basic IVF index, and the encoded data stored in each unit is consistent with the original data.
@@ -101,7 +98,7 @@ IVF_FLAT is the most basic IVF index, and the encoded data stored in each unit i
{
"index_type": "IVF_FLAT",
"metric_type": "L2", # one of L2, IP
-
+
#Special for IVF_FLAT
"nlist": 100 # int. 1~65536
}
@@ -117,7 +114,7 @@ IVF_FLAT is the most basic IVF index, and the encoded data stored in each unit i
"topk": top_k,
"query": queries,
"metric_type": "L2", # one of L2, IP
-
+
#Special for IVF_FLAT
"nprobe": 8 # int. 1~nlist(cpu), 1~min[2048, nlist](gpu)
}
@@ -136,7 +133,7 @@ IVF_FLAT is the most basic IVF index, and the encoded data stored in each unit i
{
"index_type": "BIN_IVF_FLAT",
"metric_type": "jaccard", # one of jaccard, hamming, tanimoto
-
+
#Special for BIN_IVF_FLAT
"nlist": 100 # int. 1~65536
}
@@ -152,7 +149,7 @@ IVF_FLAT is the most basic IVF index, and the encoded data stored in each unit i
{
"topk": top_k,
"query": queries,
-
+
#Special for BIN_IVF_FLAT
"metric_type": "jaccard", # one of jaccard, hamming, tanimoto
"nprobe": 8 # int. 1~nlist(cpu), 1~min[2048, nlist](gpu)
@@ -163,14 +160,9 @@ IVF_FLAT is the most basic IVF index, and the encoded data stored in each unit i
## IVF_PQ
-**PQ** (*Product Quantization*) uniformly decomposes the original high-dimensional vector space into
-Cartesian products of `m` low-dimensional vector spaces, and then quantizes the decomposed low-dimensional
-vector spaces. Instead of calculating the distances between the target vector and the center of all the units,
-product quantization enables the calculation of distances between the target vector and the clustering center
-of each low-dimensional space and greatly reduces the time complexity and space complexity of the algorithm.
+**PQ** (*Product Quantization*) uniformly decomposes the original high-dimensional vector space into Cartesian products of `m` low-dimensional vector spaces, and then quantizes the decomposed low-dimensional vector spaces. Instead of calculating the distances between the target vector and the center of all the units, product quantization enables the calculation of distances between the target vector and the clustering center of each low-dimensional space and greatly reduces the time complexity and space complexity of the algorithm.
-IVF_PQ performs IVF index clustering, and then quantizes the product of vectors. Its index file is even
-smaller than IVF_SQ8, but it also causes a loss of accuracy during searching.
+IVF_PQ performs IVF index clustering, and then quantizes the product of vectors. Its index file is even smaller than IVF_SQ8, but it also causes a loss of accuracy during searching.
- building parameters:
@@ -183,7 +175,7 @@ smaller than IVF_SQ8, but it also causes a loss of accuracy during searching.
{
"index_type": "IVF_PQ",
"metric_type": "L2", # one of L2, IP
-
+
#Special for IVF_PQ
"nlist": 100, # int. 1~65536
"m": 8
@@ -200,7 +192,7 @@ smaller than IVF_SQ8, but it also causes a loss of accuracy during searching.
"topk": top_k,
"query": queries,
"metric_type": "L2", # one of L2, IP
-
+
#Special for IVF_PQ
"nprobe": 8 # int. 1~nlist(cpu), 1~min[2048, nlist](gpu)
}
@@ -208,13 +200,10 @@ smaller than IVF_SQ8, but it also causes a loss of accuracy during searching.
## IVF_SQ8
-**IVF_SQ8** does scalar quantization for each vector placed in the unit based on IVF. Scalar quantization
-converts each dimension of the original vector from a 4-byte floating-point number to a 1-byte unsigned integer,
-so the IVF_SQ8 index file occupies much less space than the IVF_FLAT index file.
-However, scalar quantization results in a loss of accuracy during searching vectors.
+**IVF_SQ8** does scalar quantization for each vector placed in the unit based on IVF. Scalar quantization converts each dimension of the original vector from a 4-byte floating-point number to a 1-byte unsigned integer, so the IVF_SQ8 index file occupies much less space than the IVF_FLAT index file. However, scalar quantization results in a loss of accuracy during searching vectors.
- building parameters:
-
+
**nlist**: Number of cluster units.
```python
@@ -222,7 +211,7 @@ However, scalar quantization results in a loss of accuracy during searching vect
{
"index_type": "IVF_SQ8",
"metric_type": "L2", # one of L2, IP
-
+
#Special for IVF_SQ8
"nlist": 100 # int. 1~65536
}
@@ -238,7 +227,7 @@ However, scalar quantization results in a loss of accuracy during searching vect
"topk": top_k,
"query": queries,
"metric_type": "L2", # one of L2, IP
-
+
#Special for IVF_SQ8
"nprobe": 8 # int. 1~nlist(cpu), 1~min[2048, nlist](gpu)
}
@@ -246,16 +235,14 @@ However, scalar quantization results in a loss of accuracy during searching vect
## IVF_SQ8_HYBRID
-Optimized version of IVF_SQ8 that requires both CPU and GPU to work. Unlike IVF_SQ8, IVF_SQ8H uses a GPU-based
-coarse quantizer, which greatly reduces time to quantize.
+Optimized version of IVF_SQ8 that requires both CPU and GPU to work. Unlike IVF_SQ8, IVF_SQ8H uses a GPU-based coarse quantizer, which greatly reduces time to quantize.
IVF_SQ8H is an IVF_SQ8 index that optimizes query execution.
The query method is as follows:
- If `nq` ≥ `gpu_search_threshold`, GPU handles the entire query task.
-- If `nq` < `gpu_search_threshold`, GPU handles the task of retrieving the `nprobe` nearest unit in the IVF
-index file, and CPU handles the rest.
+- If `nq` < `gpu_search_threshold`, GPU handles the task of retrieving the `nprobe` nearest unit in the IVF index file, and CPU handles the rest.
- building parameters:
@@ -266,7 +253,7 @@ index file, and CPU handles the rest.
{
"index_type": "IVF_SQ8_HYBRID",
"metric_type": "L2", # one of L2, IP
-
+
#Special for IVF_SQ8_HYBRID
"nlist": 100 # int. 1~65536
}
@@ -282,7 +269,7 @@ index file, and CPU handles the rest.
"topk": top_k,
"query": queries,
"metric_type": "L2", # one of L2, IP
-
+
#Special for IVF_SQ8_HYBRID
"nprobe": 8 # int. 1~nlist(cpu), 1~min[2048, nlist](gpu)
}
@@ -290,15 +277,9 @@ index file, and CPU handles the rest.
## ANNOY
-**ANNOY** (*Approximate Nearest Neighbors Oh Yeah*) is an index that uses a hyperplane to divide a
-high-dimensional space into multiple subspaces, and then stores them in a tree structure.
+**ANNOY** (*Approximate Nearest Neighbors Oh Yeah*) is an index that uses a hyperplane to divide a high-dimensional space into multiple subspaces, and then stores them in a tree structure.
-When searching for vectors, ANNOY follows the tree structure to find subspaces closer to the target vector,
-and then compares all the vectors in these subspaces (The number of vectors being compared should not be
-less than `search_k`) to obtain the final result. Obviously, when the target vector is close to the edge of
-a certain subspace, sometimes it is necessary to greatly increase the number of searched subspaces to obtain
-a high recall rate. Therefore, ANNOY uses `n_trees` different methods to divide the whole space, and searches
-all the dividing methods simultaneously to reduce the probability that the target vector is always at the edge of the subspace.
+When searching for vectors, ANNOY follows the tree structure to find subspaces closer to the target vector, and then compares all the vectors in these subspaces (The number of vectors being compared should not be less than `search_k`) to obtain the final result. Obviously, when the target vector is close to the edge of a certain subspace, sometimes it is necessary to greatly increase the number of searched subspaces to obtain a high recall rate. Therefore, ANNOY uses `n_trees` different methods to divide the whole space, and searches all the dividing methods simultaneously to reduce the probability that the target vector is always at the edge of the subspace.
- building parameters:
@@ -309,7 +290,7 @@ all the dividing methods simultaneously to reduce the probability that the targe
{
"index_type": "ANNOY",
"metric_type": "L2", # one of L2, IP
-
+
#Special for ANNOY
"n_trees": 8 # int. 1~1024
}
@@ -325,7 +306,7 @@ all the dividing methods simultaneously to reduce the probability that the targe
"topk": top_k,
"query": queries,
"metric_type": "L2", # one of L2, IP
-
+
#Special for ANNOY
"search_k": -1 # int. {-1} U [top_k, n*n_trees], n represents vectors count.
}
@@ -333,12 +314,7 @@ all the dividing methods simultaneously to reduce the probability that the targe
## HNSW
-**HNSW** (*Hierarchical Navigable Small World Graph*) is a graph-based indexing algorithm. It builds a
-multi-layer navigation structure for an image according to certain rules. In this structure, the upper
-layers are more sparse and the distances between nodes are farther; the lower layers are denser and
-he distances between nodes are closer. The search starts from the uppermost layer, finds the node closest
-to the target in this layer, and then enters the next layer to begin another search. After multiple iterations,
-it can quickly approach the target position.
+**HNSW** (*Hierarchical Navigable Small World Graph*) is a graph-based indexing algorithm. It builds a multi-layer navigation structure for an image according to certain rules. In this structure, the upper layers are more sparse and the distances between nodes are farther; the lower layers are denser and the distances between nodes are closer. The search starts from the uppermost layer, finds the node closest to the target in this layer, and then enters the next layer to begin another search. After multiple iterations, it can quickly approach the target position.
In order to improve performance, HNSW limits the maximum degree of nodes on each layer of the graph to `M`.
In addition, you can use `efConstruction` (when building index) or `ef` (when searching targets) to specify a search range.
@@ -354,7 +330,7 @@ In addition, you can use `efConstruction` (when building index) or `ef` (when se
{
"index_type": "HNSW",
"metric_type": "L2", # one of L2, IP
-
+
#Special for HNSW
"M": 16, # int. 4~64
"efConstruction": 40 # int. 8~512
@@ -369,19 +345,18 @@ In addition, you can use `efConstruction` (when building index) or `ef` (when se
# HNSW
{
- "topk": top_k,
- "query": queries,
- "metric_type": "L2", # one of L2, IP
-
- #Special for HNSW
- "ef": 64 # int. top_k~32768
+ "topk": top_k,
+ "query": queries,
+ "metric_type": "L2", # one of L2, IP
+
+ #Special for HNSW
+ "ef": 64 # int. top_k~32768
}
```
## RHNSW_PQ
-**RHNSW_PQ** is a variant index type combining PQ and HNSW. It first uses PQ to quantize the vector,
-then uses HNSW to quantize the PQ quantization result to get the index.
+**RHNSW_PQ** is a variant index type combining PQ and HNSW. It first uses PQ to quantize the vector, then uses HNSW to quantize the PQ quantization result to get the index.
- building parameters:
@@ -396,7 +371,7 @@ then uses HNSW to quantize the PQ quantization result to get the index.
{
"index_type": "RHNSW_PQ",
"metric_type": "L2",
-
+
#Special for RHNSW_PQ
"M": 16, # int. 4~64
"efConstruction": 40, # int. 8~512
@@ -414,9 +389,9 @@ then uses HNSW to quantize the PQ quantization result to get the index.
"topk": top_k,
"query": queries,
"metric_type": "L2", # one of L2, IP
-
-
- #Special for RHNSW_PQ
+
+
+ #Special for RHNSW_PQ
"ef": 64 # int. top_k~32768
}
```
@@ -436,7 +411,7 @@ then uses HNSW to quantize the PQ quantization result to get the index.
{
"index_type": "RHNSW_SQ",
"metric_type": "L2", # one of L2, IP
-
+
#Special for RHNSW_SQ
"M": 16, # int. 4~64
"efConstruction": 40 # int. 8~512
@@ -453,7 +428,7 @@ then uses HNSW to quantize the PQ quantization result to get the index.
"topk": top_k,
"query": queries,
"metric_type": "L2", # one of L2, IP
-
+
#Special for RHNSW_SQ
"ef": 64 # int. top_k~32768
}
@@ -461,10 +436,7 @@ then uses HNSW to quantize the PQ quantization result to get the index.
## NSG
-**NSG** (*Refined Navigating Spreading-out Graph*) is a graph-based indexing algorithm. It sets the center
-position of the whole image as a navigation point, and then uses a specific edge selection strategy to control
-the out-degree of each point (less than or equal to `out_degree`). Therefore, it can reduce memory usage and
-quickly locate the target position nearby during searching vectors.
+**NSG** (*Refined Navigating Spreading-out Graph*) is a graph-based indexing algorithm. It sets the center position of the whole image as a navigation point, and then uses a specific edge selection strategy to control the out-degree of each point (less than or equal to `out_degree`). Therefore, it can reduce memory usage and quickly locate the target position nearby during searching vectors.
The graph construction process of NSG is as follows:
@@ -489,7 +461,7 @@ The query process is similar to the graph building process. It starts from the n
{
"index_type": "NSG",
"metric_type": "L2",
-
+
#Special for RHNSW_SQ
"search_length": 60, # int. 10~300
"out_degree": 30, # int. 5~300
@@ -508,8 +480,8 @@ The query process is similar to the graph building process. It starts from the n
"topk": top_k,
"query": queries,
"metric_type": "L2", # one of L2, IP
-
+
#Special for RHNSW_SQ
"search_length": 100 # int. 10~300
}
-```
\ No newline at end of file
+```
diff --git a/docs/developer_guides/chap03_index_service.md b/docs/developer_guides/chap03_index_service.md
index bc8345fb94..b160ecd90b 100644
--- a/docs/developer_guides/chap03_index_service.md
+++ b/docs/developer_guides/chap03_index_service.md
@@ -12,13 +12,15 @@
```go
type IndexService interface {
- Service
- Component
- RegisterNode(ctx context.Context, req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error)
- BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error)
- GetIndexStates(ctx context.Context, req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error)
- GetIndexFilePaths(ctx context.Context, req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error)
- NotifyBuildIndex(ctx context.Context, nty *indexpb.BuildIndexNotification) (*commonpb.Status, error)
+ Component
+ TimeTickProvider
+
+ RegisterNode(ctx context.Context, req *indexpb.RegisterNodeRequest) (*indexpb.RegisterNodeResponse, error)
+ BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*indexpb.BuildIndexResponse, error)
+ DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error)
+ GetIndexStates(ctx context.Context, req *indexpb.IndexStatesRequest) (*indexpb.IndexStatesResponse, error)
+ GetIndexFilePaths(ctx context.Context, req *indexpb.IndexFilePathsRequest) (*indexpb.IndexFilePathsResponse, error)
+ NotifyBuildIndex(ctx context.Context, nty *indexpb.BuildIndexNotification) (*commonpb.Status, error)
}
```
@@ -28,30 +30,30 @@ type IndexService interface {
```go
type MsgBase struct {
- MsgType MsgType
- MsgID UniqueID
- Timestamp uint64
- SourceID UniqueID
+ MsgType MsgType
+ MsgID UniqueID
+ Timestamp uint64
+ SourceID UniqueID
}
type Address struct {
- Ip string
- Port int64
+ Ip string
+ Port int64
}
type RegisterNodeRequest struct {
- Base *commonpb.MsgBase
- Address *commonpb.Address
+ Base *commonpb.MsgBase
+ Address *commonpb.Address
}
type InitParams struct {
- NodeID UniqueID
- StartParams []*commonpb.KeyValuePair
+ NodeID UniqueID
+ StartParams []*commonpb.KeyValuePair
}
type RegisterNodeResponse struct {
- InitParams *internalpb2.InitParams
- Status *commonpb.Status
+ InitParams *internalpb.InitParams
+ Status *commonpb.Status
}
```
@@ -59,72 +61,79 @@ type RegisterNodeResponse struct {
```go
type KeyValuePair struct {
- Key string
- Value string
+ Key string
+ Value string
}
type BuildIndexRequest struct {
- IndexName string
- IndexID UniqueID
- DataPaths []string
- TypeParams []*commonpb.KeyValuePair
- IndexParams []*commonpb.KeyValuePair
+ IndexBuildID UniqueID
+ IndexName string
+ IndexID UniqueID
+ DataPaths []string
+ TypeParams []*commonpb.KeyValuePair
+ IndexParams []*commonpb.KeyValuePair
}
type BuildIndexResponse struct {
- Status *commonpb.Status
- IndexBuildID UniqueID
+ Status *commonpb.Status
+ IndexBuildID UniqueID
+}
+```
+
+* *DropIndex*
+
+```go
+type DropIndexRequest struct {
+ IndexID UniqueID
}
```
* *GetIndexStates*
```go
-type IndexStatesRequest struct {
- IndexBuildIDs []UniqueID
+type GetIndexStatesRequest struct {
+ IndexBuildIDs []UniqueID
}
-enum IndexState {
- NONE = 0;
- UNISSUED = 1;
- INPROGRESS = 2;
- FINISHED = 3;
- FAILED = 4;
- DELETED = 5;
-}
+const (
+ IndexState_IndexStateNone IndexState = 0
+ IndexState_Unissued IndexState = 1
+ IndexState_InProgress IndexState = 2
+ IndexState_Finished IndexState = 3
+ IndexState_Failed IndexState = 4
+ IndexState_Deleted IndexState = 5
+)
type IndexInfo struct {
- State commonpb.IndexState
- IndexBuildID UniqueID
- IndexID UniqueID
- IndexName string
- Reason string
+ State commonpb.IndexState
+ IndexBuildID UniqueID
+ IndexID UniqueID
+ IndexName string
+ Reason string
}
-type IndexStatesResponse struct {
- Status *commonpb.Status
- States []*IndexInfo
+type GetIndexStatesResponse struct {
+ Status *commonpb.Status
+ States []*IndexInfo
}
-
-
```
* *GetIndexFilePaths*
```go
-type IndexFilePathRequest struct {
- IndexBuildIDs []UniqueID
+type GetIndexFilePathsRequest struct {
+ IndexBuildIDs []UniqueID
}
type IndexFilePathInfo struct {
- Status *commonpb.Status
- IndexBuildID UniqueID
- IndexFilePaths []string
+ Status *commonpb.Status
+ IndexBuildID UniqueID
+ IndexFilePaths []string
}
-type IndexFilePathsResponse struct {
- Status *commonpb.Status
- FilePaths []*IndexFilePathInfo
+type GetIndexFilePathsResponse struct {
+ Status *commonpb.Status
+ FilePaths []*IndexFilePathInfo
}
```
@@ -132,11 +141,11 @@ type IndexFilePathsResponse struct {
* *NotifyBuildIndex*
```go
-type BuildIndexNotification struct {
- Status *commonpb.Status
- IndexBuildID UniqueID
- IndexFilePaths []string
- NodeID UniqueID
+type NotifyBuildIndexRequest struct {
+ Status *commonpb.Status
+ IndexBuildID UniqueID
+ IndexFilePaths []string
+ NodeID UniqueID
}
```
@@ -146,10 +155,11 @@ type BuildIndexNotification struct {
```go
type IndexNode interface {
- Service
- Component
- BuildIndex(ctx context.Context, req *indexpb.BuildIndexCmd) (*commonpb.Status, error)
- DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error)
+ Component
+ TimeTickProvider
+
+ BuildIndex(ctx context.Context, req *indexpb.BuildIndexRequest) (*commonpb.Status, error)
+ DropIndex(ctx context.Context, req *indexpb.DropIndexRequest) (*commonpb.Status, error)
}
```
@@ -158,21 +168,17 @@ type IndexNode interface {
```go
type KeyValuePair struct {
- Key string
- Value string
+ Key string
+ Value string
}
type BuildIndexRequest struct {
- IndexName string
- IndexID UniqueID
- DataPaths []string
- TypeParams []*commonpb.KeyValuePair
- IndexParams []*commonpb.KeyValuePair
-}
-
-type BuildIndexCmd struct {
- IndexBuildID UniqueID
- Req *BuildIndexRequest
+ IndexBuildID UniqueID
+ IndexName string
+ IndexID UniqueID
+ DataPaths []string
+ TypeParams []*commonpb.KeyValuePair
+ IndexParams []*commonpb.KeyValuePair
}
```
@@ -180,6 +186,6 @@ type BuildIndexCmd struct {
```go
type DropIndexRequest struct {
- IndexID UniqueID
+ IndexID UniqueID
}
```
diff --git a/docs/developer_guides/chap04_message_stream.md b/docs/developer_guides/chap04_message_stream.md
index 33876c6c46..24f745783a 100644
--- a/docs/developer_guides/chap04_message_stream.md
+++ b/docs/developer_guides/chap04_message_stream.md
@@ -4,14 +4,14 @@
-// TODO
+// TODO remove?
#### 8.2 Message Stream Service API
```go
type Client interface {
- CreateChannels(req CreateChannelRequest) (CreateChannelResponse, error)
- DestoryChannels(req DestoryChannelRequest) error
- DescribeChannels(req DescribeChannelRequest) (DescribeChannelResponse, error)
+ CreateChannels(req CreateChannelRequest) (CreateChannelResponse, error)
+ DestoryChannels(req DestoryChannelRequest) error
+ DescribeChannels(req DescribeChannelRequest) (DescribeChannelResponse, error)
}
```
@@ -19,19 +19,19 @@ type Client interface {
```go
type OwnerDescription struct {
- Role string
- Address string
- //Token string
- DescriptionText string
+ Role string
+ Address string
+ //Token string
+ DescriptionText string
}
type CreateChannelRequest struct {
- OwnerDescription OwnerDescription
- NumChannels int
+ OwnerDescription OwnerDescription
+ NumChannels int
}
type CreateChannelResponse struct {
- ChannelNames []string
+ ChannelNames []string
}
```
@@ -39,7 +39,7 @@ type CreateChannelResponse struct {
```go
type DestoryChannelRequest struct {
- ChannelNames []string
+ ChannelNames []string
}
```
@@ -49,16 +49,16 @@ type DestoryChannelRequest struct {
```go
type DescribeChannelRequest struct {
- ChannelNames []string
+ ChannelNames []string
}
type ChannelDescription struct {
- ChannelName string
- Owner OwnerDescription
+ ChannelName string
+ Owner OwnerDescription
}
type DescribeChannelResponse struct {
- Descriptions []ChannelDescription
+ Descriptions []ChannelDescription
}
```
@@ -78,83 +78,85 @@ type DescribeChannelResponse struct {
// Msg
type MsgType uint32
-const {
- MsgType_kNone MsgType = 0
- // Definition Requests: collection
- MsgType_kCreateCollection MsgType = 100
- MsgType_kDropCollection MsgType = 101
- MsgType_kHasCollection MsgType = 102
- MsgType_kDescribeCollection MsgType = 103
- MsgType_kShowCollections MsgType = 104
- MsgType_kGetSysConfigs MsgType = 105
- MsgType_kLoadCollection MsgType = 106
- MsgType_kReleaseCollection MsgType = 107
- // Definition Requests: partition
- MsgType_kCreatePartition MsgType = 200
- MsgType_kDropPartition MsgType = 201
- MsgType_kHasPartition MsgType = 202
- MsgType_kDescribePartition MsgType = 203
- MsgType_kShowPartitions MsgType = 204
- MsgType_kLoadPartition MsgType = 205
- MsgType_kReleasePartition MsgType = 206
- // Define Requests: segment
- MsgType_kShowSegment MsgType = 250
- MsgType_kDescribeSegment MsgType = 251
- // Definition Requests: Index
- MsgType_kCreateIndex MsgType = 300
- MsgType_kDescribeIndex MsgType = 301
- MsgType_kDropIndex MsgType = 302
- // Manipulation Requests
- MsgType_kInsert MsgType = 400
- MsgType_kDelete MsgType = 401
- MsgType_kFlush MsgType = 402
- // Query
- MsgType_kSearch MsgType = 500
- MsgType_kSearchResult MsgType = 501
- MsgType_kGetIndexState MsgType = 502
- MsgType_kGetCollectionStatistics MsgType = 503
- MsgType_kGetPartitionStatistics MsgType = 504
- // Data Service
- MsgType_kSegmentInfo MsgType = 600
- // System Control
- MsgType_kTimeTick MsgType = 1200
- MsgType_kQueryNodeStats MsgType = 1201
- MsgType_kLoadIndex MsgType = 1202
- MsgType_kRequestID MsgType = 1203
- MsgType_kRequestTSO MsgType = 1204
- MsgType_kAllocateSegment MsgType = 1205
- MsgType_kSegmentStatistics MsgType = 1206
- MsgType_kSegmentFlushDone MsgType = 1207
-}
+const (
+ MsgType_Undefined MsgType = 0
+ // DEFINITION REQUESTS: COLLECTION
+ MsgType_CreateCollection MsgType = 100
+ MsgType_DropCollection MsgType = 101
+ MsgType_HasCollection MsgType = 102
+ MsgType_DescribeCollection MsgType = 103
+ MsgType_ShowCollections MsgType = 104
+ MsgType_GetSystemConfigs MsgType = 105
+ MsgType_LoadCollection MsgType = 106
+ MsgType_ReleaseCollection MsgType = 107
+ // DEFINITION REQUESTS: PARTITION
+ MsgType_CreatePartition MsgType = 200
+ MsgType_DropPartition MsgType = 201
+ MsgType_HasPartition MsgType = 202
+ MsgType_DescribePartition MsgType = 203
+ MsgType_ShowPartitions MsgType = 204
+ MsgType_LoadPartitions MsgType = 205
+ MsgType_ReleasePartitions MsgType = 206
+ // DEFINE REQUESTS: SEGMENT
+ MsgType_ShowSegments MsgType = 250
+ MsgType_DescribeSegment MsgType = 251
+ // DEFINITION REQUESTS: INDEX
+ MsgType_CreateIndex MsgType = 300
+ MsgType_DescribeIndex MsgType = 301
+ MsgType_DropIndex MsgType = 302
+ // MANIPULATION REQUESTS
+ MsgType_Insert MsgType = 400
+ MsgType_Delete MsgType = 401
+ MsgType_Flush MsgType = 402
+ // QUERY
+ MsgType_Search MsgType = 500
+ MsgType_SearchResult MsgType = 501
+ MsgType_GetIndexState MsgType = 502
+ MsgType_GetCollectionStatistics MsgType = 503
+ MsgType_GetPartitionStatistics MsgType = 504
+ // DATA SERVICE
+ MsgType_SegmentInfo MsgType = 600
+ // SYSTEM CONTROL
+ MsgType_TimeTick MsgType = 1200
+ MsgType_QueryNodeStats MsgType = 1201
+ MsgType_LoadIndex MsgType = 1202
+ MsgType_RequestID MsgType = 1203
+ MsgType_RequestTSO MsgType = 1204
+ MsgType_AllocateSegment MsgType = 1205
+ MsgType_SegmentStatistics MsgType = 1206
+ MsgType_SegmentFlushDone MsgType = 1207
+)
type MsgPosition struct{
- ChannelName string
- MsgID string
- Timestamp uint64
+ ChannelName string
+ MsgID []byte
+ MsgGroup string
+ Timestamp uint64
}
type MsgPack struct {
- BeginTs Timestamp
- EndTs Timestamp
- Msgs []TsMsg
- StartPositions []*MsgPosition
- endPositions []*MsgPosition
+ BeginTs Timestamp
+ EndTs Timestamp
+ Msgs []TsMsg
+ StartPositions []*MsgPosition
+ EndPositions []*MsgPosition
}
type TsMsg interface {
- ID() UniqueID
- BeginTs() Timestamp
- EndTs() Timestamp
- Type() MsgType
- HashKeys() []uint32
- Marshal(TsMsg) (MarshalType, error)
- Unmarshal(MarshalType) (TsMsg, error)
- Position() *MsgPosition
- SetPosition(*MsgPosition)
+ TraceCtx() context.Context
+ SetTraceCtx(ctx context.Context)
+ ID() UniqueID
+ BeginTs() Timestamp
+ EndTs() Timestamp
+ Type() MsgType
+ HashKeys() []uint32
+ Marshal(TsMsg) (MarshalType, error)
+ Unmarshal(MarshalType) (TsMsg, error)
+ Position() *MsgPosition
+ SetPosition(*MsgPosition)
}
-
-type RepackFunc(msgs []TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, error)
+type RepackFunc func(msgs []TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, error)
```
@@ -166,12 +168,12 @@ type RepackFunc(msgs []TsMsg, hashKeys [][]int32) (map[int32]*MsgPack, error)
type UnmarshalFunc func(interface{}) (TsMsg, error)
type UnmarshalDispatcher interface {
- Unmarshal(input interface{}, msgType commonpb.MsgType) (TsMsg, error)
- AddMsgTemplate(msgType commonpb.MsgType, unmarshalFunc UnmarshalFunc)
+ Unmarshal(input interface{}, msgType commonpb.MsgType) (TsMsg, error)
+ AddMsgTemplate(msgType commonpb.MsgType, unmarshalFunc UnmarshalFunc)
}
type UnmarshalDispatcherFactory interface {
- NewUnmarshalDispatcher() *UnmarshalDispatcher
+ NewUnmarshalDispatcher() *UnmarshalDispatcher
}
// Proto & Mem Implementation
@@ -191,23 +193,23 @@ func (mudf *MemUDFactory) NewUnmarshalDispatcher() *UnmarshalDispatcher
// Interface
type MsgStream interface {
- Start()
- Close()
- Chan() <-chan *MsgPack
- AsProducer(channels []string)
- AsConsumer(channels []string, subName string)
- SetRepackFunc(repackFunc RepackFunc)
-
- Produce(context.Context, *MsgPack) error
- Broadcast(context.Context, *MsgPack) error
- Consume() (*MsgPack, context.Context)
- Seek(offset *MsgPosition) error
+ Start()
+ Close()
+ Chan() <-chan *MsgPack
+ AsProducer(channels []string)
+ AsConsumer(channels []string, subName string)
+ SetRepackFunc(repackFunc RepackFunc)
+
+ Produce(context.Context, *MsgPack) error
+ Broadcast(context.Context, *MsgPack) error
+ Consume() (*MsgPack, context.Context)
+ Seek(offset *MsgPosition) error
}
type MsgStreamFactory interface {
- SetParams(params map[string]interface{}) error
- NewMsgStream(ctx context.Context) (MsgStream, error)
- NewTtMsgStream(ctx context.Context) (MsgStream, error)
+ SetParams(params map[string]interface{}) error
+ NewMsgStream(ctx context.Context) (MsgStream, error)
+ NewTtMsgStream(ctx context.Context) (MsgStream, error)
}
//TODO
@@ -229,21 +231,21 @@ func (rmsf *RmqMsgStreamFactory) NewTtMsgStream() *MsgStream
// PulsarMsgStream
type PulsarMsgStream struct {
- ctx context.Context
- client pulsar.Client
- producers []Producer
- consumers []Consumer
- consumerChannels []string
- repackFunc RepackFunc
- unmarshal UnmarshalDispatcher
- receiveBuf chan *MsgPack
- wait *sync.WaitGroup
- streamCancel func()
- pulsarBufSize int64
- consumerLock *sync.Mutex
- consumerReflects []reflect.SelectCase
-
- scMap *sync.Map
+ ctx context.Context
+ client pulsar.Client
+ producers []Producer
+ consumers []Consumer
+ consumerChannels []string
+ repackFunc RepackFunc
+ unmarshal UnmarshalDispatcher
+ receiveBuf chan *MsgPack
+ wait *sync.WaitGroup
+ streamCancel func()
+ pulsarBufSize int64
+ consumerLock *sync.Mutex
+ consumerReflects []reflect.SelectCase
+
+ scMap *sync.Map
}
func (ms *PulsarMsgStream) Start() error
@@ -260,14 +262,14 @@ func NewPulsarMsgStream(ctx context.Context, pulsarAddr string, bufferSize int64
type PulsarTtMsgStream struct {
- client *pulsar.Client
- repackFunc RepackFunc
- producers []*pulsar.Producer
- consumers []*pulsar.Consumer
- unmarshal *UnmarshalDispatcher
- inputBuf []*TsMsg
- unsolvedBuf []*TsMsg
- msgPacks []*MsgPack
+ client *pulsar.Client
+ repackFunc RepackFunc
+ producers []*pulsar.Producer
+ consumers []*pulsar.Consumer
+ unmarshal *UnmarshalDispatcher
+ inputBuf []*TsMsg
+ unsolvedBuf []*TsMsg
+ msgPacks []*MsgPack
}
func (ms *PulsarTtMsgStream) Start() error
@@ -285,12 +287,12 @@ func NewPulsarTtMsgStream(ctx context.Context, pulsarAddr string, bufferSize int
// RmqMsgStream
type RmqMsgStream struct {
- client *rockermq.RocksMQ
- repackFunc RepackFunc
- producers []string
- consumers []string
- subName string
- unmarshal *UnmarshalDispatcher
+ client *rockermq.RocksMQ
+ repackFunc RepackFunc
+ producers []string
+ consumers []string
+ subName string
+ unmarshal *UnmarshalDispatcher
}
func (ms *RmqMsgStream) Start() error
@@ -306,12 +308,12 @@ func (ms *RmqMsgStream) SetRepackFunc(repackFunc RepackFunc)
func NewRmqMsgStream(ctx context.Context) *RmqMsgStream
type RmqTtMsgStream struct {
- client *rockermq.RocksMQ
- repackFunc RepackFunc
- producers []string
- consumers []string
- subName string
- unmarshal *UnmarshalDispatcher
+ client *rockermq.RocksMQ
+ repackFunc RepackFunc
+ producers []string
+ consumers []string
+ subName string
+ unmarshal *UnmarshalDispatcher
}
func (ms *RmqTtMsgStream) Start() error
@@ -339,13 +341,13 @@ RocksMQ is a RocksDB-based messaging/streaming library.
// All the following UniqueIDs are 64-bit integer, which is combined with timestamp and increasing number
type ProducerMessage struct {
- payload []byte
-}
+ payload []byte
+}
type ConsumerMessage struct {
- msgID UniqueID
- payload []byte
-}
+ msgID UniqueID
+ payload []byte
+}
type IDAllocator interface {
Alloc(count uint32) (UniqueID, UniqueID, error)
@@ -355,11 +357,11 @@ type IDAllocator interface {
// Every collection has its RocksMQ
type RocksMQ struct {
- store *gorocksdb.DB
- kv kv.Base
+ store *gorocksdb.DB
+ kv kv.Base
idAllocator IDAllocator
- produceMu sync.Mutex
- consumeMu sync.Mutex
+ produceMu sync.Mutex
+ consumeMu sync.Mutex
}
func (rmq *RocksMQ) CreateChannel(channelName string) error
@@ -394,4 +396,4 @@ func NewRocksMQ(name string, idAllocator IDAllocator) (*RocksMQ, error)
```go
"$(channel_name)/$(unique_id)", []byte
-```
\ No newline at end of file
+```
diff --git a/docs/developer_guides/chap05_proxy.md b/docs/developer_guides/chap05_proxy.md
index b328c9801d..4d936a60ca 100644
--- a/docs/developer_guides/chap05_proxy.md
+++ b/docs/developer_guides/chap05_proxy.md
@@ -10,11 +10,11 @@
```go
type ProxyService interface {
- Component
- Service
- RegisterLink(ctx context.Context) (*milvuspb.RegisterLinkResponse, error)
- RegisterNode(ctx context.Context, request *proxypb.RegisterNodeRequest) (*proxypb.RegisterNodeResponse, error)
- InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
+ Component
+ TimeTickProvider
+
+ RegisterNode(ctx context.Context, request *proxypb.RegisterNodeRequest) (*proxypb.RegisterNodeResponse, error)
+ InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
}
```
@@ -23,24 +23,10 @@ type ProxyService interface {
```go
type MsgBase struct {
- MsgType MsgType
- MsgID UniqueID
- Timestamp uint64
- SourceID UniqueID
-}
-```
-
-* *RegisterLink*
-
-```go
-type Address struct {
- Ip string
- Port int64
-}
-
-type RegisterLinkResponse struct {
- Address *commonpb.Address
- Status *commonpb.Status
+ MsgType MsgType
+ MsgID UniqueID
+ Timestamp uint64
+ SourceID UniqueID
}
```
@@ -48,24 +34,24 @@ type RegisterLinkResponse struct {
```go
type Address struct {
- Ip string
- Port int64
+ Ip string
+ Port int64
}
type RegisterNodeRequest struct {
- Base *commonpb.MsgBase
- Address string
- Port int64
+ Base *commonpb.MsgBase
+ Address string
+ Port int64
}
type InitParams struct {
- NodeID UniqueID
- StartParams []*commonpb.KeyValuePair
+ NodeID UniqueID
+ StartParams []*commonpb.KeyValuePair
}
type RegisterNodeResponse struct {
- InitParams *internalpb2.InitParams
- Status *commonpb.Status
+ InitParams *internalpb.InitParams
+ Status *commonpb.Status
}
```
@@ -73,56 +59,73 @@ type RegisterNodeResponse struct {
```go
type InvalidateCollMetaCacheRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
}
```
-#### 6.0 Proxy Node Interface
+#### 6.1 Proxy Node Interface
```go
type ProxyNode interface {
- Service
-
- InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
-
- CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
- DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
- HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
- LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error)
- ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error)
- DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
- GetCollectionStatistics(ctx context.Context, request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error)
- ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
-
- CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
- DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
- HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
- LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitonRequest) (*commonpb.Status, error)
- ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionRequest) (*commonpb.Status, error)
- GetPartitionStatistics(ctx context.Context, request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error)
- ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
-
- CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
- DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
- GetIndexState(ctx context.Context, request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error)
- DropIndex(ctx context.Context, request *milvuspb.DropIndexRequest) (*commonpb.Status, error)
-
- Insert(ctx context.Context, request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error)
- Search(ctx context.Context, request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error)
- Flush(ctx context.Context, request *milvuspb.FlushRequest) (*commonpb.Status, error)
-
- GetDdChannel(ctx context.Context, request *commonpb.Empty) (*milvuspb.StringResponse, error)
-
- GetQuerySegmentInfo(ctx context.Context, req *milvuspb.QuerySegmentInfoRequest) (*milvuspb.QuerySegmentInfoResponse, error)
- GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.PersistentSegmentInfoRequest) (*milvuspb.PersistentSegmentInfoResponse, error)
+ Component
+
+ InvalidateCollectionMetaCache(ctx context.Context, request *proxypb.InvalidateCollMetaCacheRequest) (*commonpb.Status, error)
}
```
+* *InvalidateCollectionMetaCache*
+```go
+type InvalidateCollMetaCacheRequest struct {
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+}
+```
+
+#### 6.2 Milvus Service Interface
+
+ProxyNode also implements the Milvus Service interface to receive client gRPC calls.
+
+```go
+type MilvusService interface {
+ CreateCollection(ctx context.Context, request *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
+ DropCollection(ctx context.Context, request *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
+ HasCollection(ctx context.Context, request *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
+ LoadCollection(ctx context.Context, request *milvuspb.LoadCollectionRequest) (*commonpb.Status, error)
+ ReleaseCollection(ctx context.Context, request *milvuspb.ReleaseCollectionRequest) (*commonpb.Status, error)
+ DescribeCollection(ctx context.Context, request *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
+ GetCollectionStatistics(ctx context.Context, request *milvuspb.CollectionStatsRequest) (*milvuspb.CollectionStatsResponse, error)
+ ShowCollections(ctx context.Context, request *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
+
+ CreatePartition(ctx context.Context, request *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
+ DropPartition(ctx context.Context, request *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
+ HasPartition(ctx context.Context, request *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
+ LoadPartitions(ctx context.Context, request *milvuspb.LoadPartitonRequest) (*commonpb.Status, error)
+ ReleasePartitions(ctx context.Context, request *milvuspb.ReleasePartitionRequest) (*commonpb.Status, error)
+ GetPartitionStatistics(ctx context.Context, request *milvuspb.PartitionStatsRequest) (*milvuspb.PartitionStatsResponse, error)
+ ShowPartitions(ctx context.Context, request *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
+
+ CreateIndex(ctx context.Context, request *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
+ DescribeIndex(ctx context.Context, request *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
+ GetIndexState(ctx context.Context, request *milvuspb.IndexStateRequest) (*milvuspb.IndexStateResponse, error)
+ DropIndex(ctx context.Context, request *milvuspb.DropIndexRequest) (*commonpb.Status, error)
+
+ Insert(ctx context.Context, request *milvuspb.InsertRequest) (*milvuspb.InsertResponse, error)
+ Search(ctx context.Context, request *milvuspb.SearchRequest) (*milvuspb.SearchResults, error)
+ Flush(ctx context.Context, request *milvuspb.FlushRequest) (*commonpb.Status, error)
+
+ GetDdChannel(ctx context.Context, request *commonpb.Empty) (*milvuspb.StringResponse, error)
+
+ GetQuerySegmentInfo(ctx context.Context, req *milvuspb.QuerySegmentInfoRequest) (*milvuspb.QuerySegmentInfoResponse, error)
+ GetPersistentSegmentInfo(ctx context.Context, req *milvuspb.PersistentSegmentInfoRequest) (*milvuspb.PersistentSegmentInfoResponse, error)
+}
+
+```
* *CreateCollection*
@@ -140,9 +143,9 @@ See *Master API* for detailed definitions.
```go
type LoadCollectionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
}
```
@@ -150,9 +153,9 @@ type LoadCollectionRequest struct {
```go
type ReleaseCollectionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
}
```
@@ -184,18 +187,18 @@ See *Master API* for detailed definitions.
```go
type CollectionSchema struct {
- Name string
- Description string
- AutoID bool
- Fields []*FieldSchema
+ Name string
+ Description string
+ AutoID bool
+ Fields []*FieldSchema
}
type LoadPartitonRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
- PartitionIDs []UniqueID
- Schema *schemapb.CollectionSchema
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
+ PartitionIDs []UniqueID
+ Schema *schemapb.CollectionSchema
}
```
@@ -203,10 +206,10 @@ type LoadPartitonRequest struct {
```go
type ReleasePartitionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- PartitionNames []string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ PartitionNames []string
}
```
@@ -234,18 +237,18 @@ See *Master API* for detailed definitions.
```go
type InsertRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- PartitionName string
- RowData []Blob
- HashKeys []uint32
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ PartitionName string
+ RowData []Blob
+ HashKeys []uint32
}
type InsertResponse struct {
- Status *commonpb.Status
- RowIDBegin int64
- RowIDEnd int64
+ Status *commonpb.Status
+ RowIDBegin int64
+ RowIDEnd int64
}
```
@@ -253,17 +256,17 @@ type InsertResponse struct {
```go
type SearchRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- PartitionNames []string
- Dsl string
- PlaceholderGroup []byte
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ PartitionNames []string
+ Dsl string
+ PlaceholderGroup []byte
}
type SearchResults struct {
- Status commonpb.Status
- Hits byte
+ Status commonpb.Status
+ Hits byte
}
```
@@ -271,9 +274,9 @@ type SearchResults struct {
```go
type FlushRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
}
```
@@ -282,35 +285,35 @@ type FlushRequest struct {
```go
type PersistentSegmentInfoRequest struct{
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
}
type SegmentState int32
const (
- SegmentState_SegmentNone SegmentState = 0
- SegmentState_SegmentNotExist SegmentState = 1
- SegmentState_SegmentGrowing SegmentState = 2
- SegmentState_SegmentSealed SegmentState = 3
- SegmentState_SegmentFlushed SegmentState = 4
+ SegmentState_SegmentNone SegmentState = 0
+ SegmentState_SegmentNotExist SegmentState = 1
+ SegmentState_SegmentGrowing SegmentState = 2
+ SegmentState_SegmentSealed SegmentState = 3
+ SegmentState_SegmentFlushed SegmentState = 4
)
type PersistentSegmentInfo struct {
- SegmentID UniqueID
- CollectionID UniqueID
- PartitionID UniqueID
- OpenTime Timestamp
- SealedTime Timestamp
- FlushedTime Timestamp
- NumRows int64
- MemSize int64
- State SegmentState
+ SegmentID UniqueID
+ CollectionID UniqueID
+ PartitionID UniqueID
+ OpenTime Timestamp
+ SealedTime Timestamp
+ FlushedTime Timestamp
+ NumRows int64
+ MemSize int64
+ State SegmentState
}
type PersistentSegmentInfoResponse struct{
- infos []*milvuspb.SegmentInfo
+ infos []*milvuspb.SegmentInfo
}
```
@@ -319,36 +322,36 @@ type PersistentSegmentInfoResponse struct{
```go
type Proxy struct {
- ctx context.Context
- cancel func()
- wg sync.WaitGroup
-
- initParams *internalpb2.InitParams
- ip string
- port int
-
- stateCode internalpb2.StateCode
-
- masterClient MasterClient
- indexServiceClient IndexServiceClient
- dataServiceClient DataServiceClient
- proxyServiceClient ProxyServiceClient
- queryServiceClient QueryServiceClient
-
- sched *TaskScheduler
- tick *timeTick
-
- idAllocator *allocator.IDAllocator
- tsoAllocator *allocator.TimestampAllocator
- segAssigner *SegIDAssigner
-
- manipulationMsgStream msgstream.MsgStream
- queryMsgStream msgstream.MsgStream
- msFactory msgstream.Factory
-
- // Add callback functions at different stages
- startCallbacks []func()
- closeCallbacks []func()
+ ctx context.Context
+ cancel func()
+ wg sync.WaitGroup
+
+ initParams *internalpb.InitParams
+ ip string
+ port int
+
+ stateCode internalpb.StateCode
+
+ masterClient MasterClient
+ indexServiceClient IndexServiceClient
+ dataServiceClient DataServiceClient
+ proxyServiceClient ProxyServiceClient
+ queryServiceClient QueryServiceClient
+
+ sched *TaskScheduler
+ tick *timeTick
+
+ idAllocator *allocator.IDAllocator
+ tsoAllocator *allocator.TimestampAllocator
+ segAssigner *SegIDAssigner
+
+ manipulationMsgStream msgstream.MsgStream
+ queryMsgStream msgstream.MsgStream
+ msFactory msgstream.Factory
+
+ // Add callback functions at different stages
+ startCallbacks []func()
+ closeCallbacks []func()
}
func (node *NodeImpl) Init() error
@@ -371,56 +374,61 @@ func NewProxyNodeImpl(ctx context.Context, factory msgstream.Factory) (*NodeImpl
```go
type GlobalParamsTable struct {
- paramtable.BaseTable
-
- NetworkPort int
- IP string
- NetworkAddress string
-
- MasterAddress string
- PulsarAddress string
-
- QueryNodeNum int
- QueryNodeIDList []UniqueID
- ProxyID UniqueID
- TimeTickInterval time.Duration
- InsertChannelNames []string
- DeleteChannelNames []string
- K2SChannelNames []string
- SearchChannelNames []string
- SearchResultChannelNames []string
- ProxySubName string
- ProxyTimeTickChannelNames []string
- DataDefinitionChannelNames []string
- MsgStreamInsertBufSize int64
- MsgStreamSearchBufSize int64
- MsgStreamSearchResultBufSize int64
- MsgStreamSearchResultPulsarBufSize int64
- MsgStreamTimeTickBufSize int64
- MaxNameLength int64
- MaxFieldNum int64
- MaxDimension int64
- DefaultPartitionTag string
- DefaultIndexName string
+ paramtable.BaseTable
+
+ NetworkPort int
+ IP string
+ NetworkAddress string
+
+ MasterAddress string
+ PulsarAddress string
+
+ QueryNodeNum int
+ QueryNodeIDList []UniqueID
+ ProxyID UniqueID
+ TimeTickInterval time.Duration
+ InsertChannelNames []string
+ DeleteChannelNames []string
+ K2SChannelNames []string
+ SearchChannelNames []string
+ SearchResultChannelNames []string
+ ProxySubName string
+ ProxyTimeTickChannelNames []string
+ DataDefinitionChannelNames []string
+ MsgStreamInsertBufSize int64
+ MsgStreamSearchBufSize int64
+ MsgStreamSearchResultBufSize int64
+ MsgStreamSearchResultPulsarBufSize int64
+ MsgStreamTimeTickBufSize int64
+ MaxNameLength int64
+ MaxFieldNum int64
+ MaxDimension int64
+ DefaultPartitionTag string
+ DefaultIndexName string
}
var Params ParamTable
```
-
-
-
#### 6.2 Task
``` go
type task interface {
- Id() int64 // return ReqId
- PreExecute(ctx context.Context) error
- Execute(ctx context.Context) error
- PostExecute(ctx context.Context) error
- WaitToFinish() error
- Notify() error
+ TraceCtx() context.Context
+ ID() UniqueID // return ReqID
+ SetID(uid UniqueID) // set ReqID
+ Name() string
+ Type() commonpb.MsgType
+ BeginTs() Timestamp
+ EndTs() Timestamp
+ SetTs(ts Timestamp)
+ OnEnqueue() error
+ PreExecute(ctx context.Context) error
+ Execute(ctx context.Context) error
+ PostExecute(ctx context.Context) error
+ WaitToFinish() error
+ Notify(err error)
}
```
@@ -430,30 +438,30 @@ type task interface {
```go
type TaskQueue interface {
- utChan() <-chan int
- UTEmpty() bool
- utFull() bool
- addUnissuedTask(t task) error
- FrontUnissuedTask() task
- PopUnissuedTask() task
- AddActiveTask(t task)
- PopActiveTask(ts Timestamp) task
- getTaskByReqID(reqID UniqueID) task
- TaskDoneTest(ts Timestamp) bool
- Enqueue(t task) error
+ utChan() <-chan int
+ UTEmpty() bool
+ utFull() bool
+ addUnissuedTask(t task) error
+ FrontUnissuedTask() task
+ PopUnissuedTask() task
+ AddActiveTask(t task)
+ PopActiveTask(ts Timestamp) task
+ getTaskByReqID(reqID UniqueID) task
+ TaskDoneTest(ts Timestamp) bool
+ Enqueue(t task) error
}
type baseTaskQueue struct {
- unissuedTasks *list.List
- activeTasks map[Timestamp]task
- utLock sync.Mutex
- atLock sync.Mutex
-
- maxTaskNum int64
-
- utBufChan chan int
-
- sched *TaskScheduler
+ unissuedTasks *list.List
+ activeTasks map[Timestamp]task
+ utLock sync.Mutex
+ atLock sync.Mutex
+
+ maxTaskNum int64
+
+ utBufChan chan int
+
+ sched *TaskScheduler
}
```
@@ -467,8 +475,8 @@ type baseTaskQueue struct {
```go
type ddTaskQueue struct {
- baseTaskQueue
- lock sync.Mutex
+ baseTaskQueue
+ lock sync.Mutex
}
func (queue *ddTaskQueue) Enqueue(task *task) error
@@ -483,7 +491,7 @@ Data definition tasks (i.e. *CreateCollectionTask*) will be put into *DdTaskQueu
```go
type dmTaskQueue struct {
- baseTaskQueue
+ baseTaskQueue
}
func (queue *dmTaskQueue) Enqueue(task *task) error
@@ -500,7 +508,7 @@ If a *insertTask* is enqueued, *Enqueue(task \*task)* will set *Ts*, *ReqId*, *P
```go
type dqTaskQueue struct {
- baseTaskQueue
+ baseTaskQueue
}
func (queue *dqTaskQueue) Enqueue(task *task) error
@@ -515,18 +523,18 @@ Queries will be put into *DqTaskQueue*.
``` go
type taskScheduler struct {
- DdQueue TaskQueue
- DmQueue TaskQueue
- DqQueue TaskQueue
-
- idAllocator *allocator.IDAllocator
- tsoAllocator *allocator.TimestampAllocator
-
- wg sync.WaitGroup
- ctx context.Context
- cancel context.CancelFunc
-
- msFactory msgstream.Factory
+ DdQueue TaskQueue
+ DmQueue TaskQueue
+ DqQueue TaskQueue
+
+ idAllocator *allocator.IDAllocator
+ tsoAllocator *allocator.TimestampAllocator
+
+ wg sync.WaitGroup
+ ctx context.Context
+ cancel context.CancelFunc
+
+ msFactory msgstream.Factory
}
func (sched *taskScheduler) scheduleDdTask() *task
@@ -565,13 +573,13 @@ func (sched *taskScheduler) heartbeat()
// protobuf
message taskSchedulerHeartbeat {
- string id
- uint64 dd_queue_length
- uint64 dm_queue_length
- uint64 dq_queue_length
- uint64 num_dd_done
- uint64 num_dm_done
- uint64 num_dq_done
+ string id
+ uint64 dd_queue_length
+ uint64 dm_queue_length
+ uint64 dq_queue_length
+ uint64 num_dd_done
+ uint64 num_dm_done
+ uint64 num_dq_done
}
```
@@ -584,17 +592,17 @@ message taskSchedulerHeartbeat {
``` go
type timeTick struct {
- lastTick Timestamp
- currentTick Timestamp
- wallTick Timestamp
- tickStep Timestamp
- syncInterval Timestamp
-
- tsAllocator *TimestampAllocator
- scheduler *taskScheduler
- ttStream *MessageStream
-
- ctx context.Context
+ lastTick Timestamp
+ currentTick Timestamp
+ wallTick Timestamp
+ tickStep Timestamp
+ syncInterval Timestamp
+
+ tsAllocator *TimestampAllocator
+ scheduler *taskScheduler
+ ttStream *MessageStream
+
+ ctx context.Context
}
func (tt *timeTick) Start() error
diff --git a/docs/developer_guides/chap06_master.md b/docs/developer_guides/chap06_master.md
index 8344cf7b7b..f7919f77b8 100644
--- a/docs/developer_guides/chap06_master.md
+++ b/docs/developer_guides/chap06_master.md
@@ -7,42 +7,34 @@
#### 10.1 Master Interface
```go
-type Master interface {
- Service
- GetComponentStates(ctx context.Context) (*internalpb2.ComponentStates, error)
-
- //DDL request
- CreateCollection(ctx context.Context, in *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
- DropCollection(ctx context.Context, in *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
- HasCollection(ctx context.Context, in *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
- DescribeCollection(ctx context.Context, in *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
- ShowCollections(ctx context.Context, in *milvuspb.ShowCollectionRequest) (*milvuspb.ShowCollectionResponse, error)
- CreatePartition(ctx context.Context, in *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
- DropPartition(ctx context.Context, in *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
- HasPartition(ctx context.Context, in *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
- ShowPartitions(ctx context.Context, in *milvuspb.ShowPartitionRequest) (*milvuspb.ShowPartitionResponse, error)
-
- //index builder service
- CreateIndex(ctx context.Context, in *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
- DescribeIndex(ctx context.Context, in *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
- DropIndex(ctx context.Context, in *milvuspb.DropIndexRequest) (*commonpb.Status, error)
-
- //global timestamp allocator
- AllocTimestamp(ctx context.Context, in *masterpb.TsoRequest) (*masterpb.TsoResponse, error)
- AllocID(ctx context.Context, in *masterpb.IDRequest) (*masterpb.IDResponse, error)
-
- //receiver time tick from proxy service, and put it into this channel
- GetTimeTickChannel(ctx context.Context) (*milvuspb.StringResponse, error)
-
- //receive ddl from rpc and time tick from proxy service, and put them into this channel
- GetDdChannel(ctx context.Context) (*milvuspb.StringResponse, error)
-
- //just define a channel, not used currently
- GetStatisticsChannel(ctx context.Context) (*milvuspb.StringResponse, error)
-
- //segment
- DescribeSegment(ctx context.Context, in *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error)
- ShowSegments(ctx context.Context, in *milvuspb.ShowSegmentRequest) (*milvuspb.ShowSegmentResponse, error)
+type MasterService interface {
+ Component
+
+ //DDL request
+ CreateCollection(ctx context.Context, req *milvuspb.CreateCollectionRequest) (*commonpb.Status, error)
+ DropCollection(ctx context.Context, req *milvuspb.DropCollectionRequest) (*commonpb.Status, error)
+ HasCollection(ctx context.Context, req *milvuspb.HasCollectionRequest) (*milvuspb.BoolResponse, error)
+ DescribeCollection(ctx context.Context, req *milvuspb.DescribeCollectionRequest) (*milvuspb.DescribeCollectionResponse, error)
+ ShowCollections(ctx context.Context, req *milvuspb.ShowCollectionsRequest) (*milvuspb.ShowCollectionsResponse, error)
+ CreatePartition(ctx context.Context, req *milvuspb.CreatePartitionRequest) (*commonpb.Status, error)
+ DropPartition(ctx context.Context, req *milvuspb.DropPartitionRequest) (*commonpb.Status, error)
+ HasPartition(ctx context.Context, req *milvuspb.HasPartitionRequest) (*milvuspb.BoolResponse, error)
+ ShowPartitions(ctx context.Context, req *milvuspb.ShowPartitionsRequest) (*milvuspb.ShowPartitionsResponse, error)
+
+ //index builder service
+ CreateIndex(ctx context.Context, req *milvuspb.CreateIndexRequest) (*commonpb.Status, error)
+ DescribeIndex(ctx context.Context, req *milvuspb.DescribeIndexRequest) (*milvuspb.DescribeIndexResponse, error)
+ DropIndex(ctx context.Context, req *milvuspb.DropIndexRequest) (*commonpb.Status, error)
+
+ //global timestamp allocator
+ AllocTimestamp(ctx context.Context, req *masterpb.AllocTimestampRequest) (*masterpb.AllocTimestampResponse, error)
+ AllocID(ctx context.Context, req *masterpb.AllocIDRequest) (*masterpb.AllocIDResponse, error)
+
+ //segment
+ DescribeSegment(ctx context.Context, req *milvuspb.DescribeSegmentRequest) (*milvuspb.DescribeSegmentResponse, error)
+ ShowSegments(ctx context.Context, req *milvuspb.ShowSegmentsRequest) (*milvuspb.ShowSegmentsResponse, error)
+
+ GetDdChannel(ctx context.Context) (*milvuspb.StringResponse, error)
}
```
@@ -52,10 +44,10 @@ type Master interface {
```go
type MsgBase struct {
- MsgType MsgType
- MsgID UniqueID
- Timestamp Timestamp
- SourceID UniqueID
+ MsgType MsgType
+ MsgID UniqueID
+ Timestamp Timestamp
+ SourceID UniqueID
}
```
@@ -63,10 +55,10 @@ type MsgBase struct {
```go
type CreateCollectionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- Schema []byte
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ Schema []byte
}
```
@@ -74,9 +66,9 @@ type CreateCollectionRequest struct {
```go
type DropCollectionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
}
```
@@ -84,9 +76,9 @@ type DropCollectionRequest struct {
```go
type HasCollectionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
}
```
@@ -94,23 +86,23 @@ type HasCollectionRequest struct {
```go
type DescribeCollectionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- CollectionID UniqueID
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ CollectionID UniqueID
}
type CollectionSchema struct {
- Name string
- Description string
- AutoID bool
- Fields []*FieldSchema
+ Name string
+ Description string
+ AutoID bool
+ Fields []*FieldSchema
}
type DescribeCollectionResponse struct {
- Status *commonpb.Status
- Schema *schemapb.CollectionSchema
- CollectionID int64
+ Status *commonpb.Status
+ Schema *schemapb.CollectionSchema
+ CollectionID int64
}
```
@@ -118,13 +110,13 @@ type DescribeCollectionResponse struct {
```go
type ShowCollectionRequest struct {
- Base *commonpb.MsgBase
- DbName string
+ Base *commonpb.MsgBase
+ DbName string
}
type ShowCollectionResponse struct {
- Status *commonpb.Status
- CollectionNames []string
+ Status *commonpb.Status
+ CollectionNames []string
}
```
@@ -132,10 +124,10 @@ type ShowCollectionResponse struct {
```go
type CreatePartitionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- PartitionName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ PartitionName string
}
```
@@ -143,10 +135,10 @@ type CreatePartitionRequest struct {
```go
type DropPartitionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- PartitionName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ PartitionName string
}
```
@@ -154,10 +146,10 @@ type DropPartitionRequest struct {
```go
type HasPartitionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- PartitionName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ PartitionName string
}
```
@@ -165,47 +157,48 @@ type HasPartitionRequest struct {
```go
type ShowPartitionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- CollectionID UniqueID
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ CollectionID UniqueID
}
type ShowPartitionResponse struct {
- Status *commonpb.Status
- PartitionNames []string
- PartitionIDs []UniqueID
+ Status *commonpb.Status
+ PartitionNames []string
+ PartitionIDs []UniqueID
}
```
-* DescribeSegment
+* *DescribeSegment*
```go
type DescribeSegmentRequest struct {
- Base *commonpb.MsgBase
- CollectionID UniqueID
- SegmentID UniqueID
+ Base *commonpb.MsgBase
+ CollectionID UniqueID
+ SegmentID UniqueID
}
type DescribeSegmentResponse struct {
- Status *commonpb.Status
- IndexID UniqueID
- BuildID UniqueID
+ Status *commonpb.Status
+ IndexID UniqueID
+ BuildID UniqueID
+ EnableIndex bool
}
```
-* ShowSegments
+* *ShowSegments*
```go
-type ShowSegmentRequest struct {
- Base *commonpb.MsgBase
- CollectionID UniqueID
- PartitionID UniqueID
+type ShowSegmentsRequest struct {
+ Base *commonpb.MsgBase
+ CollectionID UniqueID
+ PartitionID UniqueID
}
-type ShowSegmentResponse struct {
- Status *commonpb.Status
- SegmentIDs []UniqueID
+type ShowSegmentsResponse struct {
+ Status *commonpb.Status
+ SegmentIDs []UniqueID
}
```
@@ -213,11 +206,11 @@ type ShowSegmentResponse struct {
```go
type CreateIndexRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- FieldName string
- ExtraParams []*commonpb.KeyValuePair
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ FieldName string
+ ExtraParams []*commonpb.KeyValuePair
}
```
@@ -225,22 +218,22 @@ type CreateIndexRequest struct {
```go
type DescribeIndexRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- FieldName string
- IndexName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ FieldName string
+ IndexName string
}
type IndexDescription struct {
- IndexName string
- IndexID UniqueID
- params []*commonpb.KeyValuePair
+ IndexName string
+ IndexID UniqueID
+ Params []*commonpb.KeyValuePair
}
type DescribeIndexResponse struct {
- Status *commonpb.Status
- IndexDescriptions []*IndexDescription
+ Status *commonpb.Status
+ IndexDescriptions []*IndexDescription
}
```
@@ -248,41 +241,42 @@ type DescribeIndexResponse struct {
```go
type DropIndexRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- FieldName string
- IndexName string
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ FieldName string
+ IndexName string
}
```
* *AllocTimestamp*
```go
-type TsoRequest struct {
- Base *commonpb.MsgBase
- Count uint32
+
+type BaseRequest struct {
+ Done chan error
+ Valid bool
}
-type TsoResponse struct {
- Status *commonpb.Status
- Timestamp uint64
- Count uint32
+type TSORequest struct {
+ BaseRequest
+ timestamp Timestamp
+ count uint32
}
```
* *AllocID*
```go
-type IDRequest struct {
- Base *commonpb.MsgBase
- Count uint32
+type BaseRequest struct {
+ Done chan error
+ Valid bool
}
-type IDResponse struct {
- Status *commonpb.Status
- ID UniqueID
- Count uint32
+type IDRequest struct {
+ BaseRequest
+ id UniqueID
+ count uint32
}
```
@@ -290,150 +284,146 @@ type IDResponse struct {
#### 10.2 Dd (Data definitions) Channel
-* *CreateCollection*
+* *CreateCollectionMsg*
```go
+
type CreateCollectionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
-
- DbID UniqueID
- CollectionID UniqueID
- Schema []byte
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ Schema []byte
+}
+
+type CreateCollectionMsg struct {
+ BaseMsg
+ CreateCollectionRequest
}
```
-* *DropCollection*
+* *DropCollectionMsg*
```go
type DropCollectionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- DbID UniqueID
- CollectionID UniqueID
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+}
+
+type DropCollectionMsg struct {
+ BaseMsg
+ DropCollectionRequest
}
```
-* *CreatePartition*
+* *CreatePartitionMsg*
```go
type CreatePartitionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- PartitionName string
- DbID UniqueID
- CollectionID UniqueID
- PartitionID UniqueID
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ PartitionName string
+}
+
+type CreatePartitionMsg struct {
+ BaseMsg
+ CreatePartitionRequest
}
```
-* *DropPartition*
+* *DropPartitionMsg*
```go
type DropPartitionRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- PartitionName string
- DbID UniqueID
- CollectionID UniqueID
- PartitionID UniqueID
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ PartitionName string
+ DbID int64
+ CollectionID int64
+ PartitionID int64
+}
+
+type DropPartitionMsg struct {
+ BaseMsg
+ DropPartitionRequest
}
```
-* *CreateIndex*
-
-```go
-type CreateIndexRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- FieldName string
- DbID UniqueID
- CollectionID UniqueID
- FieldID UniqueID
- ExtraParams []*commonpb.KeyValuePair
-}
-```
-
-
-
#### 10.2 Master Instance
```go
type Master interface {
- MetaTable *metaTable
- //id allocator
- idAllocator *allocator.GlobalIDAllocator
- //tso allocator
- tsoAllocator *tso.GlobalTSOAllocator
-
- //inner members
- ctx context.Context
- cancel context.CancelFunc
- etcdCli *clientv3.Client
- kvBase *etcdkv.EtcdKV
- metaKV *etcdkv.EtcdKV
-
- //setMsgStreams, receive time tick from proxy service time tick channel
- ProxyTimeTickChan chan typeutil.Timestamp
-
- //setMsgStreams, send time tick into dd channel and time tick channel
- SendTimeTick func(t typeutil.Timestamp) error
-
- //setMsgStreams, send create collection into dd channel
- DdCreateCollectionReq func(req *internalpb2.CreateCollectionRequest) error
-
- //setMsgStreams, send drop collection into dd channel, and notify the proxy to delete this collection
- DdDropCollectionReq func(req *internalpb2.DropCollectionRequest) error
-
- //setMsgStreams, send create partition into dd channel
- DdCreatePartitionReq func(req *internalpb2.CreatePartitionRequest) error
-
- //setMsgStreams, send drop partition into dd channel
- DdDropPartitionReq func(req *internalpb2.DropPartitionRequest) error
-
- //setMsgStreams segment channel, receive segment info from data service, if master create segment
- DataServiceSegmentChan chan *datapb.SegmentInfo
-
- //setMsgStreams ,if segment flush completed, data node would put segment id into msg stream
- DataNodeSegmentFlushCompletedChan chan typeutil.UniqueID
-
- //get binlog file path from data service,
- GetBinlogFilePathsFromDataServiceReq func(segID typeutil.UniqueID, fieldID typeutil.UniqueID) ([]string, error)
-
- //call index builder's client to build index, return build id
- BuildIndexReq func(binlog []string, typeParams []*commonpb.KeyValuePair, indexParams []*commonpb.KeyValuePair, indexID typeutil.UniqueID, indexName string) (typeutil.UniqueID, error)
- DropIndexReq func(indexID typeutil.UniqueID) error
-
- //proxy service interface, notify proxy service to drop collection
- InvalidateCollectionMetaCache func(ts typeutil.Timestamp, dbName string, collectionName string) error
-
- //query service interface, notify query service to release collection
- ReleaseCollection func(ts typeutil.Timestamp, dbID typeutil.UniqueID, collectionID typeutil.UniqueID) error
-
- // put create index task into this chan
- indexTaskQueue chan *CreateIndexTask
-
- //dd request scheduler
- ddReqQueue chan reqTask //dd request will be push into this chan
- lastDdTimeStamp typeutil.Timestamp
-
- //time tick loop
- lastTimeTick typeutil.Timestamp
-
- //states code
- stateCode atomic.Value
-
- //call once
- initOnce sync.Once
- startOnce sync.Once
- //isInit atomic.Value
-
- msFactory ms.Factory
+ MetaTable *metaTable
+ //id allocator
+ idAllocator *allocator.GlobalIDAllocator
+ //tso allocator
+ tsoAllocator *tso.GlobalTSOAllocator
+
+ //inner members
+ ctx context.Context
+ cancel context.CancelFunc
+ etcdCli *clientv3.Client
+ kvBase *etcdkv.EtcdKV
+ metaKV *etcdkv.EtcdKV
+
+ //setMsgStreams, receive time tick from proxy service time tick channel
+ ProxyTimeTickChan chan typeutil.Timestamp
+
+ //setMsgStreams, send time tick into dd channel and time tick channel
+ SendTimeTick func(t typeutil.Timestamp) error
+
+ //setMsgStreams, send create collection into dd channel
+ DdCreateCollectionReq func(req *internalpb.CreateCollectionRequest) error
+
+ //setMsgStreams, send drop collection into dd channel, and notify the proxy to delete this collection
+ DdDropCollectionReq func(req *internalpb.DropCollectionRequest) error
+
+ //setMsgStreams, send create partition into dd channel
+ DdCreatePartitionReq func(req *internalpb.CreatePartitionRequest) error
+
+ //setMsgStreams, send drop partition into dd channel
+ DdDropPartitionReq func(req *internalpb.DropPartitionRequest) error
+
+ //setMsgStreams segment channel, receive segment info from data service, if master create segment
+ DataServiceSegmentChan chan *datapb.SegmentInfo
+
+	//setMsgStreams, if segment flush completed, data node would put segment id into msg stream
+ DataNodeSegmentFlushCompletedChan chan typeutil.UniqueID
+
+ //get binlog file path from data service,
+ GetBinlogFilePathsFromDataServiceReq func(segID typeutil.UniqueID, fieldID typeutil.UniqueID) ([]string, error)
+
+ //call index builder's client to build index, return build id
+ BuildIndexReq func(binlog []string, typeParams []*commonpb.KeyValuePair, indexParams []*commonpb.KeyValuePair, indexID typeutil.UniqueID, indexName string) (typeutil.UniqueID, error)
+ DropIndexReq func(indexID typeutil.UniqueID) error
+
+ //proxy service interface, notify proxy service to drop collection
+ InvalidateCollectionMetaCache func(ts typeutil.Timestamp, dbName string, collectionName string) error
+
+ //query service interface, notify query service to release collection
+ ReleaseCollection func(ts typeutil.Timestamp, dbID typeutil.UniqueID, collectionID typeutil.UniqueID) error
+
+ // put create index task into this chan
+ indexTaskQueue chan *CreateIndexTask
+
+ //dd request scheduler
+	ddReqQueue chan reqTask //dd request will be pushed into this chan
+ lastDdTimeStamp typeutil.Timestamp
+
+ //time tick loop
+ lastTimeTick typeutil.Timestamp
+
+ //states code
+ stateCode atomic.Value
+
+ //call once
+ initOnce sync.Once
+ startOnce sync.Once
+ //isInit atomic.Value
+
+ msFactory ms.Factory
}
```
@@ -445,44 +435,46 @@ type Master interface {
Master receives data definition requests via grpc. Each request (described by a proto) will be wrapped as a task for further scheduling. The task interface is
```go
-type task interface {
- Type() commonpb.MsgType
- Ts() (typeutil.Timestamp, error)
- IgnoreTimeStamp() bool
- Execute() error
- WaitToFinish() error
- Notify(err error)
+type reqTask interface {
+ Ctx() context.Context
+ Type() commonpb.MsgType
+ Ts() (typeutil.Timestamp, error)
+ IgnoreTimeStamp() bool
+ Execute(ctx context.Context) error
+ WaitToFinish() error
+ Notify(err error)
}
```
-A task example is as follows. In this example, we wrap a CreateCollectionRequest (a proto) as a createCollectionTask. The wrapper need to implement task interfaces.
+A task example is as follows. In this example, we wrap a CreateCollectionRequest (a proto) as a createCollectionTask. The wrapper needs to implement task interfaces.
``` go
-type createCollectionTask struct {
- req *CreateCollectionRequest
- cv int chan
+type CreateCollectionReqTask struct {
+ baseReqTask
+ Req *milvuspb.CreateCollectionRequest
}
// Task interfaces
+func (task *createCollectionTask) Ctx() context.Context
func (task *createCollectionTask) Type() ReqType
func (task *createCollectionTask) Ts() Timestamp
func (task *createCollectionTask) IgnoreTimeStamp() bool
func (task *createCollectionTask) Execute() error
-func (task *createCollectionTask) Notify() error
func (task *createCollectionTask) WaitToFinish() error
+func (task *createCollectionTask) Notify() error
```
-// TODO
+// TODO remove?
###### 10.2.3 Scheduler
```go
type ddRequestScheduler struct {
- reqQueue *task chan
- ddStream *MsgStream
+ reqQueue *task chan
+ ddStream *MsgStream
}
func (rs *ddRequestScheduler) Enqueue(task *task) error
@@ -494,13 +486,13 @@ In most cases, a data definition task need to
* update system's meta data (via $metaTable$),
* and synchronize the data definition request to other related system components so that the quest can take effect system wide.
-Master
+Master
-//TODO
+//TODO remove?
#### 10.4 Meta Table
###### 10.4.1 Meta
@@ -566,7 +558,7 @@ message SegmentMeta {
Note that *tenantId*, *proxyId*, *collectionId*, *segmentId* are unique strings converted from int64.
-*tenantMeta*, *proxyMeta*, *collectionMeta*, *segmentMeta* are serialized protos.
+*tenantMeta*, *proxyMeta*, *collectionMeta*, *segmentMeta* are serialized protos.
@@ -574,20 +566,20 @@ Note that *tenantId*, *proxyId*, *collectionId*, *segmentId* are unique strings
```go
type metaTable struct {
- client kv.TxnBase // client of a reliable kv service, i.e. etcd client
- tenantID2Meta map[typeutil.UniqueID]pb.TenantMeta // tenant id to tenant meta
- proxyID2Meta map[typeutil.UniqueID]pb.ProxyMeta // proxy id to proxy meta
- collID2Meta map[typeutil.UniqueID]pb.CollectionInfo // collection id to collection meta,
- collName2ID map[string]typeutil.UniqueID // collection name to collection id
- partitionID2Meta map[typeutil.UniqueID]pb.PartitionInfo // partition id -> partition meta
- segID2IndexMeta map[typeutil.UniqueID]*map[typeutil.UniqueID]pb.SegmentIndexInfo // segment id -> index id -> segment index meta
- indexID2Meta map[typeutil.UniqueID]pb.IndexInfo // index id ->index meta
- segID2CollID map[typeutil.UniqueID]typeutil.UniqueID // segment id -> collection id
- partitionID2CollID map[typeutil.UniqueID]typeutil.UniqueID // partition id -> collection id
-
- tenantLock sync.RWMutex
- proxyLock sync.RWMutex
- ddLock sync.RWMutex
+ client kv.TxnBase // client of a reliable kv service, i.e. etcd client
+ tenantID2Meta map[typeutil.UniqueID]pb.TenantMeta // tenant id to tenant meta
+ proxyID2Meta map[typeutil.UniqueID]pb.ProxyMeta // proxy id to proxy meta
+ collID2Meta map[typeutil.UniqueID]pb.CollectionInfo // collection id to collection meta,
+ collName2ID map[string]typeutil.UniqueID // collection name to collection id
+ partitionID2Meta map[typeutil.UniqueID]pb.PartitionInfo // partition id -> partition meta
+ segID2IndexMeta map[typeutil.UniqueID]*map[typeutil.UniqueID]pb.SegmentIndexInfo // segment id -> index id -> segment index meta
+ indexID2Meta map[typeutil.UniqueID]pb.IndexInfo // index id ->index meta
+ segID2CollID map[typeutil.UniqueID]typeutil.UniqueID // segment id -> collection id
+ partitionID2CollID map[typeutil.UniqueID]typeutil.UniqueID // partition id -> collection id
+
+ tenantLock sync.RWMutex
+ proxyLock sync.RWMutex
+ ddLock sync.RWMutex
}
func (mt *metaTable) AddCollection(coll *pb.CollectionInfo, part *pb.PartitionInfo, idx []*pb.IndexInfo) error
@@ -636,12 +628,12 @@ func NewMetaTable(kv kv.TxnBase) (*metaTable, error)
```go
type softTimeTickBarrier struct {
- peer2LastTt map[UniqueID]Timestamp
- minTtInterval Timestamp
- lastTt int64
- outTt chan Timestamp
- ttStream ms.MsgStream
- ctx context.Context
+ peer2LastTt map[UniqueID]Timestamp
+ minTtInterval Timestamp
+ lastTt int64
+ outTt chan Timestamp
+ ttStream ms.MsgStream
+ ctx context.Context
}
func (ttBarrier *softTimeTickBarrier) GetTimeTick() (Timestamp,error)
@@ -659,13 +651,13 @@ func NewSoftTimeTickBarrier(ctx context.Context, ttStream ms.MsgStream, peerIds
```go
type hardTimeTickBarrier struct {
- peer2Tt map[UniqueID]Timestamp
- outTt chan Timestamp
- ttStream ms.MsgStream
- ctx context.Context
- wg sync.WaitGroup
- loopCtx context.Context
- loopCancel context.CancelFunc
+ peer2Tt map[UniqueID]Timestamp
+ outTt chan Timestamp
+ ttStream ms.MsgStream
+ ctx context.Context
+ wg sync.WaitGroup
+ loopCtx context.Context
+ loopCancel context.CancelFunc
}
func (ttBarrier *hardTimeTickBarrier) GetTimeTick() (Timestamp,error)
@@ -687,15 +679,15 @@ func NewHardTimeTickBarrier(ctx context.Context, ttStream ms.MsgStream, peerIds
type TimeTickBarrier interface {
GetTimeTick() (Timestamp,error)
Start()
- Close()
+ Close()
}
type timeSyncMsgProducer struct {
- ctx context.Context
- cancel context.CancelFunc
- wg sync.WaitGroup
- ttBarrier TimeTickBarrier
- watchers []TimeTickWatcher
+ ctx context.Context
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+ ttBarrier TimeTickBarrier
+ watchers []TimeTickWatcher
}
func (syncMsgProducer *timeSyncMsgProducer) SetProxyTtStreams(proxyTt *MsgStream, proxyIds []UniqueId)
@@ -725,9 +717,9 @@ message SegmentStats {
}
message QueryNodeStats {
- int64 id = 1;
- uint64 timestamp = 2;
- repeated SegmentStats seg_stats = 3;
+ int64 id = 1;
+ uint64 timestamp = 2;
+ repeated SegmentStats seg_stats = 3;
}
```
@@ -745,16 +737,16 @@ type assignment struct {
}
type segmentStatus struct {
- assignments []*assignment
+ assignments []*assignment
}
type collectionStatus struct {
- openedSegment []UniqueID
+ openedSegment []UniqueID
}
type SegmentManagement struct {
- segStatus map[UniqueID]*SegmentStatus
- collStatus map[UniqueID]*collectionStatus
+ segStatus map[UniqueID]*SegmentStatus
+ collStatus map[UniqueID]*collectionStatus
}
func NewSegmentManagement(ctx context.Context) *SegmentManagement
@@ -785,18 +777,18 @@ func (segMgr *SegmentManager) AssignSegmentID(segIDReq []*internalpb.SegIDReques
// "/msg_stream/insert"
message SysConfigRequest {
- MsgType msg_type = 1;
- int64 reqID = 2;
- int64 proxyID = 3;
- uint64 timestamp = 4;
- repeated string keys = 5;
- repeated string key_prefixes = 6;
+ MsgType msg_type = 1;
+ int64 reqID = 2;
+ int64 proxyID = 3;
+ uint64 timestamp = 4;
+ repeated string keys = 5;
+ repeated string key_prefixes = 6;
}
message SysConfigResponse {
- common.Status status = 1;
- repeated string keys = 2;
- repeated string values = 3;
+ common.Status status = 1;
+ repeated string keys = 2;
+ repeated string values = 3;
}
```
diff --git a/docs/developer_guides/chap07_query_service.md b/docs/developer_guides/chap07_query_service.md
index 7b1c948fd5..3d215e6a09 100644
--- a/docs/developer_guides/chap07_query_service.md
+++ b/docs/developer_guides/chap07_query_service.md
@@ -12,19 +12,19 @@
```go
type QueryService interface {
- Service
- Component
-
- RegisterNode(ctx context.Context, req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error)
- ShowCollections(ctx context.Context, req *querypb.ShowCollectionRequest) (*querypb.ShowCollectionResponse, error)
- LoadCollection(ctx context.Context, req *querypb.LoadCollectionRequest) (*commonpb.Status, error)
- ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
- ShowPartitions(ctx context.Context, req *querypb.ShowPartitionRequest) (*querypb.ShowPartitionResponse, error)
- LoadPartitions(ctx context.Context, req *querypb.LoadPartitionRequest) (*commonpb.Status, error)
- ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionRequest) (*commonpb.Status, error)
- CreateQueryChannel(ctx context.Context) (*querypb.CreateQueryChannelResponse, error)
- GetPartitionStates(ctx context.Context, req *querypb.PartitionStatesRequest) (*querypb.PartitionStatesResponse, error)
- GetSegmentInfo(ctx context.Context, req *querypb.SegmentInfoRequest) (*querypb.SegmentInfoResponse, error)
+ Component
+ TimeTickProvider
+
+ RegisterNode(ctx context.Context, req *querypb.RegisterNodeRequest) (*querypb.RegisterNodeResponse, error)
+ ShowCollections(ctx context.Context, req *querypb.ShowCollectionsRequest) (*querypb.ShowCollectionsResponse, error)
+ LoadCollection(ctx context.Context, req *querypb.LoadCollectionRequest) (*commonpb.Status, error)
+ ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
+ ShowPartitions(ctx context.Context, req *querypb.ShowPartitionsRequest) (*querypb.ShowPartitionsResponse, error)
+ LoadPartitions(ctx context.Context, req *querypb.LoadPartitionsRequest) (*commonpb.Status, error)
+ ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error)
+ CreateQueryChannel(ctx context.Context) (*querypb.CreateQueryChannelResponse, error)
+ GetPartitionStates(ctx context.Context, req *querypb.GetPartitionStatesRequest) (*querypb.GetPartitionStatesResponse, error)
+ GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error)
}
```
@@ -34,29 +34,29 @@ type QueryService interface {
```go
type MsgBase struct {
- MsgType MsgType
- MsgID UniqueID
- Timestamp Timestamp
- SourceID UniqueID
+ MsgType MsgType
+ MsgID UniqueID
+ Timestamp Timestamp
+ SourceID UniqueID
}
```
* *RegisterNode*
```go
-tyoe Address struct {
- Ip string
- port int64
+type Address struct {
+ Ip string
+   Port      int64
}
type RegisterNodeRequest struct {
- Base *commonpb.MsgBase
- Address *commonpb.Address
+ Base *commonpb.MsgBase
+ Address *commonpb.Address
}
type RegisterNodeResponse struct {
- Status *commonpb.Status
- InitParams *internalpb2.InitParams
+ Status *commonpb.Status
+ InitParams *internalpb.InitParams
}
```
@@ -64,13 +64,13 @@ type RegisterNodeResponse struct {
```go
type ShowCollectionRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
+ Base *commonpb.MsgBase
+ DbID UniqueID
}
type ShowCollectionResponse struct {
- Status *commonpb.Status
- CollectionIDs []UniqueID
+ Status *commonpb.Status
+ CollectionIDs []UniqueID
}
```
@@ -78,10 +78,10 @@ type ShowCollectionResponse struct {
```go
type LoadCollectionRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
- schema *schemapb.CollectionSchema
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
+   Schema       *schemapb.CollectionSchema
}
```
@@ -89,9 +89,9 @@ type LoadCollectionRequest struct {
```go
type ReleaseCollectionRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
}
```
@@ -99,14 +99,14 @@ type ReleaseCollectionRequest struct {
```go
type ShowPartitionRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
}
type ShowPartitionResponse struct {
- Status *commonpb.Status
- PartitionIDs []UniqueID
+ Status *commonpb.Status
+ PartitionIDs []UniqueID
}
```
@@ -116,30 +116,30 @@ type ShowPartitionResponse struct {
type PartitionState = int
const (
- PartitionState_NotExist PartitionState = 0
- PartitionState_NotPresent PartitionState = 1
- PartitionState_OnDisk PartitionState = 2
- PartitionState_PartialInMemory PartitionState = 3
- PartitionState_InMemory PartitionState = 4
- PartitionState_PartialInGPU PartitionState = 5
- PartitionState_InGPU PartitionState = 6
+ PartitionState_NotExist PartitionState = 0
+ PartitionState_NotPresent PartitionState = 1
+ PartitionState_OnDisk PartitionState = 2
+ PartitionState_PartialInMemory PartitionState = 3
+ PartitionState_InMemory PartitionState = 4
+ PartitionState_PartialInGPU PartitionState = 5
+ PartitionState_InGPU PartitionState = 6
)
type PartitionStatesRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
- PartitionIDs []UniqueID
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
+ PartitionIDs []UniqueID
}
type PartitionStates struct {
- PartitionID UniqueID
- State PartitionState
+ PartitionID UniqueID
+ State PartitionState
}
type PartitionStatesResponse struct {
- Status *commonpb.Status
- PartitionDescriptions []*PartitionStates
+ Status *commonpb.Status
+ PartitionDescriptions []*PartitionStates
}
```
@@ -147,11 +147,11 @@ type PartitionStatesResponse struct {
```go
type LoadPartitonRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
- PartitionIDs []UniqueID
- Schema *schemapb.CollectionSchema
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
+ PartitionIDs []UniqueID
+ Schema *schemapb.CollectionSchema
}
```
@@ -159,10 +159,10 @@ type LoadPartitonRequest struct {
```go
type ReleasePartitionRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
- PartitionIDs []UniqueID
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
+ PartitionIDs []UniqueID
}
```
@@ -170,50 +170,56 @@ type ReleasePartitionRequest struct {
```go
type CreateQueryChannelResponse struct {
- Status *commonpb.Status
- RequestChannelName string
- ResultChannelName string
+ Status *commonpb.Status
+ RequestChannelName string
+ ResultChannelName string
}
```
* *GetSegmentInfo* *
```go
-type SegmentInfoRequest struct {
- Base *commonpb.MsgBase
- SegmentIDs []UniqueID
+type GetSegmentInfoRequest struct {
+ Base *commonpb.MsgBase
+ SegmentIDs []UniqueID
}
type SegmentInfo struct {
- SegmentID UniqueID
- CollectionID UniqueID
- PartitionID UniqueID
- MemSize UniqueID
- NumRows UniqueID
- IndexName string
- IndexID UniqueID
+ SegmentID UniqueID
+ CollectionID UniqueID
+ PartitionID UniqueID
+ MemSize UniqueID
+ NumRows UniqueID
+ IndexName string
+ IndexID UniqueID
}
-type SegmentInfoResponse struct {
- Status *commonpb.Status
- Infos []*SegmentInfo
+type GetSegmentInfoResponse struct {
+ Status *commonpb.Status
+ Infos []*SegmentInfo
}
```
-//TODO
#### 8.2 Query Channel
+* *SearchMsg*
+
```go
type SearchRequest struct {
- RequestBase
- DbName string
- CollectionName string
- PartitionNames []string
- DbID UniqueID
- CollectionID UniqueID
- PartitionIDs []UniqueID
- Dsl string
- PlaceholderGroup []byte
+ Base *commonpb.MsgBase
+ ResultChannelID string
+ DbID int64
+ CollectionID int64
+ PartitionIDs []int64
+ Dsl string
+ // serialized `PlaceholderGroup`
+ PlaceholderGroup []byte
+ Query *commonpb.Blob
+}
+
+type SearchMsg struct {
+ BaseMsg
+ SearchRequest
}
```
@@ -223,16 +229,17 @@ type SearchRequest struct {
```go
type QueryNode interface {
- typeutil.Component
-
- AddQueryChannel(ctx context.Context, in *queryPb.AddQueryChannelsRequest) (*commonpb.Status, error)
- RemoveQueryChannel(ctx context.Context, in *queryPb.RemoveQueryChannelsRequest) (*commonpb.Status, error)
- WatchDmChannels(ctx context.Context, in *queryPb.WatchDmChannelsRequest) (*commonpb.Status, error)
- LoadSegments(ctx context.Context, in *queryPb.LoadSegmentRequest) (*commonpb.Status, error)
- ReleaseCollection(ctx context.Context, in *queryPb.ReleaseCollectionRequest) (*commonpb.Status, error)
- ReleasePartitions(ctx context.Context, in *queryPb.ReleasePartitionRequest) (*commonpb.Status, error)
- ReleaseSegments(ctx context.Context, in *queryPb.ReleaseSegmentRequest) (*commonpb.Status, error)
- GetSegmentInfo(ctx context.Context, in *queryPb.SegmentInfoRequest) (*queryPb.SegmentInfoResponse, error)
+ Component
+ TimeTickProvider
+
+ AddQueryChannel(ctx context.Context, req *querypb.AddQueryChannelRequest) (*commonpb.Status, error)
+ RemoveQueryChannel(ctx context.Context, req *querypb.RemoveQueryChannelRequest) (*commonpb.Status, error)
+ WatchDmChannels(ctx context.Context, req *querypb.WatchDmChannelsRequest) (*commonpb.Status, error)
+ LoadSegments(ctx context.Context, req *querypb.LoadSegmentsRequest) (*commonpb.Status, error)
+ ReleaseCollection(ctx context.Context, req *querypb.ReleaseCollectionRequest) (*commonpb.Status, error)
+ ReleasePartitions(ctx context.Context, req *querypb.ReleasePartitionsRequest) (*commonpb.Status, error)
+ ReleaseSegments(ctx context.Context, req *querypb.ReleaseSegmentsRequest) (*commonpb.Status, error)
+ GetSegmentInfo(ctx context.Context, req *querypb.GetSegmentInfoRequest) (*querypb.GetSegmentInfoResponse, error)
}
```
@@ -242,9 +249,9 @@ type QueryNode interface {
```go
type AddQueryChannelRequest struct {
- Base *commonpb.MsgBase
- RequestChannelID string
- ResultChannelID string
+ Base *commonpb.MsgBase
+ RequestChannelID string
+ ResultChannelID string
}
```
@@ -252,10 +259,10 @@ type AddQueryChannelRequest struct {
```go
type RemoveQueryChannelRequest struct {
- Status *commonpb.Status
- Base *commonpb.MsgBase
- RequestChannelID string
- ResultChannelID string
+ Status *commonpb.Status
+ Base *commonpb.MsgBase
+ RequestChannelID string
+ ResultChannelID string
}
```
@@ -263,77 +270,77 @@ type RemoveQueryChannelRequest struct {
```go
type WatchDmChannelInfo struct {
- ChannelID string
- Pos *internalpb2.MsgPosition
- ExcludedSegments []int64
+ ChannelID string
+ Pos *internalpb.MsgPosition
+ ExcludedSegments []int64
}
type WatchDmChannelsRequest struct {
- Base *commonpb.MsgBase
- CollectionID int64
- ChannelIDs []string
- Infos []*WatchDmChannelsInfo
+ Base *commonpb.MsgBase
+ CollectionID int64
+ ChannelIDs []string
+   Infos          []*WatchDmChannelInfo
}
```
* *LoadSegments*
```go
-type LoadSegmentRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
- PartitionID UniqueID
- SegmentIDs []UniqueID
- FieldIDs []UniqueID
- SegmentStates []*datapb.SegmentStateInfo
- Schema *schemapb.CollectionSchema
+type LoadSegmentsRequest struct {
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
+ PartitionID UniqueID
+ SegmentIDs []UniqueID
+ FieldIDs []UniqueID
+ SegmentStates []*datapb.SegmentStateInfo
+ Schema *schemapb.CollectionSchema
}
```
* *ReleaseCollection*
```go
type ReleaseCollectionRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
}
```
* *ReleasePartitions*
```go
-type ReleasePartitionRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
- PartitionIDs []UniqueID
+type ReleasePartitionsRequest struct {
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
+ PartitionIDs []UniqueID
}
```
* *ReleaseSegments*
```go
-type ReleaseSegmentRequest struct {
- Base *commonpb.MsgBas
- DbID UniqueID
- CollectionID UniqueID
- PartitionIDs []UniqueID
- SegmentIDs []UniqueID
+type ReleaseSegmentsRequest struct {
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
+ PartitionIDs []UniqueID
+ SegmentIDs []UniqueID
}
```
* *GetSegmentInfo*
```go
-type SegmentInfoRequest struct {
- Base *commonpb.MsgBase
- SegmentIDs []Unique
+type GetSegmentInfoRequest struct {
+ Base *commonpb.MsgBase
+   SegmentIDs []UniqueID
}
-type SegmentInfoResponse struct {
- Status *commonpb.Status
- Infos []*SegmentInfo
+type GetSegmentInfoResponse struct {
+ Status *commonpb.Status
+ Infos []*SegmentInfo
}
```
@@ -348,27 +355,56 @@ Every replica tracks a value called tSafe which is the maximum timestamp that th
###### 8.1.1 Collection
``` go
-type Collection struct {
- Name string
- Id uint64
- Fields map[string]FieldMeta
- SegmentsId []uint64
-
- cCollectionSchema C.CCollectionSchema
+type collectionReplica struct {
+ tSafes map[UniqueID]tSafer // map[collectionID]tSafer
+
+ mu sync.RWMutex // guards all
+ collections map[UniqueID]*Collection
+ partitions map[UniqueID]*Partition
+ segments map[UniqueID]*Segment
+
+ excludedSegments map[UniqueID][]UniqueID // map[collectionID]segmentIDs
}
```
-###### 8.1.2 Field Meta
+###### 8.1.2 Collection
```go
-type FieldMeta struct {
- Name string
- Id uint64
- IsPrimaryKey bool
- TypeParams map[string]string
- IndexParams map[string]string
+type FieldSchema struct {
+ FieldID int64
+ Name string
+ IsPrimaryKey bool
+ Description string
+ DataType DataType
+ TypeParams []*commonpb.KeyValuePair
+ IndexParams []*commonpb.KeyValuePair
+}
+
+type CollectionSchema struct {
+ Name string
+ Description string
+ AutoID bool
+ Fields []*FieldSchema
+}
+
+type Collection struct {
+ collectionPtr C.CCollection
+ id UniqueID
+ partitionIDs []UniqueID
+ schema *schemapb.CollectionSchema
+}
+```
+
+###### 8.1.3 Partition
+
+```go
+type Partition struct {
+ collectionID UniqueID
+ partitionID UniqueID
+ segmentIDs []UniqueID
+ enable bool
}
```
@@ -377,15 +413,39 @@ type FieldMeta struct {
###### 8.1.3 Segment
``` go
+type segmentType int32
+
+const (
+ segmentTypeInvalid segmentType = iota
+ segmentTypeGrowing
+ segmentTypeSealed
+ segmentTypeIndexing
+)
+type indexParam = map[string]string
+
type Segment struct {
- Id uint64
- ParitionName string
- CollectionId uint64
- OpenTime Timestamp
- CloseTime Timestamp
- NumRows uint64
-
- cSegment C.CSegmentBase
+ segmentPtr C.CSegmentInterface
+
+ segmentID UniqueID
+ partitionID UniqueID
+ collectionID UniqueID
+ lastMemSize int64
+ lastRowCount int64
+
+ once sync.Once // guards enableIndex
+ enableIndex bool
+ enableLoadBinLog bool
+
+ rmMutex sync.Mutex // guards recentlyModified
+ recentlyModified bool
+
+   typeMu      sync.Mutex // guards segmentType
+ segmentType segmentType
+
+ paramMutex sync.RWMutex // guards index
+ indexParam map[int64]indexParam
+ indexName string
+ indexID UniqueID
}
```
@@ -395,11 +455,16 @@ type Segment struct {
```go
type dataSyncService struct {
- ctx context.Context
- pulsarURL string
- fg *flowgraph.TimeTickedFlowGraph
- msgStream *msgstream.PulsarMsgStream
- dataReplica Replica
+ ctx context.Context
+ cancel context.CancelFunc
+
+ collectionID UniqueID
+ fg *flowgraph.TimeTickedFlowGraph
+
+ dmStream msgstream.MsgStream
+ msFactory msgstream.Factory
+
+ replica ReplicaInterface
}
```
diff --git a/docs/developer_guides/chap08_binlog.md b/docs/developer_guides/chap08_binlog.md
index fafb3d598a..7d7fb093d3 100644
--- a/docs/developer_guides/chap08_binlog.md
+++ b/docs/developer_guides/chap08_binlog.md
@@ -8,7 +8,7 @@ Binlog is stored in a columnar storage format, every column in schema should be
## Event format
-Binlog file consists of 4 bytes magic number and a series of events. The first event must be descriptor event.
+Binlog file consists of 4 bytes magic number and a series of events. The first event must be descriptor event.
### Event format
@@ -16,7 +16,7 @@ Binlog file consists of 4 bytes magic number and a series of events. The first e
+=====================================+
| event | timestamp 0 : 8 | create timestamp
| header +----------------------------+
-| | type_code 8 : 1 | event type code
+| | type_code 8 : 1 | event type code
| +----------------------------+
| | server_id 9 : 4 | write node id
| +----------------------------+
@@ -26,7 +26,7 @@ Binlog file consists of 4 bytes magic number and a series of events. The first e
| +----------------------------+
| | extra_headers 21 : x-21 | reserved part
+=====================================+
-| event | fixed part x : y |
+| event | fixed part x : y |
| data +----------------------------+
| | variable part |
+=====================================+
@@ -40,7 +40,7 @@ Binlog file consists of 4 bytes magic number and a series of events. The first e
+=====================================+
| event | timestamp 0 : 8 | create timestamp
| header +----------------------------+
-| | type_code 8 : 1 | event type code
+| | type_code 8 : 1 | event type code
| +----------------------------+
| | server_id 9 : 4 | write node id
| +----------------------------+
@@ -48,7 +48,7 @@ Binlog file consists of 4 bytes magic number and a series of events. The first e
| +----------------------------+
| | next_position 17 : 4 | offset of next event from the start of file
+=====================================+
-| event | binlog_version 21 : 2 | binlog version
+| event | binlog_version 21 : 2 | binlog version
| data +----------------------------+
| | server_version 23 : 8 | write node version
| +----------------------------+
@@ -67,7 +67,7 @@ Binlog file consists of 4 bytes magic number and a series of events. The first e
| | end_timestamp 65 : 1 | maximum timestamp allocated by master of all events in this file
| +----------------------------+
| | post-header 66 : n | array of n bytes, one byte per event type that the server knows about
-| | lengths for all |
+| | lengths for all |
| | event types |
+=====================================+
```
@@ -132,7 +132,7 @@ Schema
-Request:
+Request:
InsertRequest rows(1W)
@@ -142,7 +142,7 @@ Request:
-insert binlogs:
+insert binlogs:
rowid, pk, ts, string, int, float, vector 6 files
@@ -177,7 +177,7 @@ typedef struct CStatus {
const char* error_msg;
} CStatus
-
+
// C++ interface
// writer
CPayloadWriter NewPayloadWriter(int columnType);
diff --git a/docs/developer_guides/chap09_data_service.md b/docs/developer_guides/chap09_data_service.md
index 5596c641c2..8105851ef0 100644
--- a/docs/developer_guides/chap09_data_service.md
+++ b/docs/developer_guides/chap09_data_service.md
@@ -12,20 +12,21 @@
```go
type DataService interface {
- typeutil.Service
- typeutil.Component
- RegisterNode(ctx context.Context, req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error)
- Flush(ctx context.Context, req *datapb.FlushRequest) (*commonpb.Status, error)
-
- AssignSegmentID(ctx context.Context, req *datapb.AssignSegIDRequest) (*datapb.AssignSegIDResponse, error)
- ShowSegments(ctx context.Context, req *datapb.ShowSegmentRequest) (*datapb.ShowSegmentResponse, error)
- GetSegmentStates(ctx context.Context, req *datapb.SegmentStatesRequest) (*datapb.SegmentStatesResponse, error)
- GetInsertBinlogPaths(ctx context.Context, req *datapb.InsertBinlogPathRequest) (*datapb.InsertBinlogPathsResponse, error)
- GetSegmentInfoChannel(ctx context.Context) (*milvuspb.StringResponse, error)
- GetInsertChannels(ctx context.Context, req *datapb.InsertChannelRequest) (*internalpb2.StringList, error)
- GetCollectionStatistics(ctx context.Context, req *datapb.CollectionStatsRequest) (*datapb.CollectionStatsResponse, error)
- GetPartitionStatistics(ctx context.Context, req *datapb.PartitionStatsRequest) (*datapb.PartitionStatsResponse, error)
- GetSegmentInfo(ctx context.Context, req *datapb.SegmentInfoRequest) (*datapb.SegmentInfoResponse, error)
+ Component
+ TimeTickProvider
+
+ RegisterNode(ctx context.Context, req *datapb.RegisterNodeRequest) (*datapb.RegisterNodeResponse, error)
+ Flush(ctx context.Context, req *datapb.FlushRequest) (*commonpb.Status, error)
+
+ AssignSegmentID(ctx context.Context, req *datapb.AssignSegmentIDRequest) (*datapb.AssignSegmentIDResponse, error)
+ ShowSegments(ctx context.Context, req *datapb.ShowSegmentsRequest) (*datapb.ShowSegmentsResponse, error)
+ GetSegmentStates(ctx context.Context, req *datapb.GetSegmentStatesRequest) (*datapb.GetSegmentStatesResponse, error)
+ GetInsertBinlogPaths(ctx context.Context, req *datapb.GetInsertBinlogPathsRequest) (*datapb.GetInsertBinlogPathsResponse, error)
+ GetSegmentInfoChannel(ctx context.Context) (*milvuspb.StringResponse, error)
+ GetInsertChannels(ctx context.Context, req *datapb.GetInsertChannelsRequest) (*internalpb.StringList, error)
+ GetCollectionStatistics(ctx context.Context, req *datapb.GetCollectionStatisticsRequest) (*datapb.GetCollectionStatisticsResponse, error)
+ GetPartitionStatistics(ctx context.Context, req *datapb.GetPartitionStatisticsRequest) (*datapb.GetPartitionStatisticsResponse, error)
+ GetSegmentInfo(ctx context.Context, req *datapb.GetSegmentInfoRequest) (*datapb.GetSegmentInfoResponse, error)
}
```
@@ -35,10 +36,10 @@ type DataService interface {
```go
type MsgBase struct {
- MsgType MsgType
- MsgID UniqueID
- Timestamp Timestamp
- SourceID UniqueID
+ MsgType MsgType
+ MsgID UniqueID
+ Timestamp Timestamp
+ SourceID UniqueID
}
```
@@ -46,13 +47,13 @@ type MsgBase struct {
```go
type RegisterNodeRequest struct {
- Base *commonpb.MsgBase
- Address *commonpb.Address
+ Base *commonpb.MsgBase
+ Address *commonpb.Address
}
type RegisterNodeResponse struct {
- InitParams *internalpb2.InitParams
- Status *commonpb.Status
+ InitParams *internalpb.InitParams
+ Status *commonpb.Status
}
```
@@ -60,60 +61,57 @@ type RegisterNodeResponse struct {
```go
type FlushRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
}
```
* *AssignSegmentID*
```go
-type SegIDRequest struct {
- Count uint32
- ChannelName string
- CollectionID UniqueID
- PartitionID UniqueID
- CollName string
- PartitionName string
+type SegmentIDRequest struct {
+ Count uint32
+ ChannelName string
+ CollectionID UniqueID
+ PartitionID UniqueID
}
-type AssignSegIDRequest struct {
- NodeID int64
- PeerRole string
- SegIDRequests []*SegIDRequest
+type AssignSegmentIDRequest struct {
+ NodeID int64
+ PeerRole string
+ SegIDRequests []*SegmentIDRequest
}
type SegIDAssignment struct {
- SegID UniqueID
- ChannelName string
- Count uint32
- CollectionID UniqueID
- PartitionID UniqueID
- ExpireTime uint64
- Status *commonpb.Status
- CollName string
- PartitionName string
+ SegID UniqueID
+ ChannelName string
+ Count uint32
+ CollectionID UniqueID
+ PartitionID UniqueID
+ ExpireTime uint64
+ Status *commonpb.Status
}
-type AssignSegIDResponse struct {
- SegIDAssignments []*SegIDAssignment
- Status *commonpb.Status
+type AssignSegmentIDResponse struct {
+   SegIDAssignments []*SegIDAssignment
+ Status *commonpb.Status
}
```
* *ShowSegments*
```go
-type ShowSegmentRequest struct {
- Base *commonpb.MsgBase
- CollectionID UniqueID
- PartitionID UniqueID
+type ShowSegmentsRequest struct {
+ Base *commonpb.MsgBase
+ CollectionID UniqueID
+ PartitionID UniqueID
+ DbID UniqueID
}
-type ShowSegmentResponse struct {
- SegmentIDs []UniqueID
- Status *commonpb.Status
+type ShowSegmentsResponse struct {
+ SegmentIDs []UniqueID
+ Status *commonpb.Status
}
```
@@ -122,117 +120,120 @@ type ShowSegmentResponse struct {
* *GetSegmentStates*
```go
-type SegmentStatesRequest struct {
- Base *commonpb.MsgBase
- SegmentID UniqueID
+type GetSegmentStatesRequest struct {
+ Base *commonpb.MsgBase
+ SegmentID UniqueID
}
-enum SegmentState {
- NONE = 0;
- NOT_EXIST = 1;
- GROWING = 2;
- SEALED = 3;
-}
+type SegmentState int32
+
+const (
+ SegmentState_SegmentStateNone SegmentState = 0
+ SegmentState_NotExist SegmentState = 1
+ SegmentState_Growing SegmentState = 2
+ SegmentState_Sealed SegmentState = 3
+ SegmentState_Flushed SegmentState = 4
+)
type SegmentStateInfo struct {
- SegmentID UniqueID
- State commonpb.SegmentState
- CreateTime uint64
- SealedTime uint64
- FlushedTime uint64
- StartPosition *internalpb2.MsgPosition
- EndPosition *internalpb2.MsgPosition
- Status *commonpb.Status
+ SegmentID UniqueID
+ State commonpb.SegmentState
+ CreateTime uint64
+ SealedTime uint64
+ FlushedTime uint64
+ StartPosition *internalpb.MsgPosition
+ EndPosition *internalpb.MsgPosition
+ Status *commonpb.Status
}
-type SegmentStatesResponse struct {
- Status *commonpb.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
- States []*SegmentStateInfo `protobuf:"bytes,2,rep,name=states,proto3" json:"states,omitempty"`
+type GetSegmentStatesResponse struct {
+ Status *commonpb.Status
+ States []*SegmentStateInfo
}
```
* *GetInsertBinlogPaths*
```go
-type InsertBinlogPathRequest struct {
- Base *commonpb.MsgBase
- SegmentID UniqueID
+type GetInsertBinlogPathsRequest struct {
+ Base *commonpb.MsgBase
+ SegmentID UniqueID
}
-type InsertBinlogPathsResponse struct {
- FieldIDs []int64
- Paths []*internalpb2.StringList
- Status *commonpb.Status
+type GetInsertBinlogPathsResponse struct {
+ FieldIDs []int64
+ Paths []*internalpb.StringList
+ Status *commonpb.Status
}
```
* *GetInsertChannels*
```go
-type InsertChannelRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
+type GetInsertChannelsRequest struct {
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
}
```
* *GetCollectionStatistics*
```go
-type CollectionStatsRequest struct {
- Base *commonpb.MsgBase
- DbID int64
- CollectionID int64
+type GetCollectionStatisticsRequest struct {
+ Base *commonpb.MsgBase
+ DbID int64
+ CollectionID int64
}
-type CollectionStatsResponse struct {
- Stats []*commonpb.KeyValuePair
- Status *commonpb.Status
+type GetCollectionStatisticsResponse struct {
+ Stats []*commonpb.KeyValuePair
+ Status *commonpb.Status
}
```
* *GetPartitionStatistics*
```go
-type PartitionStatsRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
- PartitionID UniqueID
+type GetPartitionStatisticsRequest struct {
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
+ PartitionID UniqueID
}
-type PartitionStatsResponse struct {
- Stats []*commonpb.KeyValuePair
- Status *commonpb.Status
+type GetPartitionStatisticsResponse struct {
+ Stats []*commonpb.KeyValuePair
+ Status *commonpb.Status
}
```
* *GetSegmentInfo*
```go
-type SegmentInfoRequest struct{
- Base *commonpb.MsgBase
- SegmentIDs []UniqueID
+type GetSegmentInfoRequest struct{
+ Base *commonpb.MsgBase
+ SegmentIDs []UniqueID
}
type SegmentInfo struct {
- SegmentID UniqueID
- CollectionID UniqueID
- PartitionID UniqueID
- InsertChannel string
- OpenTime Timestamp
- SealedTime Timestamp
- FlushedTime Timestamp
- NumRows int64
- MemSize int64
- State SegmentState
- StartPosition []*internalpb2.MsgPosition
- EndPosition []*internalpb2.MsgPosition
+ SegmentID UniqueID
+ CollectionID UniqueID
+ PartitionID UniqueID
+ InsertChannel string
+ OpenTime Timestamp
+ SealedTime Timestamp
+ FlushedTime Timestamp
+ NumRows int64
+ MemSize int64
+ State SegmentState
+ StartPosition []*internalpb.MsgPosition
+ EndPosition []*internalpb.MsgPosition
}
-type SegmentInfoResponse struct{
- Status *commonpb.Status
- infos []SegmentInfo
+type GetSegmentInfoResponse struct{
+ Status *commonpb.Status
+   Infos  []*SegmentInfo
}
```
@@ -242,20 +243,27 @@ type SegmentInfoResponse struct{
#### 8.2 Insert Channel
+* *InsertMsg*
+
```go
type InsertRequest struct {
- Base *commonpb.MsgBase
- DbName string
- CollectionName string
- PartitionName string
- DbID UniqueID
- CollectionID UniqueID
- PartitionID UniqueID
- SegmentID UniqueID
- ChannelID string
- Timestamps []uint64
- RowIDs []int64
- RowData []*commonpb.Blob
+ Base *commonpb.MsgBase
+ DbName string
+ CollectionName string
+ PartitionName string
+ DbID UniqueID
+ CollectionID UniqueID
+ PartitionID UniqueID
+ SegmentID UniqueID
+ ChannelID string
+ Timestamps []uint64
+ RowIDs []int64
+ RowData []*commonpb.Blob
+}
+
+type InsertMsg struct {
+ BaseMsg
+ InsertRequest
}
```
@@ -265,14 +273,10 @@ type InsertRequest struct {
```go
type DataNode interface {
- Service
- Component
-
- WatchDmChannels(ctx context.Context, in *datapb.WatchDmChannelRequest) (*commonpb.Status, error)
- FlushSegments(ctx context.Context, in *datapb.FlushSegRequest) error
-
- SetMasterServiceInterface(ctx context.Context, ms MasterServiceInterface) error
- SetDataServiceInterface(ctx context.Context, ds DataServiceInterface) error
+ Component
+
+ WatchDmChannels(ctx context.Context, req *datapb.WatchDmChannelsRequest) (*commonpb.Status, error)
+ FlushSegments(ctx context.Context, req *datapb.FlushSegmentsRequest) (*commonpb.Status, error)
}
```
@@ -280,42 +284,46 @@ type DataNode interface {
```go
type WatchDmChannelRequest struct {
- Base *commonpb.MsgBase
- ChannelNames []string
+ Base *commonpb.MsgBase
+ ChannelNames []string
}
```
* *FlushSegments*
```go
-type FlushSegRequest struct {
- Base *commonpb.MsgBase
- DbID UniqueID
- CollectionID UniqueID
- SegmentIDs []int64
+type FlushSegmentsRequest struct {
+ Base *commonpb.MsgBase
+ DbID UniqueID
+ CollectionID UniqueID
+ SegmentIDs []int64
}
```
#### 8.2 SegmentStatistics Update Channel
-* *SegmentStatistics*
+* *SegmentStatisticsMsg*
```go
type SegmentStatisticsUpdates struct {
- SegmentID UniqueID
- MemorySize int64
- NumRows int64
- CreateTime uint64
- EndTime uint64
- StartPosition *internalpb2.MsgPosition
- EndPosition *internalpb2.MsgPosition
- IsNewSegment bool
+ SegmentID UniqueID
+ MemorySize int64
+ NumRows int64
+ CreateTime uint64
+ EndTime uint64
+ StartPosition *internalpb.MsgPosition
+ EndPosition *internalpb.MsgPosition
}
-type SegmentStatistics struct{
- Base *commonpb.MsgBase
- SegStats []*SegmentStatisticsUpdates
+type SegmentStatistics struct {
+ Base *commonpb.MsgBase
+ SegStats []*SegmentStatisticsUpdates
+}
+
+type SegmentStatisticsMsg struct {
+ BaseMsg
+ SegmentStatistics
}
```