diff --git a/internal/core/src/indexbuilder/CMakeLists.txt b/internal/core/src/indexbuilder/CMakeLists.txt index d2d6d1aa25..1449d2c23e 100644 --- a/internal/core/src/indexbuilder/CMakeLists.txt +++ b/internal/core/src/indexbuilder/CMakeLists.txt @@ -19,6 +19,11 @@ add_library(milvus_indexbuilder SHARED ${INDEXBUILDER_FILES} ) +set(PLATFORM_LIBS dl) +if (MSYS) +set(PLATFORM_LIBS ) +endif () + # link order matters target_link_libraries(milvus_indexbuilder milvus_config @@ -28,7 +33,7 @@ target_link_libraries(milvus_indexbuilder knowhere tbb log - dl + ${PLATFORM_LIBS} pthread ) diff --git a/internal/core/src/log/CMakeLists.txt b/internal/core/src/log/CMakeLists.txt index 45f2e364c0..5e490a575f 100644 --- a/internal/core/src/log/CMakeLists.txt +++ b/internal/core/src/log/CMakeLists.txt @@ -21,4 +21,9 @@ set(LOG_FILES ${MILVUS_ENGINE_SRC}/log/Log.cpp add_library(log STATIC ${LOG_FILES}) set_target_properties(log PROPERTIES RULE_LAUNCH_COMPILE "") set_target_properties(log PROPERTIES RULE_LAUNCH_LINK "") + +if(MSYS) +target_link_libraries( log PRIVATE ) +else() target_link_libraries( log PRIVATE fiu ) +endif() diff --git a/internal/core/src/log/Log.cpp b/internal/core/src/log/Log.cpp index d5b23814a3..7e4f122017 100644 --- a/internal/core/src/log/Log.cpp +++ b/internal/core/src/log/Log.cpp @@ -17,6 +17,9 @@ #include "log/Log.h" INITIALIZE_EASYLOGGINGPP +#ifdef WIN32 +#include <windows.h> +#endif #include <chrono> #include <cstdio> #include <string> @@ -65,6 +68,8 @@ get_now_timestamp() { return std::chrono::duration_cast<std::chrono::seconds>(now).count(); } +#ifndef WIN32 + int64_t get_system_boottime() { FILE* uptime = fopen("/proc/uptime", "r"); @@ -110,4 +115,24 @@ get_thread_start_timestamp() { } } +#else + +#define WINDOWS_TICK 10000000 +#define SEC_TO_UNIX_EPOCH 11644473600LL + +int64_t +get_thread_start_timestamp() { + FILETIME dummy; + FILETIME ret; + + if (GetThreadTimes(GetCurrentThread(), &ret, &dummy, &dummy, &dummy)) { + auto ticks = Int64ShllMod32(ret.dwHighDateTime, 32) | ret.dwLowDateTime; + auto thread_started 
= ticks / WINDOWS_TICK - SEC_TO_UNIX_EPOCH; + return get_now_timestamp() - thread_started; + } + return 0; +} + +#endif + // } // namespace milvus diff --git a/internal/core/src/segcore/CMakeLists.txt b/internal/core/src/segcore/CMakeLists.txt index d054594420..95281d051a 100644 --- a/internal/core/src/segcore/CMakeLists.txt +++ b/internal/core/src/segcore/CMakeLists.txt @@ -32,8 +32,13 @@ add_library(milvus_segcore SHARED ${SEGCORE_FILES} ) +set(PLATFORM_LIBS dl) +if (MSYS) +set(PLATFORM_LIBS ) +endif () + target_link_libraries(milvus_segcore - dl + ${PLATFORM_LIBS} log pthread tbb diff --git a/internal/core/src/utils/CMakeLists.txt b/internal/core/src/utils/CMakeLists.txt index 5479bde671..44592d6a39 100644 --- a/internal/core/src/utils/CMakeLists.txt +++ b/internal/core/src/utils/CMakeLists.txt @@ -15,4 +15,8 @@ aux_source_directory( ${MILVUS_ENGINE_SRC}/utils UTILS_FILES ) add_library( milvus_utils STATIC ${UTILS_FILES} ) -target_link_libraries( milvus_utils PRIVATE fiu milvus_exceptions) +if(MSYS) +target_link_libraries( milvus_utils PRIVATE milvus_exceptions) +else() +target_link_libraries( milvus_utils PRIVATE fiu milvus_exceptions ) +endif() diff --git a/internal/core/thirdparty/fiu/CMakeLists.txt b/internal/core/thirdparty/fiu/CMakeLists.txt index 8099586305..60f3c83516 100644 --- a/internal/core/thirdparty/fiu/CMakeLists.txt +++ b/internal/core/thirdparty/fiu/CMakeLists.txt @@ -49,13 +49,18 @@ macro( build_fiu ) add_dependencies(fiu fiu_ep) endmacro() -build_fiu() - -install( FILES ${INSTALL_DIR}/lib/libfiu.so - ${INSTALL_DIR}/lib/libfiu.so.0 - ${INSTALL_DIR}/lib/libfiu.so.1.00 +if (WIN32) + # nothing + message("skip building fiu on windows") +else () + build_fiu() + install( FILES ${INSTALL_DIR}/lib/libfiu.so + ${INSTALL_DIR}/lib/libfiu.so.0 + ${INSTALL_DIR}/lib/libfiu.so.1.00 DESTINATION lib ) + get_target_property( var fiu INTERFACE_INCLUDE_DIRECTORIES ) + message( STATUS ${var} ) + set_directory_properties( PROPERTY INCLUDE_DIRECTORIES ${var} ) +endif 
() + -get_target_property( var fiu INTERFACE_INCLUDE_DIRECTORIES ) -message( STATUS ${var} ) -set_directory_properties( PROPERTY INCLUDE_DIRECTORIES ${var} ) diff --git a/internal/querynode/load_index_info.go b/internal/querynode/load_index_info.go index 7678a5978b..2387692558 100644 --- a/internal/querynode/load_index_info.go +++ b/internal/querynode/load_index_info.go @@ -65,7 +65,7 @@ func (li *LoadIndexInfo) appendIndexParam(indexKey string, indexValue string) er // appendFieldInfo appends fieldID to index func (li *LoadIndexInfo) appendFieldInfo(fieldID FieldID) error { - cFieldID := C.long(fieldID) + cFieldID := C.int64_t(fieldID) status := C.AppendFieldInfo(li.cLoadIndexInfo, cFieldID) return HandleCStatus(&status, "AppendFieldInfo failed") } @@ -82,7 +82,7 @@ func (li *LoadIndexInfo) appendIndex(bytesIndex [][]byte, indexKeys []string) er for i, byteIndex := range bytesIndex { indexPtr := unsafe.Pointer(&byteIndex[0]) - indexLen := C.long(len(byteIndex)) + indexLen := C.int64_t(len(byteIndex)) binarySetKey := filepath.Base(indexKeys[i]) log.Debug("", zap.String("index key", binarySetKey)) indexKey := C.CString(binarySetKey) diff --git a/internal/querynode/plan.go b/internal/querynode/plan.go index 7fb5507adb..f608a505c5 100644 --- a/internal/querynode/plan.go +++ b/internal/querynode/plan.go @@ -98,7 +98,7 @@ func parseSearchRequest(plan *SearchPlan, searchRequestBlob []byte) (*searchRequ return nil, errors.New("empty search request") } var blobPtr = unsafe.Pointer(&searchRequestBlob[0]) - blobSize := C.long(len(searchRequestBlob)) + blobSize := C.int64_t(len(searchRequestBlob)) var cPlaceholderGroup C.CPlaceholderGroup status := C.ParsePlaceholderGroup(plan.cSearchPlan, blobPtr, blobSize, &cPlaceholderGroup) diff --git a/internal/querynode/reduce.go b/internal/querynode/reduce.go index 9aa52edd3d..f720ebb256 100644 --- a/internal/querynode/reduce.go +++ b/internal/querynode/reduce.go @@ -55,7 +55,7 @@ func reduceSearchResultsAndFillData(plan *SearchPlan, 
searchResults []*SearchRes cSearchResults = append(cSearchResults, res.cSearchResult) } cSearchResultPtr := (*C.CSearchResult)(&cSearchResults[0]) - cNumSegments := C.long(numSegments) + cNumSegments := C.int64_t(numSegments) status := C.ReduceSearchResultsAndFillData(plan.cSearchPlan, cSearchResultPtr, cNumSegments) if err := HandleCStatus(&status, "ReduceSearchResultsAndFillData failed"); err != nil { @@ -71,7 +71,7 @@ func reorganizeSearchResults(searchResults []*SearchResult, numSegments int64) ( } cSearchResultPtr := (*C.CSearchResult)(&cSearchResults[0]) - var cNumSegments = C.long(numSegments) + var cNumSegments = C.int64_t(numSegments) var cMarshaledHits C.CMarshaledHits status := C.ReorganizeSearchResults(&cMarshaledHits, cSearchResultPtr, cNumSegments) @@ -95,10 +95,10 @@ func (mh *MarshaledHits) getHitsBlob() ([]byte, error) { } func (mh *MarshaledHits) hitBlobSizeInGroup(groupOffset int64) ([]int64, error) { - cGroupOffset := (C.long)(groupOffset) + cGroupOffset := (C.int64_t)(groupOffset) numQueries := C.GetNumQueriesPerGroup(mh.cMarshaledHits, cGroupOffset) result := make([]int64, int64(numQueries)) - cResult := (*C.long)(&result[0]) + cResult := (*C.int64_t)(&result[0]) C.GetHitSizePerQueries(mh.cMarshaledHits, cGroupOffset, cResult) return result, nil } diff --git a/internal/querynode/segment.go b/internal/querynode/segment.go index 9f744857b8..f85a86e2af 100644 --- a/internal/querynode/segment.go +++ b/internal/querynode/segment.go @@ -571,8 +571,8 @@ func (s *Segment) segmentPreInsert(numOfRecords int) (int64, error) { return 0, nil } var offset int64 - cOffset := (*C.long)(&offset) - status := C.PreInsert(s.segmentPtr, C.long(int64(numOfRecords)), cOffset) + cOffset := (*C.int64_t)(&offset) + status := C.PreInsert(s.segmentPtr, C.int64_t(int64(numOfRecords)), cOffset) if err := HandleCStatus(&status, "PreInsert failed"); err != nil { return 0, err } @@ -586,7 +586,7 @@ func (s *Segment) segmentPreDelete(numOfRecords int) int64 { */ 
s.segPtrMu.RLock() defer s.segPtrMu.RUnlock() // thread safe guaranteed by segCore, use RLock - var offset = C.PreDelete(s.segmentPtr, C.long(int64(numOfRecords))) + var offset = C.PreDelete(s.segmentPtr, C.int64_t(int64(numOfRecords))) return int64(offset) } @@ -630,10 +630,10 @@ func (s *Segment) segmentInsert(offset int64, entityIDs *[]UniqueID, timestamps copyOffset += sizeofPerRow } - var cOffset = C.long(offset) - var cNumOfRows = C.long(numOfRow) - var cEntityIdsPtr = (*C.long)(&(*entityIDs)[0]) - var cTimestampsPtr = (*C.ulong)(&(*timestamps)[0]) + var cOffset = C.int64_t(offset) + var cNumOfRows = C.int64_t(numOfRow) + var cEntityIdsPtr = (*C.int64_t)(&(*entityIDs)[0]) + var cTimestampsPtr = (*C.uint64_t)(&(*timestamps)[0]) var cSizeofPerRow = C.int(sizeofPerRow) var cRawDataVoidPtr = unsafe.Pointer(&rawData[0]) log.Debug("QueryNode::Segment::InsertBegin", zap.Any("cNumOfRows", cNumOfRows)) @@ -672,10 +672,10 @@ func (s *Segment) segmentDelete(offset int64, entityIDs *[]UniqueID, timestamps return errors.New("length of entityIDs not equal to length of timestamps") } - var cOffset = C.long(offset) - var cSize = C.long(len(*entityIDs)) - var cEntityIdsPtr = (*C.long)(&(*entityIDs)[0]) - var cTimestampsPtr = (*C.ulong)(&(*timestamps)[0]) + var cOffset = C.int64_t(offset) + var cSize = C.int64_t(len(*entityIDs)) + var cEntityIdsPtr = (*C.int64_t)(&(*entityIDs)[0]) + var cTimestampsPtr = (*C.uint64_t)(&(*timestamps)[0]) status := C.Delete(s.segmentPtr, cOffset, cSize, cEntityIdsPtr, cTimestampsPtr) if err := HandleCStatus(&status, "Delete failed"); err != nil { @@ -823,7 +823,7 @@ func (s *Segment) dropFieldData(fieldID int64) error { return errors.New(errMsg) } - status := C.DropFieldData(s.segmentPtr, C.long(fieldID)) + status := C.DropFieldData(s.segmentPtr, C.int64_t(fieldID)) if err := HandleCStatus(&status, "DropFieldData failed"); err != nil { return err } @@ -893,7 +893,7 @@ func (s *Segment) dropSegmentIndex(fieldID int64) error { return 
errors.New(errMsg) } - status := C.DropSealedSegmentIndex(s.segmentPtr, C.long(fieldID)) + status := C.DropSealedSegmentIndex(s.segmentPtr, C.int64_t(fieldID)) if err := HandleCStatus(&status, "DropSealedSegmentIndex failed"); err != nil { return err } diff --git a/internal/storage/cwrapper/CMakeLists.txt b/internal/storage/cwrapper/CMakeLists.txt index 503d6256cb..445543be3c 100644 --- a/internal/storage/cwrapper/CMakeLists.txt +++ b/internal/storage/cwrapper/CMakeLists.txt @@ -115,8 +115,11 @@ macro( build_arrow ) target_link_libraries(parquet INTERFACE arrow ) endmacro() - -build_arrow() +if (MSYS) + message("Using system arrow in msys") +else () + build_arrow() +endif () add_library(wrapper STATIC) target_sources(wrapper PUBLIC ParquetWrapper.cpp PayloadStream.cpp) diff --git a/internal/storage/payload.go b/internal/storage/payload.go index 57110c4503..0bb55e0b86 100644 --- a/internal/storage/payload.go +++ b/internal/storage/payload.go @@ -367,7 +367,7 @@ func NewPayloadReader(colType schemapb.DataType, buf []byte) (*PayloadReader, er if len(buf) == 0 { return nil, errors.New("create Payload reader failed, buffer is empty") } - r := C.NewPayloadReader(C.int(colType), (*C.uint8_t)(unsafe.Pointer(&buf[0])), C.long(len(buf))) + r := C.NewPayloadReader(C.int(colType), (*C.uint8_t)(unsafe.Pointer(&buf[0])), C.int64_t(len(buf))) if r == nil { return nil, errors.New("failed to read parquet from buffer") }