Skip to content

Commit

Permalink
Merge branch 'weaviate' into c-add-pinned-memory
Browse files Browse the repository at this point in the history
  • Loading branch information
ajit283 committed Aug 30, 2024
2 parents 61b0229 + 9c5bdde commit 4a9d25a
Show file tree
Hide file tree
Showing 36 changed files with 2,296 additions and 20 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -74,3 +74,4 @@ cagra_index
ivf_flat_index
ivf_pq_index

.aider*
10 changes: 9 additions & 1 deletion build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,15 @@ ARGS=$*
# scripts, and that this script resides in the repo dir!
REPODIR=$(cd $(dirname $0); pwd)

VALIDARGS="clean libcuvs python rust docs tests bench-ann examples --uninstall -v -g -n --compile-static-lib --allgpuarch --no-nvtx --show_depr_warn --incl-cache-stats --time -h"
VALIDARGS="clean libcuvs python rust go docs tests bench-ann examples --uninstall -v -g -n --compile-static-lib --allgpuarch --no-nvtx --show_depr_warn --incl-cache-stats --time -h"
HELP="$0 [<target> ...] [<flag> ...] [--cmake-args=\"<args>\"] [--cache-tool=<tool>] [--limit-tests=<targets>] [--limit-bench-ann=<targets>] [--build-metrics=<filename>]
where <target> is:
clean - remove all existing build artifacts and configuration (start over)
libcuvs - build the cuvs C++ code only. Also builds the C-wrapper library
around the C++ code.
python - build the cuvs Python package
rust - build the cuvs Rust bindings
go - build the cuvs Go bindings
docs - build the documentation
tests - build the tests
bench-ann - build end-to-end ann benchmarks
Expand Down Expand Up @@ -426,6 +427,13 @@ if (( ${NUMARGS} == 0 )) || hasArg rust; then
cargo test
fi

# Build the cuvs Go bindings
if (( ${NUMARGS} == 0 )) || hasArg go; then
cd ${REPODIR}/go
go build ./...
go test ./...
fi

export RAPIDS_VERSION="$(sed -E -e 's/^([0-9]{2})\.([0-9]{2})\.([0-9]{2}).*$/\1.\2.\3/' "${REPODIR}/VERSION")"
export RAPIDS_VERSION_MAJOR_MINOR="$(sed -E -e 's/^([0-9]{2})\.([0-9]{2})\.([0-9]{2}).*$/\1.\2/' "${REPODIR}/VERSION")"

Expand Down
33 changes: 33 additions & 0 deletions ci/build_go.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
#!/bin/bash
# Copyright (c) 2024, NVIDIA CORPORATION.
#
# CI entry point: builds and tests the cuvs Go bindings inside a fresh
# conda environment, reusing the C++ artifacts from earlier CI jobs.

set -euo pipefail

rapids-logger "Create test conda environment"
. /opt/conda/etc/profile.d/conda.sh


rapids-mamba-retry env create --yes -n go

# seeing failures on activating the environment here on unbound locals
# apply workaround from https://github.com/conda/conda/issues/8186#issuecomment-532874667
set +eu
conda activate go
set -eu

rapids-print-env

# cgo needs the cuvs C headers/libraries and the CUDA runtime at build time,
# and matching rpath entries so the test binaries resolve them at run time.
# FIX: a space was missing between "$CONDA_PREFIX/lib" and the second
# "-Wl,-rpath" entry, which produced a bogus rpath of "$CONDA_PREFIX/lib-Wl"
# and left the conda lib dir off the runtime search path.
export CGO_CFLAGS="-I$CONDA_PREFIX/include -I/usr/local/cuda/include -I/usr/local/include"
export CGO_LDFLAGS="-L$CONDA_PREFIX/lib -L/usr/local/cuda/lib64 -lcuvs_c -lcudart -Wl,-rpath,$CONDA_PREFIX/lib -Wl,-rpath,/usr/local/cuda/lib64"

rapids-logger "Downloading artifacts from previous jobs"
CPP_CHANNEL=$(rapids-download-conda-from-s3 cpp)

# installing libcuvs/libraft will speed up the go build substantially
rapids-mamba-retry install \
  --channel "${CPP_CHANNEL}" \
  libcuvs \
  libraft \
  cuvs

bash ./build.sh go
4 changes: 3 additions & 1 deletion cpp/include/cuvs/core/c_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -140,8 +140,10 @@ cuvsError_t cuvsRMMFree(cuvsResources_t res, void* ptr, size_t bytes);
* available memory
* @return cuvsError_t
*/

cuvsError_t cuvsRMMPoolMemoryResourceEnable(int initial_pool_size_percent,
int max_pool_size_percent);
int max_pool_size_percent,
int managed);
/**
* @brief Resets the memory resource to use the default memory resource (cuda_memory_resource)
* @return cuvsError_t
Expand Down
65 changes: 65 additions & 0 deletions cpp/include/cuvs/neighbors/cagra.h
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,42 @@ cuvsError_t cuvsCagraCompressionParamsCreate(cuvsCagraCompressionParams_t* param
*/
cuvsError_t cuvsCagraCompressionParamsDestroy(cuvsCagraCompressionParams_t params);

/**
* @}
*/

/**
 * @defgroup cagra_c_extend_params C API for CUDA ANN Graph-based nearest neighbor search
 * @{
 */
/**
 * @brief Supplemental parameters to extend CAGRA Index
 *
 */
struct cuvsCagraExtendParams {
  /** Chunk size used when adding the additional dataset to the index;
   *  0 selects the implementation default (forwarded to
   *  cuvs::neighbors::cagra::extend_params::max_chunk_size).
   *  NOTE(review): the previous comment ("Degree of input graph for
   *  pruning") described a different parameter — confirm intended
   *  semantics against the C++ extend_params documentation. */
  uint32_t max_chunk_size;
};

typedef struct cuvsCagraExtendParams* cuvsCagraExtendParams_t;

/**
 * @brief Allocate CAGRA Extend params, and populate with default values
 *
 * @param[out] params cuvsCagraExtendParams_t to allocate
 * @return cuvsError_t
 */
cuvsError_t cuvsCagraExtendParamsCreate(cuvsCagraExtendParams_t* params);

/**
 * @brief De-allocate CAGRA Extend params
 *
 * @param[in] params handle previously allocated by cuvsCagraExtendParamsCreate
 * @return cuvsError_t
 */
cuvsError_t cuvsCagraExtendParamsDestroy(cuvsCagraExtendParams_t params);

/**
* @}
*/
Expand Down Expand Up @@ -327,6 +363,35 @@ cuvsError_t cuvsCagraBuild(cuvsResources_t res,
* @}
*/

/**
 * @addtogroup cagra_c_extend_params
 * @{
 */

/**
 * @brief Extend a CAGRA index with a `DLManagedTensor` which has underlying
 * `DLDeviceType` equal to `kDLCUDA`, `kDLCUDAHost`, `kDLCUDAManaged`,
 * or `kDLCPU`. Also, acceptable underlying types are:
 * 1. `kDLDataType.code == kDLFloat` and `kDLDataType.bits = 32`
 * 2. `kDLDataType.code == kDLInt` and `kDLDataType.bits = 8`
 * 3. `kDLDataType.code == kDLUInt` and `kDLDataType.bits = 8`
 *
 * @param[in] res cuvsResources_t opaque C handle
 * @param[in] params cuvsCagraExtendParams_t used to extend CAGRA index
 * @param[in] additional_dataset DLManagedTensor* additional dataset
 * @param[out] return_dataset DLManagedTensor* written with the dataset of the
 *             extended index — presumably the original rows followed by the
 *             additional rows; confirm against the C++ extend() contract
 * @param[inout] index cuvsCagraIndex_t CAGRA index, extended in place
 * @return cuvsError_t
 */
cuvsError_t cuvsCagraExtend(cuvsResources_t res,
                            cuvsCagraExtendParams_t params,
                            DLManagedTensor* additional_dataset,
                            DLManagedTensor* return_dataset,
                            cuvsCagraIndex_t index);

/**
 * @}
 */

/**
* @defgroup cagra_c_index_search C API for CUDA ANN Graph-based nearest neighbor search
* @{
Expand Down
36 changes: 26 additions & 10 deletions cpp/src/core/c_api.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,9 @@
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>
#include <rmm/mr/device/managed_memory_resource.hpp>
#include <rmm/mr/device/owning_wrapper.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <rmm/mr/host/pinned_memory_resource.hpp>
Expand Down Expand Up @@ -66,6 +69,7 @@ extern "C" cuvsError_t cuvsStreamSync(cuvsResources_t res)
});
}


extern "C" cuvsError_t cuvsRMMAlloc(cuvsResources_t res, void** ptr, size_t bytes)
{
return cuvs::core::translate_exceptions([=] {
Expand All @@ -84,26 +88,38 @@ extern "C" cuvsError_t cuvsRMMFree(cuvsResources_t res, void* ptr, size_t bytes)
});
}

thread_local std::unique_ptr<rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>> pool_mr;

thread_local std::shared_ptr<
rmm::mr::owning_wrapper<rmm::mr::pool_memory_resource<rmm::mr::device_memory_resource>,
rmm::mr::device_memory_resource>>
pool_mr;

extern "C" cuvsError_t cuvsRMMPoolMemoryResourceEnable(int initial_pool_size_percent,
int max_pool_size_percent)
int max_pool_size_percent,
int managed)
{
return cuvs::core::translate_exceptions([=] {
// Upstream memory resource needs to be a cuda_memory_resource
auto cuda_mr = rmm::mr::get_current_device_resource();
auto* cuda_mr_casted = dynamic_cast<rmm::mr::cuda_memory_resource*>(cuda_mr);
if (cuda_mr_casted == nullptr) {
throw std::runtime_error("Current memory resource is not a cuda_memory_resource");
}
auto initial_size = rmm::percent_of_free_device_memory(initial_pool_size_percent);
auto max_size = rmm::percent_of_free_device_memory(max_pool_size_percent);
pool_mr = std::make_unique<rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource>>(
cuda_mr_casted, initial_size, max_size);

auto mr = std::shared_ptr<rmm::mr::device_memory_resource>();
if (managed) {
mr = std::static_pointer_cast<rmm::mr::device_memory_resource>(
std::make_shared<rmm::mr::managed_memory_resource>());
} else {
mr = std::static_pointer_cast<rmm::mr::device_memory_resource>(
std::make_shared<rmm::mr::cuda_memory_resource>());
}

pool_mr =
rmm::mr::make_owning_wrapper<rmm::mr::pool_memory_resource>(mr, initial_size, max_size);


rmm::mr::set_current_device_resource(pool_mr.get());
});
}


extern "C" cuvsError_t cuvsRMMMemoryResourceReset()
{
return cuvs::core::translate_exceptions([=] {
Expand Down
71 changes: 71 additions & 0 deletions cpp/src/neighbors/cagra_c.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,42 @@ void* _build(cuvsResources_t res, cuvsCagraIndexParams params, DLManagedTensor*
return index;
}

/**
 * Typed implementation behind cuvsCagraExtend: converts the DLPack tensors
 * into raft mdspan views and forwards to cuvs::neighbors::cagra::extend.
 *
 * Both tensors must live in the same memory space: the first branch requires
 * the additional dataset AND the return tensor to be device-compatible, the
 * second requires both to be host-compatible; anything else fails.
 *
 * NOTE(review): in the host branch the return tensor is still converted as a
 * raft::device_matrix_view even though it passed the host-compatibility
 * check — confirm this is intentional (i.e. extend() always writes the
 * returned dataset to device memory).
 */
template <typename T>
void _extend(cuvsResources_t res,
             cuvsCagraExtendParams params,
             cuvsCagraIndex index,
             DLManagedTensor* additional_dataset_tensor,
             DLManagedTensor* return_tensor)
{
  auto dataset          = additional_dataset_tensor->dl_tensor;
  auto return_dl_tensor = return_tensor->dl_tensor;
  // The opaque C handles wrap the C++ index and the raft resources object.
  auto index_ptr = reinterpret_cast<cuvs::neighbors::cagra::index<T, uint32_t>*>(index.addr);
  auto res_ptr   = reinterpret_cast<raft::resources*>(res);

  // Translate the C params struct into the C++ extend parameters.
  auto extend_params           = cuvs::neighbors::cagra::extend_params();
  extend_params.max_chunk_size = params.max_chunk_size;

  if (cuvs::core::is_dlpack_device_compatible(dataset) &&
      cuvs::core::is_dlpack_device_compatible(return_dl_tensor)) {
    using mdspan_type        = raft::device_matrix_view<T const, int64_t, raft::row_major>;
    using mdspan_return_type = raft::device_matrix_view<T, int64_t, raft::row_major>;
    auto mds        = cuvs::core::from_dlpack<mdspan_type>(additional_dataset_tensor);
    auto return_mds = cuvs::core::from_dlpack<mdspan_return_type>(return_tensor);
    cuvs::neighbors::cagra::extend(*res_ptr, extend_params, mds, *index_ptr, return_mds);
  } else if (cuvs::core::is_dlpack_host_compatible(dataset) &&
             cuvs::core::is_dlpack_host_compatible(return_dl_tensor)) {
    using mdspan_type        = raft::host_matrix_view<T const, int64_t, raft::row_major>;
    using mdspan_return_type = raft::device_matrix_view<T, int64_t, raft::row_major>;
    auto mds        = cuvs::core::from_dlpack<mdspan_type>(additional_dataset_tensor);
    auto return_mds = cuvs::core::from_dlpack<mdspan_return_type>(return_tensor);
    cuvs::neighbors::cagra::extend(*res_ptr, extend_params, mds, *index_ptr, return_mds);
  } else {
    RAFT_FAIL("Unsupported dataset DLtensor dtype: %d and bits: %d",
              dataset.dtype.code,
              dataset.dtype.bits);
  }
}

template <typename T>
void _search(cuvsResources_t res,
cuvsCagraSearchParams params,
Expand Down Expand Up @@ -190,6 +226,30 @@ extern "C" cuvsError_t cuvsCagraBuild(cuvsResources_t res,
});
}

// C entry point for extending an existing CAGRA index with additional vectors.
// Inspects the dtype of the additional dataset and dispatches to the typed
// _extend<T> implementation; float32, int8 and uint8 elements are supported.
extern "C" cuvsError_t cuvsCagraExtend(cuvsResources_t res,
                                       cuvsCagraExtendParams_t params,
                                       DLManagedTensor* additional_dataset_tensor,
                                       DLManagedTensor* return_dataset_tensor,
                                       cuvsCagraIndex_t index_c_ptr)
{
  return cuvs::core::translate_exceptions([=] {
    auto index       = *index_c_ptr;
    const auto dtype = additional_dataset_tensor->dl_tensor.dtype;

    const bool is_f32 = (dtype.code == kDLFloat) && (dtype.bits == 32);
    const bool is_i8  = (dtype.code == kDLInt) && (dtype.bits == 8);
    const bool is_u8  = (dtype.code == kDLUInt) && (dtype.bits == 8);

    if (is_f32) {
      _extend<float>(res, *params, index, additional_dataset_tensor, return_dataset_tensor);
    } else if (is_i8) {
      _extend<int8_t>(res, *params, index, additional_dataset_tensor, return_dataset_tensor);
    } else if (is_u8) {
      _extend<uint8_t>(res, *params, index, additional_dataset_tensor, return_dataset_tensor);
    } else {
      RAFT_FAIL("Unsupported dataset DLtensor dtype: %d and bits: %d",
                dtype.code,
                dtype.bits);
    }
  });
}

extern "C" cuvsError_t cuvsCagraSearch(cuvsResources_t res,
cuvsCagraSearchParams_t params,
cuvsCagraIndex_t index_c_ptr,
Expand Down Expand Up @@ -265,6 +325,17 @@ extern "C" cuvsError_t cuvsCagraCompressionParamsDestroy(cuvsCagraCompressionPar
return cuvs::core::translate_exceptions([=] { delete params; });
}

// Allocates an extend-params struct on the heap and fills in the defaults
// (max_chunk_size = 0, i.e. "let the implementation choose").
extern "C" cuvsError_t cuvsCagraExtendParamsCreate(cuvsCagraExtendParams_t* params)
{
  return cuvs::core::translate_exceptions([=] {
    auto* defaults           = new cuvsCagraExtendParams{};
    defaults->max_chunk_size = 0;
    *params                  = defaults;
  });
}

// Releases a params struct created by cuvsCagraExtendParamsCreate.
// `delete` on a null handle is a no-op, so passing nullptr is harmless.
extern "C" cuvsError_t cuvsCagraExtendParamsDestroy(cuvsCagraExtendParams_t params)
{
  return cuvs::core::translate_exceptions([params] { delete params; });
}

extern "C" cuvsError_t cuvsCagraSearchParamsCreate(cuvsCagraSearchParams_t* params)
{
return cuvs::core::translate_exceptions([=] {
Expand Down
5 changes: 4 additions & 1 deletion cpp/test/core/c_api.c
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,8 @@ int main()
if (free_error == CUVS_ERROR) { exit(EXIT_FAILURE); }

// Enable pool memory resource
cuvsError_t pool_error = cuvsRMMPoolMemoryResourceEnable(10, 100);

cuvsError_t pool_error = cuvsRMMPoolMemoryResourceEnable(10, 100, 1);
if (pool_error == CUVS_ERROR) { exit(EXIT_FAILURE); }

// Allocate memory again
Expand All @@ -55,7 +56,9 @@ int main()
if (free_error_pool == CUVS_ERROR) { exit(EXIT_FAILURE); }

// Reset pool memory resource

cuvsError_t reset_error = cuvsRMMMemoryResourceReset();

if (reset_error == CUVS_ERROR) { exit(EXIT_FAILURE); }

// Alloc memory on host (pinned)
Expand Down
Loading

0 comments on commit 4a9d25a

Please sign in to comment.