Skip to content

Commit

Permalink
[WIP] Reorganize Cython to separate C++ bindings and make Cython clas…
Browse files Browse the repository at this point in the history
…ses public
  • Loading branch information
Matt711 committed Sep 10, 2024
1 parent 6729def commit 388042f
Show file tree
Hide file tree
Showing 26 changed files with 2,953 additions and 1 deletion.
4 changes: 3 additions & 1 deletion python/rmm/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -30,4 +30,6 @@ rapids_cython_init()
# Propagate the configured RMM logging level into spdlog's compile-time filter.
add_compile_definitions("SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_${RMM_LOGGING_LEVEL}")

# rmm/_lib has been split into rmm/cpp (C++ declarations) and rmm/python
# (public Cython classes); the stale _lib subdirectory is no longer built.
add_subdirectory(rmm/_cuda)
add_subdirectory(rmm/cpp)
add_subdirectory(rmm/python)
35 changes: 35 additions & 0 deletions python/rmm/rmm/cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# =============================================================================
# Copyright (c) 2022-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================

# Cython extension modules that expose the librmm C++ API.
set(cython_modules cpp_logger.pyx cpp_memory_resource.pyx)

# Every extension module links against librmm itself.
set(rmm_link_targets rmm::rmm)

# Build all of the Cython targets.
rapids_cython_create_modules(
  SOURCE_FILES "${cython_modules}" LINKED_LIBRARIES "${rmm_link_targets}" CXX)

# Default every symbol in the generated extensions to hidden visibility so they
# cannot collide with symbols loaded from other DSOs.
foreach(_extension_target IN LISTS RAPIDS_CYTHON_CREATED_TARGETS)
  set_target_properties(
    ${_extension_target} PROPERTIES C_VISIBILITY_PRESET hidden CXX_VISIBILITY_PRESET hidden)
endforeach()

# Shared library providing the allocate/deallocate hooks loaded at runtime by
# PyTorch's CUDAPluggableAllocator via dlopen.
add_library(_torch_allocator SHARED _torch_allocator.cpp)
# Name the output exactly _torch_allocator.so (strip the "lib" prefix).
set_target_properties(_torch_allocator PROPERTIES PREFIX "" SUFFIX ".so")
target_link_libraries(_torch_allocator PRIVATE rmm::rmm)
# Install alongside the Python sources: reuse this directory's path relative to
# the project root as the install destination.
cmake_path(RELATIVE_PATH CMAKE_CURRENT_SOURCE_DIR BASE_DIRECTORY "${PROJECT_SOURCE_DIR}"
           OUTPUT_VARIABLE _torch_allocator_location)
install(TARGETS _torch_allocator DESTINATION "${_torch_allocator_location}")
13 changes: 13 additions & 0 deletions python/rmm/rmm/cpp/__init__.pxd
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
64 changes: 64 additions & 0 deletions python/rmm/rmm/cpp/_torch_allocator.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
/*
* Copyright (c) 2023-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <rmm/cuda_device.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

#include <cuda_runtime_api.h>

// These signatures must match those required by CUDAPluggableAllocator in
// github.com/pytorch/pytorch/blob/main/torch/csrc/cuda/CUDAPluggableAllocator.h
// Since the loading is done at runtime via dlopen, no error checking
// can be performed for mismatching signatures.

/**
* @brief Allocate memory of at least \p size bytes.
*
* @throws rmm::bad_alloc When the requested allocation cannot be satisfied.
*
* @param size The number of bytes to allocate
* @param device The device whose memory resource one should use
* @param stream CUDA stream to perform allocation on
* @return Pointer to the newly allocated memory
*/
extern "C" void* allocate(std::size_t size, int device, void* stream)
{
rmm::cuda_device_id const device_id{device};
rmm::cuda_set_device_raii with_device{device_id};
auto mr = rmm::mr::get_per_device_resource_ref(device_id);
return mr.allocate_async(
size, rmm::CUDA_ALLOCATION_ALIGNMENT, rmm::cuda_stream_view{static_cast<cudaStream_t>(stream)});
}

/**
* @brief Deallocate memory pointed to by \p ptr.
*
* @param ptr Pointer to be deallocated
* @param size The number of bytes in the allocation
* @param device The device whose memory resource one should use
* @param stream CUDA stream to perform deallocation on
*/
extern "C" void deallocate(void* ptr, std::size_t size, int device, void* stream)
{
rmm::cuda_device_id const device_id{device};
rmm::cuda_set_device_raii with_device{device_id};
auto mr = rmm::mr::get_per_device_resource_ref(device_id);
mr.deallocate_async(ptr,
size,
rmm::CUDA_ALLOCATION_ALIGNMENT,
rmm::cuda_stream_view{static_cast<cudaStream_t>(stream)});
}
28 changes: 28 additions & 0 deletions python/rmm/rmm/cpp/cpp_cuda_stream.pxd
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Copyright (c) 2020-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from cuda.ccudart cimport cudaStream_t
from libcpp cimport bool

from rmm.cpp.cpp_cuda_stream_view cimport cuda_stream_view


# Declarations for rmm::cuda_stream (rmm/cuda_stream.hpp): an owning wrapper
# around a CUDA stream, destroyed when the object goes out of scope.
cdef extern from "rmm/cuda_stream.hpp" namespace "rmm" nogil:
    cdef cppclass cuda_stream:
        cuda_stream() except +              # creates a new stream; may throw
        bool is_valid() except +            # true if the wrapped stream handle is non-null
        cudaStream_t value() except +       # the raw CUDA stream handle
        cuda_stream_view view() except +    # non-owning view of this stream
        void synchronize() except +         # blocks until the stream's work completes
        void synchronize_no_throw()         # as above, but never raises (nogil-safe)
23 changes: 23 additions & 0 deletions python/rmm/rmm/cpp/cpp_cuda_stream_pool.pxd
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Copyright (c) 2021-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm.cpp.cpp_cuda_stream_view cimport cuda_stream_view


# Declarations for rmm::cuda_stream_pool (rmm/cuda_stream_pool.hpp): a fixed-size
# pool of CUDA streams handed out round-robin style via get_stream().
cdef extern from "rmm/cuda_stream_pool.hpp" namespace "rmm" nogil:
    cdef cppclass cuda_stream_pool:
        cuda_stream_pool(size_t pool_size)                      # creates pool_size streams
        cuda_stream_view get_stream()                           # next stream from the pool
        cuda_stream_view get_stream(size_t stream_id) except +  # specific stream by index
        size_t get_pool_size()                                  # number of streams in the pool
32 changes: 32 additions & 0 deletions python/rmm/rmm/cpp/cpp_cuda_stream_view.pxd
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from cuda.ccudart cimport cudaStream_t
from libcpp cimport bool


# Declarations for rmm::cuda_stream_view (rmm/cuda_stream_view.hpp): a
# non-owning view of a CUDA stream, plus the three well-known stream constants.
cdef extern from "rmm/cuda_stream_view.hpp" namespace "rmm" nogil:
    cdef cppclass cuda_stream_view:
        cuda_stream_view()                  # views the default stream
        cuda_stream_view(cudaStream_t)      # views an existing raw stream handle
        cudaStream_t value()                # the underlying raw handle
        bool is_default()                   # true for the legacy default stream
        bool is_per_thread_default()        # true for the per-thread default stream
        void synchronize() except +         # blocks until the viewed stream's work completes

    # Equality compares the underlying raw stream handles.
    cdef bool operator==(cuda_stream_view const, cuda_stream_view const)

    # Views of CUDA's predefined streams.
    const cuda_stream_view cuda_stream_default
    const cuda_stream_view cuda_stream_legacy
    const cuda_stream_view cuda_stream_per_thread
58 changes: 58 additions & 0 deletions python/rmm/rmm/cpp/cpp_device_buffer.pxd
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# Copyright (c) 2019-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from rmm.cpp.cpp_cuda_stream_view cimport cuda_stream_view
from rmm.cpp.cpp_memory_resource cimport device_memory_resource


# Device identification and the current-device query (rmm/mr/device/per_device_resource.hpp).
cdef extern from "rmm/mr/device/per_device_resource.hpp" namespace "rmm" nogil:
    cdef cppclass cuda_device_id:
        ctypedef int value_type
        cuda_device_id()                    # NOTE(review): default-constructed id value not shown here
        cuda_device_id(value_type id)       # wraps an integer CUDA device ordinal
        value_type value()                  # the wrapped ordinal

    cdef cuda_device_id get_current_cuda_device()

# Managed-memory prefetch hint (rmm/prefetch.hpp).
cdef extern from "rmm/prefetch.hpp" namespace "rmm" nogil:
    cdef void prefetch(const void* ptr,
                       size_t bytes,
                       cuda_device_id device,
                       cuda_stream_view stream) except +

# Declarations for rmm::device_buffer (rmm/device_buffer.hpp): an untyped,
# stream-ordered, owning device memory allocation.
cdef extern from "rmm/device_buffer.hpp" namespace "rmm" nogil:
    cdef cppclass device_buffer:
        device_buffer()
        # Uninitialized allocation of `size` bytes on `stream` from the given resource.
        device_buffer(
            size_t size,
            cuda_stream_view stream,
            device_memory_resource *
        ) except +
        # Allocate and copy `size` bytes from `source_data`.
        device_buffer(
            const void* source_data,
            size_t size,
            cuda_stream_view stream,
            device_memory_resource *
        ) except +
        # Deep-copy an existing buffer.
        device_buffer(
            const device_buffer buf,
            cuda_stream_view stream,
            device_memory_resource *
        ) except +
        void reserve(size_t new_capacity, cuda_stream_view stream) except +
        void resize(size_t new_size, cuda_stream_view stream) except +
        void shrink_to_fit(cuda_stream_view stream) except +    # reallocates so capacity == size
        void* data()        # pointer to the device memory
        size_t size()       # bytes in use
        size_t capacity()   # bytes allocated
39 changes: 39 additions & 0 deletions python/rmm/rmm/cpp/cpp_device_uvector.pxd
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
# Copyright (c) 2021-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The sibling modules added in this reorganization are named cpp_*.pxd
# (see cpp_cuda_stream_view.pxd, cpp_device_buffer.pxd); the previous
# un-prefixed module paths do not exist under rmm.cpp and would fail to cimport.
from rmm.cpp.cpp_cuda_stream_view cimport cuda_stream_view
from rmm.cpp.cpp_device_buffer cimport device_buffer
from rmm.cpp.cpp_memory_resource cimport device_memory_resource


# Declarations for rmm::device_uvector<T>: a typed, uninitialized,
# stream-ordered device vector. device_uvector is declared in
# rmm/device_uvector.hpp (device_buffer.hpp only declares device_buffer).
cdef extern from "rmm/device_uvector.hpp" namespace "rmm" nogil:
    cdef cppclass device_uvector[T]:
        device_uvector(size_t size, cuda_stream_view stream) except +
        T* element_ptr(size_t index)    # device pointer to element `index`
        # Synchronous/asynchronous single-element writes from host to device.
        void set_element(size_t element_index, const T& v, cuda_stream_view s)
        void set_element_async(
            size_t element_index,
            const T& v,
            cuda_stream_view s
        ) except +
        # Single-element device-to-host reads.
        T front_element(cuda_stream_view s) except +
        T back_element(cuda_stream_view s) except +
        void reserve(size_t new_capacity, cuda_stream_view stream) except +
        void resize(size_t new_size, cuda_stream_view stream) except +
        void shrink_to_fit(cuda_stream_view stream) except +
        device_buffer release()                     # transfers ownership of the storage
        size_t capacity()
        T* data()
        size_t size()
        device_memory_resource* memory_resource()   # resource used for allocation
66 changes: 66 additions & 0 deletions python/rmm/rmm/cpp/cpp_logger.pyx
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
# Copyright (c) 2023-2024, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from libcpp cimport bool


# Bindings for spdlog's level enum; the string mappings tie each Python name to
# the exact spdlog enumerator, so names/order must match spdlog/common.h.
cdef extern from "spdlog/common.h" namespace "spdlog::level" nogil:
    cpdef enum logging_level "spdlog::level::level_enum":
        """
        The debug logging level for RMM.

        Debug logging prints messages to a log file. See
        `Debug Logging <https://github.com/rapidsai/rmm#debug-logging>`_
        for more information.

        Valid levels, in decreasing order of verbosity, are TRACE, DEBUG,
        INFO, WARN, ERR, CRITICAL, and OFF. Default is INFO.

        Examples
        --------
        >>> import rmm
        >>> rmm.logging_level.DEBUG
        <logging_level.DEBUG: 1>
        >>> rmm.logging_level.DEBUG.value
        1
        >>> rmm.logging_level.DEBUG.name
        'DEBUG'

        See Also
        --------
        set_logging_level : Set the debug logging level
        get_logging_level : Get the current debug logging level
        """
        TRACE "spdlog::level::trace"
        DEBUG "spdlog::level::debug"
        INFO "spdlog::level::info"
        WARN "spdlog::level::warn"
        ERR "spdlog::level::err"
        CRITICAL "spdlog::level::critical"
        OFF "spdlog::level::off"


# Minimal binding of spdlog::logger: only the level/flush controls RMM needs.
cdef extern from "spdlog/spdlog.h" namespace "spdlog" nogil:
    cdef cppclass spdlog_logger "spdlog::logger":
        spdlog_logger() except +
        void set_level(logging_level level)         # messages below `level` are dropped
        logging_level level()                       # current threshold
        void flush() except +
        void flush_on(logging_level level)          # auto-flush at or above `level`
        logging_level flush_level()
        bool should_log(logging_level msg_level)    # true if msg_level passes the threshold


# RMM's process-wide spdlog logger instance.
cdef extern from "rmm/logger.hpp" namespace "rmm" nogil:
    cdef spdlog_logger& logger() except +
Loading

0 comments on commit 388042f

Please sign in to comment.