Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Throw rmm::out_of_memory when we know for sure #894

Merged
Merged 5 commits on
Oct 26, 2021
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
54 changes: 40 additions & 14 deletions include/rmm/detail/error.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -63,11 +63,21 @@ class bad_alloc : public std::bad_alloc {
/**
* @brief Exception thrown when RMM runs out of memory
*
* This is thrown under the following conditions:
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't like having a list in a comment that we have to maintain. I think instead we should make it very clear that this error should only be thrown when we know for sure a resource is out of memory.

I don't know for sure that cudaErrorMemoryAllocation always means OOM, BTW. Is this documented somewhere?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done.

According to the CUDA Runtime API doc:

cudaErrorMemoryAllocation = 2
The API call failed because it was unable to allocate enough memory to perform the requested operation.

* - For `arena_memory_resource`, when the global arena can no longer allocate more memory from
* upstream.
* - For `cuda_async_memory_resource`, when `cudaMallocFromPoolAsync` returns
* `cudaErrorMemoryAllocation`.
* - For `cuda_memory_resource`, when `cudaMalloc` returns `cudaErrorMemoryAllocation`.
* - For `limiting_resource_adapter`, when total allocated bytes exceeds the limit.
* - For `managed_memory_resource`, when `cudaMallocManaged` returns `cudaErrorMemoryAllocation`.
* - For `pool_memory_resource`, when the pool can no longer allocate more memory from upstream.
*/
class out_of_memory : public bad_alloc {
public:
out_of_memory(const char* msg) : bad_alloc{msg} {}
out_of_memory(std::string const& msg) : bad_alloc{msg} {}
using bad_alloc::bad_alloc;
jrhemstad marked this conversation as resolved.
Show resolved Hide resolved
};

/**
Expand Down Expand Up @@ -157,36 +167,52 @@ class out_of_range : public std::out_of_range {
*
* // Throws `std::runtime_error` if `cudaMalloc` fails
* RMM_CUDA_TRY(cudaMalloc(&p, 100), std::runtime_error);
*
* // Throws `rmm::bad_alloc` if `cudaMalloc` fails, but throw `rmm::out_of_memory` if
* // the error code is `cudaErrorMemoryAllocation`
* RMM_CUDA_TRY(cudaMalloc(&p, 100), rmm::bad_alloc, cudaErrorMemoryAllocation, rmm::out_of_memory);
* ```
*
*/
#define RMM_CUDA_TRY(...) \
GET_RMM_CUDA_TRY_MACRO(__VA_ARGS__, RMM_CUDA_TRY_4, INVALID, RMM_CUDA_TRY_2, RMM_CUDA_TRY_1) \
#define RMM_CUDA_TRY(...) \
GET_RMM_CUDA_TRY_MACRO(__VA_ARGS__, RMM_CUDA_TRY_2, RMM_CUDA_TRY_1) \
(__VA_ARGS__)
#define GET_RMM_CUDA_TRY_MACRO(_1, _2, _3, _4, NAME, ...) NAME
#define RMM_CUDA_TRY_4(_call, _exception_type, _custom_error, _custom_exception_type) \
#define GET_RMM_CUDA_TRY_MACRO(_1, _2, NAME, ...) NAME
#define RMM_CUDA_TRY_2(_call, _exception_type) \
do { \
cudaError_t const error = (_call); \
if (cudaSuccess != error) { \
cudaGetLastError(); \
/*NOLINTNEXTLINE(bugprone-macro-parentheses)*/ \
throw _exception_type{std::string{"CUDA error at: "} + __FILE__ + ":" + \
RMM_STRINGIFY(__LINE__) + ": " + cudaGetErrorName(error) + " " + \
cudaGetErrorString(error)}; \
} \
} while (0)
#define RMM_CUDA_TRY_1(_call) RMM_CUDA_TRY_2(_call, rmm::cuda_error)

/**
* @brief Error checking macro for CUDA memory allocation calls.
*
* Invokes a CUDA memory allocation function call. If the call does not return
* `cudaSuccess`, invokes cudaGetLastError() to clear the error and throws an
* exception detailing the CUDA error that occurred
*
* Defaults to throwing `rmm::bad_alloc`, but when `cudaErrorMemoryAllocation` is returned,
* `rmm::out_of_memory` is thrown instead.
*/
#define RMM_CUDA_TRY_ALLOC(_call) \
do { \
cudaError_t const error = (_call); \
if (cudaSuccess != error) { \
cudaGetLastError(); \
auto const msg = std::string{"CUDA error at: "} + __FILE__ + ":" + RMM_STRINGIFY(__LINE__) + \
": " + cudaGetErrorName(error) + " " + cudaGetErrorString(error); \
if ((_custom_error) == error) { \
if (cudaErrorMemoryAllocation == error) { \
/*NOLINTNEXTLINE(bugprone-macro-parentheses)*/ \
throw _custom_exception_type{msg}; \
throw rmm::out_of_memory{msg}; \
} else { \
/*NOLINTNEXTLINE(bugprone-macro-parentheses)*/ \
throw _exception_type{msg}; \
throw rmm::bad_alloc{msg}; \
} \
} \
} while (0)
#define RMM_CUDA_TRY_2(_call, _exception_type) \
RMM_CUDA_TRY_4(_call, _exception_type, cudaSuccess, rmm::cuda_error)
#define RMM_CUDA_TRY_1(_call) RMM_CUDA_TRY_2(_call, rmm::cuda_error)

/**
* @brief Error checking macro similar to `assert` for CUDA runtime API calls
Expand Down
5 changes: 1 addition & 4 deletions include/rmm/mr/device/cuda_async_memory_resource.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -148,10 +148,7 @@ class cuda_async_memory_resource final : public device_memory_resource {
void* ptr{nullptr};
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
if (bytes > 0) {
RMM_CUDA_TRY(cudaMallocFromPoolAsync(&ptr, bytes, pool_handle(), stream.value()),
rmm::bad_alloc,
cudaErrorMemoryAllocation,
rmm::out_of_memory);
RMM_CUDA_TRY_ALLOC(cudaMallocFromPoolAsync(&ptr, bytes, pool_handle(), stream.value()));
}
#else
(void)bytes;
Expand Down
3 changes: 1 addition & 2 deletions include/rmm/mr/device/cuda_memory_resource.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -67,8 +67,7 @@ class cuda_memory_resource final : public device_memory_resource {
void* do_allocate(std::size_t bytes, cuda_stream_view) override
{
void* ptr{nullptr};
RMM_CUDA_TRY(
cudaMalloc(&ptr, bytes), rmm::bad_alloc, cudaErrorMemoryAllocation, rmm::out_of_memory);
RMM_CUDA_TRY_ALLOC(cudaMalloc(&ptr, bytes));
return ptr;
}

Expand Down
5 changes: 1 addition & 4 deletions include/rmm/mr/device/managed_memory_resource.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,10 +71,7 @@ class managed_memory_resource final : public device_memory_resource {
if (bytes == 0) { return nullptr; }

void* ptr{nullptr};
RMM_CUDA_TRY(cudaMallocManaged(&ptr, bytes),
rmm::bad_alloc,
cudaErrorMemoryAllocation,
rmm::out_of_memory);
RMM_CUDA_TRY_ALLOC(cudaMallocManaged(&ptr, bytes));
return ptr;
}

Expand Down