Commit 3bfc708
Merge pull request BVLC#82 from borisfom/caffe-0.14
Removing CNMEM switches
lukeyeager committed Nov 20, 2015
2 parents 31ee158 + 54811d3 commit 3bfc708
Showing 12 changed files with 1 addition and 142 deletions.
16 changes: 0 additions & 16 deletions 3rdparty/Makefile

This file was deleted.

6 changes: 0 additions & 6 deletions CMakeLists.txt
@@ -22,7 +22,6 @@ include(cmake/ConfigGen.cmake)
# ---[ Options
caffe_option(CPU_ONLY "Build Caffe without CUDA support" OFF) # TODO: rename to USE_CUDA
caffe_option(USE_CUDNN "Build Caffe with cuDNN library support" ON IF NOT CPU_ONLY)
caffe_option(USE_CNMEM "Build Caffe with CNMeM memory pool support" OFF)
caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON)
caffe_option(BUILD_python "Build Python wrapper" ON)
set(python_version "2" CACHE STRING "Specify which Python version to use")
@@ -42,11 +41,6 @@ if(UNIX OR APPLE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall")
endif()

if(USE_CNMEM)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_CNMEM -lcnmem")
message("-- Info: Using CNMEM memory pool configuration")
endif()

if(USE_libstdcpp)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++")
message("-- Warning: forcing libstdc++ (controlled by USE_libstdcpp option in cmake)")
6 changes: 0 additions & 6 deletions Makefile
@@ -325,12 +325,6 @@ ifeq ($(USE_CUDNN), 1)
COMMON_FLAGS += -DUSE_CUDNN
endif

# CNMEM integration
ifeq ($(USE_CNMEM), 1)
LIBRARIES += cnmem
COMMON_FLAGS += -DUSE_CNMEM
endif

# configure IO libraries
ifeq ($(USE_OPENCV), 1)
COMMON_FLAGS += -DUSE_OPENCV
3 changes: 0 additions & 3 deletions Makefile.config.example
@@ -4,9 +4,6 @@
# cuDNN acceleration switch (uncomment to build with cuDNN).
# USE_CUDNN := 1

# CNMeM memory pool switch (experimental, may go away)
# USE_CNMEM := 1

# CPU-only switch (uncomment to build without GPU support).
# CPU_ONLY := 1

6 changes: 0 additions & 6 deletions cmake/ConfigGen.cmake
@@ -77,12 +77,6 @@ function(caffe_generate_export_configs)
list(APPEND DEFINITIONS -DUSE_CUDNN)
endif()

if(NOT HAVE_CNMEM)
set(HAVE_CNMEM FALSE)
else()
list(APPEND DEFINITIONS -DUSE_CNMEM)
endif()

if(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl")
list(APPEND Caffe_DEFINITIONS -DUSE_MKL)
endif()
36 changes: 0 additions & 36 deletions cmake/Cuda.cmake
@@ -188,33 +188,6 @@ function(detect_cuDNN)
endif()
endfunction()

################################################################################################
# Short command for cuDNN detection. Believe it soon will be a part of CUDA toolkit distribution.
# That's why not FindcuDNN.cmake file, but just the macro
# Usage:
# detect_cuDNN()
function(detect_CNMeM)
set(CNMEM_ROOT "" CACHE PATH "CNMeM root folder")

find_path(CNMEM_INCLUDE cnmem.h
PATHS ${CNMEM_ROOT} $ENV{CNMEM_ROOT} ${CUDA_TOOLKIT_INCLUDE}
DOC "Path to CNMeM include directory." )

get_filename_component(__libpath_hist ${CUDA_CUDART_LIBRARY} PATH)
find_library(CNMEM_LIBRARY NAMES libcnmem.so # libcudnn_static.a
PATHS ${CNMEM_ROOT} $ENV{CNMEM_ROOT} ${CNMEM_INCLUDE} ${__libpath_hist}
DOC "Path to CNMeM library.")

if(CNMEM_INCLUDE AND CNMEM_LIBRARY)
set(HAVE_CNMEM TRUE PARENT_SCOPE)
set(CNMEM_FOUND TRUE PARENT_SCOPE)

mark_as_advanced(CNMEM_INCLUDE CNMEM_LIBRARY CNMEM_ROOT)
message(STATUS "Found CNMeM (include: ${CNMEM_INCLUDE}, library: ${CNMEM_LIBRARY})")
endif()
endfunction()


################################################################################################
### Non macro section
################################################################################################
@@ -242,15 +215,6 @@ if(USE_CUDNN)
endif()
endif()

if(USE_CNMEM)
detect_CNMeM()
if(HAVE_CNMEM)
add_definitions(-DUSE_CNMEM)
include_directories(SYSTEM ${CNMEM_INCLUDE})
list(APPEND Caffe_LINKER_LIBS ${CNMEM_LIBRARY})
endif()
endif()

# setting nvcc arch flags
caffe_select_nvcc_arch_flags(NVCC_FLAGS_EXTRA)
list(APPEND CUDA_NVCC_FLAGS ${NVCC_FLAGS_EXTRA})
5 changes: 0 additions & 5 deletions cmake/Summary.cmake
@@ -146,11 +146,6 @@ function(caffe_print_configuration_summary)
else()
caffe_status(" cuDNN : Disabled")
endif()
if (USE_CNMEM)
caffe_status(" CNMeM : " HAVE_CNMEM THEN "Yes" ELSE "Not found")
else()
caffe_status(" CNMeM : Disabled")
endif()
caffe_status("")
endif()
if(HAVE_PYTHON)
3 changes: 1 addition & 2 deletions cmake/Templates/CaffeConfig.cmake.in
@@ -15,7 +15,6 @@
#
# Caffe_HAVE_CUDA - signals about CUDA support
# Caffe_HAVE_CUDNN - signals about cuDNN support
# Caffe_HAVE_CNMEM - signals about CNMeM support
#
#
#
@@ -60,4 +59,4 @@ set(Caffe_DEFINITIONS "@Caffe_DEFINITIONS@")
set(Caffe_CPU_ONLY @CPU_ONLY@)
set(Caffe_HAVE_CUDA @HAVE_CUDA@)
set(Caffe_HAVE_CUDNN @HAVE_CUDNN@)
set(Caffe_HAVE_CNMEM @HAVE_CNMEM@)

4 changes: 0 additions & 4 deletions cmake/Templates/caffe_config.h.in
@@ -10,10 +10,6 @@
/* NVIDIA cuDNN */
#cmakedefine HAVE_CUDNN
#cmakedefine USE_CUDNN

/* NVIDIA CNMeM */
#cmakedefine HAVE_CNMEM
#cmakedefine USE_CNMEM

/* NVIDIA cuDNN */
#cmakedefine CPU_ONLY
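For context on the template mechanics: when CMake runs configure_file() on caffe_config.h.in, each #cmakedefine line becomes either a real #define (if the corresponding variable is set) or a commented-out #undef, so after this commit the generated header simply no longer mentions CNMeM. A minimal illustration of how consuming code reads the generated header (the USE_CUDNN guard comes from the file above; the comments describe the two possible expansions):

#include "caffe_config.h"

#ifdef USE_CUDNN
// cuDNN path: the generated header contains "#define USE_CUDNN"
// when CMake was configured with USE_CUDNN=ON and cuDNN was found.
#else
// Fallback path: the same template line was emitted as "/* #undef USE_CUDNN */".
#endif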
12 changes: 0 additions & 12 deletions include/caffe/util/device_alternate.hpp
@@ -66,18 +66,6 @@ void classname<Dtype>::funcname##_##gpu(const vector<Blob<Dtype>*>& top, \
<< caffe::curandGetErrorString(status); \
} while (0)

#ifdef USE_CNMEM

#define CNMEM_CHECK(condition) \
do { \
cnmemStatus_t status = condition; \
CHECK_EQ(status, CNMEM_STATUS_SUCCESS) << " " \
<< cnmemGetErrorString(status); \
} while (0)
#else
#define CNMEM_CHECK(condition)
#endif

// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
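With CNMEM_CHECK gone, the header's remaining error-checking macros are the CUDA, cuBLAS, and cuRAND ones, and the grid-stride helper whose first line appears above is untouched. For reference, here is a minimal, self-contained sketch of how a Caffe-style kernel uses CUDA_KERNEL_LOOP; the macro body matches the upstream definition, but set_kernel and the launch configuration are illustrative and not part of this diff:

#include <cuda_runtime.h>

// Grid-stride loop, as defined in device_alternate.hpp: each thread
// strides across the index space so any grid size covers all n elements.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

// Illustrative kernel: fill a device buffer with a constant.
__global__ void set_kernel(const int n, const float alpha, float* y) {
  CUDA_KERNEL_LOOP(index, n) {
    y[index] = alpha;
  }
}

int main() {
  const int n = 1 << 20;
  float* d_y = nullptr;
  cudaMalloc(&d_y, n * sizeof(float));
  set_kernel<<<(n + 255) / 256, 256>>>(n, 1.0f, d_y);  // 256 threads/block: a typical choice
  cudaDeviceSynchronize();
  cudaFree(d_y);
  return 0;
}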
5 changes: 0 additions & 5 deletions include/caffe/util/gpu_memory.hpp
@@ -10,16 +10,11 @@ class gpu_memory {
public:
enum PoolMode {
NoPool, // Straight CUDA malloc/free. May be very expensive
CnMemPool, // CNMEM arena allocator
CubPool, // CUB caching allocator
#ifdef CPU_ONLY
DefaultPool = NoPool
#else
# if (USE_CNMEM) && !defined (__arm__)
DefaultPool = CnMemPool // CNMEM pool only uses dedicated video memory.
# else
DefaultPool = CubPool // CUB pool is able to use unified memory properly
# endif
#endif
};

41 changes: 0 additions & 41 deletions src/caffe/util/gpu_memory.cpp
@@ -1,15 +1,9 @@
#include <algorithm>
#include <vector>
#include "caffe/common.hpp"

#include "caffe/util/gpu_memory.hpp"


#ifdef USE_CNMEM
// CNMEM integration
#include "cnmem.h"
#endif

#ifndef CPU_ONLY
#include "cub/cub/util_allocator.cuh"
#endif
@@ -42,7 +36,6 @@ namespace caffe {

switch (m) {
case CubPool:
case CnMemPool:
initMEM(gpus, m);
break;
default:
@@ -56,9 +49,6 @@

void gpu_memory::destroy() {
switch (mode_) {
case CnMemPool:
CNMEM_CHECK(cnmemFinalize());
break;
case CubPool:
delete cubAlloc;
cubAlloc = NULL;
@@ -73,9 +63,6 @@
void gpu_memory::allocate(void **ptr, size_t size, cudaStream_t stream) {
CHECK((ptr) != NULL);
switch (mode_) {
case CnMemPool:
CNMEM_CHECK(cnmemMalloc(ptr, size, stream));
break;
case CubPool:
CUDA_CHECK(cubAlloc->DeviceAllocate(ptr, size, stream));
break;
@@ -90,9 +77,6 @@
if (!ptr)
return;
switch (mode_) {
case CnMemPool:
CNMEM_CHECK(cnmemFree(ptr, stream));
break;
case CubPool:
CUDA_CHECK(cubAlloc->DeviceFree(ptr));
break;
@@ -104,9 +88,6 @@

void gpu_memory::registerStream(cudaStream_t stream) {
switch (mode_) {
case CnMemPool:
CNMEM_CHECK(cnmemRegisterStream(stream));
break;
case CubPool:
default:
break;
@@ -116,9 +97,6 @@
void gpu_memory::initMEM(const std::vector<int>& gpus, PoolMode m) {
mode_ = m;
int initial_device;
#if USE_CNMEM
cnmemDevice_t* devs = new cnmemDevice_t[gpus.size()];
#endif

CUDA_CHECK(cudaGetDevice(&initial_device));

@@ -142,21 +120,10 @@
// find out the smallest GPU size
if (poolsize_ == 0 || poolsize_ > free_mem)
poolsize_ = free_mem;
#if USE_CNMEM
devs[i].device = gpus[i];
devs[i].size = free_mem;
devs[i].numStreams = 0;
devs[i].streams = NULL;
#endif
}


switch ( mode_ ) {
case CnMemPool:
#if USE_CNMEM
CNMEM_CHECK(cnmemInit(gpus.size(), devs, CNMEM_FLAGS_DEFAULT));
#endif
break;
case CubPool:
try {
// if you are paranoid, that doesn't mean they are not after you :)
@@ -177,15 +144,10 @@
}

CUDA_CHECK(cudaSetDevice(initial_device));
#if USE_CNMEM
delete [] devs;
#endif
}

const char* gpu_memory::getPoolName() {
switch (mode_) {
case CnMemPool:
return "CNMEM Pool";
case CubPool:
return "CUB Pool";
default:
@@ -195,9 +157,6 @@

void gpu_memory::getInfo(size_t *free_mem, size_t *total_mem) {
switch (mode_) {
case CnMemPool:
CNMEM_CHECK(cnmemMemGetInfo(free_mem, total_mem, cudaStreamDefault));
break;
case CubPool:
int cur_device;
CUDA_CHECK(cudaGetDevice(&cur_device));
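After this commit the only pooling allocator left is CUB (NoPool remains as the raw cudaMalloc/cudaFree path). A host-side sketch of the resulting round trip follows; allocate(), registerStream(), destroy(), getInfo(), and getPoolName() appear in the diff above, while gpu_memory::init() and gpu_memory::deallocate() are inferred names that may differ in the actual header:

#include <vector>
#include <cuda_runtime.h>
#include "caffe/util/gpu_memory.hpp"

int main() {
  std::vector<int> gpus = {0};
  caffe::gpu_memory::init(gpus, caffe::gpu_memory::CubPool);  // inferred entry point

  cudaStream_t stream;
  cudaStreamCreate(&stream);
  caffe::gpu_memory::registerStream(stream);  // a no-op for CubPool after this change

  void* ptr = nullptr;
  caffe::gpu_memory::allocate(&ptr, 16 << 20, stream);  // 16 MB from the CUB caching pool

  size_t free_mem = 0, total_mem = 0;
  caffe::gpu_memory::getInfo(&free_mem, &total_mem);  // CubPool branch queries the current device

  caffe::gpu_memory::deallocate(ptr, stream);  // inferred counterpart of allocate
  cudaStreamDestroy(stream);
  caffe::gpu_memory::destroy();  // deletes the CUB allocator
  return 0;
}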
