Problem 1

After setting
GPU=1
CUDNN=1
in darknet's Makefile and running make, compilation fails with:
include/darknet.h:16:23: fatal error: cudnn.h: No such file or directory

Solution:
1. Locate cudnn.h

find / -name cudnn.h

This command lists every cudnn.h on the system. Pick the one inside the conda environment where CUDA/cuDNN was installed; in my case it is:
/root/anaconda3/envs/han/include/cudnn.h

2. Copy the cudnn.h you found into /usr/include

Run: cp /root/anaconda3/envs/han/include/cudnn.h /usr/include

Then run make again. After compiling for a while it fails once more, this time at link time:

/usr/bin/ld: cannot find -lcudnn
collect2: error: ld returned 1 exit status
make: *** [libdarknet.so] Error 1

Solution:
1. Find the library file

find / -name libcudnn.so

2. Copy it
Run: cp /root/anaconda3/envs/han/lib/libcudnn.so /usr/local/cuda-9.2/lib64
Then run make again; this time it produces the darknet executable together with the libdarknet.so and libdarknet.a libraries. Done.

Note: any other missing header or library can be fixed the same way.
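Copying headers and libraries into /usr/include and the CUDA directory works, but it scatters files across the system. An alternative is to point the build at the conda environment instead of copying anything. A minimal sketch, reusing the example path /root/anaconda3/envs/han from above (adjust it to your own environment):

export CPATH=/root/anaconda3/envs/han/include:$CPATH                   # header search path for gcc
export LIBRARY_PATH=/root/anaconda3/envs/han/lib:$LIBRARY_PATH         # link-time library search path
export LD_LIBRARY_PATH=/root/anaconda3/envs/han/lib:$LD_LIBRARY_PATH   # run-time path so ./darknet can load libcudnn.so
make clean && make

Equivalently, the corresponding -I and -L flags can be appended to the COMMON and LDFLAGS variables in the Makefile. If the nvcc-compiled .cu files still fail to find cudnn.h with this approach, copying the files as described above remains the fallback.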

Problem 2

CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT error in the darknet source

./src/convolutional_layer.c:153:13: error: ‘CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT’ undeclared (first use in this function)
153 | CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,

Cause
cuDNN 8.x no longer defines the CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT macro (the cudnnGetConvolution*Algorithm() API it belonged to was removed). CUDA 11.x cannot be paired with cuDNN 7.x, yet RTX 30-series GPUs only run properly on CUDA 11.x, so at first this looks like a dead end. After a long search I found that NVIDIA published a workaround for cuDNN 8: modify the failing file, src/convolutional_layer.c, and add a code path for CUDNN_MAJOR >= 8.

Solution

vi src/convolutional_layer.c

Compared with the stock darknet source, the fix changes two things: it adds the PRINT_CUDNN_ALGO and MEMORY_LIMIT macros near the top of the file, and it rewrites cudnn_convolutional_setup() so that on cuDNN 8 the algorithms are chosen with cudnnFindConvolution*Algorithm() instead of the removed cudnnGetConvolution*Algorithm() calls. Edit the file so that these parts read as follows (the remaining functions in the file stay exactly as they are in the original source):

#include "convolutional_layer.h"
#include "utils.h"
#include "batchnorm_layer.h"
#include "im2col.h"
#include "col2im.h"
#include "blas.h"
#include "gemm.h"
#include <stdio.h>
#include <time.h>#define PRINT_CUDNN_ALGO 0
#define MEMORY_LIMIT 2000000000#ifdef AI2
#include "xnor_layer.h"
#endifvoid swap_binary(convolutional_layer *l)
{float *swap = l->weights;l->weights = l->binary_weights;l->binary_weights = swap;#ifdef GPUswap = l->weights_gpu;l->weights_gpu = l->binary_weights_gpu;l->binary_weights_gpu = swap;
#endif
}void binarize_weights(float *weights, int n, int size, float *binary)
{int i, f;for(f = 0; f < n; ++f){float mean = 0;for(i = 0; i < size; ++i){mean += fabs(weights[f*size + i]);}mean = mean / size;for(i = 0; i < size; ++i){binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;}}
}void binarize_cpu(float *input, int n, float *binary)
{int i;for(i = 0; i < n; ++i){binary[i] = (input[i] > 0) ? 1 : -1;}
}void binarize_input(float *input, int n, int size, float *binary)
{int i, s;for(s = 0; s < size; ++s){float mean = 0;for(i = 0; i < n; ++i){mean += fabs(input[i*size + s]);}mean = mean / n;for(i = 0; i < n; ++i){binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;}}
}int convolutional_out_height(convolutional_layer l)
{return (l.h + 2*l.pad - l.size) / l.stride + 1;
}int convolutional_out_width(convolutional_layer l)
{return (l.w + 2*l.pad - l.size) / l.stride + 1;
}image get_convolutional_image(convolutional_layer l)
{return float_to_image(l.out_w,l.out_h,l.out_c,l.output);
}image get_convolutional_delta(convolutional_layer l)
{return float_to_image(l.out_w,l.out_h,l.out_c,l.delta);
}static size_t get_workspace_size(layer l){#ifdef CUDNNif(gpu_index >= 0){size_t most = 0;size_t s = 0;cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle(),l.srcTensorDesc,l.weightDesc,l.convDesc,l.dstTensorDesc,l.fw_algo,&s);if (s > most) most = s;cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handle(),l.srcTensorDesc,l.ddstTensorDesc,l.convDesc,l.dweightDesc,l.bf_algo,&s);if (s > most) most = s;cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handle(),l.weightDesc,l.ddstTensorDesc,l.convDesc,l.dsrcTensorDesc,l.bd_algo,&s);if (s > most) most = s;return most;}
#endifreturn (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups*sizeof(float);
}#ifdef GPU
#ifdef CUDNN
void cudnn_convolutional_setup(layer *l)
{cudnnSetTensor4dDescriptor(l->dsrcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w); cudnnSetTensor4dDescriptor(l->ddstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w); cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w); cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w); cudnnSetTensor4dDescriptor(l->normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l->out_c, 1, 1); cudnnSetFilter4dDescriptor(l->dweightDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, l->n, l->c/l->groups, l->size, l->size); cudnnSetFilter4dDescriptor(l->weightDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, l->n, l->c/l->groups, l->size, l->size); #if CUDNN_MAJOR >= 6cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);#elsecudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION);#endif#if CUDNN_MAJOR >= 7cudnnSetConvolutionGroupCount(l->convDesc, l->groups);#elseif(l->groups > 1){error("CUDNN < 7 doesn't support groups, please upgrade!");}#endif#if CUDNN_MAJOR >= 8int returnedAlgoCount;cudnnConvolutionFwdAlgoPerf_t       fw_results[2 * CUDNN_CONVOLUTION_FWD_ALGO_COUNT];cudnnConvolutionBwdDataAlgoPerf_t   bd_results[2 * CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT];cudnnConvolutionBwdFilterAlgoPerf_t bf_results[2 * CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT];cudnnFindConvolutionForwardAlgorithm(cudnn_handle(),l->srcTensorDesc,l->weightDesc,l->convDesc,l->dstTensorDesc,CUDNN_CONVOLUTION_FWD_ALGO_COUNT,&returnedAlgoCount,fw_results);for(int algoIndex = 0; algoIndex < returnedAlgoCount; ++algoIndex){#if PRINT_CUDNN_ALGO > 0printf("^^^^ %s for Algo %d: %f time requiring %llu memory\n",cudnnGetErrorString(fw_results[algoIndex].status),fw_results[algoIndex].algo, fw_results[algoIndex].time,(unsigned long long)fw_results[algoIndex].memory);#endifif( fw_results[algoIndex].memory < MEMORY_LIMIT ){l->fw_algo = fw_results[algoIndex].algo;break;}}cudnnFindConvolutionBackwardDataAlgorithm(cudnn_handle(),l->weightDesc,l->ddstTensorDesc,l->convDesc,l->dsrcTensorDesc,CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT,&returnedAlgoCount,bd_results);for(int algoIndex = 0; algoIndex < returnedAlgoCount; ++algoIndex){#if PRINT_CUDNN_ALGO > 0printf("^^^^ %s for Algo %d: %f time requiring %llu memory\n",cudnnGetErrorString(bd_results[algoIndex].status),bd_results[algoIndex].algo, bd_results[algoIndex].time,(unsigned long long)bd_results[algoIndex].memory);#endifif( bd_results[algoIndex].memory < MEMORY_LIMIT ){l->bd_algo = bd_results[algoIndex].algo;break;}}cudnnFindConvolutionBackwardFilterAlgorithm(cudnn_handle(),l->srcTensorDesc,l->ddstTensorDesc,l->convDesc,l->dweightDesc,CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT,&returnedAlgoCount,bf_results);for(int algoIndex = 0; algoIndex < returnedAlgoCount; ++algoIndex){#if PRINT_CUDNN_ALGO > 0printf("^^^^ %s for Algo %d: %f time requiring %llu memory\n",cudnnGetErrorString(bf_results[algoIndex].status),bf_results[algoIndex].algo, bf_results[algoIndex].time,(unsigned long long)bf_results[algoIndex].memory);#endifif( bf_results[algoIndex].memory < MEMORY_LIMIT ){l->bf_algo = 
bf_results[algoIndex].algo;break;}}#elsecudnnGetConvolutionForwardAlgorithm(cudnn_handle(),l->srcTensorDesc,l->weightDesc,l->convDesc,l->dstTensorDesc,CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,2000000000,&l->fw_algo);cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handle(),l->weightDesc,l->ddstTensorDesc,l->convDesc,l->dsrcTensorDesc,CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,2000000000,&l->bd_algo);cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handle(),l->srcTensorDesc,l->ddstTensorDesc,l->convDesc,l->dweightDesc,CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,2000000000,&l->bf_algo);#endif
}
#endif
#endifconvolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam)
{int i;convolutional_layer l = {0};l.type = CONVOLUTIONAL;l.groups = groups;l.h = h;l.w = w;l.c = c;l.n = n;l.binary = binary;l.xnor = xnor;l.batch = batch;l.stride = stride;l.size = size;l.pad = padding;l.batch_normalize = batch_normalize;l.weights = calloc(c/groups*n*size*size, sizeof(float));l.weight_updates = calloc(c/groups*n*size*size, sizeof(float));l.biases = calloc(n, sizeof(float));l.bias_updates = calloc(n, sizeof(float));l.nweights = c/groups*n*size*size;l.nbiases = n;// float scale = 1./sqrt(size*size*c);float scale = sqrt(2./(size*size*c/l.groups));//printf("convscale %f\n", scale);//scale = .02;//for(i = 0; i < c*n*size*size; ++i) l.weights[i] = scale*rand_uniform(-1, 1);for(i = 0; i < l.nweights; ++i) l.weights[i] = scale*rand_normal();int out_w = convolutional_out_width(l);int out_h = convolutional_out_height(l);l.out_h = out_h;l.out_w = out_w;l.out_c = n;l.outputs = l.out_h * l.out_w * l.out_c;l.inputs = l.w * l.h * l.c;l.output = calloc(l.batch*l.outputs, sizeof(float));l.delta  = calloc(l.batch*l.outputs, sizeof(float));l.forward = forward_convolutional_layer;l.backward = backward_convolutional_layer;l.update = update_convolutional_layer;if(binary){l.binary_weights = calloc(l.nweights, sizeof(float));l.cweights = calloc(l.nweights, sizeof(char));l.scales = calloc(n, sizeof(float));}if(xnor){l.binary_weights = calloc(l.nweights, sizeof(float));l.binary_input = calloc(l.inputs*l.batch, sizeof(float));}if(batch_normalize){l.scales = calloc(n, sizeof(float));l.scale_updates = calloc(n, sizeof(float));for(i = 0; i < n; ++i){l.scales[i] = 1;}l.mean = calloc(n, sizeof(float));l.variance = calloc(n, sizeof(float));l.mean_delta = calloc(n, sizeof(float));l.variance_delta = calloc(n, sizeof(float));l.rolling_mean = calloc(n, sizeof(float));l.rolling_variance = calloc(n, sizeof(float));l.x = calloc(l.batch*l.outputs, sizeof(float));l.x_norm = calloc(l.batch*l.outputs, sizeof(float));}if(adam){l.m = calloc(l.nweights, sizeof(float));l.v = calloc(l.nweights, sizeof(float));l.bias_m = calloc(n, sizeof(float));l.scale_m = calloc(n, sizeof(float));l.bias_v = calloc(n, sizeof(float));l.scale_v = calloc(n, sizeof(float));}#ifdef GPUl.forward_gpu = forward_convolutional_layer_gpu;l.backward_gpu = backward_convolutional_layer_gpu;l.update_gpu = update_convolutional_layer_gpu;if(gpu_index >= 0){if (adam) {l.m_gpu = cuda_make_array(l.m, l.nweights);l.v_gpu = cuda_make_array(l.v, l.nweights);l.bias_m_gpu = cuda_make_array(l.bias_m, n);l.bias_v_gpu = cuda_make_array(l.bias_v, n);l.scale_m_gpu = cuda_make_array(l.scale_m, n);l.scale_v_gpu = cuda_make_array(l.scale_v, n);}l.weights_gpu = cuda_make_array(l.weights, l.nweights);l.weight_updates_gpu = cuda_make_array(l.weight_updates, l.nweights);l.biases_gpu = cuda_make_array(l.biases, n);l.bias_updates_gpu = cuda_make_array(l.bias_updates, n);l.delta_gpu = cuda_make_array(l.delta, l.batch*out_h*out_w*n);l.output_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);if(binary){l.binary_weights_gpu = cuda_make_array(l.weights, l.nweights);}if(xnor){l.binary_weights_gpu = cuda_make_array(l.weights, l.nweights);l.binary_input_gpu = cuda_make_array(0, l.inputs*l.batch);}if(batch_normalize){l.mean_gpu = cuda_make_array(l.mean, n);l.variance_gpu = cuda_make_array(l.variance, n);l.rolling_mean_gpu = cuda_make_array(l.mean, n);l.rolling_variance_gpu = cuda_make_array(l.variance, n);l.mean_delta_gpu = cuda_make_array(l.mean, n);l.variance_delta_gpu = cuda_make_array(l.variance, n);l.scales_gpu = cuda_make_array(l.scales, n);l.scale_updates_gpu = 
cuda_make_array(l.scale_updates, n);l.x_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);l.x_norm_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);}
#ifdef CUDNNcudnnCreateTensorDescriptor(&l.normTensorDesc);cudnnCreateTensorDescriptor(&l.srcTensorDesc);cudnnCreateTensorDescriptor(&l.dstTensorDesc);cudnnCreateFilterDescriptor(&l.weightDesc);cudnnCreateTensorDescriptor(&l.dsrcTensorDesc);cudnnCreateTensorDescriptor(&l.ddstTensorDesc);cudnnCreateFilterDescriptor(&l.dweightDesc);cudnnCreateConvolutionDescriptor(&l.convDesc);cudnn_convolutional_setup(&l);
#endif}
#endifl.workspace_size = get_workspace_size(l);l.activation = activation;fprintf(stderr, "conv  %5d %2d x%2d /%2d  %4d x%4d x%4d   ->  %4d x%4d x%4d  %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);return l;
}void denormalize_convolutional_layer(convolutional_layer l)
{int i, j;for(i = 0; i < l.n; ++i){float scale = l.scales[i]/sqrt(l.rolling_variance[i] + .00001);for(j = 0; j < l.c/l.groups*l.size*l.size; ++j){l.weights[i*l.c/l.groups*l.size*l.size + j] *= scale;}l.biases[i] -= l.rolling_mean[i] * scale;l.scales[i] = 1;l.rolling_mean[i] = 0;l.rolling_variance[i] = 1;}
}/*
void test_convolutional_layer()
{convolutional_layer l = make_convolutional_layer(1, 5, 5, 3, 2, 5, 2, 1, LEAKY, 1, 0, 0, 0);l.batch_normalize = 1;float data[] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3};//net.input = data;//forward_convolutional_layer(l);
}
*/void resize_convolutional_layer(convolutional_layer *l, int w, int h)
{l->w = w;l->h = h;int out_w = convolutional_out_width(*l);int out_h = convolutional_out_height(*l);l->out_w = out_w;l->out_h = out_h;l->outputs = l->out_h * l->out_w * l->out_c;l->inputs = l->w * l->h * l->c;l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));l->delta  = realloc(l->delta,  l->batch*l->outputs*sizeof(float));if(l->batch_normalize){l->x = realloc(l->x, l->batch*l->outputs*sizeof(float));l->x_norm  = realloc(l->x_norm, l->batch*l->outputs*sizeof(float));}#ifdef GPUcuda_free(l->delta_gpu);cuda_free(l->output_gpu);l->delta_gpu =  cuda_make_array(l->delta,  l->batch*l->outputs);l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);if(l->batch_normalize){cuda_free(l->x_gpu);cuda_free(l->x_norm_gpu);l->x_gpu = cuda_make_array(l->output, l->batch*l->outputs);l->x_norm_gpu = cuda_make_array(l->output, l->batch*l->outputs);}
#ifdef CUDNNcudnn_convolutional_setup(l);
#endif
#endifl->workspace_size = get_workspace_size(*l);
}void add_bias(float *output, float *biases, int batch, int n, int size)
{int i,j,b;for(b = 0; b < batch; ++b){for(i = 0; i < n; ++i){for(j = 0; j < size; ++j){output[(b*n + i)*size + j] += biases[i];}}}
}void scale_bias(float *output, float *scales, int batch, int n, int size)
{int i,j,b;for(b = 0; b < batch; ++b){for(i = 0; i < n; ++i){for(j = 0; j < size; ++j){output[(b*n + i)*size + j] *= scales[i];}}}
}void backward_bias(float *bias_updates, float *delta, int batch, int n, int size)
{int i,b;for(b = 0; b < batch; ++b){for(i = 0; i < n; ++i){bias_updates[i] += sum_array(delta+size*(i+b*n), size);}}
}void forward_convolutional_layer(convolutional_layer l, network net)
{int i, j;fill_cpu(l.outputs*l.batch, 0, l.output, 1);if(l.xnor){binarize_weights(l.weights, l.n, l.c/l.groups*l.size*l.size, l.binary_weights);swap_binary(&l);binarize_cpu(net.input, l.c*l.h*l.w*l.batch, l.binary_input);net.input = l.binary_input;}int m = l.n/l.groups;int k = l.size*l.size*l.c/l.groups;int n = l.out_w*l.out_h;for(i = 0; i < l.batch; ++i){for(j = 0; j < l.groups; ++j){float *a = l.weights + j*l.nweights/l.groups;float *b = net.workspace;float *c = l.output + (i*l.groups + j)*n*m;float *im =  net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w;if (l.size == 1) {b = im;} else {im2col_cpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);}gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);}}if(l.batch_normalize){forward_batchnorm_layer(l, net);} else {add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);}activate_array(l.output, l.outputs*l.batch, l.activation);if(l.binary || l.xnor) swap_binary(&l);
}void backward_convolutional_layer(convolutional_layer l, network net)
{int i, j;int m = l.n/l.groups;int n = l.size*l.size*l.c/l.groups;int k = l.out_w*l.out_h;gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);if(l.batch_normalize){backward_batchnorm_layer(l, net);} else {backward_bias(l.bias_updates, l.delta, l.batch, l.n, k);}for(i = 0; i < l.batch; ++i){for(j = 0; j < l.groups; ++j){float *a = l.delta + (i*l.groups + j)*m*k;float *b = net.workspace;float *c = l.weight_updates + j*l.nweights/l.groups;float *im  = net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w;float *imd = net.delta + (i*l.groups + j)*l.c/l.groups*l.h*l.w;if(l.size == 1){b = im;} else {im2col_cpu(im, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);}gemm(0,1,m,n,k,1,a,k,b,k,1,c,n);if (net.delta) {a = l.weights + j*l.nweights/l.groups;b = l.delta + (i*l.groups + j)*m*k;c = net.workspace;if (l.size == 1) {c = imd;}gemm(1,0,n,k,m,1,a,n,b,k,0,c,k);if (l.size != 1) {col2im_cpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, imd);}}}}
}void update_convolutional_layer(convolutional_layer l, update_args a)
{float learning_rate = a.learning_rate*l.learning_rate_scale;float momentum = a.momentum;float decay = a.decay;int batch = a.batch;axpy_cpu(l.n, learning_rate/batch, l.bias_updates, 1, l.biases, 1);scal_cpu(l.n, momentum, l.bias_updates, 1);if(l.scales){axpy_cpu(l.n, learning_rate/batch, l.scale_updates, 1, l.scales, 1);scal_cpu(l.n, momentum, l.scale_updates, 1);}axpy_cpu(l.nweights, -decay*batch, l.weights, 1, l.weight_updates, 1);axpy_cpu(l.nweights, learning_rate/batch, l.weight_updates, 1, l.weights, 1);scal_cpu(l.nweights, momentum, l.weight_updates, 1);
}image get_convolutional_weight(convolutional_layer l, int i)
{int h = l.size;int w = l.size;int c = l.c/l.groups;return float_to_image(w,h,c,l.weights+i*h*w*c);
}void rgbgr_weights(convolutional_layer l)
{int i;for(i = 0; i < l.n; ++i){image im = get_convolutional_weight(l, i);if (im.c == 3) {rgbgr_image(im);}}
}void rescale_weights(convolutional_layer l, float scale, float trans)
{int i;for(i = 0; i < l.n; ++i){image im = get_convolutional_weight(l, i);if (im.c == 3) {scale_image(im, scale);float sum = sum_array(im.data, im.w*im.h*im.c);l.biases[i] += sum*trans;}}
}image *get_weights(convolutional_layer l)
{image *weights = calloc(l.n, sizeof(image));int i;for(i = 0; i < l.n; ++i){weights[i] = copy_image(get_convolutional_weight(l, i));normalize_image(weights[i]);/*char buff[256];sprintf(buff, "filter%d", i);save_image(weights[i], buff);*/}//error("hey");return weights;
}image *visualize_convolutional_layer(convolutional_layer l, char *window, image *prev_weights)
{image *single_weights = get_weights(l);show_images(single_weights, l.n, window);image delta = get_convolutional_image(l);image dc = collapse_image_layers(delta, 1);char buff[256];sprintf(buff, "%s: Output", window);//show_image(dc, buff);//save_image(dc, buff);free_image(dc);return single_weights;
}
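After saving the file, rebuild from scratch so the modified source is actually recompiled:

make clean
make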

Problem 3

yolov3 ./src/image_opencv.cpp:5:10: fatal error: opencv2/opencv.hpp: No such file or directory

To let YOLO run detection on video I set OPENCV=1 in the Makefile; make then failed with:

./src/image_opencv.cpp:5:10: fatal error: opencv2/opencv.hpp: No such file or directory

Solution:

sudo apt install libopencv-dev
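If the header is still not found after installing libopencv-dev, the usual cause is that the Makefile queries pkg-config for a package named opencv, while newer Ubuntu releases ship OpenCV 4 and register it as opencv4. A quick check (these are the common package names; verify on your own system):

pkg-config --modversion opencv    # OpenCV 2/3 style name used by the stock Makefile
pkg-config --modversion opencv4   # name used by OpenCV 4 packages

If only opencv4 is reported, change the pkg-config calls in the Makefile from opencv to opencv4; note that older darknet code may also need small source changes to build against OpenCV 4.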

Problem 4

nvcc not found: /bin/sh: 1: nvcc: not found

Problem description
When running make, the build stops with the error "/bin/sh: 1: nvcc: not found".


Solution
Check whether the nvcc executable exists under /usr/local/cuda/bin. If it does, nvcc is installed but simply not on the PATH. Run:

$ cd /usr/local/cuda/bin && ls

If nvcc is indeed there, just add it to the PATH:

$ sudo vi ~/.bashrc

Add this line at the end of the file: export PATH=$PATH:/usr/local/cuda/bin

Then open a new terminal (or source ~/.bashrc), switch back to the darknet directory, and make will no longer report the error.
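For reference, the same steps can be done without an editor (assuming CUDA is installed under /usr/local/cuda):

ls /usr/local/cuda/bin/nvcc                                # confirm the compiler is really there
echo 'export PATH=$PATH:/usr/local/cuda/bin' >> ~/.bashrc
source ~/.bashrc                                           # or open a new terminal
nvcc --version                                             # should now print the CUDA release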

Problem 5

nvcc fatal : Unsupported gpu architecture 'compute_30'

The root cause is that this GPU/CUDA combination does not support the compute_30 architecture: CUDA 11.x dropped support for compute_30 (Kepler), so nvcc refuses to generate code for it.

The fix most commonly suggested online is to open the Makefile, locate the GPU architecture settings (the ARCH lines), and comment out or delete the line `-gencode arch=compute_30,code=sm_30 \`; a sketch of what the edited block can look like follows.
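A minimal sketch of the ARCH block after the edit (the exact compute capabilities to keep depend on your GPU and CUDA version; compute_86 targets the RTX 30 series and requires CUDA 11.1 or newer, so treat this list as an example rather than the one correct setting):

ARCH= -gencode arch=compute_52,code=sm_52 \
      -gencode arch=compute_61,code=sm_61 \
      -gencode arch=compute_75,code=sm_75 \
      -gencode arch=compute_86,code=sm_86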
