Input layer type ImageData in Windows Caffe C++ gives blank output

I am working on an image segmentation problem using Caffe with C++ on Windows. I train the network with the "ImageData" input layer type, but at test time I get a blank output. Can anyone help me analyze this problem?
********** solver.prototxt ***************
test_initialization: false
base_lr: 0.01
display: 51
max_iter: 50000
lr_policy: "step"
gamma: 0.1
momentum: 0.9
weight_decay: 0.0001
stepsize: 4069
snapshot: 10000
snapshot_prefix: "snapshot"
solver_mode: GPU
net: "train.prototxt"
solver_type: SGD
File_triangle.txt and File_label_triangle.txt contain the absolute path of each image (under d:\) together with a dummy label, one entry per line, e.g.:

00000032.png 0

In C++, the following snippet is used for training:

#include <caffe/caffe.hpp>
#include <boost/shared_ptr.hpp>
using namespace caffe;

// Note: this net is built but never used; the solver constructs its own net from train.prototxt.
shared_ptr<Net<float> > net_;
net_.reset(new Net<float>("train.prototxt", caffe::Phase::TRAIN));

Caffe::set_mode(Caffe::GPU);
caffe::SolverParameter solver_param;
caffe::ReadSolverParamsFromTextFileOrDie("solver.prototxt", &solver_param);
boost::shared_ptr<caffe::Solver<float> > solver(caffe::SolverRegistry<float>::CreateSolver(solver_param));
solver->Solve();
**************** train.prototxt ********************
layer {
name: "data"
type: "ImageData"
top: "data"
top: "xx"
include {
phase: TRAIN
}
image_data_param {
source: "File_triangle.txt"
batch_size: 1
new_height: 32
new_width: 32
is_color: false
}
}
layer {
name: "label"
type: "ImageData"
top: "label"
top: "yy"
image_data_param {
source: "File_label_triangle.txt"
batch_size: 1
new_height: 32
new_width: 32
is_color: false
}
include {
phase: TRAIN
}
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1.0
}
param {
lr_mult: 0.10000000149
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.0010000000475
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "conv2"
type: "Convolution"
bottom: "conv1"
top: "conv2"
param {
lr_mult: 1.0
}
param {
lr_mult: 0.10000000149
}
convolution_param {
num_output: 1024
pad: 0
kernel_size: 16
stride: 16
weight_filler {
type: "gaussian"
std: 0.0010000000475
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "upsample"
type: "Deconvolution"
bottom: "conv2"
top: "upsample"
param {
lr_mult: 1.0
}
convolution_param {
num_output: 1
pad: 0
kernel_size: 16
stride: 16
bias_filler {
type: "constant"
value: 128.0
}
}
}
layer {
name: "lossL1"
type: "SmoothL1Loss"
bottom: "upsample"
bottom: "label"
top: "lossL1"
loss_weight: 1.0
}
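As a sanity check on the two ImageData layers above, the training net can be forwarded once on its own and the "data" and "label" blobs dumped to disk. This is only a debugging sketch of mine (the output file names are placeholders, and it assumes the pixel values arrive unscaled in [0,255], since no scale or mean is set in the prototxt):

#include <caffe/caffe.hpp>
#include <opencv2/opencv.hpp>
using namespace caffe;

// Forward the TRAIN net once so both ImageData layers load one batch,
// then dump the "data" and "label" blobs for visual inspection.
Net<float> train_net("train.prototxt", caffe::TRAIN);
train_net.Forward();

const boost::shared_ptr<Blob<float> >& data_blob  = train_net.blob_by_name("data");
const boost::shared_ptr<Blob<float> >& label_blob = train_net.blob_by_name("label");

// Both blobs are 1x1x32x32 here (batch_size 1, grayscale, new_height/new_width 32).
cv::Mat data_img(32, 32, CV_32F, const_cast<float*>(data_blob->cpu_data()));
cv::Mat label_img(32, 32, CV_32F, const_cast<float*>(label_blob->cpu_data()));

cv::Mat data_8u, label_8u;
data_img.convertTo(data_8u, CV_8U);    // assumes raw pixel values in [0,255]
label_img.convertTo(label_8u, CV_8U);
cv::imwrite("data_check.png", data_8u);    // placeholder file names
cv::imwrite("label_check.png", label_8u);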
The setup above is used for training. I then test the trained network using the resulting .caffemodel and the following test.prototxt:
******************** test.prototxt **********************
layer {
name: "data"
type: "Input"
top: "data"
input_param { shape: { dim: 1 dim: 1 dim: 32 dim: 32 } }
}
layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1.0
}
param {
lr_mult: 0.10000000149
}
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "gaussian"
std: 0.0010000000475
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "conv2"
type: "Convolution"
bottom: "conv1"
top: "conv2"
param {
lr_mult: 1.0
}
param {
lr_mult: 0.10000000149
}
convolution_param {
num_output: 1024
pad: 0
kernel_size: 16
stride: 16
weight_filler {
type: "gaussian"
std: 0.0010000000475
}
bias_filler {
type: "constant"
value: 0.0
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "upsample"
type: "Deconvolution"
bottom: "conv2"
top: "upsample"
param {
lr_mult: 1.0
}
convolution_param {
num_output: 1
pad: 0
kernel_size: 16
stride: 16
bias_filler {
type: "constant"
value: 128.0
}
}
}
The code snippet used for testing:
#include <caffe/caffe.hpp>
#include <boost/shared_ptr.hpp>
#include <opencv2/opencv.hpp>
#include <string>
using namespace caffe;

Caffe::set_mode(Caffe::GPU);

// Load the deploy net and the trained weights.
boost::shared_ptr<caffe::Net<float> > net_;
net_.reset(new Net<float>("test.prototxt", caffe::TEST));
net_->CopyTrainedLayersFrom("snapshot_iter_50000.caffemodel");

// Read the image as a single channel to match is_color: false and the 1x1x32x32 input blob.
cv::Mat matInput = cv::imread("input image path", cv::IMREAD_GRAYSCALE);
matInput.convertTo(matInput, CV_32F);
int height = matInput.rows;  // must be 32, otherwise the copy below overruns the input blob
int width = matInput.cols;   // must be 32

// Copy the pixels into the network's input blob.
Blob<float>* input_layer = net_->input_blobs()[0];
float* input_data = input_layer->mutable_cpu_data();
for (int i = 0; i < height; i++)
{
    for (int j = 0; j < width; j++)
    {
        input_data[i * width + j] = matInput.at<float>(i, j);
    }
}

net_->Forward();

// Read back the "upsample" blob and write it out as an 8-bit image.
const boost::shared_ptr<Blob<float> >& concat_blob = net_->blob_by_name("upsample");
const float* concat_out = concat_blob->cpu_data();
cv::Mat matout(height, width, CV_8UC1);
for (int i = 0; i < height * width; i++)
{
    matout.data[i] = cv::saturate_cast<uchar>(concat_out[i]);
}
const std::string output_str = "output image path";
cv::imwrite(output_str, matout);
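Since the raw float output is copied directly into an 8-bit image, predictions that are small, negative, or otherwise outside [0,255] look completely blank even when the blob is not actually zero. A quick way to check this, continuing from the test snippet above (it reuses height, width and concat_out; cv::minMaxLoc and cv::normalize are standard OpenCV calls, and the output file name is a placeholder):

#include <iostream>  // for std::cout; the other headers are already included above

// Wrap the float output blob in a Mat header (no copy) and inspect its value range.
cv::Mat matFloat(height, width, CV_32F, const_cast<float*>(concat_out));
double minVal = 0.0, maxVal = 0.0;
cv::minMaxLoc(matFloat, &minVal, &maxVal);
std::cout << "upsample range: [" << minVal << ", " << maxVal << "]" << std::endl;

// Stretch the output to the full 8-bit range before saving, so a non-zero but
// low-magnitude prediction is still visible in the written image.
cv::Mat matVis;
cv::normalize(matFloat, matVis, 0, 255, cv::NORM_MINMAX, CV_8UC1);
cv::imwrite("upsample_vis.png", matVis);  // placeholder output path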