
Caffe Source Code Walkthrough: Forward and Backward Propagation in Softmax_loss_Layer

1. Forward pass

The forward pass has two steps:

1. Compute the softmax probabilities prob_data, by calling SoftmaxLayer's Forward function directly;

2. Compute the loss with cross-entropy: for a sample whose ground-truth class is i, the loss is -log(prob(i)) (written out as a formula below).
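In formula form (a standard statement of softmax cross-entropy, not taken from the Caffe source itself; z is the layer input, y_i the label of sample i, and N the normalizer returned by get_normalizer):

p_{ik} = \frac{e^{z_{ik}}}{\sum_{c} e^{z_{ic}}}, \qquad L = -\frac{1}{N} \sum_{i} \log p_{i,y_i}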

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  // The forward pass computes the softmax prob values.
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);  // call SoftmaxLayer::Forward directly
  const Dtype* prob_data = prob_.cpu_data();   // softmax probabilities
  const Dtype* label = bottom[1]->cpu_data();  // ground-truth labels
  int dim = prob_.count() / outer_num_;
  int count = 0;
  Dtype loss = 0;
  for (int i = 0; i < outer_num_; ++i) {
    for (int j = 0; j < inner_num_; j++) {
      const int label_value = static_cast<int>(label[i * inner_num_ + j]);
      if (has_ignore_label_ && label_value == ignore_label_) {
        continue;
      }
      DCHECK_GE(label_value, 0);
      DCHECK_LT(label_value, prob_.shape(softmax_axis_));
      // Per-sample loss is -log of the probability at the label position,
      // clamped at FLT_MIN to avoid log(0)
      loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j],
                           Dtype(FLT_MIN)));
      ++count;
    }
  }
  // Divide the accumulated loss by the normalizer (e.g. the number of counted
  // samples) to get the averaged loss
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count);
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}
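To make the numbers concrete, here is a minimal standalone sketch (plain C++ with made-up toy values, not Caffe code) that reproduces the two steps for a single 3-class sample:

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical logits for one sample over 3 classes; ground-truth label = 2.
  double z[3] = {1.0, 2.0, 3.0};
  const int label = 2;

  // Step 1: softmax (subtract the max first for numerical stability).
  double zmax = *std::max_element(z, z + 3);
  double sum = 0.0, prob[3];
  for (int c = 0; c < 3; ++c) sum += std::exp(z[c] - zmax);
  for (int c = 0; c < 3; ++c) prob[c] = std::exp(z[c] - zmax) / sum;

  // Step 2: cross-entropy, clamped at FLT_MIN exactly like the layer.
  double loss = -std::log(std::max(prob[label], static_cast<double>(FLT_MIN)));
  std::printf("prob = [%.4f, %.4f, %.4f], loss = %.4f\n",
              prob[0], prob[1], prob[2], loss);
  return 0;
}

With these values it prints prob ≈ [0.0900, 0.2447, 0.6652] and loss ≈ 0.4076, i.e. -log(0.6652).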

2. Backward pass
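
Before reading the code, recall the closed-form gradient of softmax cross-entropy with respect to the layer input (a textbook result, stated here for context):

\frac{\partial L_i}{\partial z_{ik}} = p_{ik} - \mathbf{1}\{k = y_i\}

That is: start from the probabilities and subtract 1 at the ground-truth class. The code below does exactly this, copying prob_ into bottom_diff with caffe_copy and then subtracting 1 at the label position.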



template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const Dtype* prob_data = prob_.cpu_data();
    caffe_copy(prob_.count(), prob_data, bottom_diff);  // copy probabilities into bottom_diff
    const Dtype* label = bottom[1]->cpu_data();         // ground-truth labels
    int dim = prob_.count() / outer_num_;
    int count = 0;
    for (int i = 0; i < outer_num_; ++i) {
      for (int j = 0; j < inner_num_; ++j) {
        const int label_value = static_cast<int>(label[i * inner_num_ + j]);  // ground-truth label
        if (has_ignore_label_ && label_value == ignore_label_) {
          // Ignored label: the gradient is zero for every class at this position
          for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) {
            bottom_diff[i * dim + c * inner_num_ + j] = 0;
          }
        } else {
          // Gradient is prob minus the one-hot target; the target is 1 at the
          // label position and 0 elsewhere, so only the label entry needs the
          // subtraction
          bottom_diff[i * dim + label_value * inner_num_ + j] -= 1;
          ++count;
        }
      }
    }
    // Scale gradient by the upstream loss weight divided by the normalizer
    Dtype loss_weight = top[0]->cpu_diff()[0] /
                        get_normalizer(normalization_, count);
    caffe_scal(prob_.count(), loss_weight, bottom_diff);
  }
}
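A quick way to convince yourself that prob - onehot really is the gradient is a finite-difference check. Below is a minimal standalone sketch (plain C++ with hypothetical toy values, not Caffe code) comparing the analytic gradient against a numerical one:

#include <algorithm>
#include <cmath>
#include <cstdio>

// Softmax cross-entropy loss of a 3-class logit vector z for the given label.
static double loss_fn(const double z[3], int label) {
  double zmax = *std::max_element(z, z + 3);
  double sum = 0.0;
  for (int c = 0; c < 3; ++c) sum += std::exp(z[c] - zmax);
  return -((z[label] - zmax) - std::log(sum));  // -log(softmax(z)[label])
}

int main() {
  double z[3] = {1.0, 2.0, 3.0};
  const int label = 2;
  const double eps = 1e-6;

  // Analytic gradient: prob - onehot(label).
  double zmax = *std::max_element(z, z + 3), sum = 0.0;
  for (int c = 0; c < 3; ++c) sum += std::exp(z[c] - zmax);
  for (int c = 0; c < 3; ++c) {
    double analytic = std::exp(z[c] - zmax) / sum - (c == label ? 1.0 : 0.0);

    // Numerical gradient by central differences.
    double zp[3] = {z[0], z[1], z[2]}, zm[3] = {z[0], z[1], z[2]};
    zp[c] += eps; zm[c] -= eps;
    double numeric = (loss_fn(zp, label) - loss_fn(zm, label)) / (2 * eps);

    std::printf("dL/dz[%d]: analytic=%.6f numeric=%.6f\n", c, analytic, numeric);
  }
  return 0;
}

The two columns agree to the printed precision, matching the prob - onehot form that Backward_cpu implements (before the final loss_weight scaling).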