
C++ caffe_mul Function Code Examples


This article collects typical usage examples of the C++ caffe_mul function. If you have been wondering what caffe_mul does, how to call it, or what real-world usage looks like, the curated code samples below should help.



The following presents 20 caffe_mul code examples, ordered by popularity.
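Before the examples, it helps to know what caffe_mul itself does: it is Caffe's element-wise vector product, declared in include/caffe/util/math_functions.hpp and backed by MKL's vsMul/vdMul (or an equivalent plain loop when MKL is absent). The sketch below is a minimal reference implementation of its semantics for illustration only, not Caffe's actual code:

// Reference semantics only: y[i] = a[i] * b[i] for i in [0, n).
// The output y may alias a or b; several examples below rely on this
// to update a blob in place, e.g. passing bottom[0]->cpu_data() as a
// and bottom[0]->mutable_cpu_data() as y.
template <typename Dtype>
void caffe_mul(const int n, const Dtype* a, const Dtype* b, Dtype* y) {
  for (int i = 0; i < n; ++i) {
    y[i] = a[i] * b[i];
  }
}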

Example 1: caffe_mul

void MyAccuracyLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {

  Dtype RMSE_lin = 0;
  int count = bottom[0]->count();
  // weighting
  caffe_mul(count,
	    bottom[0]->cpu_data(),
	    bottom[2]->cpu_data(),
	    bottom[0]->mutable_cpu_data());
  caffe_mul(count,
	    bottom[1]->cpu_data(),
	    bottom[2]->cpu_data(),
	    bottom[1]->mutable_cpu_data());
  // rescaling
  caffe_exp(count, bottom[0]->cpu_data(), bottom[0]->mutable_cpu_data());
  caffe_exp(count, bottom[1]->cpu_data(), bottom[1]->mutable_cpu_data());
  // diff
  caffe_sub(
      count,
      bottom[0]->cpu_data(),
      bottom[1]->cpu_data(),
      diff_.mutable_cpu_data());
  // sum(diff^2)
  Dtype ss = caffe_cpu_dot(count, diff_.cpu_data(), diff_.cpu_data());
  // n
  Dtype n = caffe_cpu_asum(count, bottom[2]->cpu_data());
  n += std::numeric_limits<Dtype>::min();
  // sqrt(ss/n)
  RMSE_lin = sqrt(ss/n);

  top[0]->mutable_cpu_data()[0] = RMSE_lin;
}
Developer: sangdon, Project: caffe, Lines: 33, Source: myaccuracy_layer.cpp
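Two things worth noting in Example 1: both caffe_mul calls write their result back into the first operand's own storage (bottom[n]->mutable_cpu_data()), destructively weighting the bottom blobs in place; and assuming bottom[2] holds a binary mask, masked-out entries become exp(0) = 1 in both blobs and cancel in the subtraction, so RMSE_lin = sqrt(ss/n) is the RMSE over the unmasked entries in the exponentiated (linear) domain, with numeric_limits<Dtype>::min() guarding against division by zero when the mask is empty.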


Example 2: caffe_scal

void WeightPlusLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
	const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom){

	const Dtype* bottom_data = bottom[0]->cpu_data();
	const Dtype* top_diff = top[0]->cpu_diff();
	const Dtype* weight = this->blobs_[0]->cpu_data();
	Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();

	caffe_scal(dim_, Dtype(2.0), weight_two_.mutable_cpu_data());

	// gradient with respect to weight
	for (int n = 0; n < batch_; ++n){
		int offset = n*dim_;
		caffe_mul(dim_, weight_two_.cpu_data(), bottom_data + offset, data_meta_.mutable_cpu_data() + offset);
		caffe_mul(dim_, top_diff + offset, data_meta_.cpu_data() + offset, data_meta_.mutable_cpu_data() + offset);
		caffe_axpy(dim_, Dtype(1.0), data_meta_.cpu_data() + offset, this->blobs_[0]->mutable_cpu_diff());  // this-> is required: blobs_ is a member of the dependent base class
	}

	// gradient with respect to bottom data
	if (propagate_down[0]){
		for (int n = 0; n < batch_; ++n){
			int offset = n*dim_;
			caffe_mul(dim_, top_diff + offset, weight_two_.cpu_data(), bottom_diff + offset);
		}
	}

}
Developer: FuchenUSTC, Project: caffe, Lines: 27, Source: weight_plus_layer.cpp


Example 3: if

void BinomialDevianceLossLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  n1 = 0;
  n2 = 0;
  for (int i = 0; i < bottom[1]->num(); ++i) {
    if (static_cast<int>(bottom[1]->cpu_data()[i]) == 1) {
      n1++;
    } else if (static_cast<int>(bottom[1]->cpu_data()[i]) == -1) {
      n2++;
    }
  }

  // LOG(INFO) << n1 << "  " << n2;
  Dtype c = this->layer_param_.binomial_deviance_loss_param().c();
  for (int i = 0; i < bottom[1]->num(); ++i) {
    M_.mutable_cpu_data()[i] = static_cast<int>(bottom[1]->cpu_data()[i]);
    if (static_cast<int>(bottom[1]->cpu_data()[i]) == 1) {
      W_.mutable_cpu_data()[i] = 1.0 / n1;
    } else if (static_cast<int>(bottom[1]->cpu_data()[i]) == -1) {
      W_.mutable_cpu_data()[i] = 1.0 / n2;
      M_.mutable_cpu_data()[i] = -c;
    } else {
      W_.mutable_cpu_data()[i] = 0.0;
    }
  }
  summer_vec_.Reshape(bottom[0]->num(), 1, 1, 1);
  for (int i = 0; i < bottom[0]->num(); ++i){
    exp_.mutable_cpu_data()[i] = Dtype(1);
    summer_vec_.mutable_cpu_data()[i] = Dtype(1);
  }

  Dtype alpha = this->layer_param_.binomial_deviance_loss_param().alpha(); 
  Dtype beta = this->layer_param_.binomial_deviance_loss_param().beta(); 

  caffe_cpu_axpby(
              bottom[1]->num(),
              Dtype(-alpha),
              bottom[0]->cpu_data(),
              Dtype(alpha * beta),
              exp_.mutable_cpu_data());

  caffe_mul(bottom[1]->num(), M_.cpu_data(), exp_.cpu_data(), exp_.mutable_cpu_data());
  caffe_exp(bottom[1]->num(), exp_.cpu_data(), exp_.mutable_cpu_data());
 
  caffe_cpu_axpby(bottom[1]->num(), Dtype(1), exp_.cpu_data(), Dtype(1), summer_vec_.mutable_cpu_data());
  for (int i = 0; i < bottom[0]->num(); ++i){
    summer_vec_.mutable_cpu_data()[i] = log(summer_vec_.cpu_data()[i]);
  }
  // multiply by elimination array
  caffe_mul(bottom[2]->num(), bottom[2]->cpu_data(), summer_vec_.cpu_data(), summer_vec_.mutable_cpu_data());
  Dtype loss = caffe_cpu_dot(bottom[1]->num(), W_.cpu_data(), summer_vec_.cpu_data());
  top[0]->mutable_cpu_data()[0] = loss;
}
Developer: madiken, Project: caffe-CUDNN-fork, Lines: 58, Source: binomial_deviance_loss_layer.cpp
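Stepping through Example 3: with scores s_i in bottom[0], labels ±1 in bottom[1], per-class weights W_i in {1/n1, 1/n2, 0}, and M_i = 1 for positives or -c for negatives, the caffe_cpu_axpby turns exp_ (initialized to 1) into alpha*(beta - s_i); the first caffe_mul applies M_i, caffe_exp exponentiates, the second axpby forms 1 + exp(...), and after the log, the second caffe_mul applies the elimination mask e_i from bottom[2]. The final dot product with W_ therefore evaluates the binomial deviance loss:

loss = sum_i e_i * W_i * log(1 + exp(alpha * (beta - s_i) * M_i))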


Example 4: switch

void EltwiseLayer<Dtype, MItype, MOtype>::Forward_cpu(
    const vector<Blob<MItype>*>& bottom,
    const vector<Blob<MOtype>*>& top) {
  int_tp* mask = NULL;
  const Dtype* bottom_data_a = NULL;
  const Dtype* bottom_data_b = NULL;
  const int_tp count = top[0]->count();
  Dtype* top_data = top[0]->mutable_cpu_data();
  Dtype maxVal = FLT_MAX;
  if (std::is_same<Dtype, half_fp>::value)
    maxVal = HALF_MAX;
  switch (op_) {
  case EltwiseParameter_EltwiseOp_PROD:
    caffe_mul(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(), top_data);
    for (int_tp i = 2; i < bottom.size(); ++i) {
      caffe_mul(count, top_data, bottom[i]->cpu_data(), top_data);
    }
    break;
  case EltwiseParameter_EltwiseOp_SUM:
    caffe_set(count, Dtype(0), top_data);
    // TODO(shelhamer) does BLAS optimize to sum for coeff = 1?
    for (int_tp i = 0; i < bottom.size(); ++i) {
      caffe_axpy(count, coeffs_[i], bottom[i]->cpu_data(), top_data);
    }
    break;
  case EltwiseParameter_EltwiseOp_MAX:
    // Initialize
    mask = max_idx_.mutable_cpu_data();
    caffe_set(count, (int_tp)-1, mask);
    caffe_set(count, Dtype(-maxVal), top_data);
    // bottom 0 & 1
    bottom_data_a = bottom[0]->cpu_data();
    bottom_data_b = bottom[1]->cpu_data();
    for (int_tp idx = 0; idx < count; ++idx) {
      if (bottom_data_a[idx] > bottom_data_b[idx]) {
        top_data[idx] = bottom_data_a[idx];  // maxval
        mask[idx] = 0;  // maxid
      } else {
        top_data[idx] = bottom_data_b[idx];  // maxval
        mask[idx] = 1;  // maxid
      }
    }
    // bottom 2++
    for (int_tp blob_idx = 2; blob_idx < bottom.size(); ++blob_idx) {
      bottom_data_b = bottom[blob_idx]->cpu_data();
      for (int_tp idx = 0; idx < count; ++idx) {
        if (bottom_data_b[idx] > top_data[idx]) {
          top_data[idx] = bottom_data_b[idx];  // maxval
          mask[idx] = blob_idx;  // maxid
        }
      }
    }
    break;
  default:
    LOG(FATAL) << "Unknown elementwise operation.";
  }
}
Developer: naibaf7, Project: caffe, Lines: 57, Source: eltwise_layer.cpp


Example 5: caffe_mul

Dtype EltwiseProductLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  const int count = (*top)[0]->count();
  Dtype* top_data = (*top)[0]->mutable_cpu_data();
  caffe_mul(count, bottom[0]->cpu_data(), bottom[1]->cpu_data(), top_data);
  for (int i = 2; i < bottom.size(); ++i) {
    caffe_mul(count, top_data, bottom[i]->cpu_data(), top_data);
  }
  return Dtype(0.);
}
Developer: AlOa, Project: caffe, Lines: 10, Source: eltwise_product_layer.cpp
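Example 5 uses the legacy Caffe layer interface, in which top is passed as a pointer (vector<Blob<Dtype>*>*) and Forward_cpu returns a Dtype; compare the modern void signature taking const references in Example 4. The caffe_mul idiom is unchanged: multiply the first two bottoms into top_data, then fold each remaining bottom in with an in-place caffe_mul.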


Example 6: LOG

void SigmoidWeightedCrossEntropyLossLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[2]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to weight inputs.";
  }
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    // First, compute the diff
    const int count = bottom[0]->count();
    const int num = bottom[0]->num();
    const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data();
    const Dtype* target = bottom[1]->cpu_data();
    const Dtype* weight = bottom[2]->cpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();

    Dtype* tmp = new Dtype[count << 1];
    Dtype* tmp1 = tmp + count;

    // diff: 1/2
    caffe_set(count, (Dtype)0.5, bottom_diff);
    // diff: 1/2 * \hat{p}
    caffe_mul(count, bottom_diff, sigmoid_output_data, bottom_diff);
    // diff: 1/2 * (1-p) * \hat{p}
    caffe_set(count, (Dtype)1, tmp1);
    caffe_sub(count, tmp1, target, tmp);
    caffe_mul(count, bottom_diff, tmp, bottom_diff);
    // diff: 1/2(1-w) * (1-p) * \hat{p}
    caffe_sub(count, tmp1, weight, tmp);
    caffe_div(count, bottom_diff, tmp, bottom_diff);

    // tmp: 1-\hat{p}
    caffe_sub(count, tmp1, sigmoid_output_data, tmp);
    // tmp: p * (1-\hat{p})
    caffe_mul(count, tmp, target, tmp);
    // tmp: -1/2 * p * (1-\hat{p})
    caffe_set(count, (Dtype)-0.5, tmp1);
    caffe_mul(count, tmp, tmp1, tmp);
    // tmp: -1/2w * p * (1-\hat{p})
    caffe_div(count, tmp, weight, tmp);
    // diff: -(1/2w * p * (1-\hat{p}) - 1/2(1-w) * (1-p) * \hat{p})
    caffe_add(count, bottom_diff, tmp, bottom_diff);

    delete[] tmp;

    // Scale down gradient
    const Dtype loss_weight = top[0]->cpu_diff()[0];
    caffe_scal(count, loss_weight / num, bottom_diff);
  }
}
Developer: kyu-sz, Project: caffe, Lines: 54, Source: sigmoid_weighted_cross_entropy_loss_layer.cpp


Example 7: caffe_mul

void MVNLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  const Dtype* top_data = top[0]->cpu_data();
  const Dtype* bottom_data = bottom[0]->cpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();

  int num;
  if (this->layer_param_.mvn_param().across_channels())
    num = bottom[0]->num();
  else
    num = bottom[0]->num() * bottom[0]->channels();

  int dim = bottom[0]->count() / num;

  if (this->layer_param_.mvn_param().normalize_variance()) {
    caffe_mul(temp_.count(), top_data, top_diff, bottom_diff);
    caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., bottom_diff,
          sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data());
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, 1.,
          mean_.cpu_data(), sum_multiplier_.cpu_data(), 0.,
          bottom_diff);
    caffe_mul(temp_.count(), top_data, bottom_diff, bottom_diff);

    caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_diff,
            sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data());
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, 1.,
            mean_.cpu_data(), sum_multiplier_.cpu_data(), 1.,
            bottom_diff);

    caffe_cpu_axpby(temp_.count(), Dtype(1), top_diff, Dtype(-1. / dim),
        bottom_diff);

    // put the squares of bottom into temp_
    caffe_powx(temp_.count(), bottom_data, Dtype(2),
        temp_.mutable_cpu_data());
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, 1.,
        variance_.cpu_data(), sum_multiplier_.cpu_data(), 0.,
        temp_.mutable_cpu_data());

    caffe_div(temp_.count(), bottom_diff, temp_.cpu_data(), bottom_diff);
  } else {
    caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1. / dim, top_diff,
      sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data());
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
      mean_.cpu_data(), sum_multiplier_.cpu_data(), 0.,
      temp_.mutable_cpu_data());
    caffe_add(temp_.count(), top_diff, temp_.cpu_data(), bottom_diff);
  }
}
Developer: 0hm, Project: caffe, Lines: 51, Source: mvn_layer.cpp


Example 8: caffe_mul

	void MaskingLayer<Dtype>::Forward_cpu(
		const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
		caffe_mul(top[0]->count(), bottom[0]->cpu_data(), this->blobs_[0]->cpu_data(), top[0]->mutable_cpu_data()); // multiply mask, y=a*b
		if (bias_term_) {
			caffe_axpy(top[0]->count(), (Dtype)1.0, this->blobs_[1]->cpu_data(), top[0]->mutable_cpu_data()); // y=a*x+y
		}
	}
Developer: MalteOeljeklaus, Project: winpycaffe, Lines: 7, Source: masking_layer.cpp


Example 9: caffe_mul

void TopologyLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
                                            const vector<bool>& propagate_down,
                                            const vector<Blob<Dtype>*>& bottom) {
    if (this->param_propagate_down_[0]) {
        const Dtype* top_diff = top[0]->cpu_diff();
        const Dtype* bottom_data = bottom[0]->cpu_data();
        Dtype* weighted_bottom_data = weighted_bottom_.mutable_cpu_data();
        // Gradient with respect to weight

//        caffe_cpu_axpby<Dtype>(N_, (Dtype)1., topology_weight_mask, (Dtype)1., bottom_data);
        caffe_mul(N_, weight_mask_.cpu_data(), bottom_data, weighted_bottom_data);

        caffe_cpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype)1.,
                              top_diff, weighted_bottom_data, (Dtype)1., this->blobs_[0]->mutable_cpu_diff());
    }

    if (bias_term_ && this->param_propagate_down_[1]) {
        const Dtype* top_diff = top[0]->cpu_diff();
        // Gradient with respect to bias

        caffe_cpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
                              bias_multiplier_.cpu_data(), (Dtype)1.,
                              this->blobs_[1]->mutable_cpu_diff());
    }
    if (propagate_down[0]) {
        const Dtype* top_diff = top[0]->cpu_diff();
        // Gradient with respect to bottom data
        caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_, (Dtype)1.,
                              top_diff, this->blobs_[0]->cpu_data(), (Dtype)0.,
                bottom[0]->mutable_cpu_diff());
    }
}
Developer: ATDupuis, Project: Deep-Learning---Project, Lines: 32, Source: topology_layer.cpp


Example 10: Dtype

void CosineSimilarityLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {


  for (int i = 0; i < bottom[0]->num(); ++i) {
    summer_vec_.mutable_cpu_data()[i] = Dtype(1);
  }

  int channels = bottom[0]->channels();
  for (int i = 0; i < bottom[0]->num(); ++i) {
    xx_.mutable_cpu_data()[i] = caffe_cpu_dot(bottom[0]->channels(),
        bottom[0]->cpu_data() + i * channels, bottom[0]->cpu_data() + i * channels);
    yy_.mutable_cpu_data()[i] = caffe_cpu_dot(bottom[1]->channels(),
        bottom[1]->cpu_data() + i * channels, bottom[1]->cpu_data() + i * channels);
    xy_.mutable_cpu_data()[i] = caffe_cpu_dot(bottom[0]->channels(),
        bottom[0]->cpu_data() + i * channels, bottom[1]->cpu_data() + i * channels);
  }
  caffe_mul(bottom[1]->num(), xx_.cpu_data(), yy_.cpu_data(), summer_vec_.mutable_cpu_data());

  for (int i = 0; i < bottom[0]->num(); ++i) {
    summer_vec_.mutable_cpu_data()[i] = sqrt(summer_vec_.cpu_data()[i]);
  }
  caffe_div(bottom[1]->num(), xy_.cpu_data(), summer_vec_.cpu_data(), top[0]->mutable_cpu_data());
}
Developer: madiken, Project: caffe-CUDNN-fork, Lines: 25, Source: cosine_similarity_layer.cpp


Example 11: caffe_copy

void SoftmaxLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->cpu_diff();
  const Dtype* top_data = top[0]->cpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  Dtype* scale_data = scale_.mutable_cpu_data();
  int channels = top[0]->shape(softmax_axis_);
  int dim = top[0]->count() / outer_num_;

  // copy top_diff into bottom_diff
  caffe_copy(top[0]->count(), top_diff, bottom_diff);
  for (int i = 0; i < outer_num_; ++i) {
    // compute dot(top_diff, top_data) and subtract them from the bottom diff
    for (int k = 0; k < inner_num_; ++k) {
      // Since bottom_diff is a copy of top_diff, caffe_cpu_strided_dot can
      // read bottom_diff in place of top_diff; this also lets the
      // caffe_cpu_gemm() call below update bottom_diff directly.
      scale_data[k] = caffe_cpu_strided_dot<Dtype>(channels,
          bottom_diff + i * dim + k, inner_num_,
          top_data + i * dim + k, inner_num_);
    }

    // subtraction (very elegantly implemented as a rank-1 update)
    caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels, inner_num_, 1,
        -1., sum_multiplier_.cpu_data(), scale_data, 1., bottom_diff + i * dim);
  }
  // elementwise multiplication
  caffe_mul(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
Developer: runaway, Project: studycaffe, Lines: 30, Source: softmax_layer.cpp
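Read as vector math, this backward pass computes the softmax Jacobian-vector product at each (outer, inner) position:

bottom_diff = top_data .* (top_diff - dot(top_diff, top_data))

that is, dL/dx_i = y_i * (dL/dy_i - sum_k y_k * dL/dy_k). The caffe_cpu_strided_dot computes the per-position dot products, caffe_cpu_gemm subtracts them from every channel as a rank-1 update, and the final caffe_mul supplies the element-wise factor y_i.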


Example 12: switch

void EltwiseLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const bool propagate_down, vector<Blob<Dtype>*>* bottom) {
  if (propagate_down) {
    const int count = top[0]->count();
    const Dtype* top_data = top[0]->cpu_data();
    const Dtype* top_diff = top[0]->cpu_diff();
    for (int i = 0; i < bottom->size(); ++i) {
      const Dtype* bottom_data = (*bottom)[i]->cpu_data();
      Dtype* bottom_diff = (*bottom)[i]->mutable_cpu_diff();
      switch (op_) {
      case EltwiseParameter_EltwiseOp_PROD:
        caffe_div(count, top_data, bottom_data, bottom_diff);
        caffe_mul(count, bottom_diff, top_diff, bottom_diff);
        break;
      case EltwiseParameter_EltwiseOp_SUM:
        if (coeffs_[i] == Dtype(1)) {
          caffe_copy(count, top_diff, bottom_diff);
        } else {
          caffe_cpu_scale(count, coeffs_[i], top_diff, bottom_diff);
        }
        break;
      default:
        LOG(FATAL) << "Unknown elementwise operation.";
      }
    }
  }
}
Developer: aybassiouny, Project: wincaffe-cmake, Lines: 27, Source: eltwise_layer.cpp
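In the PROD branch, the gradient with respect to bottom i is the product of all the other bottoms times top_diff; the code obtains that partial product cheaply as top_data / bottom_data via caffe_div before the caffe_mul with top_diff. The division makes this shortcut ill-defined when an input contains zeros; newer versions of BVLC Caffe's EltwiseLayer offer a stable_prod_grad setting that recomputes the partial products explicitly instead.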


Example 13: caffe_mul

	void DeconvNormLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
		const vector<Blob<Dtype>*>& top)
	{
		Dtype* wa = weights_alphas->mutable_cpu_data();

		exp_layer->Forward(exp_bottom_vec, exp_top_vec);
		for (int ch_in = 0; ch_in < weights_alphas->num(); ++ch_in)
		{
			caffe_mul(alphas->count(), this->blobs_[0]->cpu_data() + this->blobs_[0]->offset(ch_in),
				alphas->cpu_data(), wa + weights_alphas->offset(ch_in));
		}

		deconv2_layer->Forward(bottom, deconv2_top_vec);
		deconv1_layer->Forward(deconv1_bottom_vec, deconv1_top_vec);

		Dtype* top_data = top[0]->mutable_cpu_data();
		const Dtype* deconv1_top_vec_data = deconv1_top_vec[0]->cpu_data();
		const Dtype* deconv2_top_vec_data = deconv2_top_vec[0]->cpu_data();
		caffe_add_scalar(deconv1_top_vec[0]->count(), (Dtype) std::numeric_limits<Dtype>::epsilon(), 
			deconv1_top_vec[0]->mutable_cpu_data());
		for (int n = 0; n < bottom[0]->num(); ++n)
		{
			caffe_div(deconv1_top_vec[0]->count(), deconv2_top_vec_data + deconv2_top_vec[0]->offset(n),
				deconv1_top_vec_data, top_data + top[0]->offset(n));

			if (this->bias_term_)
			{
				const Dtype* bias = this->blobs_[2]->cpu_data();
				caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, top[0]->channels(),
					top[0]->height() * top[0]->width(), 1, (Dtype)1., bias, bias_multiplier.cpu_data(),
					(Dtype)1., top_data + top[0]->offset(n));
			}
		}
	}
Developer: xieguotian, Project: caffe, Lines: 34, Source: deconv_norm_layer.cpp


Example 14: LOG

void NormalizedSigmoidCrossEntropyLossLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    // First, compute the diff
    const int count = bottom[0]->count();
    const int num = bottom[0]->num();
    const int dim = count / num;
    const Dtype* sigmoid_output_data = sigmoid_output_->cpu_data();
    const Dtype* target = bottom[1]->cpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    caffe_sub(count, sigmoid_output_data, target, bottom_diff);
    // Scale down gradient
    const Dtype loss_weight = top[0]->cpu_diff()[0];
    Dtype* scales = new Dtype[count]();

    for (int i = 0; i < dim; ++i) {
      int n_pos = 0;
      int n_neg = 0;
      for (int j = 0; j < num; ++j) {
        int idx = j * dim + i;
        if (target[idx] > 0.5) {
          n_pos++;
        } else {
          n_neg++;
        }
      }
      // Only back propagate if there are both positive and negative samples
      if (n_pos > 0 && n_pos < num) {
        const float ratio = float(n_pos) / n_neg;
        const bool shouldNorm = (ratio >= thres_ || 1. / ratio >= thres_);
        for (int j = 0; j < num; ++j) {
          int idx = j * dim + i;
          if (target[idx] > 0.5) {
            if (shouldNorm) {
              scales[idx] = loss_weight / (n_pos * 2.);
            } else {
              scales[idx] = loss_weight / num;
            }
          } else {
            if (shouldNorm) {
              scales[idx] = loss_weight / (n_neg * 2.);
            } else {
              scales[idx] = loss_weight / num;
            }
          }
        }
      }
    }
    caffe_mul(count, scales, bottom_diff, bottom_diff);
    delete [] scales;
  }
}
Developer: scott89, Project: caffe-crowd, Lines: 57, Source: normalized_sigmoid_cross_entropy_loss_layer.cpp


Example 15: multiplyAllChannelsByMask

template <typename Dtype>
void multiplyAllChannelsByMask(const Dtype* blob, const Dtype* mask_blob, int mask_num,
                               Dtype* blob_result, int sz, int blob_channels) {
  int mask_offset = mask_num * sz;
  for (int j = 0; j < blob_channels; j++) {
    int data_offset = j * sz;
    caffe_mul(sz, blob + data_offset, mask_blob + mask_offset, blob_result + data_offset);
  }
}
Developer: madiken, Project: skaffe_private_old, Lines: 9, Source: bilinear_patch_fast_layer.cpp


Example 16: caffe_set

void PowerLayer<Dtype>::Backward_cpu(
    const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    const int count = bottom[0]->count();
    const Dtype* top_diff = top[0]->cpu_diff();
    if (diff_scale_ == Dtype(0) || power_ == Dtype(1)) {
      caffe_set(count, diff_scale_, bottom_diff);
    } else {
      const Dtype* bottom_data = bottom[0]->cpu_data();
      // Compute dy/dx = scale * power * (shift + scale * x)^(power - 1)
      //               = diff_scale * y / (shift + scale * x)
      if (power_ == Dtype(2)) {
        // Special case for y = (shift + scale * x)^2
        //     -> dy/dx = 2 * scale * (shift + scale * x)
        //              = diff_scale * shift + diff_scale * scale * x
        caffe_cpu_axpby(
            count,
            diff_scale_ * scale_,
            bottom_data,
            Dtype(0),
            bottom_diff);

        if (shift_ != Dtype(0)) {
          caffe_add_scalar(count, diff_scale_ * shift_, bottom_diff);
        }
      } else if (shift_ == Dtype(0)) {
        // Special case for y = (scale * x)^power
        //     -> dy/dx = scale * power * (scale * x)^(power - 1)
        //              = scale * power * (scale * x)^power * (scale * x)^(-1)
        //              = power * y / x
        const Dtype* top_data = top[0]->cpu_data();
        caffe_div(count, top_data, bottom_data, bottom_diff);
        caffe_scal(count, power_, bottom_diff);
      } else {
        caffe_copy(count, bottom_data, bottom_diff);
        if (scale_ != Dtype(1)) {
          caffe_scal(count, scale_, bottom_diff);
        }
        if (shift_ != Dtype(0)) {
          caffe_add_scalar(count, shift_, bottom_diff);
        }
        const Dtype* top_data = top[0]->cpu_data();
        caffe_div<Dtype>(count, top_data, bottom_diff, bottom_diff);
        if (diff_scale_ != Dtype(1)) {
          caffe_scal(count, diff_scale_, bottom_diff);
        }
      }
    }
    if (diff_scale_ != Dtype(0)) {
      caffe_mul(count, top_diff, bottom_diff, bottom_diff);
    }
  }
}
Developer: rickyHong, Project: CaffeForOpenCL, Lines: 56, Source: power_layer.cpp


Example 17: caffe_cpu_sign

void AbsValLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const int count = top[0]->count();
  const Dtype* top_diff = top[0]->cpu_diff();
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->cpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    caffe_cpu_sign(count, bottom_data, bottom_diff);
    caffe_mul(count, bottom_diff, top_diff, bottom_diff);
  }
}
Developer: ALISCIFP, Project: caffe-stn, Lines: 11, Source: absval_layer.cpp


Example 18: caffe_mul

void ScalarLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    const Dtype* top_diff = top[0]->cpu_diff();
    const Dtype* bottom_data = bottom[0]->cpu_data();
    // Hack: store big eltwise product in bottom[0] diff, except in the special
    // case where this layer itself does the eltwise product, in which case we
    // can store it directly in the scalar diff, and we're done.
    const bool is_eltwise = (inner_dim_ == 1 && outer_dim_ == 1);
    Dtype* product = is_eltwise ?
        bottom[1]->mutable_cpu_diff() : bottom[0]->mutable_cpu_diff();
    caffe_mul(top[0]->count(), top_diff, bottom_data, product);
    if (!is_eltwise) {
      Dtype* sum_result = NULL;
      if (inner_dim_ == 1) {
        sum_result = product;
      } else if (sum_result_.count() == 1) {
        const Dtype* sum_mult = sum_multiplier_.cpu_data();
        Dtype* scalar_diff = bottom[1]->mutable_cpu_diff();
        *scalar_diff = caffe_cpu_dot(inner_dim_, product, sum_mult);
      } else {
        const Dtype* sum_mult = sum_multiplier_.cpu_data();
        sum_result = (outer_dim_ == 1) ?
            bottom[1]->mutable_cpu_diff() : sum_result_.mutable_cpu_data();
        caffe_cpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
                       Dtype(1), product, sum_mult, Dtype(0), sum_result);
      }
      if (outer_dim_ != 1) {
        const Dtype* sum_mult = sum_multiplier_.cpu_data();
        Dtype* scalar_diff = bottom[1]->mutable_cpu_diff();
        if (scalar_dim_ == 1) {
          *scalar_diff = caffe_cpu_dot(outer_dim_, sum_mult, sum_result);
        } else {
          caffe_cpu_gemv(CblasTrans, outer_dim_, scalar_dim_,
                         Dtype(1), sum_result, sum_mult, Dtype(0), scalar_diff);
        }
      }
    }
  }
  if (propagate_down[0]) {
    const Dtype* top_diff = top[0]->cpu_diff();
    const Dtype* scalar_data = bottom[1]->cpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    for (int n = 0; n < outer_dim_; ++n) {
      for (int d = 0; d < scalar_dim_; ++d) {
        const Dtype factor = scalar_data[d];
        caffe_cpu_scale(inner_dim_, factor, top_diff, bottom_diff);
        bottom_diff += inner_dim_;
        top_diff += inner_dim_;
      }
    }
  }
}
Developer: xieguotian, Project: caffe, Lines: 53, Source: scalar_layer.cpp


Example 19: caffe_powx

void WeightPlusLayer<Dtype>::Forward_cpu(
	const vector<Blob<Dtype>*>& bottom,
	const vector<Blob<Dtype>*>& top){
	const Dtype* bottom_data = bottom[0]->cpu_data();
	const Dtype* weight = this->blobs_[0]->cpu_data();
	Dtype* top_data = top[0]->mutable_cpu_data();
	for (int n = 0; n < batch_; ++n){
		int offset = n*dim_;
		caffe_powx(dim_, weight, Dtype(2.0), weight_pow_.mutable_cpu_data());
		caffe_mul(dim_, bottom_data + offset, weight_pow_.cpu_data(), top_data + offset);
	}
}
Developer: FuchenUSTC, Project: caffe, Lines: 12, Source: weight_plus_layer.cpp


Example 20: caffe_sub

void SmoothL1LossOHEMLayer<Dtype>::Forward_cpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
    int count = bottom[0]->count();
  
    caffe_sub(
      count,
      bottom[0]->cpu_data(),
      bottom[1]->cpu_data(),
      diff_.mutable_cpu_data());    // d := b0 - b1
    if (has_weights_) {
      caffe_mul(
        count,
        bottom[2]->cpu_data(),
        diff_.cpu_data(),
        diff_.mutable_cpu_data());  // d := w * (b0 - b1)
    }

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int index = 0; index < count; index++) {
      Dtype val = diff_.cpu_data()[index];
      Dtype abs_val = std::abs(val);  // std::abs avoids accidentally picking the int abs() overload
      if (abs_val < 1) {
        errors_.mutable_cpu_data()[index] = 0.5 * val * val;
      } else {
        errors_.mutable_cpu_data()[index] = abs_val - 0.5;
      }
    }

    Dtype loss = caffe_cpu_asum(count, errors_.cpu_data());

    Dtype pre_fixed_normalizer =
      this->layer_param_.loss_param().pre_fixed_normalizer();
    top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
      pre_fixed_normalizer);

    // Output per-instance loss
    if (top.size() >= 2) {
#ifdef _OPENMP
#pragma omp parallel for collapse(2)
#endif
        for (int i = 0; i < outer_num_; ++i) {
            for (int j = 0; j < inner_num_; j++) {
                Dtype sum = 0;
                for (int c = 0; c < bottom[0]->channels(); ++c) {
                    sum += errors_.cpu_data()[(i * bottom[0]->channels() + c) * inner_num_ + j];
                }
                top[1]->mutable_cpu_data()[i * inner_num_ + j] = sum;
            }
        }
    }
}
Developer: intel, Project: caffe, Lines: 53, Source: smooth_L1_loss_ohem_layer.cpp
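For reference, the per-element penalty computed in the OpenMP loop is the smooth-L1 (Huber) function from Fast R-CNN: f(d) = 0.5 * d^2 when |d| < 1, and |d| - 0.5 otherwise, applied to the optionally weighted difference d = w * (b0 - b1). The scalar loss is the sum of these errors divided by the configured normalizer, and the optional second top blob reports the per-instance loss summed over channels.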



Note: The caffe_mul function examples in this article were compiled by 纯净天空 from source code and documentation on GitHub, MSDocs, and similar platforms. The snippets are drawn from various open-source projects; copyright remains with the original authors, and use or redistribution is subject to each project's license. Do not reproduce without permission.

