C++ matrix::Matrix Class Code Examples


This article collects typical usage examples of the C++ matrix::Matrix class. If you are wondering what the Matrix class does, how to use it, or what real code using it looks like, the curated class code examples below may help.



Twenty code examples of the Matrix class are shown below, ordered by popularity by default.

Example 1: setSensorTeaching

void PiMax::setSensorTeaching(const matrix::Matrix& teaching){
  assert(teaching.getM() == number_sensors && teaching.getN() == 1);
  // calculate the a_teaching,
  // that belongs to the distal teaching value by the inverse model.
  a_teaching = (A.pseudoInverse() * (teaching-b)).mapP(0.95, clip);
  intern_isTeaching=true;
}
Developer: CentreForBioRobotics, project: lpzrobots, lines: 7, source file: pimax.cpp
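
The example above maps a distal (sensor-space) teaching signal back into controller space through the pseudoinverse of the forward model A and clips the result. For readers without the lpzrobots matrix library at hand, the following is a minimal sketch of the same idea using Eigen; the use of Eigen, the function name, and its parameters are illustrative assumptions, not part of the project.

#include <Eigen/Dense>

// Hedged sketch: pull a desired sensor vector back into motor/controller space
// via the pseudoinverse of a forward model A (x ≈ A*a + b), then clip to
// [-0.95, 0.95] as mapP(0.95, clip) does above. Illustrative only.
Eigen::VectorXd distalToMotorTeaching(const Eigen::MatrixXd& A,
                                      const Eigen::VectorXd& b,
                                      const Eigen::VectorXd& sensorTarget) {
  Eigen::VectorXd a = A.completeOrthogonalDecomposition().pseudoInverse()
                      * (sensorTarget - b);
  return a.cwiseMax(-0.95).cwiseMin(0.95);
}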


Example 2: setMotorTeaching

void PiMax::setMotorTeaching(const matrix::Matrix& teaching){
  assert(teaching.getM() == number_motors && teaching.getN() == 1);
  // Note: through the clipping the otherwise effectless
  //  teaching with old motor value has now an effect,
  //  namely to drive out of the saturation region.
  a_teaching= teaching.mapP(0.95,clip);
  intern_isTeaching=true;
}
Developer: CentreForBioRobotics, project: lpzrobots, lines: 8, source file: pimax.cpp


Example 3: standardize

matrix::Matrix standardize(const matrix::Matrix& input)
{
    auto means = apply(reduce(input, {1}, matrix::Add()), matrix::Divide(input.size()[1]));

    auto inputMeanSubtracted = broadcast(input, means, {1}, matrix::Subtract());

    auto stddevs = apply(reduce(apply(inputMeanSubtracted,
        matrix::SquareAndScale(1.0 / input.size()[1])), {1}, matrix::Add()), matrix::Sqrt());

    std::cout << stddevs.toString();

    return broadcast(inputMeanSubtracted, stddevs, {1}, matrix::Divide());
}
Developer: sudnya, project: video-classifier, lines: 13, source file: test-linear-regression.cpp
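
standardize() above z-scores the input per feature: it subtracts the per-feature mean and divides by the per-feature standard deviation, both computed over dimension 1 (the samples). A self-contained sketch of the same computation on plain std::vector data follows; the container layout (one feature per inner vector) and the function name are assumptions for illustration.

#include <cmath>
#include <vector>

// Hedged sketch: per-feature z-scoring, each inner vector holding one feature's
// samples (mirrors the reduction over dimension {1} in Example 3).
void standardizeRows(std::vector<std::vector<double>>& data) {
  for (auto& row : data) {
    const double n = static_cast<double>(row.size());
    double mean = 0.0;
    for (double v : row) mean += v / n;
    double var = 0.0;
    for (double v : row) var += (v - mean) * (v - mean) / n;
    const double stddev = std::sqrt(var);
    if (stddev == 0.0) continue;              // constant feature: leave unchanged
    for (double& v : row) v = (v - mean) / stddev;
  }
}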


Example 4: learnHebb

void InvertNChannelControllerHebbH::learnHebb(const matrix::Matrix& context_sensors, const matrix::Matrix& h_update){

  // preprocess context sensors
  Matrix c_sensors = context_sensors;
  for (int i=0; i<number_context_sensors; i++){
    if (c_sensors.val(i,0) < 0.15) {
      c_sensors.val(i,0) = 0; // IR's should only have positive values
    }
  }

  // adapt hebbian weights
  for (uint i=0; i<number_motors; i++){
    for (uint j=0; j<(uint)number_context_sensors; j++){
      if (i==j){ // TODO: remove (it is just for testing)
        double dp = eps_hebb * h_update.val(i,0) * c_sensors.val(j,0) * (1 - pow(p.val(i,j),2));
        // std::cout<<eps_hebb<<"*"<<h_update.val(i,0)<<" * "<<c_sensors.val(j,0)<<std::endl;
        p.val(i,j) += dp;
      }
    }
  }
  /*
  // remove this !!! (just a test)
  for (int i=0; i<number_motors; i++){
    for (int j=0; j<number_context_sensors; j++){
      if ((j==0) || (j==1)){
        p.val(i,j) = -0.1;
      } else {
        p.val(i,j) = 0.1;
      }
    }
  }
  */
}
Developer: CentreForBioRobotics, project: lpzrobots, lines: 34, source file: invertnchannelcontrollerhebbh.cpp
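
The update inside the double loop is a Hebbian rule with a saturation term, dp = eps_hebb * h_i * c_j * (1 - p_ij^2), which keeps each weight inside (-1, 1); the i == j restriction is marked as temporary in the source. Below is a minimal sketch of the unrestricted rule on plain vectors; the container types and names are assumptions for illustration.

#include <cmath>
#include <cstddef>
#include <vector>

// Hedged sketch: Hebbian update dp_ij = eps * h_i * c_j * (1 - p_ij^2),
// with small IR readings thresholded to zero as in Example 4.
void hebbUpdate(std::vector<std::vector<double>>& p,
                const std::vector<double>& h_update,
                std::vector<double> c_sensors,
                double eps) {
  for (double& c : c_sensors)
    if (c < 0.15) c = 0.0;                    // keep only clear positive IR responses
  for (std::size_t i = 0; i < p.size(); ++i)
    for (std::size_t j = 0; j < p[i].size(); ++j)
      p[i][j] += eps * h_update[i] * c_sensors[j] * (1.0 - std::pow(p[i][j], 2.0));
}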


Example 5: predictHebb

/**
 * predict the update of h based on the actual context sensors
 * @param context_sensors prediction is based on these sensors
 */
matrix::Matrix InvertNChannelControllerHebbH::predictHebb(const matrix::Matrix& context_sensors){
  // preprocess context sensors
  Matrix c_sensors = context_sensors;
  for (int i=0; i<number_context_sensors; i++){
    if (c_sensors.val(i,0) < 0.15) {
      c_sensors.val(i,0) = 0; // IR's should only have positive values
    }
  }

  Matrix pred_h_update(number_motors, 1);
  for (unsigned int k = 0; k < number_motors; k++) {
    pred_h_update.val(k,0) = 0;
  }

  for (uint i=0; i<number_motors; i++){
    for (uint j=0; j<(uint)number_context_sensors; j++){
      pred_h_update.val(i,0) += p.val(i,j) * c_sensors.val(j,0); // use the preprocessed sensors
    }
  }

  return pred_h_update;
}
Developer: CentreForBioRobotics, project: lpzrobots, lines: 29, source file: invertnchannelcontrollerhebbh.cpp


Example 6: sliceLayerWeights

matrix::Matrix sliceLayerWeights(const matrix::Matrix& weights, const RecurrentOpsHandle& handle,
    size_t index)
{
    matrix::Dimension begin;
    matrix::Dimension end;

    prnn::rnn::getWeightsRange(begin, end, handle, weights.precision(), index);

    return slice(weights, begin, end);
}
Developer: GYGit, project: persistent-rnn, lines: 10, source file: persistent_rnn_high_level.cpp


Example 7: makePrediction

void CuriosityLoop::makePrediction(const matrix::Matrix& s, const matrix::Matrix& m){

  // Stack sensor and motor values and append a constant bias entry.
  matrix::Matrix sm = s.above(m);
  matrix::Matrix f;
  f.set(1,1);
  f.val(0,0) = 1;
  sm = sm.above(f);

  // Zero every weight whose input or output unit is not enabled for this predictor.
  matrix::Matrix pwMod = predictorWeights;
  for(int i = 0; i < predictorWeights.getM(); i++){     // to
    for(int j = 0; j < predictorWeights.getN(); j++){   // from
      if(pInput.val(j,0) == 1 && pOutput.val(i,0) == 1)
        pwMod.val(i,j) = predictorWeights.val(i,j);
      else
        pwMod.val(i,j) = 0;
      // pwMod.val(i,j) = predictorWeights.val(i,j)*predictorMask.val(i,j);
    }
  }

  matrix::Matrix a = pwMod*sm;  // Make prediction here.
  this->prediction = a;         // The prediction is stored here.

  //************************ UNRESTRICTED PREDICTOR CODE ****************************
  // Also make predictions for the unrestricted predictor.
  matrix::Matrix uPwMod = uPredictorWeights;
  for(int i = 0; i < uPredictorWeights.getM(); i++){    // to
    for(int j = 0; j < uPredictorWeights.getN(); j++){  // from
      if(uPInput.val(j,0) == 1 && uPOutput.val(i,0) == 1)
        uPwMod.val(i,j) = uPredictorWeights.val(i,j);
      else
        uPwMod.val(i,j) = 0;
    }
  }

  matrix::Matrix uA = uPwMod*sm;  // Make prediction here.
  this->uPrediction = uA;         // The prediction is stored here.
}
Developer: humm, project: playful, lines: 55, source file: CuriosityLoop.cpp
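
makePrediction() builds an extended input by stacking sensors, motors, and a constant bias entry, zeroes every weight whose input or output unit is disabled, and multiplies the masked weight matrix with that input. A compact sketch of the masked matrix-vector product (bias handling omitted) is given below; all names here are illustrative assumptions.

#include <cstddef>
#include <vector>

// Hedged sketch: masked linear prediction as in Example 7. Only connections
// whose input and output units are both enabled contribute to the output.
// Assumes inputMask and each weight row have the same length as input.
std::vector<double> maskedPredict(const std::vector<std::vector<double>>& weights,
                                  const std::vector<bool>& inputMask,
                                  const std::vector<bool>& outputMask,
                                  const std::vector<double>& input) {
  std::vector<double> prediction(weights.size(), 0.0);
  for (std::size_t i = 0; i < weights.size(); ++i) {
    if (!outputMask[i]) continue;             // output unit disabled
    for (std::size_t j = 0; j < weights[i].size(); ++j)
      if (inputMask[j])                       // input unit enabled
        prediction[i] += weights[i][j] * input[j];
  }
  return prediction;
}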


Example 8: forwardPropRecurrent

void forwardPropRecurrent(matrix::Matrix& activations,
    matrix::Matrix& reserve,
    const matrix::Matrix& weights,
    const RecurrentOpsHandle& handle)
{
    auto scratch = prnn::rnn::getForwardPropScratch(handle, activations.precision());

    prnn::rnn::forwardPropRecurrent(matrix::DynamicView(activations),
        matrix::ConstDynamicView(copy(activations)),
        matrix::ConstDynamicView(weights),
        matrix::DynamicView(scratch),
        matrix::DynamicView(reserve),
        handle);
}
Developer: GYGit, project: persistent-rnn, lines: 14, source file: persistent_rnn_high_level.cpp


Example 9: backPropGradientsRecurrent

void backPropGradientsRecurrent(matrix::Matrix& dWeights,
    const matrix::Matrix& activations,
    const matrix::Matrix& outputActivations,
    const matrix::Matrix& reserve,
    const RecurrentOpsHandle& handle)
{
    auto scratch = prnn::rnn::getBackPropGradientsScratch(handle,
        activations.precision());

    prnn::rnn::backPropGradientsRecurrent(matrix::DynamicView(dWeights),
        matrix::ConstDynamicView(activations),
        matrix::ConstDynamicView(outputActivations),
        matrix::DynamicView(scratch),
        matrix::ConstDynamicView(reserve),
        handle);
}
Developer: GYGit, project: persistent-rnn, lines: 16, source file: persistent_rnn_high_level.cpp


Example 10: compare

void compare(const matrix::Matrix& predictions, const matrix::Matrix& reference)
{
    size_t samples = predictions.size()[1];

    auto differences = apply(predictions, reference, matrix::Subtract());
    auto squareDifferences = apply(differences, matrix::Square());
    auto normalizedSquareDifferences = apply(squareDifferences, matrix::Divide(samples));

    double cost = std::sqrt(reduce(normalizedSquareDifferences, {}, matrix::Add())[0]);

    std::cout << "Total RMS error was " << cost << "\n";

    if(cost < 15.0)
    {
        std::cout << "Test Passed\n";
    }
    else
    {
        std::cout << "Test Failed\n";
    }
}
Developer: sudnya, project: video-classifier, lines: 21, source file: test-linear-regression.cpp
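
compare() computes the root-mean-square error over all entries, normalised by the number of samples, and treats anything under 15.0 as a pass. The same computation on flat arrays, as a hedged sketch (the 15.0 threshold is taken from Example 10, everything else is illustrative):

#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Hedged sketch: RMS error between predictions and reference, normalised by
// the sample count, with the pass threshold copied from Example 10.
bool rmsTestPassed(const std::vector<double>& predictions,
                   const std::vector<double>& reference,
                   std::size_t samples) {
  double sumSq = 0.0;
  for (std::size_t i = 0; i < predictions.size(); ++i) {
    const double d = predictions[i] - reference[i];
    sumSq += d * d;
  }
  const double cost = std::sqrt(sumSq / static_cast<double>(samples));
  std::cout << "Total RMS error was " << cost << "\n";
  return cost < 15.0;
}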


Example 11: setSensorTeaching

void SeMoX::setSensorTeaching(const matrix::Matrix& teaching){
  assert(teaching.getM() == number_sensors && teaching.getN() == 1);
  // calculate the y_teaching, that belongs to the distal teaching value by the inverse model.
  y_teaching = (A.pseudoInverse(0.001) * (teaching-B)).mapP(0.95, clip);
  intern_useTeaching=true;
}
Developer: tnachstedt, project: lpzrobots, lines: 6, source file: semox.cpp


Example 12: seth

void PiMax::seth(const matrix::Matrix& _h){
  assert(h.getM() == _h.getM() && h.getN() == _h.getN());
  h=_h;
}
Developer: CentreForBioRobotics, project: lpzrobots, lines: 4, source file: pimax.cpp


Example 13: setC

void PiMax::setC(const matrix::Matrix& _C){
  assert(C.getM() == _C.getM() && C.getN() == _C.getN());
  C=_C;
}
Developer: CentreForBioRobotics, project: lpzrobots, lines: 4, source file: pimax.cpp


Example 14: setA

void PiMax::setA(const matrix::Matrix& _A){
  assert(A.getM() == _A.getM() && A.getN() == _A.getN());
  A=_A;
}
Developer: CentreForBioRobotics, project: lpzrobots, lines: 4, source file: pimax.cpp


Example 15: seth

void RandomDyn::seth(const matrix::Matrix& _h){
  assert(h.getM() == _h.getM() && h.getN() == _h.getN());
  h=_h;
}
Developer: artificialsimon, project: lpzrobots, lines: 4, source file: randomdyn.cpp


Example 16: setC

void RandomDyn::setC(const matrix::Matrix& _C){
  assert(C.getM() == _C.getM() && C.getN() == _C.getN());
  C=_C;
}
Developer: artificialsimon, project: lpzrobots, lines: 4, source file: randomdyn.cpp


Example 17: assembleNetworkOutput

 virtual matrix::Matrix assembleNetworkOutput(const matrix::Matrix& output) const {
   return output.rows(number_sensors, number_sensors + number_motors);
 }
Developer: CentreForBioRobotics, project: lpzrobots, lines: 3, source file: main.cpp


Example 18: setA

void SosAvgGrad::setA(const matrix::Matrix& _A){
  assert(A.getM() == _A.getM() && A.getN() == _A.getN());
  A=_A;
}
Developer: amr1985, project: playful, lines: 4, source file: sos_avggrad.cpp


Example 19: updatePrediction

double CuriosityLoop::updatePrediction(const matrix::Matrix& smHist, const matrix::Matrix& s,
                                       const matrix::Matrix& m, int phase){

  // Stack sensor and motor values and append a constant bias entry.
  matrix::Matrix sm = s.above(m);
  matrix::Matrix f;
  f.set(1,1);
  f.val(0,0) = 1;
  sm = sm.above(f);

  // 1. Go through the predictions of this predictor, determining the prediction error in each dimension.
  matrix::Matrix error;
  error.set(smHist.getM(), 1);

  prediction_error = 0;
  for(int i = 0; i < prediction.getM(); i++){
    if(pOutput.val(i,0) == 1){
      error.val(i,0) = prediction.val(i,0) - sm.val(i,0);
      prediction_error = prediction_error + pow(error.val(i,0),2);
    } else {
      // This dimension is not predicted and does not count towards the error.
      error.val(i,0) = 0;
    }
  }
  parent_error.val(phase,0) = prediction_error;

  // 2. Change the weights by the delta rule, clipping them to [-10, 10].
  for(int i = 0; i < prediction.getM(); i++){           // to
    for(int j = 0; j < predictorWeights.getN(); j++){   // from
      // predictorWeights.val(i,j) = predictorWeights.val(i,j) - 0.00001*error.val(i,0)*smHist.val(j,0);
      predictorWeights.val(i,j) = predictorWeights.val(i,j) - 0.0001*error.val(i,0)*smHist.val(j,0);
      if(predictorWeights.val(i,j) > 10)
        predictorWeights.val(i,j) = 10;
      else if(predictorWeights.val(i,j) < -10)
        predictorWeights.val(i,j) = -10;
    }
  }
  prediction_error_time_average = 0.9999*prediction_error_time_average + (1-0.9999)*prediction_error;

  // Update the fitness of this predictor based on the instantaneous reduction/increase in prediction error.
  // TODO: improve the method of determining this gradient later.
  this->fitness = 0.1 + 100*(prediction_error_time_average - old_prediction_error_time_average);
  old_prediction_error_time_average = prediction_error_time_average;

  //************************ UNRESTRICTED PREDICTOR CODE ****************************
  // Update the unrestricted predictor as well, always.
  // 1. Determine the prediction error of the unrestricted predictor in each dimension.
  matrix::Matrix uError;
  uError.set(smHist.getM(), 1);

  uPrediction_error = 0;
  for(int i = 0; i < uPrediction.getM(); i++){
    if(uPOutput.val(i,0) == 1){
      uError.val(i,0) = uPrediction.val(i,0) - sm.val(i,0);
      uPrediction_error = uPrediction_error + pow(uError.val(i,0),2);
    } else {
      // This dimension is not predicted and does not count towards the error.
      uError.val(i,0) = 0;
    }
  }
  offspring_error.val(phase,0) = uPrediction_error;

  // 2. Change the weights by the delta rule, clipping them to [-10, 10].
  for(int i = 0; i < uPrediction.getM(); i++){          // to
    for(int j = 0; j < uPredictorWeights.getN(); j++){  // from
      uPredictorWeights.val(i,j) = uPredictorWeights.val(i,j) - 0.0001*uError.val(i,0)*smHist.val(j,0);
      if(uPredictorWeights.val(i,j) > 10)
        uPredictorWeights.val(i,j) = 10;
      else if(uPredictorWeights.val(i,j) < -10)
        uPredictorWeights.val(i,j) = -10;
    }
  }

  return this->fitness;
}
Developer: humm, project: playful, lines: 97, source file: CuriosityLoop.cpp
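
The weight update in updatePrediction() is a plain delta rule with a fixed learning rate of 0.0001 and clipping to [-10, 10]; the fitness is then driven by the change in an exponential moving average of the prediction error. A minimal sketch of one delta-rule step with clipping is shown below (C++17 for std::clamp); the container types and names are illustrative assumptions.

#include <algorithm>
#include <cstddef>
#include <vector>

// Hedged sketch: one delta-rule step with weight clipping, as in Example 19:
// w_ij <- clamp(w_ij - lr * error_i * input_j, -10, 10).
void deltaRuleStep(std::vector<std::vector<double>>& weights,
                   const std::vector<double>& error,
                   const std::vector<double>& input,
                   double lr = 0.0001) {
  for (std::size_t i = 0; i < weights.size(); ++i)
    for (std::size_t j = 0; j < weights[i].size(); ++j)
      weights[i][j] = std::clamp(weights[i][j] - lr * error[i] * input[j], -10.0, 10.0);
}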


Example 20: seth

 virtual void seth(const matrix::Matrix& _h){
   assert(h.getM() == _h.getM() && h.getN() == _h.getN());
   h=_h;
 }
Developer: amr1985, project: playful, lines: 4, source file: normalsim.cpp



Note: The matrix::Matrix class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Refer to the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.

