Java ArrayMath Class Code Examples


This article collects typical usage examples of the Java class edu.stanford.nlp.math.ArrayMath. If you have been wondering what ArrayMath is for, how to use it, or what ArrayMath code looks like in practice, the curated class-level code examples below should help.



The ArrayMath class belongs to the edu.stanford.nlp.math package. Twenty code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
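Before the examples, here is a minimal standalone sketch of the most common ArrayMath calls used below (argmax, expInPlace, sum, multiplyInPlace, innerProduct). It assumes Stanford CoreNLP, which provides edu.stanford.nlp.math.ArrayMath, is on the classpath; the class name ArrayMathDemo is made up for illustration.

import edu.stanford.nlp.math.ArrayMath;

public class ArrayMathDemo {
	public static void main(String[] args) {
		double[] scores = {1.0, 2.5, 0.3};

		// Index of the highest score, as in the greedy decoder of Example 1.
		int best = ArrayMath.argmax(scores);

		// Exponentiate and renormalize in place: a manual softmax over the scores.
		ArrayMath.expInPlace(scores);
		double z = ArrayMath.sum(scores);
		ArrayMath.multiplyInPlace(scores, 1.0 / z);

		// Inner product of two vectors, used throughout the L-BFGS/OWL-QN examples.
		double[] other = {0.2, 0.5, 0.3};
		double dot = ArrayMath.innerProduct(scores, other);

		System.out.println("argmax = " + best + ", p(best) = " + scores[best] + ", dot = " + dot);
	}
}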

Example 1: greedyDecode

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
/** 
 * THIS CLOBBERS THE LABELS, stores its decoding into them.
 * Does progressive rolling edge feature extraction
 **/
public void greedyDecode(ModelSentence sentence, boolean storeConfidences) {
	int T = sentence.T;
	sentence.labels = new int[T];
	sentence.edgeFeatures[0] = startMarker();
	
	if (storeConfidences) sentence.confidences = new double[T];

	double[] labelScores = new double[numLabels];
	for (int t=0; t<T; t++) {
		computeLabelScores(t, sentence, labelScores);
		sentence.labels[t] = ArrayMath.argmax(labelScores);
		if (t < T-1)
			sentence.edgeFeatures[t+1] = sentence.labels[t];
		if (storeConfidences) {
			ArrayMath.expInPlace(labelScores);
			double Z = ArrayMath.sum(labelScores);
			ArrayMath.multiplyInPlace(labelScores, 1.0/Z);
			sentence.confidences[t] = labelScores[ sentence.labels[t] ];
		}
	}
}
 
Author: weizh, Project: geolocator-3.0, Lines of code: 26, Source file: Model.java
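When storeConfidences is set, the loop above converts the raw label scores into a probability for the chosen label via an explicit softmax. As an equation (a restatement of the code, not part of the original source):

\[
\hat y_t = \arg\max_y s_t(y), \qquad
\mathrm{confidence}_t = \frac{\exp\bigl(s_t(\hat y_t)\bigr)}{\sum_{y} \exp\bigl(s_t(y)\bigr)}
\]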


Example 2: mapDirByInverseHessian

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
void mapDirByInverseHessian() {
	int count = sList.size();

	if (count != 0) {				
		for (int i = count - 1; i >= 0; i--) {
			//mheilman: The program will try to divide by zero here unless there is a check 
			//that the parameters change at each iteration.  See comments in the minimize() method.
			//A roList value is the inner product of the change in the gradient 
			//and the change in parameters between the current and last iterations.  
			//See the discussion of L-BFGS in Nocedal and Wright's Numerical Optimization book 
			//(though I think that defines rho as the multiplicative inverse of what is here).
			alphas[i] = -ArrayMath.innerProduct(sList.get(i), dir) / roList.get(i); 
			ArrayMath.addMultInPlace(dir, yList.get(i), alphas[i]);
		}

		double[] lastY = yList.get(count - 1);
		double yDotY = ArrayMath.innerProduct(lastY, lastY);
		double scalar = roList.get(count - 1) / yDotY;
		ArrayMath.multiplyInPlace(dir, scalar);

		for (int i = 0; i < count; i++) {
			double beta = ArrayMath.innerProduct(yList.get(i), dir) / roList.get(i);
			ArrayMath.addMultInPlace(dir, sList.get(i), -alphas[i] - beta);
		}
	}
}
 
Author: weizh, Project: geolocator-3.0, Lines of code: 27, Source file: OWLQN.java
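As the in-code comment notes, each roList entry is the curvature product from the L-BFGS two-loop recursion. In the notation of Nocedal and Wright (a restatement for reference, not part of the original source):

\[
s_k = x_{k+1} - x_k, \qquad y_k = \nabla f_{k+1} - \nabla f_k, \qquad \rho_k = \frac{1}{y_k^{\top} s_k},
\]

so roList.get(i) stores the inner product y_i^T s_i itself rather than its reciprocal, which is why the code divides by it instead of multiplying.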


Example 3: dirDeriv

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
double dirDeriv() {
	if (l1weight == 0) {
		return ArrayMath.innerProduct(dir, grad);
	} else {
		double val = 0.0;
		for (int i = 0; i < dim; i++) {
			//mheilman: I added this if-statement to avoid penalizing bias parameters.
			if(OWLQN.biasParameters.contains(i)){
				val += dir[i] * grad[i]; 
				continue;
			}
			if (dir[i] != 0) {
				if (x[i] < 0) {
					val += dir[i] * (grad[i] - l1weight);
				} else if (x[i] > 0) {
					val += dir[i] * (grad[i] + l1weight);
				} else if (dir[i] < 0) {
					val += dir[i] * (grad[i] - l1weight);
				} else if (dir[i] > 0) {
					val += dir[i] * (grad[i] + l1weight);
				}
			}
		}
		return val;
	}
}
 
Author: weizh, Project: geolocator-3.0, Lines of code: 27, Source file: OWLQN.java
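dirDeriv computes the directional derivative of the L1-regularized objective along dir, choosing the one-sided subgradient according to the sign of x[i] (or of dir[i] when x[i] is exactly zero) and skipping the L1 term for bias parameters. Schematically (a restatement of the code, not part of the original source):

\[
D_{\mathbf d} f(\mathbf x) = \sum_i d_i \bigl( g_i + \lambda_1 \operatorname{sgn}(x_i) \bigr),
\]

with \operatorname{sgn}(x_i) replaced by \operatorname{sgn}(d_i) at x_i = 0, and the \lambda_1 term dropped for bias coordinates.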


Example 4: getNextPoint

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
private boolean getNextPoint(double alpha) {			
	ArrayMath.addMultInto(newX, x, dir, alpha);
	/*if (OWLQN.isConstrained())
		newX = OWLQN.projectWeights(newX);*/ //TODO
	if (l1weight > 0) {
		for (int i=0; i<dim; i++) {
			//mheilman: I added this if-statement to avoid penalizing bias parameters.
			if (OWLQN.biasParameters.contains(i)) {
				continue;
			}
			if (x[i] * newX[i] < 0.0) {
				newX[i] = 0.0;
			}
		}
	}
	return true;
}
 
Author: weizh, Project: geolocator-3.0, Lines of code: 18, Source file: OWLQN.java


Example 5: bestSequence

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
/**
 * Samples a tag sequence from the given SequenceModel rather than running
 * Viterbi: each position is drawn from the locally normalized distribution
 * of its scores.
 * @return an array containing the int tags of the sampled sequence
 */
public int[] bestSequence(SequenceModel ts) {
  
  int[] sample = new int[ts.length()+ts.leftWindow()];

  for (int pos = ts.leftWindow(); pos < sample.length; pos++) {
    double[] scores = ts.scoresOf(sample, pos);
    double total = 0.0;
    for (int i = 0; i < scores.length; i++) {
      scores[i] = Math.exp(scores[i]);
    }
    ArrayMath.normalize(scores);
    int l = ArrayMath.sampleFromDistribution(scores);
    sample[pos] = ts.getPossibleValues(pos)[l];
  }

  return sample;
}
 
Author: paulirwin, Project: Stanford.NER.Net, Lines of code: 23, Source file: SequenceSampler.java


Example 6: applyInitialHessian

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
public double[] applyInitialHessian(double[] x) {

      switch (scaleOpt) {
      case SCALAR:
        say("I");
        ArrayMath.multiplyInPlace(x, gamma);
        break;
      case DIAGONAL:
        say("D");
        if (d != null) {
          // Check sizes
          if (x.length != d.length) {
            throw new IllegalArgumentException("Vector of incorrect size passed to applyInitialHessian in QNInfo class");
          }
          // Scale element-wise
          for (int i = 0; i < x.length; i++) {
            x[i] = x[i] / (d[i]);
          }
        }
        break;
      }

      return x;

    }
 
Author: paulirwin, Project: Stanford.NER.Net, Lines of code: 26, Source file: QNMinimizer.java


Example 7: computeDir

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
private void computeDir(double[] dir, double[] fg, double[] x, QNInfo qn, Function func)
    throws SurpriseConvergence {
  System.arraycopy(fg, 0, dir, 0, fg.length);

  int mmm = qn.size();
  double[] as = new double[mmm];

  for (int i = mmm - 1; i >= 0; i--) {
    as[i] = qn.getRho(i) * ArrayMath.innerProduct(qn.getS(i), dir);
    plusAndConstMult(dir, qn.getY(i), -as[i], dir);
  }

  // multiply by hessian approximation
  qn.applyInitialHessian(dir);

  for (int i = 0; i < mmm; i++) {
    double b = qn.getRho(i) * ArrayMath.innerProduct(qn.getY(i), dir);
    plusAndConstMult(dir, qn.getS(i), as[i] - b, dir);
  }

  ArrayMath.multiplyInPlace(dir, -1);

  if (useOWLQN) { // step (2) in Galen & Gao 2007
    constrainSearchDir(dir, fg, x, func);
  }
}
 
Author: paulirwin, Project: Stanford.NER.Net, Lines of code: 27, Source file: QNMinimizer.java


Example 8: getHistories

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
protected double[] getHistories(String[] tags, History h) {
  boolean rare = maxentTagger.isRare(ExtractorFrames.cWord.extract(h));
  Extractors ex = maxentTagger.extractors, exR = maxentTagger.extractorsRare;
  String w = pairs.getWord(h.current);
  double[] lS, lcS;
  if((lS = localScores.get(w)) == null) {
    lS = getHistories(tags, h, ex.local, rare ? exR.local : null);
    localScores.put(w,lS);
  }
  if((lcS = localContextScores[h.current]) == null) {
    lcS = getHistories(tags, h, ex.localContext, rare ? exR.localContext : null);
    localContextScores[h.current] = lcS;
    ArrayMath.pairwiseAddInPlace(lcS,lS);
  }
  double[] totalS = getHistories(tags, h, ex.dynamic, rare ? exR.dynamic : null);
  ArrayMath.pairwiseAddInPlace(totalS,lcS);
  return totalS;
}
 
Author: jaimeguzman, Project: data_mining, Lines of code: 19, Source file: TestSentence.java


Example 9: findBestUsingSampling

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
/**
 * Finds the best sequence by collecting numSamples samples, scoring them, and then choosing
 * the highest scoring sample.
 * @return the array of type int representing the highest scoring sequence
 */
public int[] findBestUsingSampling(SequenceModel model, int numSamples, int sampleInterval, int[] initialSequence) {
  List samples = collectSamples(model, numSamples, sampleInterval, initialSequence);
  int[] best = null;
  double bestScore = Double.NEGATIVE_INFINITY;
  for (int i = 0; i < samples.size(); i++) {
    int[] sequence = (int[]) samples.get(i);
    double score = model.scoreOf(sequence);
    if (score>bestScore) {
      best = sequence;
      bestScore = score;
      System.err.println("found new best ("+bestScore+")");
      System.err.println(ArrayMath.toString(best));
    }
  }
  return best;
}
 
Author: benblamey, Project: stanford-nlp, Lines of code: 22, Source file: SequenceGibbsSampler.java


Example 10: dirDeriv

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
double dirDeriv() {
    if (l1weight == 0) {
        return ArrayMath.innerProduct(dir, grad);
    } else {
        double val = 0.0;
        for (int i = 0; i < dim; i++) {
            if (OWLQN.biasParameters.contains(i)) {
                val += dir[i] * grad[i];
                continue;
            }
            if (dir[i] != 0) {
                if (x[i] < 0) {
                    val += dir[i] * (grad[i] - l1weight);
                } else if (x[i] > 0) {
                    val += dir[i] * (grad[i] + l1weight);
                } else if (dir[i] < 0) {
                    val += dir[i] * (grad[i] - l1weight);
                } else if (dir[i] > 0) {
                    val += dir[i] * (grad[i] + l1weight);
                }
            }
        }
        return val;
    }
}
 
Author: vietansegan, Project: segan, Lines of code: 26, Source file: OWLQN.java


Example 11: conditionalLogProbGivenPrevious

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
/**
 * Computes the probability of the tag OF being at the end of the table given
 * that the previous tag sequence in table is GIVEN. given is at the beginning,
 * of is at the end.
 *
 * @return the probability of the tag OF being at the end of the table
 */
public double conditionalLogProbGivenPrevious(int[] given, int of) {
  if (given.length != windowSize - 1) {
    throw new IllegalArgumentException("conditionalLogProbGivenPrevious requires given one less than clique size (" +
            windowSize + ") but was " + Arrays.toString(given));
  }
  // Note: other similar methods could be optimized like this one, but this is the one the CRF uses....
  /*
  int startIndex = indicesFront(given);
  int numCellsToSum = SloppyMath.intPow(numClasses, windowSize - given.length);
  double z = ArrayMath.logSum(table, startIndex, startIndex + numCellsToSum);
  int i = indexOf(given, of);
  System.err.printf("startIndex is %d, numCellsToSum is %d, i is %d (of is %d)%n", startIndex, numCellsToSum, i, of);
  */
  int startIndex = indicesFront(given);
  double z = ArrayMath.logSum(table, startIndex, startIndex + numClasses);
  int i = startIndex + of;
  // System.err.printf("startIndex is %d, numCellsToSum is %d, i is %d (of is %d)%n", startIndex, numClasses, i, of);

  return table[i] - z;
}
 
Author: benblamey, Project: stanford-nlp, Lines of code: 28, Source file: FactorTable.java
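The value returned is a conditional log-probability computed over the contiguous block of table entries that share the prefix given (a restatement of the code, not part of the original source):

\[
\log P(\text{of} \mid \text{given}) = T[\text{start} + \text{of}] \;-\; \log \sum_{y=0}^{C-1} \exp\bigl(T[\text{start} + y]\bigr),
\]

where start = indicesFront(given), C = numClasses, and T is the log-space table.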


Example 12: computeDocLikelihood

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
/**
 * Assumes the entities are labeled with topics/slots.
 */
public double computeDocLikelihood(final List<TextEntity> docEntities) {
  double likelihood = 0.0;
  for( TextEntity entity : docEntities ) {
    // Probs are set in log-space.
    double[] probs = new double[sampler.numTopics];
    getTopicDistribution(entity, probs);
    likelihood += ArrayMath.max(probs);
  }
  return likelihood;
}
 
Author: nchambers, Project: probschemas, Lines of code: 14, Source file: Inference.java


Example 13: wbSmoothing

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
private static float[] wbSmoothing(int[] counts, int[] totalCounts) {
  float[] p = new float[counts.length];
  double cN = ArrayMath.sum(counts);
  double cNT = ArrayMath.sum(totalCounts);
  double lambda = cN / (cN + W);
  for (int i = 0; i < counts.length; ++i) {
    double p_mle = counts[i] / cN;
    double p_backoff = totalCounts[i] / cNT;
    p[i] = (float) (lambda * p_mle + (1.0 - lambda) * p_backoff);
  }
  return p;
}
 
Author: stanfordnlp, Project: phrasal, Lines of code: 13, Source file: DTUFeatureExtractor.java
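wbSmoothing interpolates the maximum-likelihood estimate with a backoff distribution, Witten-Bell style. Written out (a restatement of the code, not part of the original source):

\[
p_i = \lambda\,\frac{c_i}{N} + (1-\lambda)\,\frac{c_i^{\mathrm{tot}}}{N^{\mathrm{tot}}}, \qquad \lambda = \frac{N}{N + W},
\]

where N = \sum_i c_i, N^{tot} = \sum_i c_i^{tot}, and W is a constant of the enclosing class.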


Example 14: addOneSmoothing

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
private static float[] addOneSmoothing(int[] counts) {
  float[] p = new float[counts.length];
  float n = ArrayMath.sum(counts) + counts.length * DELTA;
  for (int binI = 0; binI < counts.length; ++binI) {
    p[binI] = (counts[binI] * 1.0f + DELTA) / n;
  }
  return p;
}
 
Author: stanfordnlp, Project: phrasal, Lines of code: 9, Source file: DTUFeatureExtractor.java
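Despite its name, addOneSmoothing is add-delta (Lidstone) smoothing with the class constant DELTA; in equation form (a restatement of the code, not part of the original source):

\[
p_i = \frac{c_i + \delta}{\sum_j c_j + B\,\delta},
\]

where B = counts.length is the number of bins.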


Example 15: getBestTag

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
/**
 * Determine best tag based on current word and its immediate predecessors.
 *
 * @param s
 *          <i>leftWindow</i> plus one words
 * @param o
 *          Offset with respect to last position.
 * @return Best tag and its probability.
 */
public Pair<IString, Float> getBestTag(IString[] s, int o) {
  int loc = s.length - 1 + o;

  IStringArrayWrapper aw = null;
  Pair<IString, Float> tag;

  if (CACHE_POS) {
    aw = new IStringArrayWrapper(s);
    tag = cache.get(aw);
    if (tag != null)
      return tag;
  }

  init(s);

  int[] bestTags = new int[len];
  int[][] vals = new int[len][];
  for(int pos = 0 ; pos < len ; pos++) {
    vals[pos] = getPossibleValues(pos);
    bestTags[pos] = vals[pos][0];
  }

  this.initializeScorer();
  double[] scores = scoresOf(bestTags, loc);

  int am = ArrayMath.argmax(scores);

  // TODO
  bestTags[loc] = vals[loc][am];
  cleanUpScorer();

  tag = new Pair<IString, Float>(new IString(maxentTagger.getTag(bestTags[loc])),
          (float) scores[am]);
  if (CACHE_POS)
    cache.put(aw, tag);
  return tag;
}
 
Author: stanfordnlp, Project: phrasal, Lines of code: 47, Source file: PrefixTagger.java


Example 16: makeSteepestDescDir

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
void makeSteepestDescDir() {
	if (l1weight == 0) {
		ArrayMath.multiplyInto(dir, grad, -1);
	} else {

		for (int i=0; i<dim; i++) {
			//mheilman: I added this if-statement to avoid penalizing bias parameters.
			if(OWLQN.biasParameters.contains(i)){
				dir[i] = -grad[i];
				continue;
			}
			if (x[i] < 0) {
				dir[i] = -grad[i] + l1weight;
			} else if (x[i] > 0) {
				dir[i] = -grad[i] - l1weight;
			} else {
				if (grad[i] < -l1weight) {
					dir[i] = -grad[i] - l1weight;
				} else if (grad[i] > l1weight) {
					dir[i] = -grad[i] + l1weight;
				} else {
					dir[i] = 0;
				}
			}
		}
	}
	steepestDescDir = dir.clone(); // deep copy needed
}
 
Author: weizh, Project: geolocator-3.0, Lines of code: 29, Source file: OWLQN.java
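makeSteepestDescDir builds the OWL-QN steepest-descent direction as the negative pseudo-gradient of the L1-regularized objective, in the spirit of Andrew and Gao (2007). As a restatement of the code, not part of the original source:

\[
\diamond_i f(\mathbf x) =
\begin{cases}
g_i + \lambda_1 \operatorname{sgn}(x_i), & x_i \neq 0,\\
g_i - \lambda_1, & x_i = 0,\ g_i > \lambda_1,\\
g_i + \lambda_1, & x_i = 0,\ g_i < -\lambda_1,\\
0, & \text{otherwise},
\end{cases}
\qquad d_i = -\,\diamond_i f(\mathbf x),
\]

with d_i = -g_i for bias parameters, which are exempt from the L1 penalty.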


Example 17: derivativeAt

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
@Override
public double[] derivativeAt(double[] flatCoefs) {
	double[] g = new double[model.flatIDsize()];
	model.setCoefsFromFlat(flatCoefs);
	for (ModelSentence s : mSentences) {
		model.computeGradient(s, g);
	}
	ArrayMath.multiplyInPlace(g, -1);
	addL2regularizerGradient(g, flatCoefs);
	return g;
}
 
Author: weizh, Project: geolocator-3.0, Lines of code: 12, Source file: Train.java


Example 18: toMatrixString

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
@SuppressWarnings({"unchecked"})
public String toMatrixString(int cellSize) {
  List<K1> firstKeys = new ArrayList<K1>(firstKeySet());
  List<K2> secondKeys = new ArrayList<K2>(secondKeySet());
  Collections.sort((List<? extends Comparable>)firstKeys);
  Collections.sort((List<? extends Comparable>)secondKeys);
  double[][] counts = toMatrix(firstKeys, secondKeys);
  return ArrayMath.toString(counts, cellSize, firstKeys.toArray(), secondKeys.toArray(), new DecimalFormat(), true);
}
 
Author: FabianFriedrich, Project: Text2Process, Lines of code: 10, Source file: TwoDimensionalCounter.java


Example 19: toMatrixString

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
@SuppressWarnings( { "unchecked" })
public String toMatrixString(int cellSize) {
  List<K1> firstKeys = new ArrayList<K1>(firstKeySet());
  List<K2> secondKeys = new ArrayList<K2>(secondKeySet());
  Collections.sort((List<? extends Comparable>) firstKeys);
  Collections.sort((List<? extends Comparable>) secondKeys);
  double[][] counts = toMatrix(firstKeys, secondKeys);
  return ArrayMath.toString(counts, cellSize, firstKeys.toArray(), secondKeys.toArray(), new DecimalFormat(), true);
}
 
Author: paulirwin, Project: Stanford.NER.Net, Lines of code: 10, Source file: TwoDimensionalCounter.java


Example 20: takeStep

import edu.stanford.nlp.math.ArrayMath; // import the required package/class
@Override
protected void takeStep(AbstractStochasticCachingDiffFunction dfunction){
  for(int i = 0; i < x.length; i++){
    double thisGain = fixedGain*gainSchedule(k,5*numBatches)/(diag[i]);
    newX[i] = x[i] - thisGain*grad[i];
  }

  //Get a new pair...
  say(" A ");
  if (pairMem > 0 && sList.size() == pairMem || sList.size() == pairMem) {
    s = sList.remove(0);
    y = yList.remove(0);
  } else {
    s = new double[x.length];
    y = new double[x.length];
  }

  s = ArrayMath.pairwiseSubtract(newX, x);
  dfunction.recalculatePrevBatch = true;
  System.arraycopy(dfunction.derivativeAt(newX,bSize),0,y,0,grad.length);

  ArrayMath.pairwiseSubtractInPlace(y,newGrad);  // newY = newY-newGrad
  double[] comp = new double[x.length];

  sList.add(s);
  yList.add(y);
  updateDiag(diag,s,y);

}
 
Author: paulirwin, Project: Stanford.NER.Net, Lines of code: 30, Source file: ScaledSGDMinimizer.java



Note: the edu.stanford.nlp.math.ArrayMath class examples in this article were collected from source code hosted on platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.

