This article collects typical usage examples of the C# class AForge.Neuro.ActivationNetwork. If you are wondering what the ActivationNetwork class is for and how to use it in C#, the curated examples below should help.
The ActivationNetwork class belongs to the AForge.Neuro namespace. Twenty code examples are shown below, sorted by popularity by default; upvoting the snippets you find useful feeds back into how the system ranks and recommends C# samples.
Example 1: Execute
public override ConfusionMatrix Execute()
{
    // Create a network with one layer and a single neuron in that layer
    var network = new ActivationNetwork(new ThresholdFunction(), 3, 1);
    // Keep a reference to that neuron
    var neuron = network.Layers[0].Neurons[0] as ActivationNeuron;
    // Create the perceptron learning algorithm;
    // the library's perceptron implements a single-layer linear classifier
    var teacher = new PerceptronLearning(network);
    teacher.LearningRate = 0.1;
    // Pad each vector with a constant 1 at the end (bias input)
    var richTraining = AlgorithmHelpers.PaddDimension(trainingSet);
    var richTesting = AlgorithmHelpers.PaddDimension(testSet);
    // Train the network until the error is small enough
    // or 500 epochs have been run
    int epochs = 0;
    while (true)
    {
        double error = teacher.RunEpoch(richTraining, trainingOutput); // optionally divide by trainingSet.Length
        ++epochs;
        if (error < 0.025 * trainingSet.Length || epochs == 500) break;
    }
    var predicted = richTesting
        .Select(x => neuron.Compute(x))
        .Select(x => Convert.ToInt32(x))
        .ToArray();
    // Build a confusion matrix from the predicted and expected labels
    ConfusionMatrix cmatrix = new ConfusionMatrix(predicted, expected, POSITIVE, NEGATIVE);
    OnAlgorithmEnded(Enumerable.Repeat(neuron, 1), cmatrix);
    return cmatrix;
}
Developer: salufa, Project: MachineLearning, Lines of code: 40, Source file: PerceptronRuntime.cs
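AlgorithmHelpers.PaddDimension is defined elsewhere in that project and is not shown here; based on the comment above, it presumably appends a constant 1.0 to every sample so the single perceptron neuron can learn a bias term. A minimal sketch under that assumption (the class and method names are kept as in the snippet, but the behaviour is assumed):

using System;

public static class AlgorithmHelpers
{
    // Assumed behaviour: copy each sample and append a constant 1.0 as a bias input.
    public static double[][] PaddDimension(double[][] samples)
    {
        var padded = new double[samples.Length][];
        for (int i = 0; i < samples.Length; i++)
        {
            padded[i] = new double[samples[i].Length + 1];
            Array.Copy(samples[i], padded[i], samples[i].Length);
            padded[i][samples[i].Length] = 1.0; // bias term
        }
        return padded;
    }
}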
Example 2: AdditionalNeuralNetwork
public AdditionalNeuralNetwork(int numberOfTags)
{
    this.network = new ActivationNetwork(new SigmoidFunction(1.0d), numberOfTags, numberOfTags);
    this.teacher = new ISupervisedLearning[] { new DeltaRuleLearning(this.network), new PerceptronLearning(this.network), new BackPropagationLearning(this.network) };
    //this.teacher.LearningRate = 0.10d;
    //this.teacher.Momentum = 0.10d;
}
Developer: KommuSoft, Project: MLTag, Lines of code: 7, Source file: AdditionalNeuralNetwork.cs
Example 3: test
public void test() {
    // initialize input and output values (the targets correspond to logical OR)
    var input = new double[4][] {
        new double[] { 0, 0 }, new double[] { 0, 1 },
        new double[] { 1, 0 }, new double[] { 1, 1 }
    };
    var output = new double[4][] {
        new double[] { 0 }, new double[] { 1 },
        new double[] { 1 }, new double[] { 1 }
    };
    // create neural network
    var network = new ActivationNetwork(
        new SigmoidFunction(2),
        2,  // two inputs in the network
        //2, // two neurons in the first (hidden) layer -- left out here
        1); // one neuron in the output layer
    // create teacher
    var teacher =
        new BackPropagationLearning(network);
    // loop
    while (true) {
        // run one epoch of the learning procedure
        var error = teacher.RunEpoch(input, output);
        // check the error value to see if we need to stop
        if (error < 0.001) {
            break;
        }
    }
    Console.WriteLine(network.Compute(new double[] { 0, 0 })[0] + ","
        + network.Compute(new double[] { 0, 1 })[0] + ","
        + network.Compute(new double[] { 1, 0 })[0] + ","
        + network.Compute(new double[] { 1, 1 })[0]);
}
Developer: RainsSoft, Project: Code2Xml, Lines of code: 34, Source file: PerceptronTest.cs
Example 4: IA
public IA(Game game, int players)
    : base(game)
{
    rndSeedGen = new Random();
    rndControl = new Random();
    rndMovControl = new Random();
    this.comidas = null;
    this.jugadores = null;
    this.numWeights = HIDDEN_UNITS0 * (INPUT_UNITS + 1) + HIDDEN_UNITS1 * (HIDDEN_UNITS0 + 1) + OUTPUT_UNITS * (HIDDEN_UNITS1 + 1);
    redes = new ActivationNetwork[players];
    for (int i = 0; i < redes.Length; i++)
    {
        redes[i] = new ActivationNetwork(new SigmoidFunction(400), INPUT_UNITS, HIDDEN_UNITS0, HIDDEN_UNITS1, OUTPUT_UNITS);
    }
    inputVector = new double[INPUT_UNITS];
    outputVector = new double[OUTPUT_UNITS];
    doneEvents = new ManualResetEvent[players];
    for (int i = 0; i < players; i++) doneEvents[i] = new ManualResetEvent(false);
    // The range parameters can be tweaked to change how the networks evolve;
    // the selection method can be swapped out as well.
    chromosomeGenerator = new UniformGenerator(new Range(-10f, 10f), rndSeedGen.Next(-100, 100));
    mutationAdditionGenerator = new UniformGenerator(new Range(-8f, 8f), rndSeedGen.Next(-100, 100));
    mutationMultiplierGenerator = new UniformGenerator(new Range(-8f, 8f), rndSeedGen.Next(-100, 100));
    fitnessFunction = new GameFitnessFunction();
    selectionMethod = new EliteSelection();
    padre = new gameChromosome(chromosomeGenerator, mutationMultiplierGenerator, mutationAdditionGenerator, numWeights);
    poblacion = new Population(WorldGame.JUGADORES, padre, fitnessFunction, selectionMethod);
}
Developer: maxrevilo, Project: EvolvingNeuralNetworksXNA, Lines of code: 29, Source file: IA.cs
Example 5: Prepare
public virtual void Prepare()
{
    PrepareData();
    PrepareCharts();
    network = new ActivationNetwork(new Tanh(0.2),
        Sizes[0],
        Sizes.Skip(1).ToArray());
    network.ForEachWeight(z => rnd.NextDouble() * 2 - 1);
    teacher = new BackPropagationLearning(network);
    teacher.LearningRate = 1;
    Form = new Form()
    {
        Text = GetType().Name,
        Size = new Size(800, 600),
        FormBorderStyle = FormBorderStyle.FixedDialog,
        Controls =
        {
            AreaChart,
            HistoryChart
        }
    };
}
Developer: Bulgano, Project: AIML, Lines of code: 26, Source file: RegressionTaskV0.cs
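Tanh is not one of the stock activation functions shipped with AForge.Neuro (ThresholdFunction, SigmoidFunction, BipolarSigmoidFunction), and ForEachWeight is invoked here through an extension or helper defined in the same project (a static variant appears in Example 6 below), so both presumably belong to the sample code rather than the library. A minimal sketch of a tanh activation implementing AForge's IActivationFunction, assuming the constructor argument is a steepness factor:

using System;
using AForge.Neuro;

// Hypothetical sketch: a hyperbolic-tangent activation with steepness alpha.
public class Tanh : IActivationFunction
{
    private readonly double alpha;

    public Tanh(double alpha) { this.alpha = alpha; }

    public double Function(double x) { return Math.Tanh(alpha * x); }

    // d/dx tanh(alpha * x) = alpha * (1 - tanh^2(alpha * x))
    public double Derivative(double x)
    {
        double y = Function(x);
        return alpha * (1 - y * y);
    }

    // Derivative expressed through an already computed function value y
    public double Derivative2(double y) { return alpha * (1 - y * y); }
}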
Example 6: ForEachWeight
static void ForEachWeight(ActivationNetwork network, Func<double, double> modifier)
{
    foreach (var l in network.Layers)
        foreach (var n in l.Neurons)
            for (int i = 0; i < n.Weights.Length; i++)
                n.Weights[i] = modifier(n.Weights[i]);
}
Developer: Bulgano, Project: AIML, Lines of code: 7, Source file: Learning.cs
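A short usage sketch (the network shape and the random generator are illustrative assumptions): re-initialise every weight with a small random value before training. Note that this helper touches only the weights, not the neuron thresholds, and that the lambda used with the extension-method variant in Examples 5, 9 and 10 ignores the current value, which has the same net effect.

var rnd = new Random();
var network = new ActivationNetwork(new SigmoidFunction(2), 2, 3, 1);
// Draw each weight uniformly from [-1, 1).
ForEachWeight(network, w => rnd.NextDouble() * 2 - 1);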
Example 7: Test
public void Test()
{
    ActivationNetwork network = new ActivationNetwork(
        new SigmoidFunction(),
        2,  // two inputs in the network
        2,  // two neurons in the first (hidden) layer
        1); // one neuron in the output layer
    BackPropagationLearning teacher = new BackPropagationLearning(network);
    double lastError = double.MaxValue;
    int counter = 0;
    while (true)
    {
        counter++;
        var error = teacher.RunEpoch(input, output);
        // stop once the error is small and has essentially stopped improving
        if (lastError - error < 0.0000001 && error < 0.001)
            break;
        lastError = error;
    }
    //var bla = network.Compute(input[0])[0];
    //var round = Math.Round(network.Compute(input[0])[0], 2);
    //var result = output[0][0];
    //Assert.IsTrue(Math.Abs(round - result) < double.Epsilon);
    Assert.IsTrue(Math.Abs(network.Compute(input[0])[0] - output[0][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[1])[0] - output[1][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[2])[0] - output[2][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[3])[0] - output[3][0]) < 0.03);
    Console.WriteLine($"Loop counter = {counter}.");
}
Developer: clagoos, Project: colorful-cylinder-puzzle, Lines of code: 31, Source file: XorNeuroTest.cs
Example 8: Learn
public void Learn()
{
    var network = new ActivationNetwork(new BipolarSigmoidFunction(), Constants.StoneCount, 1);
    var teacher = new BackPropagationLearning(network); //new PerceptronLearning(network);
    var data = LoadData("4-6-2012-04-24.know");
    double error = 1.0;
    int index = 0;
    while (error > 0.001 && index < 100000) {
        error = teacher.RunEpoch(data.Item1, data.Item2);
        index++;
    }
    network.Save("4-6-2012-04-24.bp.net");
    var text = "□○○○●○○□○●●□□●□□";
    var i = ToDouble(text); // -2
    var o = network.Compute(i);
    // rescale the network output to a stone-count scale
    var eval = o[0] * 2 * Constants.StoneCount - Constants.StoneCount;
    Console.WriteLine("{0} {1}", text, eval);
}
Developer: coolcode, Project: ai, Lines of code: 26, Source file: NeuralLearning.cs
Example 9: CreateNetwork
protected virtual void CreateNetwork()
{
    network = new ActivationNetwork(new Tanh(1), 1, 5, 1);
    network.ForEachWeight(z => rnd.NextDouble() * 2 - 1);
    teacher = new BackPropagationLearning(network);
    teacher.LearningRate = 1;
}
Developer: vadimostanin, Project: AIML, Lines of code: 8, Source file: RegressionTask.V0.cs
Example 10: CreateNetwork
protected override void CreateNetwork()
{
    network = new ActivationNetwork(new Tanh(0.1), 1, 5, 1);
    network.ForEachWeight(z => rnd.NextDouble() * 2 - 1);
    teacher = new BackPropagationLearning(network);
    teacher.LearningRate = 1;
    teacher.Momentum = 0.3;
}
Developer: Bulgano, Project: AIML, Lines of code: 10, Source file: RegressionTask.V4.cs
Example 11: NeuralNetworkBot1
public NeuralNetworkBot1()
    : base(
        "Neural Network Bot I",
        "Runs a neural network to compute a score out of different simulated moves",
        true)
{
    _network = new ActivationNetwork(new SigmoidFunction(), 4 * 4, 4 * 4, 1);
    //_network.Randomize();
    //ActivationNetwork.Load();
    //_network.Save();
}
Developer: jdehaan, Project: Arena2048, Lines of code: 11, Source file: NeuralNetworkBot1.cs
Example 12: Estimate
public EstimationResult Estimate(IEnumerable<IDateValue> dateValues)
{
    var data = dateValues.ToArray();
    var samplesCount = data.Length - LayerWidth;
    var factor = 1.7 / data.Length;
    var yMin = data.Min(x => x.Value);
    // Build sliding-window samples: LayerWidth consecutive values predict the next one,
    // shifted and scaled towards the working range of the bipolar sigmoid.
    var input = new double[samplesCount][];
    var output = new double[samplesCount][];
    for (var i = 0; i < samplesCount; i++)
    {
        input[i] = new double[LayerWidth];
        output[i] = new double[1];
        for (var j = 0; j < LayerWidth; j++)
            input[i][j] = (data[i + j].Value - yMin) * factor - 0.85;
        output[i][0] = (data[i + LayerWidth].Value - yMin) * factor - 0.85;
    }
    var network = new ActivationNetwork(
        new BipolarSigmoidFunction(SigmoidAlphaValue),
        LayerWidth, LayerWidth * 2, 1);
    var teacher = new BackPropagationLearning(network)
    {
        LearningRate = LearningRate,
        Momentum = Momentum
    };
    var solutionSize = data.Length - LayerWidth;
    var solution = new double[solutionSize, 2];
    var networkInput = new double[LayerWidth];
    for (var j = 0; j < solutionSize; j++)
        solution[j, 0] = j + LayerWidth;
    TimesLoop.Do(Iterations, () =>
    {
        teacher.RunEpoch(input, output);
        for (int i = 0, n = data.Length - LayerWidth; i < n; i++)
        {
            for (var j = 0; j < LayerWidth; j++)
                networkInput[j] = (data[i + j].Value - yMin) * factor - 0.85;
            // invert the scaling to bring the prediction back to the original units
            solution[i, 1] = (network.Compute(networkInput)[0] + 0.85) / factor + yMin;
        }
    });
    return EstimationResult.Create(solution[0, 1], this);
}
Developer: kkalinowski, Project: TemperatureEstimator, Lines of code: 53, Source file: NeuronNetworkEngine.cs
Example 13: NeuralNetworkBot2
public NeuralNetworkBot2()
    : base(
        "Neural Network Bot II",
        "Runs a neural network to directly calculate outputs for each move",
        true)
{
    _network = new ActivationNetwork(new SigmoidFunction(), 4 * 4, 4 * 4, 2);
    //_network.Randomize();
    Load();
    _teacher = new BackPropagationLearning(_network);
    _teacher.LearningRate = 0.05;
    _teacher.Momentum = 0.05;
}
Developer: jdehaan, Project: Arena2048, Lines of code: 14, Source file: NeuralNetworkBot2.cs
Example 14: ChromosomeToNetwork
public static void ChromosomeToNetwork(gameChromosome chromosome, ActivationNetwork network)
{
    double[] values = chromosome.Value;
    int l = 0;
    for (int i = 0; i < network.LayersCount; i++)
        for (int j = 0; j < network[i].NeuronsCount; j++)
            for (int k = 0; k <= network[i][j].InputsCount; k++)
            {
                if (k == 0)
                    network[i][j].Threshold = values[l];
                else
                    network[i][j][k - 1] = values[l];
                l++;
            }
}
Developer: maxrevilo, Project: EvolvingNeuralNetworksXNA, Lines of code: 15, Source file: ChromosomeNetworkMapper.cs
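The indexer-based access (network[i][j][k], LayersCount, NeuronsCount) appears to follow the older AForge API; newer releases expose the same data through Layers, Neurons, Weights and Threshold. A hypothetical usage sketch in the context of Example 4, assuming the enclosing class is named ChromosomeNetworkMapper after its source file and reusing poblacion and redes from that constructor:

// After each genetic epoch, copy the best chromosome's genes into a network before it plays.
poblacion.RunEpoch();
var best = (gameChromosome)poblacion.BestChromosome;
ChromosomeNetworkMapper.ChromosomeToNetwork(best, redes[0]);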
Example 15: Main
static void Main(string[] args)
{
    // initialize input and output values (the targets correspond to logical XOR)
    double[][] input = new double[4][] {
        new double[] {0, 0}, new double[] {0, 1},
        new double[] {1, 0}, new double[] {1, 1}
    };
    double[][] output = new double[4][] {
        new double[] {0}, new double[] {1},
        new double[] {1}, new double[] {0}
    };
    // create neural network
    ActivationNetwork network = new ActivationNetwork(
        new SigmoidFunction(1),
        2,  // two inputs in the network
        2,  // two neurons in the first (hidden) layer
        1); // one neuron in the output layer
    // create teacher
    BackPropagationLearning teacher =
        new BackPropagationLearning(network);
    // run a fixed number of learning epochs, printing the error after each one
    for (int i = 0; i < 10000; i++)
    {
        double error = teacher.RunEpoch(input, output);
        Console.Out.WriteLine("#" + i + "\t" + error);
    }
    double[] ret1 = network.Compute(new double[] { 0, 0 });
    double[] ret2 = network.Compute(new double[] { 1, 0 });
    double[] ret3 = network.Compute(new double[] { 0, 1 });
    double[] ret4 = network.Compute(new double[] { 1, 1 });
    Console.Out.WriteLine();
    Console.Out.WriteLine("Eval(0, 0) = " + ret1[0]);
    Console.Out.WriteLine("Eval(1, 0) = " + ret2[0]);
    Console.Out.WriteLine("Eval(0, 1) = " + ret3[0]);
    Console.Out.WriteLine("Eval(1, 1) = " + ret4[0]);
    Console.ReadLine();
}
Developer: abaffa, Project: INF1771, Lines of code: 45, Source file: Program.cs
Example 16: Main
static void Main()
{
    weights = new double[3];
    network = new ActivationNetwork(new SignumActivationFunction(), 2, 1);
    // initial threshold and weights of the single neuron
    weights[0] = ((ActivationNeuron)network.Layers[0].Neurons[0]).Threshold = 0;
    weights[1] = network.Layers[0].Neurons[0].Weights[0] = 0.9;
    weights[2] = network.Layers[0].Neurons[0].Weights[1] = 0.2;
    learning = new PerceptronLearning(network);
    learning.LearningRate = 0.005;
    form = new MyForm() { WindowState = FormWindowState.Maximized };
    form.Paint += (s, a) => Redraw(a.Graphics);
    // feed one sample every 10 ms so the learning process can be watched on screen
    var timer = new System.Windows.Forms.Timer();
    timer.Interval = 10;
    timer.Tick += NextSample;
    timer.Start();
    Application.Run(form);
}
Developer: vadimostanin, Project: AIML, Lines of code: 20, Source file: Perceptron.cs
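SignumActivationFunction is not part of AForge.Neuro either, so it is presumably defined in the sample project. A minimal sketch of what it might look like, assuming a simple +1/-1 step on the weighted sum; the derivative methods return 0 only to satisfy the interface, since the perceptron learning rule used above does not rely on derivatives:

using AForge.Neuro;

// Hypothetical sketch of a signum (bipolar step) activation function.
public class SignumActivationFunction : IActivationFunction
{
    public double Function(double x)
    {
        return x >= 0 ? 1 : -1;
    }

    // The step function has no useful derivative; 0 is returned as a placeholder.
    public double Derivative(double x) { return 0; }

    public double Derivative2(double y) { return 0; }
}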
Example 17: Evaluate
public double Evaluate(IChromosome chromosome)
{
    // Build a neural network from the chromosome and evaluate how well it fits the data
    DoubleArrayChromosome dac = (DoubleArrayChromosome)chromosome;
    ActivationNetwork Network = new ActivationNetwork(
        new BipolarSigmoidFunction(sigmoidAlphaValue),
        mArchitecture[0], mArchitecture[1], mArchitecture[2]);
    int current = 0;
    int i = 0;
    // Hidden layer weights
    for (i = 0; i < mArchitecture[1]; i++)
    {
        for (int j = 0; j < mArchitecture[0]; j++)
        {
            Network[0][i][j] = dac.Value[current++];
        }
    }
    // Output layer weights
    for (i = 0; i < mArchitecture[2]; i++)
    {
        for (int j = 0; j < mArchitecture[1]; j++)
        {
            Network[1][i][j] = dac.Value[current++];
        }
    }
    // Sum of squared errors over the training set; a higher fitness means a lower error
    double Sum = 0.0;
    for (int cnt = 0; cnt < mInput.Length; cnt++)
    {
        double[] predicted_output = Network.Compute(mInput[cnt]);
        for (int l = 0; l < predicted_output.Length; l++)
        {
            Sum += (predicted_output[l] - mOutput[cnt][l]) * (predicted_output[l] - mOutput[cnt][l]);
        }
    }
    return 100 - Sum;
}
Developer: pignatov, Project: TSANN, Lines of code: 41, Source file: NeuralNetworkFitness.cs
Example 18: TestGenetic
public void TestGenetic()
{
    ActivationNetwork network = new ActivationNetwork(new SigmoidFunction(), 2, 2, 1);
    // evolutionary (genetic) training with a population of 10 candidate weight sets
    EvolutionaryLearning superTeacher = new EvolutionaryLearning(network, 10);
    double lastError = double.MaxValue;
    int counter = 0;
    while (true)
    {
        counter++;
        var error = superTeacher.RunEpoch(input, output);
        if (lastError - error < 0.0000001 && error < 0.0001)
            break;
        lastError = error;
    }
    Assert.IsTrue(Math.Abs(network.Compute(input[0])[0] - output[0][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[1])[0] - output[1][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[2])[0] - output[2][0]) < 0.03);
    Assert.IsTrue(Math.Abs(network.Compute(input[3])[0] - output[3][0]) < 0.03);
    Console.WriteLine($"Loop counter = {counter}.");
}
Developer: clagoos, Project: colorful-cylinder-puzzle, Lines of code: 22, Source file: XorNeuroTest.cs
Example 19: RunEpochTest1
public void RunEpochTest1()
{
    Accord.Math.Tools.SetupGenerator(0);
    double[][] input =
    {
        new double[] { -1, -1 },
        new double[] { -1, 1 },
        new double[] { 1, -1 },
        new double[] { 1, 1 }
    };
    double[][] output =
    {
        new double[] { -1 },
        new double[] { 1 },
        new double[] { 1 },
        new double[] { -1 }
    };
    Neuron.RandGenerator = new ThreadSafeRandom(0);
    ActivationNetwork network = new ActivationNetwork(
        new BipolarSigmoidFunction(2), 2, 2, 1);
    var teacher = new ParallelResilientBackpropagationLearning(network);
    double error = 1.0;
    while (error > 1e-5)
        error = teacher.RunEpoch(input, output);
    for (int i = 0; i < input.Length; i++)
    {
        double actual = network.Compute(input[i])[0];
        double expected = output[i][0];
        Assert.AreEqual(expected, actual, 0.01);
        Assert.IsFalse(Double.IsNaN(actual));
    }
}
Developer: KommuSoft, Project: accord_framework, Lines of code: 39, Source file: ResilientPropagationLearningTest.cs
Example 20: DiscreteNeuralNetworkByChord
public DiscreteNeuralNetworkByChord(List<NGram<Chord>[]> bad, List<NGram<Chord>[]> okay, List<NGram<Chord>[]> good, IActivationFunction function)
{
    bad.NullCheck();
    okay.NullCheck();
    good.NullCheck();
    bad.Any().AssertTrue();
    okay.Any().AssertTrue();
    good.Any().AssertTrue();
    // Flatten each n-gram sequence into a training vector paired with its quality label
    List<Tuple<double[], double[]>> input = new List<Tuple<double[], double[]>>(bad.Count + okay.Count + good.Count);
    input.AddRange(
        bad.Select(x => new Tuple<double[], double[]>(
            x.SelectMany(y => y.SelectMany(p => ConvertChordIntoTrainingInput(p))).ToArray(),
            Enumerable.Repeat<double>(DiscreteNeuralNetworkByChord.BADWEIGHT, bad.Count).ToArray())));
    input.AddRange(
        okay.Select(x => new Tuple<double[], double[]>(
            x.SelectMany(y => y.SelectMany(p => ConvertChordIntoTrainingInput(p))).ToArray(),
            Enumerable.Repeat<double>(OkayWeight, okay.Count).ToArray())));
    input.AddRange(
        good.Select(x => new Tuple<double[], double[]>(
            x.SelectMany(y => y.SelectMany(p => ConvertChordIntoTrainingInput(p))).ToArray(),
            Enumerable.Repeat<double>(DiscreteNeuralNetworkByChord.GOODWEIGHT, good.Count).ToArray())));
    // Scale the inputs by the global maximum and truncate every sample to the shortest vector length
    this.Max = input.Max(x => x.Item1.Max());
    int minIndex = input.Min(x => x.Item1.Length);
    var normalized = input.Select(item => Tuple.Create(item.Item1.Take(minIndex).Select(x => x / this.Max).ToArray(), item.Item2.Take(minIndex).ToArray())).ToArray();
    this.trainingData = normalized.ToArray();
    this.ActivationNetwork = new ActivationNetwork(function, this.trainingData.Max(y => y.Item1.Length), (HiddenLayerSize == 0) ? 23 : HiddenLayerSize, 1);
    this.LearningMethod = new ResilientBackpropagationLearning(this.ActivationNetwork);
    this.ActivationNetwork.Randomize();
}
Developer: johndpope, Project: Improvisation, Lines of code: 38, Source file: DiscreteNeuralNetworkByChord.cs
Note: The AForge.Neuro.ActivationNetwork examples in this article were collected from GitHub, MSDocs and other source-code and documentation platforms, and the snippets were selected from open-source projects contributed by their respective authors. The copyright of the source code remains with the original authors; distribution and use must follow the license of the corresponding project. Do not reproduce without permission.