本文整理汇总了Python中pybrain.supervised.RPropMinusTrainer类的典型用法代码示例。如果您正苦于以下问题:Python RPropMinusTrainer类的具体用法?Python RPropMinusTrainer怎么用?Python RPropMinusTrainer使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了RPropMinusTrainer类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: trainedLSTMNN2
def trainedLSTMNN2():
    """
    Build a simple LSTM network, train it until convergence on the
    first-order dataset, export the trained net, and return it.

    The dead code kept below is the manual network construction that
    buildSimpleLSTMNetwork() apparently replaced:

    n = RecurrentNetwork()
    inp = LinearLayer(100, name = 'input')
    hid = LSTMLayer(30, name='hidden')
    out = LinearLayer(1, name='output')
    #add modules
    n.addOutputModule(out)
    n.addInputModule(inp)
    n.addModule(hid)
    #add connections
    n.addConnection(FullConnection(inp, hid))
    n.addConnection(FullConnection(hid, out))
    n.addRecurrentConnection(FullConnection(hid, hid))
    n.sortModules()
    """
    n = buildSimpleLSTMNetwork()   # project helper, defined elsewhere in this file/project
    print "Network created"
    d = load1OrderDataSet()        # project helper, defined elsewhere
    print "Data loaded"
    # RProp- trainer: no learning rate needed; verbose prints per-epoch error.
    t = RPropMinusTrainer(n, dataset=d, verbose=True)
    t.trainUntilConvergence()
    exportANN(n)                   # persist trained network (project helper)
    return n
开发者ID:kamilsa,项目名称:KAIProject,代码行数:31,代码来源:honn.py
示例2: train
def train(self, params):
    """
    Train self.net with RProp- on the windowed history buffer, then replay
    the buffer so the recurrent state matches its end.

    :param params: dict; uses 'prediction_nstep' (input/target offset) and
                   'num_epochs'.
    """
    self.net.reset()
    ds = SequentialDataSet(self.nDimInput, self.nDimOutput)
    trainer = RPropMinusTrainer(self.net, dataset=ds, verbose=False)
    history = self.window(self.history, params)
    resets = self.window(self.resets, params)
    # Sample i: input is the value nstep steps back, target is history[i][0].
    for i in xrange(params['prediction_nstep'], len(history)):
        if not resets[i-1]:
            ds.addSample(self.inputEncoder.encode(history[i-params['prediction_nstep']]),
                         self.outputEncoder.encode(history[i][0]))
        if resets[i]:
            ds.newSequence()   # a reset flag terminates the current sequence
    # print ds.getSample(0)
    # print ds.getSample(1)
    # print ds.getSample(1000)
    # print " training data size", ds.getLength(), " len(history) ", len(history), " self.history ", len(self.history)
    # print ds
    if len(history) > 1:
        trainer.trainEpochs(params['num_epochs'])
    # Replay the buffer to warm up the LSTM state after training.
    self.net.reset()
    for i in xrange(len(history) - params['prediction_nstep']):
        symbol = history[i]   # NOTE(review): unused — kept for parity with sibling impls
        output = self.net.activate(ds.getSample(i)[0])
        if resets[i]:
            self.net.reset()
开发者ID:chanceraine,项目名称:nupic.research,代码行数:33,代码来源:suite.py
示例3: main
def main():
    """
    Train a 1-5-1 recurrent LSTM on violin WAV samples with RProp-, then
    synthesize new audio by feeding the net its own output, and write it out.

    Fix: the generation loop previously ran 500000 iterations while
    generated_data only holds 10000 slots, guaranteeing an IndexError at
    i == 10000; the loop is now bounded by the buffer length.
    """
    generated_data = [0 for i in range(10000)]   # synthesis buffer; bounds the loop below
    rate, data = get_data_from_wav("../../data/natabhairavi_violin.wav")
    data = data[1000:190000]
    print("Got wav")
    # Next-sample prediction dataset: target is the following sample
    # (cycle() wraps the last target around to the first element).
    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)
    net = buildNetwork(1, 5, 1,
                       hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 10
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        stdout.flush()
    # predict new values: seed with an arbitrary sample and feed back outputs
    old_sample = [100]
    for i in xrange(len(generated_data)):   # was xrange(500000): overran the buffer
        new_sample = net.activate(old_sample)
        old_sample = new_sample
        generated_data[i] = new_sample[0]
        print(new_sample)
    wavfile.write("../../output/test.wav", rate, np.array(generated_data))
开发者ID:cy94,项目名称:ml2,代码行数:34,代码来源:rnn.py
示例4: train
def train(self, params):
    """
    Build a fresh LSTM network, train it with RProp- on the windowed history,
    replay the history to warm up its recurrent state, and return it.

    Fix: the warm-up loop activated ``self.net`` (a different, possibly stale
    network) instead of the locally built ``net`` that is trained, reset and
    returned here — so the returned network's state was never warmed up.
    """
    n = params['encoding_num']
    net = buildNetwork(n, params['num_cells'], n,
                       hiddenclass=LSTMLayer,
                       bias=True,
                       outputbias=params['output_bias'],
                       recurrent=True)
    net.reset()
    ds = SequentialDataSet(n, n)
    trainer = RPropMinusTrainer(net, dataset=ds)
    history = self.window(self.history, params)
    resets = self.window(self.resets, params)
    # Each (history[i-1] -> history[i]) pair is a sample; a reset flag
    # terminates the current training sequence.
    for i in xrange(1, len(history)):
        if not resets[i-1]:
            ds.addSample(self.encoder.encode(history[i-1]),
                         self.encoder.encode(history[i]))
        if resets[i]:
            ds.newSequence()
    if len(history) > 1:
        trainer.trainEpochs(params['num_epochs'])
    # Replay the buffered history so the returned net's state is current.
    net.reset()
    for i in xrange(len(history) - 1):
        symbol = history[i]
        output = net.activate(self.encoder.encode(symbol))  # was: self.net.activate
        predictions = self.encoder.classify(output, num=params['num_predictions'])
        if resets[i]:
            net.reset()
    return net
开发者ID:chanceraine,项目名称:nupic.research,代码行数:35,代码来源:suite.py
示例5: train
def train(self, params):
    """
    Train LSTM network on buffered dataset history
    After training, run LSTM on history[:-1] to get the state correct
    :param params: dict; uses 'reset_every_training', 'encoding_num',
                   'num_cells', 'output_bias', 'num_epochs', 'verbosity',
                   'num_predictions'
    :return: None (self.net is trained/warmed up in place)
    """
    # Optionally rebuild the network from scratch each training round.
    if params['reset_every_training']:
        n = params['encoding_num']
        self.net = buildNetwork(n, params['num_cells'], n,
                                hiddenclass=LSTMLayer,
                                bias=True,
                                outputbias=params['output_bias'],
                                recurrent=True)
        self.net.reset()
    # prepare training dataset: (history[i-1] -> history[i]) pairs,
    # with sequences split wherever a reset flag is set.
    ds = SequentialDataSet(params['encoding_num'], params['encoding_num'])
    history = self.window(self.history, params)
    resets = self.window(self.resets, params)
    for i in xrange(1, len(history)):
        if not resets[i - 1]:
            ds.addSample(self.encoder.encode(history[i - 1]),
                         self.encoder.encode(history[i]))
        if resets[i]:
            ds.newSequence()
    print "Train LSTM network on buffered dataset of length ", len(history)
    if params['num_epochs'] > 1:
        # Multi-epoch branch: fresh RProp- trainer on the new dataset.
        trainer = RPropMinusTrainer(self.net,
                                    dataset=ds,
                                    verbose=params['verbosity'] > 0)
        if len(history) > 1:
            trainer.trainEpochs(params['num_epochs'])
        # run network on buffered dataset after training to get the state right
        self.net.reset()
        for i in xrange(len(history) - 1):
            symbol = history[i]
            output = self.net.activate(self.encoder.encode(symbol))
            self.encoder.classify(output, num=params['num_predictions'])
            if resets[i]:
                self.net.reset()
    else:
        # Single-epoch branch: reuse the persistent trainer held on self.
        self.trainer.setData(ds)
        self.trainer.train()
        # run network on buffered dataset after training to get the state right
        self.net.reset()
        for i in xrange(len(history) - 1):
            symbol = history[i]
            output = self.net.activate(self.encoder.encode(symbol))
            self.encoder.classify(output, num=params['num_predictions'])
            if resets[i]:
                self.net.reset()
开发者ID:rhyolight,项目名称:nupic.research,代码行数:59,代码来源:suite.py
示例6: trainLSTMnet
def trainLSTMnet(net, numTrainSequence, seedSeq=1):
    """
    Train `net` on `numTrainSequence` freshly generated Reber-grammar
    sequences, one RProp- trainer per sequence, and return the same net.

    :param seedSeq: numpy RNG seed used before generating the sequences
    """
    np.random.seed(seedSeq)
    for seqIdx in xrange(numTrainSequence):
        ds, in_seq, out_seq = getReberDS(maxLength)
        print("train seq", seqIdx, sequenceToWord(in_seq))
        # A fresh trainer per sequence; rptPerSeq epochs on this one sequence.
        RPropMinusTrainer(net, dataset=ds).trainEpochs(rptPerSeq)
    return net
开发者ID:Starcounter-Jack,项目名称:nupic.research,代码行数:9,代码来源:reberSequencePrediction_LSTM.py
示例7: train
def train(d, cycles=100, epochs_per_cycle=7):
    """
    Fit a 1-5-1 LSTM next-value predictor to sequence `d` with RProp-.

    :return: (net, train_errors) with one error entry per cycle
    """
    ds = SequentialDataSet(1, 1)
    # NOTE(review): recurrent=False with an LSTMLayer looks unintended —
    # sibling examples in this collection all pass recurrent=True; confirm.
    net = buildNetwork(1, 5, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=False)
    # Target is the next element; cycle() wraps the final target around.
    for sample, next_sample in zip(d, cycle(d[1:])):
        ds.addSample(sample, next_sample)
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = [] # save errors for plotting later
    for i in xrange(cycles):
        trainer.trainEpochs(epochs_per_cycle)
        train_errors.append(trainer.testOnData())
        stdout.flush()
    return net, train_errors
开发者ID:Morgaroth,项目名称:msi_lab2,代码行数:15,代码来源:zadanie.py
示例8: train
def train (ds, net):
    """
    Train `net` on `ds` with RProp- for 100 cycles of 5 epochs each,
    recording the dataset error after every cycle.

    :return: (train_errors, EPOCHS, EPOCHS_PER_CYCLE)
    """
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # one error per completed cycle, for plotting later
    for cycleIdx in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (cycleIdx + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS))
        stdout.flush()
    return train_errors, EPOCHS, EPOCHS_PER_CYCLE
开发者ID:DUTANGx,项目名称:GI15-Group-Project-Time-Series,代码行数:18,代码来源:timeseries.py
示例9: train
def train(self, params, verbose=False):
    """
    (Re)build self.net if requested, train it with RProp- on the windowed
    input/target buffers, then replay the inputs to warm up the LSTM state.

    :param params: dict; uses 'reset_every_training', 'output_encoding',
                   'num_cells', 'num_epochs'
    """
    if params['reset_every_training']:
        if verbose:
            print 'create lstm network'
        random.seed(6)   # fixed seed for reproducible (re)initialization
        # Output layer depends on the encoding: plain linear vs sigmoid
        # likelihood output.
        if params['output_encoding'] == None:
            self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                                    hiddenclass=LSTMLayer, bias=True, outputbias=True, recurrent=True)
        elif params['output_encoding'] == 'likelihood':
            self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                                    hiddenclass=LSTMLayer, bias=True, outclass=SigmoidLayer, recurrent=True)
        self.net.reset()
    ds = SequentialDataSet(self.nDimInput, self.nDimOutput)
    networkInput = self.window(self.networkInput, params)
    targetPrediction = self.window(self.targetPrediction, params)
    # prepare a training data-set using the history
    for i in xrange(len(networkInput)):
        ds.addSample(self.inputEncoder.encode(networkInput[i]),
                     self.outputEncoder.encode(targetPrediction[i]))
    if params['num_epochs'] > 1:
        # Multi-epoch branch: fresh trainer bound to the new dataset.
        trainer = RPropMinusTrainer(self.net, dataset=ds, verbose=verbose)
        if verbose:
            print " train LSTM on ", len(ds), " records for ", params['num_epochs'], " epochs "
        if len(networkInput) > 1:
            trainer.trainEpochs(params['num_epochs'])
    else:
        # Single-epoch branch: reuse the persistent trainer held on self.
        self.trainer.setData(ds)
        self.trainer.train()
    # run through the training dataset to get the lstm network state right
    self.net.reset()
    for i in xrange(len(networkInput)):
        self.net.activate(ds.getSample(i)[0])
开发者ID:andrewmalta13,项目名称:nupic.research,代码行数:42,代码来源:run_lstm_suite.py
示例10: handle
def handle(self, *args, **options):
    """
    Django management command: fetch recent prices for `ticker`, train a
    5-input LSTM on sliding windows of 5 prices -> next price, and print
    per-window predictions versus actuals.

    Fixes:
    - `DS.addSample` referenced an undefined name; the dataset is `ds`.
    - `prices[0:N].reverse()` yields None for a list (and raises for a
      sliced QuerySet); the slice is now reversed explicitly.
    - The try/except-pass loop-termination hack is replaced with a bounded
      loop over the valid window starts.
    """
    ticker = args[0]
    print("****** STARTING PREDICTOR " + ticker + " ******* ")
    prices = Price.objects.filter(symbol=ticker).order_by('-created_on').values_list('price',flat=True)
    # Latest NUM_MINUTES_BACK prices, reordered oldest-first for training.
    data = normalization(list(prices[0:NUM_MINUTES_BACK])[::-1])
    data = [ int(x * MULT_FACTOR) for x in data]
    print(data)
    ds = SupervisedDataSet(5, 1)
    # Sliding window: 5 consecutive prices predict the 6th.
    for i in range(len(data) - 5):
        ds.addSample((data[i], data[i+1], data[i+2], data[i+3], data[i+4]), (data[i+5],))
    net = buildNetwork(5, 40, 1,
                       hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = [] # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 100
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        stdout.flush()
    print()
    print("final error =", train_errors[-1])
    # Report prediction vs actual for every training window (de-scaled).
    for sample, target in ds.getSequenceIterator(0):
        show_pred_sample = net.activate(sample) / MULT_FACTOR
        show_sample = sample / MULT_FACTOR
        show_target = target / MULT_FACTOR
        show_diff = show_pred_sample - show_target
        show_diff_pct = 100 * show_diff / show_pred_sample
        print("{} => {}, act {}. ({}%)".format(show_sample[0],round(show_pred_sample[0],3),show_target[0],int(round(show_diff_pct[0],0))))
开发者ID:AnthonyNystrom,项目名称:pytrader,代码行数:40,代码来源:predict_price_v1a.py
示例11: say_hello_text
def say_hello_text(username = "World",text="You are good"):
    """
    Load weekly disease counts for the given area/disease from a CSV,
    train a 1-5-1 LSTM on next-value prediction, and return a string
    '[next, next+1]' with the two forecast values ('[0, 0]' if the
    series is all zeros).
    """
    object_data_new = pd.read_csv('/Users/ruiyun_zhou/Documents/cmpe-274/data/data.csv')
    data_area_new = object_data_new[object_data_new.Area==username]
    data_area_new_1=data_area_new[data_area_new.Disease== text]
    data_list_new = data_area_new_1['Count'].values.tolist()
    print data_list_new.__len__()
    data_list=data_list_new
    ds = SequentialDataSet(1,1)
    isZero=0;
    # Build next-sample pairs; isZero tracks whether any count is non-zero.
    for sample,next_sample in zip(data_list,cycle(data_list[1:])):
        ds.addSample(sample, next_sample)
        if sample:
            isZero=1
    if(isZero==0):
        return '[0, 0]'   # all-zero series: skip training entirely
    net = buildNetwork(1,5,1,hiddenclass=LSTMLayer,outputbias=False,recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = [] # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 10
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        print "Doing epoch %d" %i
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
    # return '<p>%d</p>\n' % (data_list_new.__len__())
    # print("final error =", train_errors[-1])
    # print "Value for last week is %4.1d" % abs(data_list[-1])
    # print "Value for next week is %4.1d" % abs(net.activate(data_list[-1]))
    # result = (abs(data_list[-1]))
    # Forecast one step from the last observation, then feed that forecast
    # back in for a second step.
    result = (abs(net.activate(data_list[-1])))
    result_1 = (abs(net.activate(result)))
    return '[%d, %d]' % (result,result_1)
开发者ID:farcryzry,项目名称:cmpe-274,代码行数:37,代码来源:application.py
示例12: Train
def Train(self, dataset, error_observer, logger, dump_file):
gradientCheck(self.m_net)
net_dataset = SequenceClassificationDataSet(4, 2)
for record in dataset:
net_dataset.newSequence()
gl_raises = record.GetGlRises()
gl_min = record.GetNocturnalMinimum()
if DayFeatureExpert.IsHypoglycemia(record):
out_class = [1, 0]
else:
out_class = [0, 1]
for gl_raise in gl_raises:
net_dataset.addSample([gl_raise[0][0].total_seconds() / (24*3600), gl_raise[0][1] / 300, gl_raise[1][0].total_seconds() / (24*3600), gl_raise[1][1] / 300] , out_class)
train_dataset, test_dataset = net_dataset.splitWithProportion(0.8)
trainer = RPropMinusTrainer(self.m_net, dataset=train_dataset, momentum=0.8, learningrate=0.3, lrdecay=0.9, weightdecay=0.01, verbose=True)
validator = ModuleValidator()
train_error = []
test_error = []
for i in range(0, 80):
trainer.trainEpochs(1)
train_error.append(validator.MSE(self.m_net, train_dataset)) # here is validate func, think it may be parametrised by custom core function
test_error.append(validator.MSE(self.m_net, test_dataset))
print train_error
print test_error
error_observer(train_error, test_error)
gradientCheck(self.m_net)
dump_file = open(dump_file, 'wb')
pickle.dump(self.m_net, dump_file)
开发者ID:sersajur,项目名称:NeuralPredictor,代码行数:36,代码来源:RNNPredictor.py
示例13: train
def train(data,name):
    """
    Train a 1-200-1 LSTM next-value predictor on `data` with RProp-, then
    write the network's output for every training sample to
    <pwd>/lstmdata/<name>.csv.
    """
    ds = SequentialDataSet(1, 1)
    # Target is the next element; cycle() wraps the final target around.
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)
    net = buildNetwork(1, 200, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = [] # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 20
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    store=[]
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i+1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS))
        # `tm` and `atm` are module-level (time module and a start timestamp,
        # presumably — defined elsewhere in the original file).
        print tm.time()-atm
        stdout.flush()
    # Collect the trained net's output for each sample of the first sequence.
    for sample, target in ds.getSequenceIterator(0):
        store.append(net.activate(sample))
    abcd=pd.DataFrame(store)
    abcd.to_csv(pwd+"lstmdata/"+name+".csv",encoding='utf-8')
    print "result printed to file"
开发者ID:elishaROBINSON,项目名称:stock_Prediction_Neural_net,代码行数:24,代码来源:neural_net_train&store_data.py
示例14: RecurrentNetwork
# Build a deep recurrent network: 10-unit linear input, a stack of ten
# 20-unit LSTM layers chained with full connections, 10-unit linear output.
layerCount = 10
net = RecurrentNetwork()
net.addInputModule(LinearLayer(10, name='in'))
for x in range(layerCount):
    net.addModule(LSTMLayer(20, name='hidden' + str(x)))
net.addOutputModule(LinearLayer(10, name='out'))
# NOTE(review): input connects to 'hidden1', so 'hidden0' receives no input
# and only feeds the chain — confirm this is intentional.
net.addConnection(FullConnection(net['in'], net['hidden1'], name='cIn'))
for x in range(layerCount - 1):
    net.addConnection(FullConnection(net[('hidden' + str(x))], net['hidden' + str(x + 1)], name=('c' + str(x + 1))))
net.addConnection(FullConnection(net['hidden' + str(layerCount - 1)], net['out'], name='cOut'))
net.sortModules()
from pybrain.supervised import RPropMinusTrainer
trainer = RPropMinusTrainer(net, dataset=ds)   # `ds` built earlier in the original file
epochcount = 0
# Generation loop: seed a 2-voice note/duration vector, then let the net
# extend the song by feeding back its own output.
while True:
    startingnote = random.choice(range(1, 17))
    startingnote2 = random.choice(range(1, 17))
    startingduration = random.choice(range(1,17))
    startingduration2 = random.choice(range(1, 17))
    song = [[startingnote, startingduration, 1, 1, 0, startingnote2, startingduration2, 1, 1, 0]]
    length = 50
    while len(song) < length:
        song.append(net.activate(song[-1]).tolist())
    newsong = []
    for x in song:
        newx = []
        newy = []
        # (listing truncated here in the source page)
开发者ID:ml-lab,项目名称:Bach_AI,代码行数:29,代码来源:musicnetwork.py
示例15: SequentialDataSet
# Wire the recurrent part of a hand-built LSTM-style net (`net`, `h`, `o`,
# `dim` are defined earlier in the original file): sliced recurrent
# connections separate the output portion from the carried cell state.
net.addRecurrentConnection(FullConnection(h, h, inSliceTo = dim, outSliceTo = 4*dim, name = 'r1'))
net.addRecurrentConnection(IdentityConnection(h, h, inSliceFrom = dim, outSliceFrom = 4*dim, name = 'rstate'))
net.addConnection(FullConnection(h, o, inSliceTo = dim, name = 'f3'))
net.sortModules()
print net
# Training set: 15 feature columns -> 1 target, read from a CSV on argv[1].
ds = SequentialDataSet(15, 1)
ds.newSequence()
input = open(sys.argv[1], 'r')
for line in input.readlines():
    row = np.array(line.split(','))
    # NOTE(review): target is column 16, skipping column 15 — confirm the
    # file layout actually has an unused 16th column.
    ds.addSample([float(x) for x in row[:15]], float(row[16]))
print ds
# Optional held-out test set from argv[2]; otherwise test on training data.
if len(sys.argv) > 2:
    test = SequentialDataSet(15, 1)
    test.newSequence()
    input = open(sys.argv[2], 'r')
    for line in input.readlines():
        row = np.array(line.split(','))
        test.addSample([float(x) for x in row[:15]], float(row[16]))
else:
    test = ds
print test
net.reset()
trainer = RPropMinusTrainer( net, dataset=ds, verbose=True)
trainer.trainEpochs(1000)
evalRnnOnSeqDataset(net, test, verbose = True)   # project helper, defined elsewhere
开发者ID:babsher,项目名称:ann-indicator,代码行数:31,代码来源:rann.py
示例16: print
print(ds)
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import LSTMLayer
# Build a simple LSTM network with 1 input node, 1 output node and 12 LSTM cells
net = buildNetwork(1, 12, 1, hiddenclass=LSTMLayer, peepholes = False, outputbias=False, recurrent=True)
# net = buildNetwork(1, 1, 1, hiddenclass=LSTMLayer, peepholes = True, outputbias=False, recurrent=True)
# rnn = buildNetwork( trndata.indim, 5, trndata.outdim, hiddenclass=LSTMLayer, outclass=SoftmaxLayer, outputbias=False, recurrent=True)
from pybrain.supervised import RPropMinusTrainer
from sys import stdout
trainer = RPropMinusTrainer(net, dataset=ds, verbose = True)
#trainer.trainUntilConvergence()
train_errors = [] # save errors for plotting later
EPOCHS_PER_CYCLE = 100 # increasing the epochs to 20, decreases accuracy drastically, decreasing epochs is desiredepoch # 5 err = 0.04
CYCLES = 10 # vary the epochs adn the cycles and the LSTM cells to get more accurate results.
EPOCHS = EPOCHS_PER_CYCLE * CYCLES
for i in xrange(CYCLES):
    trainer.trainEpochs(EPOCHS_PER_CYCLE) # train on the given data set for given number of epochs
    train_errors.append(trainer.testOnData())
    epoch = (i+1) * EPOCHS_PER_CYCLE
    print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
    stdout.flush()
print()
print("final error =", train_errors[-1])
开发者ID:beekal,项目名称:UdacityMachieneLearningProjects,代码行数:31,代码来源:test+LSTM.py
示例17: sum
# (Dead code from an earlier iteration, kept for reference.)
# trainer.setData(ds)
# import random
# random.shuffle(sequences)
# concat_sequences = []
# for sequence in sequences:
#     concat_sequences += sequence
#     concat_sequences.append(random.randrange(100, 1000000))
# # concat_sequences = sum(sequences, [])
# for j in xrange(len(concat_sequences) - 1):
#     ds.addSample(num2vec(concat_sequences[j], nDim), num2vec(concat_sequences[j+1], nDim))
# trainer.train()
net = initializeLSTMnet(nDim, nLSTMcells=50)   # project helper, defined elsewhere
net.reset()
ds = SequentialDataSet(nDim, nDim)
trainer = RPropMinusTrainer(net)
trainer.setData(ds)
# Repeatedly shuffle the sequences, concatenate them with random separator
# values, append next-element samples, and train for rptNum epochs.
for _ in xrange(1000):
    # Batch training mode
    # print "generate a dataset of sequences"
    import random
    random.shuffle(sequences)
    concat_sequences = []
    for sequence in sequences:
        concat_sequences += sequence
        concat_sequences.append(random.randrange(100, 1000000))
    # NOTE(review): `ds` is never cleared, so samples accumulate across
    # iterations of the outer loop — confirm that is intended.
    for j in xrange(len(concat_sequences) - 1):
        ds.addSample(num2vec(concat_sequences[j], nDim), num2vec(concat_sequences[j+1], nDim))
    trainer.trainEpochs(rptNum)
开发者ID:oxtopus,项目名称:nupic.research,代码行数:30,代码来源:predict_LSTM_2.py
示例18: buildNetwork
# Hand-built recurrent classifier: linear input -> 100-cell LSTM ->
# sigmoid hidden layer (with bias) -> linear output, plus an LSTM->LSTM
# recurrent connection. (`net` and the datasets are created earlier in
# the original file.)
net.addInputModule(LinearLayer(training_dataset.indim, name="input"))
net.addModule(LSTMLayer(100, name="hidden1"))
net.addModule(SigmoidLayer(training_dataset.outdim * 3, name="hidden2"))
net.addOutputModule(LinearLayer(training_dataset.outdim, name="output"))
net.addModule(BiasUnit('bias'))
net.addConnection(FullConnection(net["input"], net["hidden1"], name="c1"))
net.addConnection(FullConnection(net["hidden1"], net["hidden2"], name="c3"))
net.addConnection(FullConnection(net["bias"], net["hidden2"], name="c4"))
net.addConnection(FullConnection(net["hidden2"], net["output"], name="c5"))
net.addRecurrentConnection(FullConnection(net["hidden1"], net["hidden1"], name="c6"))
net.sortModules()
# net = buildNetwork(n_input, 256, n_output, hiddenclass=LSTMLayer, outclass=TanhLayer, outputbias=False, recurrent=True)
# net = NetworkReader.readFrom('signal_weight.xml')
# train network
trainer = RPropMinusTrainer(net, dataset=training_dataset, verbose=True, weightdecay=0.01)
# trainer = BackpropTrainer(net, dataset=training_dataset, learningrate = 0.04, momentum = 0.96, weightdecay = 0.02, verbose = True)
for i in range(100):
    # train the network for 5 epochs per iteration
    trainer.trainEpochs(5)
    # evaluate the result on the training and test data
    trnresult = percentError(trainer.testOnClassData(), training_dataset['class'])
    tstresult = percentError(trainer.testOnClassData(dataset=testing_dataset), testing_dataset['class'])
    # print the result
    print("epoch: %4d" % trainer.totalepochs, \
          " train error: %5.2f%%" % trnresult, \
          " test error: %5.2f%%" % tstresult)
    # Early-stop check; its body is truncated in this source listing.
    if tstresult <= 0.5 :
开发者ID:indiejoseph,项目名称:nn_trading,代码行数:31,代码来源:train_signal.py
示例19: main
def main():
    """
    Train a deep sigmoid RNN on time-series data described by a config file
    (argv[1]): read and normalize train/validation sequence datasets, train
    with batch RProp-, checkpoint the network each update, and report/plot
    scaled RMS and max errors. Runs until interrupted (infinite loop).
    """
    config = MU.ConfigReader('configs/%s' % sys.argv[1])
    config.read()
    # Per-run log directory named after this script and the config.
    logDir = '%s-%s' % (__file__, sys.argv[1])
    os.mkdir(logDir)
    with open('%s/config.txt' % logDir, 'w') as outfile:
        json.dump(config.getConfigDict(), outfile, indent=4)
    dr = MU.DataReader(config['input_tsv_path'])
    data = dr.read(config['interested_columns'])
    inLabels = config['input_columns']
    outLabels = config['output_columns']
    # Split into train/validation sequence datasets by sequence number.
    tds, vds = seqDataSetPair(data, inLabels, outLabels, config['seq_label_column'],
                              config['test_seqno'], config['validation_seqno'])
    inScale = config.getDataScale(inLabels)
    outScale = config.getDataScale(outLabels)
    normalizeDataSet(tds, ins = inScale, outs = outScale)
    normalizeDataSet(vds, ins = inScale, outs = outScale)
    trainData = tds
    validationData = vds
    # Hidden sizes derived from the data dimensions (py2 integer division).
    fdim = tds.indim / 2 + 15
    xdim = tds.outdim * 2
    rnn = buildNetwork(tds.indim,
                       fdim, fdim, xdim,
                       tds.outdim,
                       hiddenclass=SigmoidLayer,
                       recurrent=True)
    rnn.addRecurrentConnection(FullConnection(rnn['hidden2'], rnn['hidden0']))
    rnn.sortModules()
    trainer = RPropMinusTrainer(rnn, dataset=trainData, batchlearning=True, verbose=True, weightdecay=0.005)
    #trainer = RPropMinusTrainer(rnn, dataset=trainData, batchlearning=True, verbose=True)
    #trainer = BackpropTrainer(rnn, dataset=trainData, learningrate=0.0001,
    #                          lrdecay=1.0, momentum=0.4, verbose=True, batchlearning=False,
    #                          weightdecay=0)
    errTime = []
    errTrain = []
    errValidation = []
    epochNo = 0
    while True:
        for i in range(config['epochs_per_update']):
            trainer.train()
        epochNo += config['epochs_per_update']
        # Checkpoint: per-epoch snapshot plus a rolling "Latest" copy.
        NetworkWriter.writeToFile(rnn, '%s/Epoch_%d.xml' % (logDir, epochNo))
        NetworkWriter.writeToFile(rnn, '%s/Latest.xml' % logDir)
        tOut = ModuleValidator.calculateModuleOutput(rnn, trainData)
        vOut = ModuleValidator.calculateModuleOutput(rnn, validationData)
        # De-normalize errors back to physical units using the output scale.
        tScaler = config.getDataScale([config['output_scalar_label']])[0][1]
        tAvgErr = NP.sqrt(NP.mean((trainData['target'] - tOut) ** 2)) * tScaler
        vAvgErr = NP.sqrt(NP.mean((validationData['target'] - vOut) ** 2)) * tScaler
        tMaxErr = NP.max(NP.abs(trainData['target'] - tOut)) * tScaler
        vMaxErr = NP.max(NP.abs(validationData['target'] - vOut)) * tScaler
        errTrain.append(tAvgErr)
        errValidation.append(vAvgErr)
        errTime.append(epochNo)
        print "Training error: avg %5.3f degC max %5.3f degC" % (tAvgErr, tMaxErr)
        print "Validation error: avg %5.3f degC max %5.3f degC" % (vAvgErr, vMaxErr)
        print "------------------------------------------------------------------------------"
        # Optional live visualization of fit and error curves.
        if (config['visualize_on_training'] == 'yes'):
            PL.figure(1)
            PL.ioff()
            visulizeDataSet(rnn, trainData, 0,
                            config['visualized_columns']['input'],
                            config['visualized_columns']['output'])
            PL.ion()
            PL.draw()
            PL.figure(2)
            PL.ioff()
            visulizeDataSet(rnn, validationData, 0,
                            config['visualized_columns']['input'],
                            config['visualized_columns']['output'])
            PL.ion()
            PL.draw()
            p = PL.figure(3)
            PL.ioff()
            p.clear()
            PL.plot(errTime, errTrain, label = 'Train')
            # (remaining code omitted in this source listing)
开发者ID:sheldonucr,项目名称:thermal_model_control_building,代码行数:101,代码来源:NLSSVR.py
示例20: zip
dataset_8.addSample(current_sample, next_sample)
# Fill each per-source dataset with next-sample prediction pairs
# (cycle() wraps the final target around to the first element).
for current_sample, next_sample in zip(training_data_9, cycle(training_data_9[1:])):
    dataset_9.addSample(current_sample, next_sample)
for current_sample, next_sample in zip(training_data_10, cycle(training_data_10[1:])):
    dataset_10.addSample(current_sample, next_sample)
for current_sample, next_sample in zip(testing_data, cycle(testing_data[1:])):
    dataset_bis.addSample(current_sample, next_sample)
# Initializing the LSTM RNN: 23 nodes in the hidden layer
network = buildNetwork(1, 23, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
# Training data: one RProp- trainer per dataset, all sharing the same
# network, each with initial step size delta0 = 0.01.
trainer = RPropMinusTrainer(network, dataset=dataset, delta0 = 0.01)
trainer_2 = RPropMinusTrainer(network, dataset=dataset_2, delta0 = 0.01)
trainer_3 = RPropMinusTrainer(network, dataset=dataset_3, delta0 = 0.01)
trainer_4 = RPropMinusTrainer(network, dataset=dataset_4, delta0 = 0.01)
trainer_5 = RPropMinusTrainer(network, dataset=dataset_5, delta0 = 0.01)
trainer_6 = RPropMinusTrainer(network, dataset=dataset_6, delta0 = 0.01)
trainer_8 = RPropMinusTrainer(network, dataset=dataset_8, delta0 = 0.01)
trainer_9 = RPropMinusTrainer(network, dataset=dataset_9, delta0 = 0.01)
trainer_10 = RPropMinusTrainer(network, dataset=dataset_10, delta0 = 0.01)
# Initiazlizing storage for the error curves (note: there is no trainer_7
# above — presumably dataset_7 was dropped upstream; confirm).
train_errors = []
train_errors_2 = []
train_errors_3 = []
train_errors_4 = []
注：本文中的pybrain.supervised.RPropMinusTrainer类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台，相关代码片段筛选自各路编程大神贡献的开源项目，源码版权归原作者所有，传播和使用请参考对应项目的License；未经允许，请勿转载。
请发表评论