This article collects typical usage examples of the Python class pybrain.datasets.SequentialDataSet. If you have been wondering what exactly SequentialDataSet does, how to use it, or where to find usage examples, the curated class examples below should help.
Listed below are 20 code examples of the SequentialDataSet class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
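Before the examples, here is a minimal sketch of the core SequentialDataSet API that nearly every snippet below relies on: construct the dataset with input and target dimensions, start a sequence with newSequence(), and add (input, target) pairs with addSample(). The toy series and the printed counts are made up purely for illustration and assume the standard PyBrain API.

from pybrain.datasets import SequentialDataSet

ds = SequentialDataSet(1, 1)          # one input dimension, one target dimension

# first sequence: each sample maps a value to its successor
ds.newSequence()
series = [0.1, 0.2, 0.3, 0.4]
for current, nxt in zip(series, series[1:]):
    ds.addSample(current, nxt)

# a second, independent sequence
ds.newSequence()
for current, nxt in zip([1.0, 0.5, 0.25], [0.5, 0.25, 0.125]):
    ds.addSample(current, nxt)

print(ds.getNumSequences())     # 2
print(ds.getSequenceLength(0))  # 3

Each newSequence() call marks a sequence boundary, which is what lets recurrent trainers such as RPropMinusTrainer treat every sequence independently when they reset the network state.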
Example 1: main
# imports assumed by this snippet (not shown in the excerpt);
# get_data_from_wav is a project-specific helper defined elsewhere
from __future__ import print_function
from itertools import cycle
from sys import stdout

import numpy as np
from scipy.io import wavfile

from pybrain.datasets import SequentialDataSet
from pybrain.structure.modules import LSTMLayer
from pybrain.supervised import RPropMinusTrainer
from pybrain.tools.shortcuts import buildNetwork


def main():
    generated_data = [0 for i in range(10000)]
    rate, data = get_data_from_wav("../../data/natabhairavi_violin.wav")
    data = data[1000:190000]
    print("Got wav")

    ds = SequentialDataSet(1, 1)
    for sample, next_sample in zip(data, cycle(data[1:])):
        ds.addSample(sample, next_sample)

    net = buildNetwork(1, 5, 1,
                       hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    EPOCHS_PER_CYCLE = 5
    CYCLES = 10
    EPOCHS = EPOCHS_PER_CYCLE * CYCLES
    for i in xrange(CYCLES):
        trainer.trainEpochs(EPOCHS_PER_CYCLE)
        train_errors.append(trainer.testOnData())
        epoch = (i + 1) * EPOCHS_PER_CYCLE
        print("\r epoch {}/{}".format(epoch, EPOCHS), end="")
        stdout.flush()

    # predict new values by feeding the network its own output,
    # filling the whole output buffer
    old_sample = [100]
    for i in xrange(len(generated_data)):
        new_sample = net.activate(old_sample)
        old_sample = new_sample
        generated_data[i] = new_sample[0]
        print(new_sample)

    wavfile.write("../../output/test.wav", rate, np.array(generated_data))
Author: cy94, Project: ml2, Lines: 34, Source: rnn.py

Example 2: train
def train(self, params):
    n = params['encoding_num']
    net = buildNetwork(n, params['num_cells'], n,
                       hiddenclass=LSTMLayer,
                       bias=True,
                       outputbias=params['output_bias'],
                       recurrent=True)
    net.reset()

    ds = SequentialDataSet(n, n)
    trainer = RPropMinusTrainer(net, dataset=ds)

    history = self.window(self.history, params)
    resets = self.window(self.resets, params)

    for i in xrange(1, len(history)):
        if not resets[i - 1]:
            ds.addSample(self.encoder.encode(history[i - 1]),
                         self.encoder.encode(history[i]))
        if resets[i]:
            ds.newSequence()

    if len(history) > 1:
        trainer.trainEpochs(params['num_epochs'])

    # feed the history through the freshly trained network to restore its state
    net.reset()
    for i in xrange(len(history) - 1):
        symbol = history[i]
        output = net.activate(self.encoder.encode(symbol))
        predictions = self.encoder.classify(output, num=params['num_predictions'])

        if resets[i]:
            net.reset()

    return net
Author: chanceraine, Project: nupic.research, Lines: 35, Source: suite.py

Example 3: visulizeDataSet
def visulizeDataSet(network, data, seqno, in_labels, out_labels):
    seq = data.getSequence(seqno)
    tmpDs = SequentialDataSet(data.indim, data.outdim)
    tmpDs.newSequence()

    for i in xrange(data.getSequenceLength(seqno)):
        tmpDs.addSample(seq[0][i], seq[1][i])

    nplots = len(in_labels) + len(out_labels)

    for i in range(len(in_labels)):
        p = PL.subplot(nplots, 1, i + 1)
        p.clear()
        p.plot(tmpDs['input'][:, i])
        p.set_ylabel(in_labels[i])

    for i in range(len(out_labels)):
        p = PL.subplot(nplots, 1, i + 1 + len(in_labels))
        p.clear()
        output = ModuleValidator.calculateModuleOutput(network, tmpDs)
        p.plot(tmpDs['target'][:, i], label='train')
        p.plot(output[:, i], label='sim')
        p.legend()
        p.set_ylabel(out_labels[i])
Author: sheldonucr, Project: thermal_model_control_building, Lines: 28, Source: NLSSVR.py

Example 4: train
def train(self, params):
    """
    Train LSTM network on buffered dataset history
    After training, run LSTM on history[:-1] to get the state correct
    :param params:
    :return:
    """
    if params['reset_every_training']:
        n = params['encoding_num']
        self.net = buildNetwork(n, params['num_cells'], n,
                                hiddenclass=LSTMLayer,
                                bias=True,
                                outputbias=params['output_bias'],
                                recurrent=True)
        self.net.reset()

    # prepare training dataset
    ds = SequentialDataSet(params['encoding_num'], params['encoding_num'])
    history = self.window(self.history, params)
    resets = self.window(self.resets, params)

    for i in xrange(1, len(history)):
        if not resets[i - 1]:
            ds.addSample(self.encoder.encode(history[i - 1]),
                         self.encoder.encode(history[i]))
        if resets[i]:
            ds.newSequence()

    print "Train LSTM network on buffered dataset of length ", len(history)
    if params['num_epochs'] > 1:
        trainer = RPropMinusTrainer(self.net,
                                    dataset=ds,
                                    verbose=params['verbosity'] > 0)

        if len(history) > 1:
            trainer.trainEpochs(params['num_epochs'])

        # run network on buffered dataset after training to get the state right
        self.net.reset()
        for i in xrange(len(history) - 1):
            symbol = history[i]
            output = self.net.activate(self.encoder.encode(symbol))
            self.encoder.classify(output, num=params['num_predictions'])

            if resets[i]:
                self.net.reset()
    else:
        self.trainer.setData(ds)
        self.trainer.train()

        # run network on buffered dataset after training to get the state right
        self.net.reset()
        for i in xrange(len(history) - 1):
            symbol = history[i]
            output = self.net.activate(self.encoder.encode(symbol))
            self.encoder.classify(output, num=params['num_predictions'])

            if resets[i]:
                self.net.reset()
Author: rhyolight, Project: nupic.research, Lines: 59, Source: suite.py

Example 5: create_data
def create_data(self, inputs, targets):
    data = SequentialDataSet(inputs, targets)
    for i in xrange(0, len(self.dataframe) - 1):
        data.newSequence()
        ins = self.dataframe.ix[i].values
        target = self.dataframe.ix[i + 1].values[0]
        data.appendLinked(ins, target)
    self.data = data
Author: thearrow, Project: ai-financialANN, Lines: 8, Source: datahandler.py

Example 6: create_train_set
def create_train_set(consumption):
    # create train/test set
    global active_max
    ds = SequentialDataSet(1, 1)
    consumption_data = normalize(consumption)
    active_max = max(consumption_data[1], active_max)
    consumption = consumption_data[0]
    size = len(consumption)
    for i in range(0, size - 1):
        ds.addSample(consumption[i], consumption[i + 1])
    return ds
Author: DUTANGx, Project: GI15-Group-Project-Time-Series, Lines: 13, Source: timeseries.py

Example 7: train
def train(d, cycles=100, epochs_per_cycle=7):
    ds = SequentialDataSet(1, 1)
    net = buildNetwork(1, 5, 1, hiddenclass=LSTMLayer, outputbias=False, recurrent=False)

    for sample, next_sample in zip(d, cycle(d[1:])):
        ds.addSample(sample, next_sample)

    trainer = RPropMinusTrainer(net, dataset=ds)
    train_errors = []  # save errors for plotting later
    for i in xrange(cycles):
        trainer.trainEpochs(epochs_per_cycle)
        train_errors.append(trainer.testOnData())
        stdout.flush()

    return net, train_errors
Author: Morgaroth, Project: msi_lab2, Lines: 15, Source: zadanie.py

Example 8: __init__
def __init__(self, net, task, valueNetwork=None, **args):
    self.net = net
    self.task = task
    self.setArgs(**args)
    if self.valueLearningRate == None:
        self.valueLearningRate = self.learningRate
    if self.valueMomentum == None:
        self.valueMomentum = self.momentum
    if self.supervisedPlotting:
        from pylab import ion
        ion()

    # adaptive temperature:
    self.tau = 1.

    # prepare the datasets to be used
    self.weightedDs = ImportanceDataSet(self.task.outdim, self.task.indim)
    self.rawDs = ReinforcementDataSet(self.task.outdim, self.task.indim)
    self.valueDs = SequentialDataSet(self.task.outdim, 1)

    # prepare the supervised trainers
    self.bp = BackpropTrainer(self.net, self.weightedDs, self.learningRate,
                              self.momentum, verbose=False,
                              batchlearning=True)

    # CHECKME: outsource
    self.vnet = valueNetwork
    if valueNetwork != None:
        self.vbp = BackpropTrainer(self.vnet, self.valueDs, self.valueLearningRate,
                                   self.valueMomentum, verbose=self.verbose)

    # keep information:
    self.totalSteps = 0
    self.totalEpisodes = 0
Author: pachkun, Project: Machine_learning, Lines: 34, Source: rwr.py

Example 9: __init__
def __init__(self, ds):
    self.alldata = SequentialDataSet(ds.num_features, 1)

    # Now add the samples to the data set.
    idx = 1
    self.alldata.newSequence()
    for sample in ds.all_moves:
        self.alldata.addSample(sample.get_features(), [ds.get_classes().index(sample.class_)])
        idx += 1
        if (idx % 6 == 0):
            self.alldata.newSequence()

    self.tstdata, self.trndata = self.alldata.splitWithProportion(0.25)

    #print "Number of training patterns: ", len(self.trndata)
    #print "Input and output dimensions: ", self.trndata.indim, self.trndata.outdim
    #print "First sample (input, target, class):"
    #print self.trndata['input'][0], self.trndata['target'][0], self.trndata['class'][0]

    # recurrent LSTM network with 3 hidden units
    self.rnn = buildNetwork(self.trndata.indim,
                            3,
                            self.trndata.outdim,
                            hiddenclass=LSTMLayer,
                            outclass=SigmoidLayer,
                            recurrent=True)
    self.rnn.randomize()

    self.trainer = BackpropTrainer(self.rnn, dataset=self.trndata, momentum=0.1, verbose=True, weightdecay=0.01)
Author: SameerAsal, Project: accelerometer_training, Lines: 27, Source: rnn_classifier.py

Example 10: create_train_set
def create_train_set(open_price, close_price):
    global open_max
    global close_max
    ds = SequentialDataSet(1, 1)
    open_data = normalize(open_price)
    close_data = normalize(close_price)
    open_max = open_data[1]
    close_max = close_data[1]
    open_price = open_data[0]
    close_price = close_data[0]
    size = len(open_price)
    for i in range(0, size):
        ds.addSample(open_price[i], close_price[i])
    return ds
Author: kahowu, Project: TimeSeriesLSTM, Lines: 16, Source: timeseries.py

Example 11: __init__
def __init__(self):
    SequentialDataSet.__init__(self, 1, 1)
    self.newSequence()
    self.addSample([-1], [-1])
    self.addSample([1], [1])
    self.addSample([1], [-1])
    self.newSequence()
    self.addSample([1], [1])
    self.addSample([1], [-1])
    self.newSequence()
    self.addSample([1], [1])
    self.addSample([1], [-1])
    self.addSample([1], [1])
    self.addSample([1], [-1])
    self.addSample([1], [1])
    self.addSample([1], [-1])
    self.addSample([1], [1])
    self.addSample([1], [-1])
    self.addSample([1], [1])
    self.addSample([1], [-1])
    self.newSequence()
    self.addSample([1], [1])
    self.addSample([1], [-1])
    self.addSample([-1], [-1])
    self.addSample([-1], [-1])
    self.addSample([-1], [-1])
    self.addSample([-1], [-1])
    self.addSample([-1], [-1])
    self.addSample([-1], [-1])
    self.addSample([-1], [-1])
    self.addSample([1], [1])
    self.addSample([-1], [1])
    self.addSample([-1], [1])
    self.addSample([-1], [1])
    self.addSample([-1], [1])
    self.addSample([-1], [1])
    self.newSequence()
    self.addSample([-1], [-1])
    self.addSample([-1], [-1])
    self.addSample([1], [1])
Author: Angeliqe, Project: pybrain, Lines: 45, Source: parity.py

Example 12: __init__
def __init__(self):
    SequentialDataSet.__init__(self, 0, 1)
    self.newSequence()
    self.addSample([], [0])
    self.addSample([], [1])
    self.addSample([], [0])
    self.addSample([], [1])
    self.addSample([], [0])
    self.addSample([], [1])
    self.newSequence()
    self.addSample([], [0])
    self.addSample([], [1])
    self.addSample([], [0])
    self.addSample([], [1])
    self.addSample([], [0])
    self.addSample([], [1])
Author: Angeliqe, Project: pybrain, Lines: 18, Source: anbncn.py

Example 13: __init__
def __init__(self, inp, target, nb_classes=0, class_labels=None):
    """Initialize an empty dataset.

    `inp` is used to specify the dimensionality of the input. While the
    number of targets is given implicitly by the training samples, it can
    also be set explicitly by `nb_classes`. To give the classes names, supply
    an iterable of strings as `class_labels`."""
    # FIXME: hard to keep nClasses synchronized if appendLinked() etc. is used.
    SequentialDataSet.__init__(self, inp, target)
    # we want integer class numbers as targets
    self.convertField('target', int)
    if len(self) > 0:
        # calculate class histogram, if we already have data
        self.calculateStatistics()
    self.nClasses = nb_classes
    self.class_labels = list(range(self.nClasses)) if class_labels is None else class_labels
    # copy classes (targets may be changed into other representation)
    self.setField('class', self.getField('target'))
Author: fh-wedel, Project: pybrain, Lines: 18, Source: classification.py
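Since the attribution above points at pybrain's own classification.py, this constructor appears to be the __init__ of SequenceClassificationDataSet, a subclass of SequentialDataSet. Under that assumption, a minimal usage sketch with made-up toy values might look like this:

from pybrain.datasets import SequenceClassificationDataSet

# two input features, one (integer) class target, three possible classes
ds = SequenceClassificationDataSet(2, 1, nb_classes=3,
                                   class_labels=['left', 'straight', 'right'])
ds.newSequence()
ds.addSample([0.1, 0.9], [0])   # targets are integer class indices
ds.addSample([0.4, 0.6], [2])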
Example 14: getPyBrainDataSetScalarEncoder
def getPyBrainDataSetScalarEncoder(sequence, nTrain, encoderInput, encoderOutput,
                                   predictionStep=1, useTimeOfDay=True, useDayOfWeek=True):
    """
    Use scalar encoder for the data
    :param sequence:
    :param nTrain:
    :param predictionStep:
    :param useTimeOfDay:
    :param useDayOfWeek:
    :return:
    """
    print "generate a pybrain dataset of sequences"
    print "the training data contains ", str(nTrain-predictionStep), "records"

    if encoderInput is None:
        inDim = 1 + int(useTimeOfDay) + int(useDayOfWeek)
    else:
        inDim = encoderInput.n + int(useTimeOfDay) + int(useDayOfWeek)

    if encoderOutput is None:
        outDim = 1
    else:
        outDim = encoderOutput.n

    ds = SequentialDataSet(inDim, outDim)

    if useTimeOfDay:
        print "include time of day as input field"
    if useDayOfWeek:
        print "include day of week as input field"

    for i in xrange(nTrain-predictionStep):
        sample = getSingleSample(i, sequence, useTimeOfDay, useDayOfWeek)

        if encoderOutput is None:
            dataSDROutput = [sequence['normdata'][i+predictionStep]]
        else:
            dataSDROutput = encoderOutput.encode(sequence['data'][i+predictionStep])

        ds.addSample(sample, dataSDROutput)

    return ds
Author: Starcounter-Jack, Project: nupic.research, Lines: 43, Source: run_lstm_scalarEncoder.py

Example 15: train
def train(self, params, verbose=False):
    if params['reset_every_training']:
        if verbose:
            print 'create lstm network'

        random.seed(6)
        if params['output_encoding'] == None:
            self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                                    hiddenclass=LSTMLayer, bias=True, outputbias=True, recurrent=True)
        elif params['output_encoding'] == 'likelihood':
            self.net = buildNetwork(self.nDimInput, params['num_cells'], self.nDimOutput,
                                    hiddenclass=LSTMLayer, bias=True, outclass=SigmoidLayer, recurrent=True)

    self.net.reset()

    ds = SequentialDataSet(self.nDimInput, self.nDimOutput)
    networkInput = self.window(self.networkInput, params)
    targetPrediction = self.window(self.targetPrediction, params)

    # prepare a training data-set using the history
    for i in xrange(len(networkInput)):
        ds.addSample(self.inputEncoder.encode(networkInput[i]),
                     self.outputEncoder.encode(targetPrediction[i]))

    if params['num_epochs'] > 1:
        trainer = RPropMinusTrainer(self.net, dataset=ds, verbose=verbose)

        if verbose:
            print " train LSTM on ", len(ds), " records for ", params['num_epochs'], " epochs "

        if len(networkInput) > 1:
            trainer.trainEpochs(params['num_epochs'])
    else:
        self.trainer.setData(ds)
        self.trainer.train()

    # run through the training dataset to get the lstm network state right
    self.net.reset()
    for i in xrange(len(networkInput)):
        self.net.activate(ds.getSample(i)[0])
Author: andrewmalta13, Project: nupic.research, Lines: 42, Source: run_lstm_suite.py

Example 16: __init__
def __init__(self, task, agent):
    EpisodicExperiment.__init__(self, task, agent)

    # create model and training set (action dimension + 1 for time)
    self.modelds = SequentialDataSet(self.task.indim + 1, 1)
    self.model = [GaussianProcess(indim=self.modelds.getDimension('input'),
                                  start=(-10, -10, 0), stop=(10, 10, 300), step=(5, 5, 100))
                  for _ in range(self.task.outdim)]

    # change hyper parameters for all gps
    for m in self.model:
        m.hyper = (20, 2.0, 0.01)
Author: HKou, Project: pybrain, Lines: 12, Source: gpmodel.py

Example 17: buildAppropriateDataset
def buildAppropriateDataset(module):
    """ build a sequential dataset with 2 sequences of 3 samples, with random input and target values,
    but the appropriate dimensions to be used on the provided module. """
    if module.sequential:
        d = SequentialDataSet(module.indim, module.outdim)
        for dummy in range(2):
            d.newSequence()
            for dummy in range(3):
                d.addSample(randn(module.indim), randn(module.outdim))
    else:
        d = SupervisedDataSet(module.indim, module.outdim)
        for dummy in range(3):
            d.addSample(randn(module.indim), randn(module.outdim))
    return d
Author: Boblogic07, Project: pybrain, Lines: 14, Source: helpers.py
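A short usage sketch of this test helper; the three-layer network and the trainer call below are illustrative assumptions and are not part of the original helpers.py:

from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork

net = buildNetwork(3, 4, 2)            # plain feed-forward module, so module.sequential is False
d = buildAppropriateDataset(net)       # yields a SupervisedDataSet with 3 random samples
trainer = BackpropTrainer(net, dataset=d)
trainer.train()                        # one epoch over the random data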
Example 18: train
def train(self, params):
    self.net.reset()

    ds = SequentialDataSet(self.nDimInput, self.nDimOutput)
    trainer = RPropMinusTrainer(self.net, dataset=ds, verbose=False)

    history = self.window(self.history, params)
    resets = self.window(self.resets, params)

    for i in xrange(params['prediction_nstep'], len(history)):
        if not resets[i-1]:
            ds.addSample(self.inputEncoder.encode(history[i-params['prediction_nstep']]),
                         self.outputEncoder.encode(history[i][0]))
        if resets[i]:
            ds.newSequence()

    # print ds.getSample(0)
    # print ds.getSample(1)
    # print ds.getSample(1000)
    # print " training data size", ds.getLength(), " len(history) ", len(history), " self.history ", len(self.history)
    # print ds

    if len(history) > 1:
        trainer.trainEpochs(params['num_epochs'])

    self.net.reset()
    for i in xrange(len(history) - params['prediction_nstep']):
        symbol = history[i]
        output = self.net.activate(ds.getSample(i)[0])

        if resets[i]:
            self.net.reset()
Author: chanceraine, Project: nupic.research, Lines: 33, Source: suite.py

Example 19: trainNetwork
def trainNetwork(dirname):
    numFeatures = 5000
    ds = SequentialDataSet(numFeatures, 1)

    tracks = glob.glob(os.path.join(dirname, 'train??.wav'))
    for t in tracks:
        track = os.path.splitext(t)[0]
        # load training data
        print "Reading %s..." % track
        data = numpy.genfromtxt(track + '_seg.csv', delimiter=",")
        labels = numpy.genfromtxt(track + 'REF.txt', delimiter='\t')[0::10, 1]
        numData = data.shape[0]

        # add the input to the dataset
        print "Adding to dataset..."
        ds.newSequence()
        for i in range(numData):
            ds.addSample(data[i], (labels[i],))

    # initialize the neural network
    print "Initializing neural network..."
    net = buildNetwork(numFeatures, 50, 1,
                       hiddenclass=LSTMLayer, outputbias=False, recurrent=True)

    # train the network on the dataset
    print "Training neural net"
    trainer = RPropMinusTrainer(net, dataset=ds)
    ## trainer.trainUntilConvergence(maxEpochs=50, verbose=True, validationProportion=0.1)
    error = -1
    for i in range(100):
        new_error = trainer.train()
        print "error: " + str(new_error)
        if abs(error - new_error) < 0.1: break
        error = new_error

    # save the network
    print "Saving neural network..."
    NetworkWriter.writeToFile(net, os.path.basename(dirname) + 'net')
Author: tediris, Project: MusicML, Lines: 38, Source: trainer.py

Example 20: getReberDS
def getReberDS(maxLength, display=0):
    """
    @param maxLength (int): maximum length of the sequence
    """
    [in_seq, out_seq] = generateSequencesVector(maxLength)

    target = out_seq
    last_target = target[-1]
    last_target[np.argmax(out_seq[-1])] = 1
    target[-1] = last_target

    ds = SequentialDataSet(7, 7)
    i = 0
    for sample, next_sample in zip(in_seq, target):
        ds.addSample(sample, next_sample)

        if display:
            print("     sample: %s" % sample)
            print("     target: %s" % next_sample)
            print("next sample: %s" % out_seq[i])
            print()

        i += 1

    return (ds, in_seq, out_seq)
Author: Starcounter-Jack, Project: nupic.research, Lines: 23, Source: reberSequencePrediction_LSTM.py
Note: The pybrain.datasets.SequentialDataSet class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors; for distribution and use, please refer to the license of the corresponding project. Do not repost without permission.