This article collects typical usage examples of the Python class pybrain.tools.customxml.NetworkWriter. If you are wondering how the NetworkWriter class is used in practice, or are looking for concrete NetworkWriter examples, the curated code samples below should help.
The sections below present 20 code examples of the NetworkWriter class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
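Before the examples, here is a minimal round-trip sketch of the two calls almost every snippet below relies on: NetworkWriter.writeToFile to serialize a trained network to XML, and NetworkReader.readFrom to restore it. This is an illustrative sketch assuming a standard PyBrain installation; the network shape and the file name 'demo_net.xml' are placeholder choices, not taken from any of the projects below.

from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.customxml import NetworkWriter, NetworkReader

# build a small feed-forward network: 2 inputs, 3 hidden units, 1 output (illustrative sizes)
net = buildNetwork(2, 3, 1)
# serialize the network topology and weights to an XML file
NetworkWriter.writeToFile(net, 'demo_net.xml')
# rebuild an equivalent network from the XML file
restored = NetworkReader.readFrom('demo_net.xml')
# the restored network can be activated just like the original
print restored.activate([0.5, 0.5])

The examples that follow wrap this same pair of calls in training code (BackpropTrainer, RPropMinusTrainer, and so on); Example 18 additionally uses NetworkWriter.appendToFile to add a network to an existing XML file.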
Example 1: main
def main():
    start_time = time.time()
    novice = ArtificialNovice()
    genius = ArtificialGenius()
    game = HangmanGame(genius, novice)
    if __debug__:
        print "------------------- EVALUATION ------------------------"
        network = NetworkReader.readFrom("../IA/network_weight_1000.xml")
        j = 0
        while j < 1:
            game.launch(False, None, network)
            j += 1
        print ("--- %s total seconds ---" % (time.time() - start_time))
    else:
        print "------------------- LEARNING ------------------------"
        network = buildNetwork(3, 4, 1, hiddenclass=SigmoidLayer)
        ds = SupervisedDataSet(3, 1)
        i = 0
        while i < 100:
            game.launch(True, ds)
            i += 1
        print " INITIATE trainer : "
        trainer = BackpropTrainer(network, ds)
        print " START trainer : "
        start_time_trainer = time.time()
        trainer.train()
        print ("--- END trainer in %s seconds ---" % (time.time() - start_time_trainer))
        print " START EXPORT network : "
        NetworkWriter.writeToFile(network, "../IA/network_weight_test_learning.xml")
        print " END EXPORT network : "
Developer: CelyaRousseau, Project: NaoHangman, Lines of code: 33, Source: main.py
Example 2: run
def run(epochs, network_file, file_length, part_length, dominant_frequncies, show_graph, verbose_output):
    start_time = time.time()
    learner = dominant_freqs_learner.DominantFreqsLearner(file_length, part_length, dominant_frequncies)
    all_files = get_all_split_files()
    if verbose_output:
        print 'started adding files to dataset at ' + time.ctime()
    for f in all_files:
        try:
            learner.add_split_file(f, channel=None, verbose=verbose_output)
        except:
            pass
    dataset_add_time = time.time() - start_time
    if verbose_output:
        print 'finished adding files to dataset at ' + time.ctime()
    errors = []
    learning_start_time = time.time()
    for epoch in range(epochs):
        error = learner.train_single_epoch()
        if verbose_output:
            print '{0}: epoch {1} : {2}'.format(time.ctime(), epoch, error)
        errors.append(error)
    learning_time = time.time() - learning_start_time
    NetworkWriter.writeToFile(learner._net, network_file)
    if show_graph:
        plot_graph(errors)
    return (errors, dataset_add_time, learning_time)
Developer: agadish, Project: HotC, Lines of code: 32, Source: dominant_freqs_runner.py
Example 3: save_network
def save_network(self, name_of_the_net):
    print "Saving the trained network to file"
    if self.network is None:
        print "Network has not been trained!!"
    else:
        NetworkWriter.writeToFile(self.network, name_of_the_net)
        print "Saving Finished"
Developer: DajeRoma, Project: clicc-flask, Lines of code: 8, Source: regression.py
Example 4: train
def train(self):
    print "Training"
    trndata, tstdata = self.ds.splitWithProportion(.1)
    self.trainer.trainUntilConvergence(verbose=True,
                                       trainingData=trndata,
                                       maxEpochs=1000)
    self.trainer.testOnData(tstdata, verbose=True)
    # if raw_input('Save Network?: y/n\n') == 'y':
    NetworkWriter.writeToFile(self.net, 'Network1.xml')
    print 'Saving network'
Developer: nahtonaj, Project: neuralnetworkdrone, Lines of code: 10, Source: imageProcessing.py
Example 5: main
def main():
    print "Calculating mfcc...."
    mfcc_coeff_vectors_dict = {}
    for i in range(1, 201):
        extractor = FeatureExtractor(
            '/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Happiness/HappinessAudios/' + str(i) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})
    for i in range(201, 401):
        extractor = FeatureExtractor(
            '/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Sadness/SadnessAudios/' + str(i - 200) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})
    audio_with_min_frames, min_frames = get_min_frames_audio(
        mfcc_coeff_vectors_dict)
    processed_mfcc_coeff = preprocess_input_vectors(
        mfcc_coeff_vectors_dict, min_frames)
    # frames = min_frames
    # print frames
    # print len(processed_mfcc_coeff['1'])
    # for each_vector in processed_mfcc_coeff['1']:
    #     print len(each_vector)
    print "mfcc found..."
    classes = ["happiness", "sadness"]
    training_data = ClassificationDataSet(
        26, target=1, nb_classes=2, class_labels=classes)
    # training_data = SupervisedDataSet(13, 1)
    try:
        network = NetworkReader.readFrom(
            'network_state_frame_level_new2_no_pp1.xml')
    except:
        for i in range(1, 51):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            for each_vector in mfcc_coeff_vectors:
                training_data.appendLinked(each_vector, [1])
        for i in range(201, 251):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            for each_vector in mfcc_coeff_vectors:
                training_data.appendLinked(each_vector, [0])
        training_data._convertToOneOfMany()
        print "prepared training data.."
        print training_data.indim, training_data.outdim
        network = buildNetwork(
            training_data.indim, 5, training_data.outdim, fast=True)
        trainer = BackpropTrainer(network, learningrate=0.01, momentum=0.99)
        print "Before training...", trainer.testOnData(training_data)
        trainer.trainOnDataset(training_data, 1000)
        print "After training...", trainer.testOnData(training_data)
        NetworkWriter.writeToFile(
            network, "network_state_frame_level_new2_no_pp.xml")
Developer: abhinavkashyap92, Project: sentitude, Lines of code: 55, Source: pybrain_frame_level_classifier.py
Example 6: save_network
def save_network(self, name_of_the_net):
    print "Saving the trained network to file"
    if self.network is None:
        print "Network has not been trained!!"
    else:
        NetworkWriter.writeToFile(self.network, name_of_the_net)
        fileName = name_of_the_net.replace('.xml', '')
        fileName = fileName + '_testIndex.txt'
        np.savetxt(fileName, self.tstIndex)
        print "Saving Finished"
Developer: RunshengSong, Project: CLiCC_Packages, Lines of code: 11, Source: regression.py
Example 7: nn_train
def nn_train(pvar, ovar, code, date1, date2, niter, np, nh):
    print "Doing Network " + pvar + " " + ovar + " " + date1 + " " + date2 + " " + str(niter)
    # ----------------------- add samples ------------------
    # get the training data
    print "adding training data " + pvar + " "
    file = "eod_main.db"
    sqldir = os.path.join(datadir, "sql")
    sqlfile = os.path.join(sqldir, file)
    conn, cur = open_sql(sqlfile)
    d = rd_sql(cur, code, 'AND date > "' + date1 + '" AND date < "' + date2 + '"')
    print "Read no of days " + str(len(d))
    if pvar == "basic8":
        print "calling nn_pp_basic8"
        pp = nn_pp_basic8(d, 3, 3)
    oclose, ohigh, olow, oclose_disc, o3day = nn_po_basic(d, 3, 3)
    if ovar == "close":
        po = oclose
    if ovar == "high":
        po = ohigh
    if ovar == "3day":
        po = o3day
    ds = SupervisedDataSet(np, 1)
    for i in range(0, len(po)):
        ds.addSample(pp[i], po[i])
    # ----------------------- Build and Train ------------------
    print "Training Network"
    net = buildNetwork(np, nh, 1, hiddenclass=TanhLayer)
    trainer = BackpropTrainer(net, ds)
    xxx = trainer.trainUntilConvergence(maxEpochs=niter, validationProportion=0.01)
    # for n in range(0, niter):
    #     xxx = trainer.train(validationProportion=0.0)
    #     if n % 100 == 0:
    #         print "{} : {}".format(n, xxx)
    # -------------- Save network parameters ------------------
    print "Saving Network"
    netdir2 = os.path.join(basedir, "inv")
    netdir = os.path.join(netdir2, "analyse")
    netfile = os.path.join(netdir, 'net_' + pvar + '_' + ovar + '_' + date1 + '_' + date1 + '_' + str(niter) + '.xml')
    NetworkWriter.writeToFile(net, netfile)
    return
Developer: kizombakid, Project: inv, Lines of code: 52, Source: nn_train.py
Example 8: neuralNet
def neuralNet(info, test_data):
    ann = FeedForwardNetwork()
    '''
    Initiate the input nodes, hidden layer nodes,
    and the output layer nodes.
    '''
    inputLayer = LinearLayer(5)
    hiddenLayer = SigmoidLayer(20)
    outputLayer = LinearLayer(1)
    '''
    Add the nodes to the corresponding layer
    '''
    ann.addInputModule(inputLayer)
    ann.addModule(hiddenLayer)
    ann.addOutputModule(outputLayer)
    '''
    Connect the input layer to hidden layer,
    then connect hidden layer to output layer
    '''
    in_to_hidden = FullConnection(inputLayer, hiddenLayer)
    hidden_to_out = FullConnection(hiddenLayer, outputLayer)
    ann.addConnection(in_to_hidden)
    ann.addConnection(hidden_to_out)
    ann.sortModules()
    data_set = SupervisedDataSet(5, 1)
    for data in info:
        data_set.addSample(data[:-1], data[-1])
    trainer = BackpropTrainer(ann, data_set, verbose=False)
    # test_data, train_data = data_set.splitWithProportion(0.2)
    train_data = data_set
    test_data = test_data
    '''
    Using 50 epochs for testing purposes, it will train
    the network until convergence within the first 50 epochs
    '''
    train = trainer.trainUntilConvergence(dataset=train_data, maxEpochs=10)
    NetworkWriter.writeToFile(ann, 'filename5.xml')
    for d in test_data:
        out = ann.activate(d)
        # print (train)
        print (out)
Developer: TeamBall, Project: CapstoneProject, Lines of code: 52, Source: neuralNetwork.py
Example 9: main
def main():
    print "Calculating mfcc...."
    mfcc_coeff_vectors_dict = {}
    for i in range(1, 201):
        extractor = FeatureExtractor('/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Happiness/HappinessAudios/' + str(i) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})
    for i in range(201, 401):
        extractor = FeatureExtractor('/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Sadness/SadnessAudios/' + str(i - 200) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})
    audio_with_min_frames, min_frames = get_min_frames_audio(mfcc_coeff_vectors_dict)
    processed_mfcc_coeff = preprocess_input_vectors(mfcc_coeff_vectors_dict, min_frames)
    frames = min_frames
    print "mfcc found...."
    classes = ["happiness", "sadness"]
    try:
        network = NetworkReader.readFrom('network_state_new_.xml')
    except:
        # Create new network and start Training
        training_data = ClassificationDataSet(frames * 26, target=1, nb_classes=2, class_labels=classes)
        # training_data = SupervisedDataSet(frames * 39, 1)
        for i in range(1, 151):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            training_data.appendLinked(mfcc_coeff_vectors.ravel(), [1])
            # training_data.addSample(mfcc_coeff_vectors.ravel(), [1])
        for i in range(201, 351):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            training_data.appendLinked(mfcc_coeff_vectors.ravel(), [0])
            # training_data.addSample(mfcc_coeff_vectors.ravel(), [0])
        training_data._convertToOneOfMany()
        network = buildNetwork(training_data.indim, 5, training_data.outdim)
        trainer = BackpropTrainer(network, learningrate=0.01, momentum=0.99)
        print "Before training...", trainer.testOnData(training_data)
        trainer.trainOnDataset(training_data, 1000)
        print "After training...", trainer.testOnData(training_data)
        NetworkWriter.writeToFile(network, "network_state_new_.xml")
    print "*" * 30, "Happiness Detection", "*" * 30
    for i in range(151, 201):
        output = network.activate(processed_mfcc_coeff[str(i)].ravel())
        # print output,
        # if output > 0.7:
        #     print "happiness"
        class_index = max(xrange(len(output)), key=output.__getitem__)
        class_name = classes[class_index]
        print class_name
Developer: abhinavkashyap92, Project: sentitude, Lines of code: 51, Source: pybrain_learning.py
Example 10: dump
def dump(self, dirPath):
    """
    Save a representation of this classifier and its network at the given path.
    """
    if os.path.isdir(dirPath) and os.listdir(dirPath):
        raise IOError("The directory exists and is not empty: {}".format(dirPath))
    util.mkdir_p(dirPath)
    # save network
    NetworkWriter.writeToFile(self.net, os.path.join(dirPath, self._NET_NAME))
    # save classifier
    with open(os.path.join(dirPath, self._CLASSIFIER_NAME), 'w') as f:
        f.write(serializer.dump(self))
Developer: ForeverWintr, Project: ImageClassipy, Lines of code: 14, Source: classifier.py
Example 11: save
def save(self, filename):
    tmpfile = filename + '~net~'
    NetworkWriter.writeToFile(self.net, tmpfile)
    with open(tmpfile, 'rb') as f:
        network_data = f.read()
    os.unlink(tmpfile)
    with open(filename + '~', 'wb') as f:
        out = pickle.Pickler(f)
        out.dump((const.PWINDOW, self.window))
        out.dump((const.PSIZE, self.size))
        out.dump((const.PRATIO, self.ratio))
        out.dump((const.PMULTIPLIER, self.multiplier))
        out.dump((const.PNETWORK, network_data))
        f.flush()
    os.rename(filename + '~', filename)
Developer: majek, Project: transfer, Lines of code: 15, Source: network.py
Example 12: xmlInvariance
def xmlInvariance(n, forwardpasses=1):
    """ try writing a network to an xml file, reading it, rewrite it, reread it, and compare
    if the result looks the same (compare string representation, and forward processing
    of some random inputs) """
    # We only use this for file creation.
    tmpfile = tempfile.NamedTemporaryFile(dir='.')
    f = tmpfile.name
    tmpfile.close()
    NetworkWriter.writeToFile(n, f)
    tmpnet = NetworkReader.readFrom(f)
    NetworkWriter.writeToFile(tmpnet, f)
    endnet = NetworkReader.readFrom(f)
    # Unlink temporary file.
    os.unlink(f)
    netCompare(tmpnet, endnet, forwardpasses, True)
Developer: Boblogic07, Project: pybrain, Lines of code: 18, Source: helpers.py
Example 13: neural_train
def neural_train(filename, testfile, output):
    tag, data = readfile(filename)
    testtag, testdata = readfile(testfile)
    net = buildNetwork(len(data[0]), 80, 10)
    ds = SupervisedDataSet(len(data[0]), 10)
    for x in range(0, len(data)):
        ds.addSample(data[x], trans(tag[x]))
    testds = SupervisedDataSet(len(data[0]), 10)
    for x in range(0, len(testdata)):
        testds.addSample(testdata[x], trans(testtag[x]))
    trainer = BackpropTrainer(net, ds, learningrate=0.001, momentum=0.99)
    print "training..."
    trainer.trainUntilConvergence(verbose=True,
                                  trainingData=ds,
                                  validationData=testds,
                                  maxEpochs=500)
    print "done"
    NetworkWriter.writeToFile(net, output)
Developer: YueDayu, Project: AdvancedDataStructureProj2, Lines of code: 18, Source: NN_training.py
Example 14: main
def main():
    start_time = time.time()
    dataModel = [
        [(0, 0, 0), (1, 0, 0, 0, 0, 0, 0, 0)],
        [(0, 0, 1), (0, 1, 0, 0, 0, 0, 0, 0)],
        [(0, 1, 0), (0, 0, 1, 0, 0, 0, 0, 0)],
        [(0, 1, 1), (0, 0, 0, 1, 0, 0, 0, 0)],
        [(1, 0, 0), (0, 0, 0, 0, 1, 0, 0, 0)],
        [(1, 0, 1), (0, 0, 0, 0, 0, 1, 0, 0)],
        [(1, 1, 0), (0, 0, 0, 0, 0, 0, 1, 0)],
        [(1, 1, 1), (0, 0, 0, 0, 0, 0, 0, 1)],
    ]
    ds = SupervisedDataSet(3, 8)
    for input, target in dataModel:
        ds.addSample(input, target)
    # create a large random data set
    random.seed()
    trainingSet = SupervisedDataSet(3, 8)
    for ri in range(0, 2000):
        input, target = dataModel[random.getrandbits(3)]
        trainingSet.addSample(input, target)
    net = buildNetwork(3, 8, 8, bias=True)
    trainer = BackpropTrainer(net, ds, learningrate=0.001)
    for i in range(10):
        trainer.trainUntilConvergence(verbose=True,
                                      trainingData=trainingSet,
                                      validationData=ds,
                                      maxEpochs=1)
        NetworkWriter.writeToFile(net, 'savedNeuralNets/trainedNet' + str(i) + '.xml')
    print("The Program took %s seconds to run" % (time.time() - start_time))
Developer: nasgold, Project: rounder, Lines of code: 40, Source: exampleNeuralNetwork.py
Example 15: trainNetwork
def trainNetwork(dirname):
    numFeatures = 5000
    ds = SequentialDataSet(numFeatures, 1)
    tracks = glob.glob(os.path.join(dirname, 'train??.wav'))
    for t in tracks:
        track = os.path.splitext(t)[0]
        # load training data
        print "Reading %s..." % track
        data = numpy.genfromtxt(track + '_seg.csv', delimiter=",")
        labels = numpy.genfromtxt(track + 'REF.txt', delimiter='\t')[0::10, 1]
        numData = data.shape[0]
        # add the input to the dataset
        print "Adding to dataset..."
        ds.newSequence()
        for i in range(numData):
            ds.addSample(data[i], (labels[i],))
    # initialize the neural network
    print "Initializing neural network..."
    net = buildNetwork(numFeatures, 50, 1,
                       hiddenclass=LSTMLayer, outputbias=False, recurrent=True)
    # train the network on the dataset
    print "Training neural net"
    trainer = RPropMinusTrainer(net, dataset=ds)
    ## trainer.trainUntilConvergence(maxEpochs=50, verbose=True, validationProportion=0.1)
    error = -1
    for i in range(100):
        new_error = trainer.train()
        print "error: " + str(new_error)
        if abs(error - new_error) < 0.1: break
        error = new_error
    # save the network
    print "Saving neural network..."
    NetworkWriter.writeToFile(net, os.path.basename(dirname) + 'net')
Developer: tediris, Project: MusicML, Lines of code: 38, Source: trainer.py
Example 16: main
def main():
    start_time = time.time()
    dataModel = createTheDataModel([2, 5, 9, 15])
    trainingSet = SupervisedDataSet(228, 1)
    for input, target in dataModel:
        trainingSet.addSample(input, target)
    net = buildNetwork(228, 220, 1, bias=True)
    numberOfEpochsToTrainFor = 2
    for epochNumber in range(1, 3):
        trainer = BackpropTrainer(net, trainingSet)
        trainer.trainEpochs(2)
        NetworkWriter.writeToFile(net, 'savedNeuralNets/trainedNet1-epoch' + str(epochNumber * numberOfEpochsToTrainFor) + '.xml')
    seconds = str(int(time.time() - start_time))
    print("The Program took %s seconds to run" % (seconds))
Developer: nasgold, Project: rounder, Lines of code: 23, Source: trainNeuralNetwork.py
Example 17: entrenarO
def entrenarO(red):
    # Initialize the dataset
    ds = SupervisedDataSet(4096, 1)
    """The dataset is built by processing each of the images to extract the figures;
    the desired output values for the neural network are then assigned."""
    print "O - Figura"
    for i, c in enumerate(os.listdir(os.path.dirname('C:\\Users\\LuisD\\Desktop\\Reconocimiento\\prueba/'))):
        try:
            im = cv2.imread('C:\\Users\\LuisD\\Desktop\\Reconocimiento\\prueba/' + c)
            cv2.resize(im, (64, 64))
            pim = pi.ProcesarImagen(im)
            ds.appendLinked(pim.flatten(), 10)
        except:
            pass
        print len(ds)
        print i, c
    trainer = BackpropTrainer(red, ds)
    print "Entrenando hasta converger"
    trainer.trainUntilConvergence()
    NetworkWriter.writeToFile(red, 'rna_o.xml')
Developer: FEnoR, Project: 3RT, Lines of code: 24, Source: EntrenarRedneuronal.py
Example 18: BackpropTrainer
t = BackpropTrainer(n, learningrate=0.01,
                    momentum=mom)
# train the neural network from the train DataSet
cterrori = 1.0
print "trainer momentum:" + str(mom)
for iter in range(25):
    t.trainOnDataset(trndata, 1000)
    ctrndata = mv.calculateModuleOutput(n, trndata)
    cterr = v.MSE(ctrndata, trndata['target'])
    relerr = abs(cterr - cterrori)
    cterrori = cterr
    print 'iteration:', iter + 1, 'MSE error:', cterr
    myplot(trndata, ctrndata, iter=iter + 1)
    if cterr < 1.e-5 or relerr < 1.e-7:
        break
# write the network using xml file
myneuralnet = os.path.join(os.getcwd(), 'myneuralnet.xml')
if os.path.isfile(myneuralnet):
    NetworkWriter.appendToFile(n, myneuralnet)
else:
    NetworkWriter.writeToFile(n, myneuralnet)
# calculate the test DataSet based on the trained Neural Network
ctsts = mv.calculateModuleOutput(n, tsts)
tserr = v.MSE(ctsts, tsts['target'])
print 'MSE error on TSTS:', tserr
myplot(trndata, ctrndata, tsts, ctsts)
pylab.show()
Developer: Boblogic07, Project: pybrain, Lines of code: 30, Source: jpq2layersWriter.py
Example 19: zip
vals.append(float(n.activate(x)))

error = 0.0
num = 0.0
for o, t in zip(vals, prediction_outputs):
    if abs(t - o) < 10:
        error += abs(t - o)
        num = num + 1
error = error / num
if error < local_min_error:
    local_min_error = error
if error < min_error and num >= 16:
    NetworkWriter.writeToFile(n, "20 prediction games with num = 16.xml")
    min_error = error
    num_n = num
    min_vals = []
    for x in vals:
        x = float(x)
        min_vals.append(x)
print("\n")
for x in vals:
    print x
print("\n")
print(min_error)
print(num_n)
print("\n")
for x in min_vals:
Developer: KendallWeihe, Project: PyBrain-NN-for-regression, Lines of code: 31, Source: main.py
Example 20: buildNetwork
# split up training data for cross validation
print "Split data into training and test sets..."
net = buildNetwork(200, 134, 2, bias=True, outclass=SoftmaxLayer)
trainer = BackpropTrainer(net, dataset=trndata)
print "training for {} epochs..."
trainer.trainUntilConvergence(verbose=True, validationProportion=0.15, maxEpochs=1000, continueEpochs=10)
trnresult = percentError(trainer.testOnClassData(), trndata['class'])
tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])
print "epoch: %4d" % trainer.totalepochs, " train error: %5.2f%%" % trnresult, " test error: %5.2f%%" % tstresult
NetworkWriter.writeToFile(net, 'oliv-x2-80.xml')
# predict using test data
# print "Making predictions..."
# ypreds = []
# ytrues = []
# for i in range(Xtest.getLength()):
#     pred = fnn.activate(getSample(i)[0])
#     ypreds.append(pred.argmax())
#     ytrues.append(ytest[i])
# print "Accuracy on test set: %7.4f" % accuracy_score(ytrues, ypreds,
#                                                      normalize=True)
Developer: thak123, Project: IASNLP-2016, Lines of code: 28, Source: nn-supervised.py
Note: the pybrain.tools.customxml.NetworkWriter class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.