
Python callbacks.Callbacks Class Code Examples


This article collects typical usage examples of the Python class neon.callbacks.callbacks.Callbacks. If you have been wondering what the Callbacks class does, how to use it, or what real-world code that uses it looks like, the curated class code examples below should help.



The following presents 20 code examples of the Callbacks class, ordered by popularity by default.
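Before diving into the individual examples, it helps to see the pattern they all share: construct a Callbacks container around the model, register any extra callbacks on it, and hand the container to Model.fit. The sketch below is a minimal illustration of that flow rather than a copy of any one example: the toy data, layer sizes, and checkpoint path are placeholder assumptions, and the exact Callbacks constructor signature varies across the neon versions represented below (older releases also pass the training set, and sometimes parsed arguments, positionally).

# minimal sketch of the shared pattern, assuming a neon >= 1.2 install;
# the data, layer sizes and checkpoint path are illustrative placeholders
import numpy as np

from neon.backends import gen_backend
from neon.callbacks.callbacks import Callbacks
from neon.data import ArrayIterator  # named DataIterator in older releases
from neon.initializers import Gaussian
from neon.layers import Affine, GeneralizedCost
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import CrossEntropyMulti, Rectlin, Softmax

# the backend must be generated before layers and iterators are created
be = gen_backend(backend='cpu', batch_size=128)

# random stand-in data: 1024 samples, 784 features, 10 classes
X = np.random.rand(1024, 784)
y = np.random.randint(10, size=1024)
train_set = ArrayIterator(X, y, nclass=10)
valid_set = ArrayIterator(X[:256], y[:256], nclass=10)

init = Gaussian(scale=0.01)
model = Model(layers=[Affine(nout=100, init=init, activation=Rectlin()),
                      Affine(nout=10, init=init, activation=Softmax())])
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
opt = GradientDescentMomentum(0.1, momentum_coef=0.9)

# 1. build the container, optionally attaching an evaluation set
callbacks = Callbacks(model, eval_set=valid_set)
# 2. register extra hooks before training starts
callbacks.add_save_best_state_callback('./best_state.pkl')
# 3. pass the container to fit(); neon fires the hooks as training runs
model.fit(train_set, optimizer=opt, num_epochs=5, cost=cost,
          callbacks=callbacks)

Every example that follows is a variation on these three steps; they differ mainly in which callbacks are registered (serialization, early stopping, custom metrics, GAN plotting) and in how the constructor arguments are supplied (explicit keywords versus **args.callback_args from NeonArgparser).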

Example 1: train

    def train(self, dataset, model=None):
        """Trains the passed model on the given dataset. If no model is passed, `generate_default_model` is used."""
        print "[%s] Starting training..." % self.model_name                                                              
        start = time.time()

        # The training will be run on the CPU. If a GPU is available it should be used instead.
        backend = gen_backend(backend='cpu',
                              batch_size=self.batch_size,
                              rng_seed=self.random_seed,
                              stochastic_round=False)

        cost = GeneralizedCost(
            name='cost',
            costfunc=CrossEntropyMulti())

        optimizer = GradientDescentMomentum(
            learning_rate=self.lrate,
            momentum_coef=0.9)

        # set up the model and experiment
        if not model:
            model = self.generate_default_model(dataset.num_labels)

        args = NeonCallbackParameters()
        args.output_file = os.path.join(self.root_path, self.Callback_Store_Filename)
        args.evaluation_freq = 1
        args.progress_bar = False
        args.epochs = self.max_epochs
        args.save_path = os.path.join(self.root_path, self.Intermediate_Model_Filename)
        args.serialize = 1
        args.history = 100
        args.model_file = None

        callbacks = Callbacks(model, dataset.train(), args, eval_set=dataset.test())

        # add a callback that saves the best model state
        callbacks.add_save_best_state_callback(self.model_path)

        # Uncomment line below to run on GPU using cudanet backend
        # backend = gen_backend(rng_seed=0, gpu='cudanet')
        model.fit(
            dataset.train(),
            optimizer=optimizer,
            num_epochs=self.max_epochs,
            cost=cost,
            callbacks=callbacks)

        print("[%s] Misclassification error = %.1f%%"
              % (self.model_name, model.eval(dataset.test(), metric=Misclassification()) * 100))
        print "[%s] Finished training!" % self.model_name
        end = time.time()
        print "[%s] Duration in seconds", end - start

        return model
Author: youngstone | Project: Datapalooza | Lines: 54 | Source: mlp_model.py


Example 2: train

def train(args, hyper_params, model, opt, data_set):
    # setup cost function as CrossEntropy
    cost = GeneralizedCost(costfunc=CrossEntropyMulti())
    
    callbacks = Callbacks(model, **args.callback_args)
    callbacks.add_callback(EpochEndCallback())
    
    data_set.set_mode('train')
    model.fit(data_set, optimizer=opt,
              num_epochs=hyper_params.num_epochs, cost=cost, callbacks=callbacks)
    
    return
Author: 623401157 | Project: ModelZoo | Lines: 12 | Source: transfer_learning.py


Example 3: deserialize

def deserialize(fn, datasets=None, inference=False):
    """
    Helper function to load all objects from a serialized file,
    this includes callbacks and datasets as well as the model, layers,
    etc.

    Arguments:
        fn (str): path of the serialized file to load
        datasets (DataSet, optional): If the dataset is not serialized
                                      in the file it can be passed in
                                      as an argument.  This will also
                                      override any dataset in the serialized
                                      file
        inference (bool, optional): if true only the weights will be loaded, not
                                    the states
    Returns:
        Model: the model object
        Dataset: the data set object
        Callback: the callbacks
    """
    config_dict = load_obj(fn)

    if datasets is not None:
        logger.warn('Ignoring datasets serialized in archive file %s' % fn)
    elif 'datasets' in config_dict:
        ds_cls = load_class(config_dict['datasets']['type'])
        dataset = ds_cls.gen_class(config_dict['datasets']['config'])
        datasets = dataset.gen_iterators()

    if 'train' in datasets:
        data_iter = datasets['train']
    else:
        key = list(datasets.keys())[0]
        data_iter = datasets[key]
        logger.warn('Could not find training set iterator, '
                    'using %s instead' % key)

    model = Model(config_dict, data_iter)

    callbacks = None
    if 'callbacks' in config_dict:
        # run through the callbacks looking for dataset objects
        # replace them with the corresponding data set above
        cbs = config_dict['callbacks']['callbacks']
        for cb in cbs:
            if 'config' not in cb:
                cb['config'] = {}
            for arg in cb['config']:
                if type(cb['config'][arg]) is dict and 'type' in cb['config'][arg]:
                    if cb['config'][arg]['type'] == 'Data':
                        key = cb['config'][arg]['name']
                        if key in datasets:
                            cb['config'][arg] = datasets[key]
                        else:
                            cb['config'][arg] = None
        # now we can generate the callbacks
        callbacks = Callbacks.load_callbacks(config_dict['callbacks'], model)
    return (model, dataset, callbacks)
Author: AdityoSanjaya | Project: neon | Lines: 57 | Source: load_experiment.py


Example 4: main

def main():
    # set up the model and run for num_epochs, saving the last state only;
    # this is done first so that the backend (be) is generated
    mlp = gen_model(args.backend)

    # setup data iterators
    (X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
    if args.backend == 'nervanacpu' or args.backend == 'cpu':
        # limit data since cpu backend runs slower
        train = DataIterator(X_train[:1000], y_train[:1000], nclass=nclass, lshape=(1, 28, 28))
        valid = DataIterator(X_test[:1000], y_test[:1000], nclass=nclass, lshape=(1, 28, 28))
    else:
        train = DataIterator(X_train, y_train, nclass=nclass, lshape=(1, 28, 28))
        valid = DataIterator(X_test, y_test, nclass=nclass, lshape=(1, 28, 28))

    # serialization related
    cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    opt_gdm = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)

    checkpoint_model_path = os.path.join('./', 'test_oneshot.pkl')
    checkpoint_schedule = 1  # save at every step

    callbacks = Callbacks(mlp, train)
    callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path, history=2)

    # run the fit all the way through, saving a checkpoint every epoch
    mlp.fit(train, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)

    # setup model with same random seed run epoch by epoch
    # serializing and deserializing at each step
    mlp = gen_model(args.backend)
    cost = GeneralizedCost(costfunc=CrossEntropyBinary())
    opt_gdm = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)

    # reset data iterators
    train.reset()
    valid.reset()

    checkpoint_model_path = os.path.join('./', 'test_manyshot.pkl')
    checkpoint_schedule = 1  # save at every step
    callbacks = Callbacks(mlp, train)
    callbacks.add_serialize_callback(checkpoint_schedule,
                                     checkpoint_model_path,
                                     history=num_epochs)
    for epoch in range(num_epochs):
        # _0 points to state at end of epoch 0
        mlp.fit(train, optimizer=opt_gdm, num_epochs=epoch+1, cost=cost, callbacks=callbacks)

        # load saved file
        prts = os.path.splitext(checkpoint_model_path)
        fn = prts[0] + '_%d' % epoch + prts[1]
        mlp.load_weights(fn)  # load the saved weights

    # compare test_oneshot_<num_epochs>.pkl to test_manyshot_<num_epochs>.pkl
    try:
        compare_model_pickles('test_oneshot_%d.pkl' % (num_epochs-1),
                              'test_manyshot_%d.pkl' % (num_epochs-1))
    except:
        print('test failed....')
        sys.exit(1)
Author: ferenckulcsar | Project: neon | Lines: 60 | Source: serialization_check.py


Example 5: GeneralizedCost

layers.append(Dropout(keep=0.5))
layers.append(Affine(nout=1000, init=init1, bias=Constant(-7), activation=Softmax()))

cost = GeneralizedCost(costfunc=CrossEntropyMulti())

opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

mlp = Model(layers=layers)

if args.model_file:
    import os
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    mlp.load_weights(args.model_file)

# configure callbacks
callbacks = Callbacks(mlp, train, output_file=args.output_file)

if args.validation_freq:
    class TopKMetrics(Callback):
        def __init__(self, valid_set, epoch_freq=args.validation_freq):
            super(TopKMetrics, self).__init__(epoch_freq=epoch_freq)
            self.valid_set = valid_set

        def on_epoch_end(self, epoch):
            self.valid_set.reset()
            allmetrics = TopKMisclassification(k=5)
            stats = mlp.eval(self.valid_set, metric=allmetrics)
            print ", ".join(allmetrics.metric_names) + ": " + ", ".join(map(str, stats.flatten()))

    callbacks.add_callback(TopKMetrics(test))
Author: sunclx | Project: neon | Lines: 30 | Source: alexnet.py


Example 6: str

                        print('Training specialist: %s' % i)
                        path = EXPERIMENT_DIR + confusion_matrix_name + '_' + clustering_name + '_' + str(num_clusters) + 'clusters/' + 'specialist' + '_' + str(i) + '.prm'

                        # Create datasets
                        X_spec, y_spec, spec_out = filter_dataset(X_train, y_train, cluster)
                        X_spec_test, y_spec_test, spec_out = filter_dataset(
                            X_test, y_test, cluster)
                        spec_out = nout
                        spec_set = DataIterator(
                            X_spec, y_spec, nclass=spec_out, lshape=(3, 32, 32))
                        spec_test = DataIterator(
                            X_spec_test, y_spec_test, nclass=spec_out, lshape=(3, 32, 32))

                        # Train the specialist
                        specialist, opt, cost = spec_net(nout=spec_out, archive_path=gene_path)
                        callbacks = Callbacks(specialist, spec_set, args, eval_set=spec_test)
                        callbacks.add_early_stop_callback(early_stop)
                        callbacks.add_save_best_state_callback(path)
                        specialist.fit(spec_set, optimizer=opt,
                                    num_epochs=specialist.epoch_index + num_epochs, cost=cost, callbacks=callbacks)

                        # Print results
                        print('Specialist Train misclassification error: %s' % specialist.eval(spec_set, metric=Misclassification()))
                        print('Specialist Test misclassification error: %s' % specialist.eval(spec_test, metric=Misclassification()))
                        print('Generalist Train misclassification error: %s' % generalist.eval(spec_set, metric=Misclassification()))
                        print('Generalist Test misclassification error: %s' % generalist.eval(spec_test, metric=Misclassification()))
                        # specialists.append(specialist)
                        save_obj(specialist.serialize(), path)
                except:
                    path = confusion_matrix_name + '_' + clustering_name + '_' + str(num_clusters) + 'clusters/'
                    print('Failed for %s' % path)
Author: seba-1511 | Project: specialists | Lines: 31 | Source: train_all_specs.py


Example 7: GRU

output_size = 8
N = 120  # number of memory locations
M = 8  # size of a memory location

# model initialization
layers = [
    GRU(hidden_size, init, activation=Tanh(), gate_activation=Logistic()),
    Affine(train_set.nout, init, bias=init, activation=Logistic())
]

cost = GeneralizedCostMask(costfunc=CrossEntropyBinary())

model = Model(layers=layers)

optimizer = RMSProp(gradient_clip_value=gradient_clip_value,
                    stochastic_round=args.rounding)

# configure callbacks
callbacks = Callbacks(model, **args.callback_args)

# we can use the training set as the validation set,
# since the Ticker data is generated on the fly
callbacks.add_watch_ticker_callback(train_set)

# train model
model.fit(train_set,
          optimizer=optimizer,
          num_epochs=args.epochs,
          cost=cost,
          callbacks=callbacks)
Author: AdityoSanjaya | Project: neon | Lines: 30 | Source: rnn_copy.py


Example 8: RBM

# setup optimizer
optimizer = {'momentum': [0],
             'step_config': 1,
             'learning_rate': 0.1,
             'weight_decay': 0}

# initialize model object
rbm = RBM(layers=layers)

if args.model_file:
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    logger.info('loading initial model state from %s' % args.model_file)
    rbm.load_weights(args.model_file)

# setup standard fit callbacks
callbacks = Callbacks(rbm, train_set, output_file=args.output_file,
                      progress_bar=args.progress_bar)

if args.serialize > 0:
    # add callback for saving checkpoint file
    # every args.serialize epochs
    checkpoint_schedule = args.serialize
    checkpoint_model_path = args.save_path
    callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path)

rbm.fit(train_set, optimizer=optimizer, num_epochs=num_epochs, callbacks=callbacks)

for mb_idx, (x_val, y_val) in enumerate(valid_set):
    hidden = rbm.fprop(x_val)
    break
Author: yeahrmek | Project: 3dShapeNets | Lines: 32 | Source: mnist_multilayer.py


Example 9: Pooling

          Pooling(3, strides=2)]


# Structure of the deep residual part of the network:
# args.depth modules of 2 convolutional layers each at feature map depths
# of 64, 128, 256, 512
nfms = list(itt.chain.from_iterable(
    [itt.repeat(2**(x + 6), r) for x, r in enumerate(stages)]))
strides = [-1] + [1 if cur == prev else 2 for cur,
                  prev in zip(nfms[1:], nfms[:-1])]

for nfm, stride in zip(nfms, strides):
    layers.append(module_factory(nfm, stride))

layers.append(Pooling('all', op='avg'))
layers.append(Conv(**conv_params(1, train.nclass, relu=False)))
layers.append(Activation(Softmax()))
model = Model(layers=layers)

weight_sched = Schedule([30, 60], 0.1)
opt = GradientDescentMomentum(0.1, 0.9, wdecay=0.0001, schedule=weight_sched)

# configure callbacks
valmetric = TopKMisclassification(k=5)
callbacks = Callbacks(model, eval_set=test, metric=valmetric, **args.callback_args)
callbacks.add_callback(BatchNormTuneCallback(tune), insert_pos=0)

cost = GeneralizedCost(costfunc=CrossEntropyMulti())
model.fit(train, optimizer=opt, num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)
Author: JediKoder | Project: neon | Lines: 30 | Source: i1k_msra.py


Example 10: GeneralizedCost

# setup cost function as CrossEntropy
cost = GeneralizedCost(costfunc=CrossEntropyBinary())

# setup optimizer
optimizer = GradientDescentMomentum(0.1, momentum_coef=0.9, stochastic_round=args.rounding)

# initialize model object
mlp = Model(layers=layers)

if args.model_file:
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    logger.info('loading initial model state from %s' % args.model_file)
    mlp.load_weights(args.model_file)

# setup standard fit callbacks
callbacks = Callbacks(mlp, train_set, output_file=args.output_file,
                      progress_bar=args.progress_bar)

if args.validation_freq:
    # setup validation trial callbacks
    callbacks.add_validation_callback(valid_set, args.validation_freq)

if args.serialize > 0:
    # add callback for saving checkpoint file
    # every args.serialize epochs
    checkpoint_schedule = args.serialize
    checkpoint_model_path = args.save_path
    callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path)

# run fit
Author: ZebTech | Project: neon | Lines: 32 | Source: mnist_mlp.py


Example 11: MNIST

random_seed = args.rng_seed if args.rng_seed else 0

# load up the mnist data set, padding images to size 32
dataset = MNIST(path=args.data_dir, sym_range=True, size=32, shuffle=True)
train = dataset.train_iter

# create a GAN
model, cost = create_model(dis_model=args.dmodel, gen_model=args.gmodel,
                           cost_type='wasserstein', noise_type='normal',
                           im_size=32, n_chan=1, n_noise=128,
                           n_gen_ftr=args.n_gen_ftr, n_dis_ftr=args.n_dis_ftr,
                           depth=4, n_extra_layers=4,
                           batch_norm=True, dis_iters=5,
                           wgan_param_clamp=0.01, wgan_train_sched=True)

# setup optimizer
optimizer = RMSProp(learning_rate=2e-4, decay_rate=0.99, epsilon=1e-8)

# configure callbacks
callbacks = Callbacks(model, **args.callback_args)
fdir = ensure_dirs_exist(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'results/'))
fname = os.path.splitext(os.path.basename(__file__))[0] +\
    '_[' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + ']'
im_args = dict(filename=os.path.join(fdir, fname), hw=32,
               num_samples=args.batch_size, nchan=1, sym_range=True)
callbacks.add_callback(GANPlotCallback(**im_args))
callbacks.add_callback(GANCostCallback())

# model fit
model.fit(train, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
Author: StevenLOL | Project: neon | Lines: 30 | Source: mnist_wgan.py


Example 12: StepSchedule

                 weights=[1, 1, 1])

# setup optimizer
schedule_w = StepSchedule(step_config=[5], change=[0.001 / 10])
schedule_b = StepSchedule(step_config=[5], change=[0.002 / 10])

opt_w = GradientDescentMomentum(0.001, 0.9, wdecay=0.0005, schedule=schedule_w)
opt_b = GradientDescentMomentum(0.002, 0.9, wdecay=0.0005, schedule=schedule_b)
opt_skip = GradientDescentMomentum(0.0, 0.0)

optimizer = MultiOptimizer({'default': opt_w, 'Bias': opt_b,
                            'skip': opt_skip, 'skip_bias': opt_skip})

# if training a new model, seed the image model conv layers with pre-trained weights
# otherwise, just load the model file
if args.model_file is None:
    util.load_vgg_all_weights(model, args.data_dir)

callbacks = Callbacks(model, eval_set=train_set, **args.callback_args)
callbacks.add_callback(TrainMulticostCallback())

# model.benchmark(train_set, optimizer=optimizer, cost=cost)
model.fit(train_set, optimizer=optimizer, cost=cost, num_epochs=args.epochs, callbacks=callbacks)

# Scale the bbox regression branch linear layer weights before saving the model
model = util.scale_bbreg_weights(model, [0.0, 0.0, 0.0, 0.0],
                                 [0.1, 0.1, 0.2, 0.2], train_set.num_classes)

if args.save_path is not None:
    save_obj(model.serialize(keep_states=True), args.save_path)
Author: Jokeren | Project: neon | Lines: 30 | Source: train.py


Example 13: train_mlp

def train_mlp():
	"""
	Train data and save scaling and network weights and biases to file
	to be used by forward prop phase on test data
	"""
	parser = NeonArgparser(__doc__)
	
	args = parser.parse_args()
	
	logger = logging.getLogger()
	logger.setLevel(args.log_thresh)
	
	# hyperparameters
	num_epochs = args.epochs
	
	#preprocessor
	std_scale = preprocessing.StandardScaler(with_mean=True,with_std=True)
	#std_scale = feature_scaler(type='Standardizer',with_mean=True,with_std=True)
	
	#number of non one-hot encoded features, including ground truth
	num_feat = 4
	
	# load data from csv files and rescale;
	# the data comes pre-split into train, validation and test csv files
	# training
	traindf = pd.DataFrame.from_csv('data/train.csv')
	ncols = traindf.shape[1]
	
	#tmpmat=std_scale.fit_transform(traindf.as_matrix())
	#print std_scale.scale_
	#print std_scale.mean_
	
	tmpmat = traindf.as_matrix()
	#print tmpmat[:,1:num_feat]
	
	tmpmat[:,:num_feat] = std_scale.fit_transform(tmpmat[:,:num_feat])
	X_train = tmpmat[:,1:]
	y_train = np.reshape(tmpmat[:,0],(tmpmat[:,0].shape[0],1))
	
	#validation
	validdf = pd.DataFrame.from_csv('data/validate.csv')
	ncols = validdf.shape[1]
	tmpmat = validdf.as_matrix()
	tmpmat[:,:num_feat] = std_scale.transform(tmpmat[:,:num_feat])
	X_valid = tmpmat[:,1:]
	y_valid = np.reshape(tmpmat[:,0],(tmpmat[:,0].shape[0],1))
	
	#test
	testdf = pd.DataFrame.from_csv('data/test.csv')
	ncols = testdf.shape[1]
	tmpmat = testdf.as_matrix()
	tmpmat[:,:num_feat] = std_scale.transform(tmpmat[:,:num_feat])
	X_test = tmpmat[:,1:]
	y_test = np.reshape(tmpmat[:,0],(tmpmat[:,0].shape[0],1))
	
	# setup a training set iterator
	train_set = CustomDataIterator(X_train, lshape=(X_train.shape[1]), y_c=y_train)
	# setup a validation data set iterator
	valid_set = CustomDataIterator(X_valid, lshape=(X_valid.shape[1]), y_c=y_valid)
	# setup a test data set iterator
	test_set = CustomDataIterator(X_test, lshape=(X_test.shape[1]), y_c=y_test)
	
	# setup weight initialization function
	init_norm = Xavier()
	
	# setup model layers
	layers = [Affine(nout=X_train.shape[1], init=init_norm, activation=Rectlin()),
	          Dropout(keep=0.5),
	          Affine(nout=X_train.shape[1] // 2, init=init_norm, activation=Rectlin()),
	          Linear(nout=1, init=init_norm)]
	
	# setup cost function as Smooth L1 loss
	cost = GeneralizedCost(costfunc=SmoothL1Loss())
	
	# setup optimizer
	#schedule
	#schedule = ExpSchedule(decay=0.3)
	#optimizer = GradientDescentMomentum(0.0001, momentum_coef=0.9, stochastic_round=args.rounding, schedule=schedule)
	optimizer = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1.e-8)
	
	# initialize model object
	mlp = Model(layers=layers)
	
	# default the evaluation frequency to once per epoch if unset
	if args.callback_args['eval_freq'] is None:
		args.callback_args['eval_freq'] = 1
	
	# configure callbacks
	callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
	
	callbacks.add_early_stop_callback(stop_func)
	callbacks.add_save_best_state_callback(os.path.join(args.data_dir, "early_stop-best_state.pkl"))
	
	# run fit
	mlp.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
	
	#evaluate model
	print('Evaluation Error = %.4f'%(mlp.eval(valid_set, metric=SmoothL1Metric())))
	print('Test set error = %.4f'%(mlp.eval(test_set, metric=SmoothL1Metric())))
#......... part of the code omitted here .........
Author: ankitvb | Project: homeprice | Lines: 101 | Source: train_mlp.py


Example 14: Uniform

init = Uniform(low=-0.08, high=0.08)

# model initialization
layers = [
    LSTM(hidden_size, init, Logistic(), Tanh()),
    Affine(len(train_set.vocab), init, bias=init, activation=Softmax())
]
model = Model(layers=layers)

cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))

optimizer = RMSProp(clip_gradients=clip_gradients, stochastic_round=args.rounding)

# configure callbacks
callbacks = Callbacks(model, train_set, output_file=args.output_file,
                      progress_bar=args.progress_bar,
                      valid_set=valid_set, valid_freq=1,
                      )
callbacks.add_serialize_callback(1, args.save_path)

# fit and validate
model.fit(train_set, optimizer=optimizer, num_epochs=num_epochs, cost=cost, callbacks=callbacks)


def sample(prob):
    """
    Sample index from probability distribution
    """
    prob = prob / (prob.sum() + 1e-6)
    return np.argmax(np.random.multinomial(1, prob, 1))

# Set batch size and time_steps to 1 for generation and reset buffers
Author: ZebTech | Project: neon | Lines: 32 | Source: text_generation_lstm.py


Example 15: HDF5Iterator

                     validation=False,
                     remove_history=False,
                     minimal_set=False,
                     next_N=3)
valid = HDF5Iterator(filenames,
                     ndata=(16 * 2014),
                     validation=True,
                     remove_history=False,
                     minimal_set=False,
                     next_N=1)

out1, out2, out3 = model.layers.get_terminal()

cost = Multicost(costs=[GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True)),
                        GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True)),
                        GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))])

schedule = ExpSchedule(decay=(1.0 / 50))  # halve the learning rate every 50 epochs
opt_gdm = GradientDescentMomentum(learning_rate=0.01,
                                  momentum_coef=0.9,
                                  stochastic_round=args.rounding,
                                  gradient_clip_value=1,
                                  gradient_clip_norm=5,
                                  wdecay=0.0001,
                                  schedule=schedule)

callbacks = Callbacks(model, eval_set=valid, metric=TopKMisclassification(5), **args.callback_args)
callbacks.add_save_best_state_callback(os.path.join(args.workspace_dir, "best_state_h5resnet.pkl"))
model.fit(train, optimizer=opt_gdm, num_epochs=num_epochs, cost=cost, callbacks=callbacks)
model.save_params(os.path.join(args.workspace_dir, "final_state_h5resnet.pkl"))
Author: thouis | Project: go_policy | Lines: 30 | Source: train.py


Example 16: GeneralizedCost

#cost function

cost = GeneralizedCost(costfunc=CrossEntropyBinary())

#final model

mlp = Model(layers=layers)

logger.info("model construction complete...")

"""
model training and classification accuracy
"""
#model training and results

callbacks = Callbacks(mlp, train, args, eval_set=valid, metric=Misclassification())

# add loss and metric callbacks to facilitate more diagnostics

callbacks.add_callback(MetricCallback(mlp, eval_set=train, metric=Misclassification(), epoch_freq=args.evaluation_freq))
callbacks.add_callback(MetricCallback(mlp, eval_set=valid, metric=Misclassification(), epoch_freq=args.evaluation_freq))
# run the model

mlp.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

#final classification accuracy

t_mis_rate = mlp.eval(train, metric=Misclassification()) * 100
v_mis_rate = mlp.eval(valid, metric=Misclassification()) * 100
#test_mis_rate = mlp.eval(test, metric=Misclassification()) * 100
Author: poijqwef | Project: trackit | Lines: 30 | Source: hurricane_classify_trainvalid.py


Example 17: GeneralizedCost

layers.append(Conv((3, 3, 384), pad=1, init=init2, bias=Constant(0), activation=relu))
layers.append(Conv((3, 3, 256), pad=1, init=init2, bias=Constant(1), activation=relu))
layers.append(Conv((3, 3, 256), pad=1, init=init2, bias=Constant(1), activation=relu))
layers.append(Pooling(3, strides=2))
layers.append(Affine(nout=4096, init=init1, bias=Constant(1), activation=relu))
layers.append(Dropout(keep=0.5))
layers.append(Affine(nout=4096, init=init1, bias=Constant(1), activation=relu))
layers.append(Dropout(keep=0.5))
layers.append(Affine(nout=1000, init=init1, bias=Constant(-7), activation=Softmax()))

cost = GeneralizedCost(costfunc=CrossEntropyMulti())

opt = MultiOptimizer({'default': opt_gdm, 'Bias': opt_biases})

mlp = Model(layers=layers)

# configure callbacks
callbacks = Callbacks(mlp, train, output_file=args.output_file)

if args.validation_freq:
    callbacks.add_validation_callback(test, args.validation_freq)

if args.save_path:
    checkpoint_schedule = range(1, args.epochs)
    callbacks.add_serialize_callback(checkpoint_schedule, args.save_path, history=2)

mlp.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)

test.exit_batch_provider()
train.exit_batch_provider()
Author: rupertsmall | Project: neon | Lines: 30 | Source: alexnet.py


Example 18: NeonArgparser

parser = NeonArgparser(__doc__, default_config_files=config_files,
                       default_overrides=dict(batch_size=64))
parser.add_argument('--deconv', action='store_true',
                    help='save visualization data from deconvolution')
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args()

model, cost = create_network()
rseed = 0 if args.rng_seed is None else args.rng_seed

# setup data provider
assert 'train' in args.manifest, "Missing train manifest"
assert 'val' in args.manifest, "Missing validation manifest"
train = make_alexnet_train_loader(args.manifest['train'], args.manifest_root,
                                  model.be, args.subset_pct, rseed)
valid = make_validation_loader(args.manifest['val'], args.manifest_root,
                               model.be, args.subset_pct)

sched_weight = Schedule([10], change=0.1)
opt = GradientDescentMomentum(0.01, 0.9, wdecay=0.0005, schedule=sched_weight)

# configure callbacks
valmetric = TopKMisclassification(k=5)
callbacks = Callbacks(model, eval_set=valid, metric=valmetric, **args.callback_args)

if args.deconv:
    callbacks.add_deconv_callback(train, valid)

model.fit(train, optimizer=opt, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
Author: NervanaSystems | Project: neon | Lines: 30 | Source: allcnn.py


Example 19: tuple

# define the stopping function
# it takes as input a tuple (State, val[t]),
# where State is the cumulative validation state (generated by this function)
# and val[t] is the validation error at time t,
# and returns as output a tuple (State', Bool),
# representing the new state and whether to stop


# Stop if validation error ever increases from epoch to epoch
def stop_func(s, v):
    if s is None:
        return (v, False)

    return (min(v, s), v > s)

# fit and validate
optimizer = GradientDescentMomentum(learning_rate=0.1, momentum_coef=0.9)

# configure callbacks
if args.callback_args['eval_freq'] is None:
    args.callback_args['eval_freq'] = 1

callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
callbacks.add_early_stop_callback(stop_func)
callbacks.add_save_best_state_callback(os.path.join(args.data_dir, "early_stop-best_state.pkl"))
mlp.fit(train_set,
        optimizer=optimizer,
        num_epochs=args.epochs,
        cost=cost,
        callbacks=callbacks)
Author: Jokeren | Project: neon | Lines: 30 | Source: early_stopping.py


Example 20: Conv

          Conv((1, 1, 192), **conv),
          Conv((1, 1, 16), **conv),
          Pooling(8, op="avg"),
          Activation(Softmax())]


def stop_func(s, v):
    if s is None:
        return (v, False)

    return (min(v, s), v > s)

cost = GeneralizedCost(costfunc=CrossEntropyMulti())

mlp = Model(layers=layers)

if args.model_file:
    import os
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    mlp.load_params(args.model_file)

# configure callbacks
callbacks = Callbacks(mlp, eval_set=valid_set, **args.callback_args)
callbacks.add_early_stop_callback(stop_func)

if args.deconv:
    callbacks.add_deconv_callback(train_set, valid_set)

mlp.fit(train_set, optimizer=opt_gdm, num_epochs=50, cost=cost, callbacks=callbacks)
print('Misclassification error = %.1f%%' % (mlp.eval(valid_set, metric=Misclassification())*100))
Author: EnriqueSMarquez | Project: CNNs_RelatedProjects | Lines: 30 | Source: cifar10_allcnn.py



Note: the neon.callbacks.callbacks.Callbacks class examples in this article were compiled by 纯净天空 from GitHub/MSDocs and similar source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Refer to each project's License before distributing or using the code; do not repost without permission.

