This article collects typical usage examples of the Python function neon.logger.display. If you are wondering what exactly display does, how to use it, or what calling it looks like in practice, the curated examples below should help.
The 20 code examples of the display function shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
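Before diving in, here is a minimal sketch of the usual import-and-call pattern; the import alias matches the one every snippet on this page uses (the sample messages are invented for illustration):

from neon import logger as neon_logger

# display() routes a user-facing message through neon's logging setup,
# so it reaches the console without a bare print().
neon_logger.display("Hello from neon_logger.display")
neon_logger.display("Values can be formatted first: {}".format(42))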
Example 1: fetch_dataset
def fetch_dataset(url, sourcefile, destfile, totalsz):
    """
    Download the file specified by the given URL.

    Args:
        url (str): Base URL of the file to be downloaded.
        sourcefile (str): Name of the source file.
        destfile (str): Path to the destination.
        totalsz (int): Size of the file to be downloaded.
    """
    req = Request(os.path.join(url, sourcefile), headers={'User-Agent': 'neon'})
    # backport https limitation and workaround per http://python-future.org/imports.html
    cloudfile = urlopen(req)
    neon_logger.display("Downloading file: {}".format(destfile))
    blockchar = u'\u2588'  # character to display in progress bar
    with open(destfile, 'wb') as f:
        data_read = 0
        chunksz = 1024**2
        while 1:
            data = cloudfile.read(chunksz)
            if not data:
                break
            data_read = min(totalsz, data_read + chunksz)
            progress_string = u'Download Progress |{:<50}| '.format(
                blockchar * int(float(data_read) / totalsz * 50))
            sys.stdout.write('\r')
            if PY3:
                sys.stdout.write(progress_string)
            else:
                sys.stdout.write(progress_string.encode("utf-8"))
            sys.stdout.flush()
            f.write(data)
    neon_logger.display("Download Complete")
Developer: NervanaSystems, Project: neon, Lines: 34, Source: datasets.py
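A hypothetical invocation of the snippet above might look like this (the URL, file names, and size are placeholders, not a real dataset):

# Placeholder arguments for illustration only; totalsz drives the progress bar.
fetch_dataset('http://example.com/datasets', 'train.bin', '/tmp/train.bin',
              totalsz=10 * 1024 ** 2)  # expected file size: 10 MB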
Example 2: display_model_params
def display_model_params(neon_args, neon_root_yaml):
    """
    Display model parameters.

    :param neon_args: contains command line arguments
    :param neon_root_yaml: contains YAML elements
    """
    output_string = '\n-- INFORMATION: HYPER PARAMETERS ------\n'
    try:
        output_string = add_param_to_output(output_string,
                                            'backend',
                                            neon_args.backend)
        output_string = add_param_to_output(output_string,
                                            'batch size',
                                            neon_args.batch_size)
        output_string = add_param_to_output(output_string,
                                            'epochs',
                                            neon_args.epochs)
        output_string = add_param_to_output(output_string,
                                            'optimizer type',
                                            neon_root_yaml['optimizer']['type'])
        output_string = add_param_to_output(output_string,
                                            'learning rate',
                                            neon_root_yaml['optimizer']['config']['learning_rate'])
        output_string = add_param_to_output(output_string,
                                            'momentum coef',
                                            neon_root_yaml['optimizer']['config']['momentum_coef'])
    except Exception:
        output_string += 'Some parameters cannot be displayed\n'
    output_string += '----------------------------------------'
    neon_logger.display(output_string)
Developer: StevenLOL, Project: neon, Lines: 30, Source: display_information.py
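The helper add_param_to_output is not shown on this page; a minimal stand-in consistent with its call sites (a guess for illustration, not the actual project code) would be:

def add_param_to_output(output_string, param_name, param_value):
    # Append one 'name: value' line to the accumulated report string.
    return output_string + '{}: {}\n'.format(param_name, param_value)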
Example 3: train_model
def train_model(lrmodel, opt, cost, X, Y, devX, devY, devscores):
    """
    Train model, using pearsonr on dev for early stopping.
    """
    done = False
    best = -1.0
    r = np.arange(1, 6)
    train_set = ArrayIterator(X=X, y=Y, make_onehot=False)
    valid_set = ArrayIterator(X=devX, y=devY, make_onehot=False)
    eval_epoch = 10

    while not done:
        callbacks = Callbacks(lrmodel, eval_set=valid_set)
        lrmodel.fit(train_set, optimizer=opt, num_epochs=eval_epoch,
                    cost=cost, callbacks=callbacks)

        # Every 10 epochs, check Pearson on development set
        yhat = np.dot(lrmodel.get_outputs(valid_set), r)
        score = pearsonr(yhat, devscores)[0]
        if score > best:
            neon_logger.display('Dev Pearson: {}'.format(score))
            best = score
            bestlrmodel = copy.copy(lrmodel)
        else:
            done = True

        eval_epoch += 10

    yhat = np.dot(bestlrmodel.get_outputs(valid_set), r)
    score = pearsonr(yhat, devscores)[0]
    neon_logger.display('Dev Pearson: {}'.format(score))
    return bestlrmodel
Developer: NervanaSystems, Project: neon, Lines: 35, Source: eval_sick.py
Example 4: train_or_val_pairs
def train_or_val_pairs(self, setn):
    """
    Untar ImageNet tar files into directories that indicate their label.
    Returns [(filename, label), ...] for train or val set partitions.
    """
    img_dir = os.path.join(self.out_dir, setn)
    neon_logger.display("Extracting %s files" % (setn))
    root_tf_path = self.tars[setn]
    if not os.path.exists(root_tf_path):
        raise IOError(("tar file {} not found. Ensure you have ImageNet downloaded"
                       ).format(root_tf_path))
    try:
        root_tf = tarfile.open(root_tf_path)
    except tarfile.ReadError as e:
        raise ValueError('ReadError opening {}: {}'.format(root_tf_path, e))

    label_dict = self.extract_labels(setn)
    subpaths = root_tf.getmembers()
    arg_iterator = zip(repeat(self.target_size), repeat(root_tf_path), repeat(img_dir),
                       repeat(setn), repeat(label_dict), subpaths)
    pool = multiprocessing.Pool()

    pairs = []
    for pair_list in tqdm.tqdm(pool.imap_unordered(process_i1k_tar_subpath, arg_iterator),
                               total=len(subpaths)):
        pairs.extend(pair_list)

    pool.close()
    pool.join()
    root_tf.close()
    return pairs
Developer: StevenLOL, Project: neon, Lines: 34, Source: ingest.py
Example 5: extract_images
def extract_images(self, overwrite=False):
    for setn in ('train', 'val'):
        img_dir = os.path.join(self.out_dir, setn)
        neon_logger.display("Extracting %s files" % (setn))
        toptar = getattr(self, setn + '_tar')
        label_dict = getattr(self, setn + '_labels')
        name_slice = slice(None, 9) if setn == 'train' else slice(15, -5)
        with tarfile.open(toptar) as tf:
            for s in tf.getmembers():
                label = label_dict[s.name[name_slice]]
                subpath = os.path.join(img_dir, str(label))
                if not os.path.exists(subpath):
                    os.makedirs(subpath)
                if setn == 'train':
                    tarfp = tarfile.open(fileobj=tf.extractfile(s))
                    file_list = tarfp.getmembers()
                else:
                    tarfp = tf
                    file_list = [s]
                for fobj in file_list:
                    fname = os.path.join(subpath, fobj.name)
                    if not os.path.exists(fname) or overwrite:
                        with open(fname, 'wb') as jf:
                            jf.write(tarfp.extractfile(fobj).read())
Developer: Jokeren, Project: neon, Lines: 26, Source: batch_writer.py
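The name_slice values encode how a label key is recovered from each tar member's name. Assuming standard ILSVRC2012 member names (an assumption, since they are not shown above), the two slices select:

# Train tar holds one sub-tar per synset; val tar holds the JPEGs directly.
train_name = 'n01440764.tar'
val_name = 'ILSVRC2012_val_00000001.JPEG'
print(train_name[slice(None, 9)])  # 'n01440764' -- the synset id
print(val_name[slice(15, -5)])     # '00000001'  -- the image number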
Example 6: write_csv_files
def write_csv_files(self, overwrite=False):
    self.extract_images()
    for setn in ('train', 'val'):
        img_dir = os.path.join(self.out_dir, setn)
        csvfile = getattr(self, setn + '_file')
        neon_logger.display("Getting %s file list" % (setn))
        if os.path.exists(csvfile) and not overwrite:
            neon_logger.display("File %s exists, not overwriting" % (csvfile))
            continue
        flines = []

        subdirs = glob(os.path.join(img_dir, '*'))
        for subdir in subdirs:
            subdir_label = os.path.basename(subdir)  # This is the int label
            files = glob(os.path.join(subdir, self.file_pattern))
            flines += [(filename, subdir_label) for filename in files]

        if setn == 'train':
            np.random.seed(0)
            np.random.shuffle(flines)

        with gzip.open(csvfile, 'wb') as f:
            f.write('filename,l_id\n')
            for tup in flines:
                f.write('{},{}\n'.format(*tup))
Developer: Jokeren, Project: neon, Lines: 25, Source: batch_writer.py
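One portability note: this snippet dates from neon's Python 2 days, and under Python 3 writing a str to a gzip file opened in 'wb' mode raises TypeError. A Python 3-safe sketch of the write would use text mode:

import gzip

# 'wt' opens the gzip stream in text mode, so plain str writes work on Python 3.
with gzip.open('train_file.csv.gz', 'wt') as f:
    f.write('filename,l_id\n')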
Example 7: checkSequentialMatchesBatch
def checkSequentialMatchesBatch():
    """ check LSTM I/O forward/backward interactions """
    n, b, d = (5, 3, 4)  # sequence length, batch size, hidden size
    input_size = 10
    WLSTM = LSTM.init(input_size, d)  # input size, hidden size
    X = np.random.randn(n, b, input_size)
    h0 = np.random.randn(b, d)
    c0 = np.random.randn(b, d)

    # sequential forward
    cprev = c0
    hprev = h0
    caches = [{} for t in range(n)]
    Hcat = np.zeros((n, b, d))
    for t in range(n):
        xt = X[t:t + 1]
        _, cprev, hprev, cache = LSTM.forward(xt, WLSTM, cprev, hprev)
        caches[t] = cache
        Hcat[t] = hprev

    # sanity check: perform batch forward to check that we get the same thing
    H, _, _, batch_cache = LSTM.forward(X, WLSTM, c0, h0)
    assert np.allclose(H, Hcat), "Sequential and batch forward don't match!"

    # eval loss
    wrand = np.random.randn(*Hcat.shape)
    # loss = np.sum(Hcat * wrand)
    dH = wrand

    # get the batched version gradients
    BdX, BdWLSTM, Bdc0, Bdh0 = LSTM.backward(dH, batch_cache)

    # now perform sequential backward
    dX = np.zeros_like(X)
    dWLSTM = np.zeros_like(WLSTM)
    dc0 = np.zeros_like(c0)
    dh0 = np.zeros_like(h0)
    dcnext = None
    dhnext = None
    for t in reversed(range(n)):
        dht = dH[t].reshape(1, b, d)
        dx, dWLSTMt, dcprev, dhprev = LSTM.backward(
            dht, caches[t], dcnext, dhnext)
        dhnext = dhprev
        dcnext = dcprev
        dWLSTM += dWLSTMt  # accumulate LSTM gradient
        dX[t] = dx[0]
        if t == 0:
            dc0 = dcprev
            dh0 = dhprev

    # and make sure the gradients match
    neon_logger.display('Making sure batched version agrees with sequential version: '
                        '(should all be True)')
    neon_logger.display(np.allclose(BdX, dX))
    neon_logger.display(np.allclose(BdWLSTM, dWLSTM))
    neon_logger.display(np.allclose(Bdc0, dc0))
    neon_logger.display(np.allclose(Bdh0, dh0))
Developer: JediKoder, Project: neon, Lines: 60, Source: lstm_ref.py
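The commented-out loss line explains why dH is simply wrand: for loss = np.sum(Hcat * wrand), the gradient of the loss with respect to Hcat is exactly wrand, so the backward pass can be exercised without defining a real loss. A tiny finite-difference check of that identity (illustrative only, not part of the original test):

import numpy as np

H = np.random.randn(2, 3)
w = np.random.randn(2, 3)
eps = 1e-6
i, j = 1, 2                  # probe one coordinate
Hp, Hm = H.copy(), H.copy()
Hp[i, j] += eps
Hm[i, j] -= eps
numeric = (np.sum(Hp * w) - np.sum(Hm * w)) / (2 * eps)
assert np.isclose(numeric, w[i, j])  # matches the analytic gradient w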
Example 8: display_platform_information
def display_platform_information():
    """
    Display platform information.
    """
    import platform
    output_string = '\n-- INFORMATION: PLATFORM & OS ---------\n'
    try:
        output_string = add_param_to_output(output_string,
                                            'OS',
                                            platform.platform())
        output_string = add_param_to_output(output_string,
                                            'OS release version',
                                            platform.version())
        output_string = add_param_to_output(output_string,
                                            'machine',
                                            platform.machine())
        output_string = add_param_to_output(output_string,
                                            'node',
                                            platform.node())
        output_string = add_param_to_output(output_string,
                                            'python version',
                                            platform.python_version())
        output_string = add_param_to_output(output_string,
                                            'python build',
                                            platform.python_build())
        output_string = add_param_to_output(output_string,
                                            'python compiler',
                                            platform.python_compiler())
    except Exception:
        output_string += 'Some platform information cannot be displayed\n'
    output_string += '----------------------------------------'
    neon_logger.display(output_string)
Developer: StevenLOL, Project: neon, Lines: 35, Source: display_information.py
Example 9: run
def run(self):
    if not os.path.exists(self.out_dir):
        os.makedirs(self.out_dir)
    neon_logger.display("Writing train macrobatches")
    self.write_batches(self.train_start, self.labels['train'], self.imgs['train'])
    neon_logger.display("Writing validation macrobatches")
    self.write_batches(self.val_start, self.labels['val'], self.imgs['val'])
    self.save_meta()
Developer: Jokeren, Project: neon, Lines: 8, Source: batch_writer.py
Example 10: __init__
def __init__(self, path='.', task='qa1_single-supporting-fact', subset='en'):
    """
    Load the bAbI dataset, extract the text, and read the stories.
    For a particular task, the class will read both train and test files
    and combine the vocabulary.

    Arguments:
        path (str): Directory to store the dataset
        task (str): a particular task to solve (all bAbI tasks are trained
                    and tested separately)
        subset (str): subset of the dataset to use:
                      {en, en-10k, shuffled, hn, hn-10k, shuffled-10k}
    """
    url = 'http://www.thespermwhale.com/jaseweston/babi'
    size = 11745123
    filename = 'tasks_1-20_v1-2.tar.gz'
    super(BABI, self).__init__(filename,
                               url,
                               size,
                               path=path)
    self.task = task
    self.subset = subset

    neon_logger.display('Preparing bAbI dataset or extracting from %s' % path)
    neon_logger.display('Task is %s/%s' % (subset, task))
    self.tasks = [
        'qa1_single-supporting-fact',
        'qa2_two-supporting-facts',
        'qa3_three-supporting-facts',
        'qa4_two-arg-relations',
        'qa5_three-arg-relations',
        'qa6_yes-no-questions',
        'qa7_counting',
        'qa8_lists-sets',
        'qa9_simple-negation',
        'qa10_indefinite-knowledge',
        'qa11_basic-coreference',
        'qa12_conjunction',
        'qa13_compound-coreference',
        'qa14_time-reasoning',
        'qa15_basic-deduction',
        'qa16_basic-induction',
        'qa17_positional-reasoning',
        'qa18_size-reasoning',
        'qa19_path-finding',
        'qa20_agents-motivations'
    ]
    assert task in self.tasks, "given task is not in the bAbI dataset"

    self.train_file, self.test_file = self.load_data(path, task)
    self.train_parsed = BABI.parse_babi(self.train_file)
    self.test_parsed = BABI.parse_babi(self.test_file)

    self.compute_statistics()
    self.train = self.vectorize_stories(self.train_parsed)
    self.test = self.vectorize_stories(self.test_parsed)
Developer: JediKoder, Project: neon, Lines: 56, Source: questionanswer.py
Example 11: test_dataset
def test_dataset(backend_default, data):
    dataset = MNIST(path=data)
    dataset.gen_iterators()
    train_set = dataset.data_dict['train']
    train_set.be = NervanaObject.be

    for i in range(2):
        for X_batch, y_batch in train_set:
            neon_logger.display("Xshape: {}, yshape: {}".format(X_batch.shape, y_batch.shape))
        train_set.index = 0
Developer: NervanaSystems, Project: neon, Lines: 10, Source: test_dataset.py
Example 12: test_dataset
def test_dataset(backend_default, data):
    (X_train, y_train), (X_test, y_test), nclass = load_mnist(path=data)
    train_set = ArrayIterator(X_train, y_train, nclass=nclass)
    train_set.be = NervanaObject.be

    for i in range(2):
        for X_batch, y_batch in train_set:
            neon_logger.display("Xshape: {}, yshape: {}".format(X_batch.shape, y_batch.shape))
        train_set.index = 0
Developer: JediKoder, Project: neon, Lines: 10, Source: test_dataset.py
Example 13: load_vocab
def load_vocab(self):
    """
    Load vocab and initialize buffers.
    Input sentence batch is of dimension (vocab_size, max_sentence_length * batch_size)
    where each column is the 1-hot representation of a word and the first batch_size
    columns are the first words of each sentence.
    """
    sentences = [sent['tokens'] for sent in self.iterSentences()]

    # Flatten list of list of words to one list of words
    words = [word for sentence in sentences for word in sentence]

    # Count words and keep words greater than threshold
    word_counts = Counter(words)
    vocab = [self.end_token] + \
            [word for word in list(word_counts.keys()) if word_counts[word] >= 5]
    self.vocab_size = len(vocab)
    self.vocab_to_index = dict((c, i) for i, c in enumerate(vocab))
    self.index_to_vocab = dict((i, c) for i, c in enumerate(vocab))

    # Compute optional bias vector for initializing final linear layer bias
    word_counts[self.end_token] = len(sentences)
    self.bias_init = np.array([1.0 * word_counts[self.index_to_vocab[i]]
                               for i in self.index_to_vocab]).reshape((self.vocab_size, 1))
    self.bias_init /= np.sum(self.bias_init)
    self.bias_init = np.log(self.bias_init)
    self.bias_init -= np.max(self.bias_init)

    self.max_sentence_length = max(len(sent) for sent in sentences) + 1

    self.dev_image = self.be.iobuf(self.image_size)
    self.dev_imageT = self.be.empty(self.dev_image.shape[::-1])
    self.dev_X = self.be.iobuf((self.vocab_size, self.max_sentence_length))
    self.dev_y = self.be.iobuf((self.vocab_size, self.max_sentence_length + 1))

    # Create mask to deal with variable length sentences
    self.dev_y_mask = self.be.iobuf((self.vocab_size, self.max_sentence_length + 1))
    self.y_mask = np.zeros(self.dev_y_mask.shape,
                           dtype=np.uint8).reshape(self.vocab_size,
                                                   self.max_sentence_length + 1, -1)
    self.y_mask_reshape = self.y_mask.reshape(self.dev_y_mask.shape)

    self.dev_lbl = self.be.iobuf(self.max_sentence_length, dtype=np.int32)
    self.dev_lblT = self.be.empty(self.dev_lbl.shape[::-1])
    self.dev_lblflat = self.dev_lbl.reshape((1, self.dev_lbl.size))

    self.dev_y_lbl = self.be.iobuf(self.max_sentence_length + 1, dtype=np.int32)
    self.dev_y_lblT = self.be.empty(self.dev_y_lbl.shape[::-1])
    self.dev_y_lblflat = self.dev_y_lbl.reshape((1, self.dev_y_lbl.size))

    self.shape = [self.image_size, (self.vocab_size, self.max_sentence_length)]
    neon_logger.display("Vocab size: %d, Max sentence length: %d" % (self.vocab_size,
                                                                     self.max_sentence_length))
Developer: NervanaSystems, Project: neon, Lines: 52, Source: imagecaption.py
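The bias_init block is the common trick of initializing the final softmax bias to normalized log word frequencies, so the model starts out predicting words at roughly their corpus rates. The same computation in isolation, with made-up counts:

import numpy as np

counts = np.array([50.0, 30.0, 20.0]).reshape(3, 1)  # illustrative word counts
bias = counts / counts.sum()  # relative frequency
bias = np.log(bias)           # log-probability
bias -= bias.max()            # shift so the most frequent entry gets bias 0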
Example 14: _print_tree
def _print_tree(node, level=0):
    """
    print tree with indentation
    """
    if type(node) is list:
        neon_logger.display((" " * level) + ", ".join(native_str(s) for s in node[0:3]))
        if len(node) > 3:
            _print_tree(node[3], level + 1)
        if len(node) > 4:
            _print_tree(node[4], level + 1)
    else:
        neon_logger.display((" " * level) + native_str(node))
Developer: NervanaSystems, Project: neon, Lines: 13, Source: float_ew.py
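An illustrative call, with a made-up node layout matching what the function expects (up to three printable fields per node, then optional subtrees at indices 3 and 4):

# Hypothetical optree-style node, for illustration only.
tree = ['add', 'f32', 'out',
        ['mul', 'f32', 'a'],
        ['sqrt', 'f32', 'b']]
_print_tree(tree)
# Displays:
# add, f32, out
#  mul, f32, a
#  sqrt, f32, b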
Example 15: train_mlp_classifier
def train_mlp_classifier(dataset, model_file_path, num_epochs, callback_args):
    """
    Train the np_semantic_segmentation MLP classifier.

    Args:
        dataset: NpSemanticSegData object containing the dataset
        model_file_path (str): model path
        num_epochs (int): number of epochs
        callback_args (dict): callback args

    Returns:
        prints the model's error_rate, test_accuracy_rate and
        precision_recall_rate evaluation results
    """
    model = NpSemanticSegClassifier(num_epochs, callback_args)
    model.build()

    # run fit
    model.fit(dataset.test_set, dataset.train_set)

    # save model params
    model.save(model_file_path)

    # set evaluation error rates
    error_rate, test_accuracy_rate, precision_recall_rate = model.eval(dataset.test_set)
    neon_logger.display('Misclassification error = %.1f%%' %
                        (error_rate * 100))
    neon_logger.display('Test accuracy rate = %.1f%%' %
                        (test_accuracy_rate * 100))
    neon_logger.display('precision rate = %s!!' %
                        (str(precision_recall_rate[0])))
    neon_logger.display('recall rate = %s!!' %
                        (str(precision_recall_rate[1])))
Developer: cdj0311, Project: nlp-architect, Lines: 29, Source: train.py
Example 16: display_text
def display_text(index_to_token, gt, pr):
    """
    Print out some example strings of input - output pairs.
    """
    index_to_token[0] = '|'  # remove actual line breaks
    display_len = 3 * time_steps

    # sample 3 sentences and their start and end time steps
    (s1_s, s1_e) = (0, time_steps)
    (s2_s, s2_e) = (time_steps, 2 * time_steps)
    (s3_s, s3_e) = (2 * time_steps, 3 * time_steps)

    gt_string = "".join([index_to_token[gt[k]] for k in range(display_len)])
    pr_string = "".join([index_to_token[pr[k]] for k in range(display_len)])
    match = np.where([gt_string[k] == pr_string[k] for k in range(display_len)])
    di_string = "".join([gt_string[k] if k in match[0] else '.'
                         for k in range(display_len)])

    neon_logger.display('GT: [' + gt_string[s1_s:s1_e] + '] '
                        '[' + gt_string[s2_s:s2_e] + '] '
                        '[' + gt_string[s3_s:s3_e] + '] ')
    neon_logger.display('Pred: [' + pr_string[s1_s:s1_e] + '] '
                        '[' + pr_string[s2_s:s2_e] + '] '
                        '[' + pr_string[s3_s:s3_e] + '] ')
    neon_logger.display('Difference indicated by .')
    neon_logger.display('Diff: [' + di_string[s1_s:s1_e] + '] '
                        '[' + di_string[s2_s:s2_e] + '] '
                        '[' + di_string[s3_s:s3_e] + '] ')
Developer: StevenLOL, Project: neon, Lines: 33, Source: char_rae.py
Example 17: main
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    parser.add_argument('--output_path', required=True,
                        help='Output path used when training model')
    parser.add_argument('--w2v_path', required=False, default=None,
                        help='Path to GoogleNews w2v file for vocab expansion.')
    parser.add_argument('--eval_data_path', required=False, default='./SICK_data',
                        help='Path to the SICK dataset for evaluating semantic relatedness')
    parser.add_argument('--max_vocab_size', required=False, default=1000000,
                        help='Limit the vocabulary expansion to fit in GPU memory')
    parser.add_argument('--subset_pct', required=False, default=100,
                        help='subset of training dataset to use (use to retrieve '
                             'preprocessed data from training)')
    args = parser.parse_args(gen_be=True)

    # load vocab file from training
    _, vocab_file = load_data(args.data_dir, output_path=args.output_path,
                              subset_pct=float(args.subset_pct))
    vocab, _, _ = load_obj(vocab_file)
    vocab_size = len(vocab)
    neon_logger.display("\nVocab size from the dataset is: {}".format(vocab_size))

    index_from = 2  # 0: padding 1: oov
    vocab_size_layer = vocab_size + index_from
    max_len = 30

    # load trained model
    model_dict = load_obj(args.model_file)

    # Vocabulary expansion trick needs to pass the correct vocab set
    # to evaluate (for tokenization)
    if args.w2v_path:
        neon_logger.display("Performing Vocabulary Expansion... Loading W2V...")
        w2v_vocab, w2v_vocab_size = get_w2v_vocab(args.w2v_path,
                                                  int(args.max_vocab_size), cache=True)
        vocab_size_layer = w2v_vocab_size + index_from
        model = load_sent_encoder(model_dict, expand_vocab=True, orig_vocab=vocab,
                                  w2v_vocab=w2v_vocab, w2v_path=args.w2v_path,
                                  use_recur_last=True)
        vocab = w2v_vocab
    else:
        # otherwise stick with original vocab size used to train the model
        model = load_sent_encoder(model_dict, use_recur_last=True)

    model.initialize(dataset=(max_len, 1))
    evaluate(model, vocab=vocab, data_path=args.eval_data_path, evaltest=True,
             vocab_size_layer=vocab_size_layer)
Developer: NervanaSystems, Project: neon, Lines: 49, Source: eval_sick.py
Example 18: load_vgg_weights
def load_vgg_weights(model, path):
    # load a pre-trained VGG16 from the neon model zoo to the local path
    url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/'
    filename = 'VGG_D_Conv.p'
    size = 169645138

    workdir, filepath = Dataset._valid_path_append(path, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)

    neon_logger.display('De-serializing the pre-trained VGG16 model...')
    pdict = load_obj(filepath)

    param_layers = [l for l in model.layers.layers[0].layers[0].layers]
    param_dict_list = pdict['model']['config']['layers']
    for layer, ps in zip(param_layers, param_dict_list):
        neon_logger.display("{} {}".format(layer.name, ps['config']['name']))
        layer.load_weights(ps, load_states=True)
Developer: JediKoder, Project: neon, Lines: 18, Source: util.py
Example 19: test_roipooling_fprop_random
def test_roipooling_fprop_random(backend_default, fargs):
    rois_per_image, img_fm_c, img_fm_h, img_fm_w, roi_size, bsz = fargs

    # generate a random feature map and some random ROIs
    feature_maps = np.random.random(
        (img_fm_c, img_fm_h, img_fm_w, bsz)).reshape(-1, bsz)
    rois_per_batch = rois_per_image * bsz

    rois_idx = np.vstack([i * np.ones((rois_per_image, 1)) for i in range(bsz)])
    rois = np.random.random((rois_per_batch, 4)) * min(img_fm_h, img_fm_w)

    rois = np.zeros((rois_per_batch, 4))
    rois[:, 0] = np.random.random((rois_per_batch,)) * 10 / spatial_scale
    rois[:, 1] = np.random.random((rois_per_batch,)) * 25 / spatial_scale
    rois[:, 2] = (
        np.random.random((rois_per_batch,)) * 27 + (img_fm_w - 27)) / spatial_scale
    rois[:, 3] = (
        np.random.random((rois_per_batch,)) * 12 + (img_fm_h - 12)) / spatial_scale

    rois = np.hstack((rois_idx, rois))

    # run the numpy roi fprop (function inside this test script)
    outputs_np = fprop_roipooling_ref(feature_maps, rois,
                                      img_fm_c, img_fm_h, img_fm_w,
                                      bsz, rois_per_image, roi_size, roi_size)

    # call backend roipooling kernel
    NervanaObject.be.bsz = bsz
    be = NervanaObject.be

    input_dev = be.array(feature_maps)
    rois_dev = be.array(rois)
    output_shape = (img_fm_c, roi_size, roi_size, rois_per_batch)
    outputs_dev = be.zeros(output_shape)
    # make sure the type is int
    argmax_dev = be.zeros(output_shape, np.int32)

    start_time = timeit()
    be.roipooling_fprop(input_dev, rois_dev, outputs_dev, argmax_dev, rois_per_batch,
                        img_fm_c, img_fm_h, img_fm_w, roi_size, roi_size, spatial_scale)
    neon_logger.display("Nervana backend roipooling fprop (sec): {}".format(timeit() - start_time))

    outputs_be = outputs_dev.get().reshape(-1, rois_per_batch)
    assert allclose_with_out(outputs_np, outputs_be, atol=1e-6, rtol=0)
Developer: StevenLOL, Project: neon, Lines: 44, Source: test_roipooling_layer.py
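Note that spatial_scale is a module-level constant in the original test file, not defined in this snippet. To run it standalone you would need something like the following (1/16 is the usual feature-map stride in ROI-pooling setups, an assumption here, not taken from the snippet):

spatial_scale = 1.0 / 16  # assumed feature-map stride; module-level in the real test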
Example 20: display_cpu_information
def display_cpu_information():
    """
    Display CPU information.
    Assumes all CPUs are the same.
    """
    import cpuinfo
    output_string = '\n-- INFORMATION: CPU -------------------\n'
    cpu_info = cpuinfo.get_cpu_info()
    try:
        output_string = add_param_to_output(output_string,
                                            'brand',
                                            cpu_info['brand'])
        output_string = add_param_to_output(output_string,
                                            'vendor id',
                                            cpu_info['vendor_id'])
        output_string = add_param_to_output(output_string,
                                            'model',
                                            cpu_info['model'])
        output_string = add_param_to_output(output_string,
                                            'family',
                                            cpu_info['family'])
        output_string = add_param_to_output(output_string,
                                            'bits',
                                            cpu_info['bits'])
        output_string = add_param_to_output(output_string,
                                            'architecture',
                                            cpu_info['arch'])
        output_string = add_param_to_output(output_string,
                                            'cores',
                                            cpu_info['count'])
        output_string = add_param_to_output(output_string,
                                            'advertised Hz',
                                            cpu_info['hz_advertised'])
        output_string = add_param_to_output(output_string,
                                            'actual Hz',
                                            cpu_info['hz_actual'])
        output_string = add_param_to_output(output_string,
                                            'l2 cache size',
                                            cpu_info['l2_cache_size'])
    except Exception:
        output_string += 'Some CPU information cannot be displayed\n'
    output_string += '----------------------------------------'
    neon_logger.display(output_string)
Developer: StevenLOL, Project: neon, Lines: 43, Source: display_information.py
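Here cpuinfo is the third-party py-cpuinfo package (pip install py-cpuinfo). Recent releases renamed some keys, e.g. 'brand' became 'brand_raw', which is exactly the kind of KeyError the try/except above absorbs. A defensive lookup might read:

import cpuinfo

info = cpuinfo.get_cpu_info()
# Tolerate both old and new py-cpuinfo key names (the rename is an assumption
# about your installed version; inspect info.keys() to be sure).
brand = info.get('brand') or info.get('brand_raw')
print(brand)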
Note: the neon.logger.display examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are selected from open-source projects contributed by many developers, and copyright remains with the original authors. Consult the corresponding project's license before redistributing or reusing any code; do not repost without permission.