This article collects typical usage examples of the mxnet.nd.array function in Python. If you have been wondering what nd.array does, how to call it, or what real-world uses look like, the curated code examples below may help.
Twenty code examples of the array function are shown, drawn from open-source projects and sorted by popularity by default.
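All of the snippets below are excerpts from larger files, so they omit their imports. Judging from the names used in the code, a reasonable common preamble (the aliases are assumptions, though they match the usual MXNet conventions) would be:

import random
import mxnet as mx
import numpy as np
from matplotlib import pyplot as plt
from mxnet import nd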
Example 1: preprocess_imdb

def preprocess_imdb(train_tokenized, test_tokenized, train_data, test_data,
                    vocab):
    """Preprocess the IMDB data set for sentiment analysis."""
    def encode_samples(tokenized_samples, vocab):
        # Map each token to its vocabulary index; unknown tokens map to 0.
        features = []
        for sample in tokenized_samples:
            feature = []
            for token in sample:
                if token in vocab.token_to_idx:
                    feature.append(vocab.token_to_idx[token])
                else:
                    feature.append(0)
            features.append(feature)
        return features

    def pad_samples(features, maxlen=500, PAD=0):
        # Truncate long samples and pad short ones to exactly `maxlen`.
        padded_features = []
        for feature in features:
            if len(feature) > maxlen:
                padded_feature = feature[:maxlen]
            else:
                padded_feature = feature
            while len(padded_feature) < maxlen:
                padded_feature.append(PAD)
            padded_features.append(padded_feature)
        return padded_features

    train_features = encode_samples(train_tokenized, vocab)
    test_features = encode_samples(test_tokenized, vocab)
    train_features = nd.array(pad_samples(train_features, 500, 0))
    test_features = nd.array(pad_samples(test_features, 500, 0))
    train_labels = nd.array([score for _, score in train_data])
    test_labels = nd.array([score for _, score in test_data])
    return train_features, test_features, train_labels, test_labels

Developer: xiaodongdreams, project: gluon-tutorials-zh, lines: 34, source: utils.py
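A quick way to sanity-check this function is with a tiny stand-in vocabulary. The real code expects a Vocab object exposing token_to_idx, so ToyVocab and the sample data below are purely hypothetical:

class ToyVocab:
    token_to_idx = {'good': 1, 'bad': 2}

train_tok = [['good', 'movie'], ['bad']]
train_dat = [('review text', 1), ('review text', 0)]
tr_X, te_X, tr_y, te_y = preprocess_imdb(train_tok, train_tok,
                                         train_dat, train_dat, ToyVocab())
print(tr_X.shape, tr_y.asnumpy())  # (2, 500) [1. 0.]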
Example 2: __init__

def __init__(self, is_train, crop_size, voc_dir, colormap2label):
    self.rgb_mean = nd.array([0.485, 0.456, 0.406])  # ImageNet channel means
    self.rgb_std = nd.array([0.229, 0.224, 0.225])   # ImageNet channel stds
    self.crop_size = crop_size
    data, labels = read_voc_images(root=voc_dir, is_train=is_train)
    self.data = [self.normalize_image(im) for im in self.filter(data)]
    self.labels = self.filter(labels)
    self.colormap2label = colormap2label
    print('read ' + str(len(self.data)) + ' examples')

Developer: tsintian, project: d2l-en, lines: 9, source: voc.py
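The normalize_image helper is not shown in this excerpt; in the d2l codebase it is the standard per-channel ImageNet normalization, so a plausible sketch (an assumption, not part of the excerpt) is:

def normalize_image(self, data):
    # Scale pixels to [0, 1], then standardize per RGB channel.
    return (data.astype('float32') / 255 - self.rgb_mean) / self.rgb_std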
Example 3: plotscore

def plotscore(w, d):
    xgrid = np.arange(-3, 3, 0.02)
    ygrid = np.arange(-3, 3, 0.02)
    xx, yy = np.meshgrid(xgrid, ygrid)
    zz = nd.zeros(shape=(xgrid.size, ygrid.size, 2))
    zz[:, :, 0] = nd.array(xx)
    zz[:, :, 1] = nd.array(yy)
    # The original referenced an undefined name `b` here; the otherwise
    # unused bias argument `d` is evidently what was meant.
    vv = nd.dot(zz, w) + d
    CS = plt.contour(xgrid, ygrid, vv.asnumpy())
    plt.clabel(CS, inline=1, fontsize=10)

Developer: sharmasx, project: mlheaven, lines: 10, source: mxnet_samples.py
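A minimal usage sketch, assuming the `b`-to-`d` fix above: plot the score contours of a toy linear classifier with weight vector w and bias d.

w = nd.array([1.0, -1.0])
plotscore(w, 0.5)
plt.show()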
Example 4: predict

def predict(net, data, label):
    data = nd.array(data)
    label = nd.array(label)
    hidden = net.begin_state(func=mx.nd.zeros, batch_size=data.shape[0], ctx=mx.cpu())
    # Reshape to (seq_len=5, batch_size, 11 features) for the RNN.
    dd = nd.array(data.reshape((data.shape[0], 5, 11)).swapaxes(0, 1))
    output, hidden = net(dd, hidden)
    output = output.reshape((5, data.shape[0], 1))
    output = nd.sum(output, axis=0) / 5  # average the 5 per-step outputs
    l = nd.argmax(output, axis=1)
    res = nd.mean(l == label)  # fraction of correct predictions
    return res.asscalar()

Developer: EGOISTK21, project: iFantasy, lines: 11, source: models.py
Example 5: preprocess_imdb

def preprocess_imdb(data, vocab):
    """Preprocess the IMDB data set for sentiment analysis."""
    max_l = 500  # truncate or pad every review to 500 tokens

    def pad(x):
        return x[:max_l] if len(x) > max_l else x + [0] * (max_l - len(x))

    tokenized_data = get_tokenized_imdb(data)
    features = nd.array([pad(vocab.to_indices(x)) for x in tokenized_data])
    labels = nd.array([score for _, score in data])
    return features, labels

Developer: tsintian, project: d2l-zh, lines: 11, source: utils.py
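The pad helper matters here because nd.array requires every row to have the same length. A toy check of its behavior, with max_l shrunk to 3 for brevity:

max_l = 3
pad = lambda x: x[:max_l] if len(x) > max_l else x + [0] * (max_l - len(x))
print(pad([5, 6]))        # [5, 6, 0]
print(pad([5, 6, 7, 8]))  # [5, 6, 7]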
Example 6: reset

def reset(self):
    """Resets the iterator to the beginning of the data."""
    self.curr_idx = 0
    random.shuffle(self.idx)
    # Shuffle the samples within each bucket.
    for i in range(len(self.data)):
        data, labels = self.data[i], self.labels[i]
        p = np.random.permutation(len(data))
        self.data[i], self.labels[i] = data[p], labels[p]
    self.nddata = []
    self.ndlabel = []
    for buck, label_buck in zip(self.data, self.labels):
        self.nddata.append(nd.array(buck, dtype=self.dtype))
        self.ndlabel.append(nd.array(label_buck, dtype=self.dtype))

Developer: FNDaily, project: amazon-sagemaker-examples, lines: 14, source: sentiment.py
Example 7: __iter__

def __iter__(self):
    data = self.dataset[:]
    X = data[0]
    y = nd.array(data[1])
    n = X.shape[0]
    if self.shuffle:
        # Shuffle by round-tripping through NumPy with a permuted index.
        idx = np.arange(n)
        np.random.shuffle(idx)
        X = nd.array(X.asnumpy()[idx])
        y = nd.array(y.asnumpy()[idx])
    for i in range(n // self.batch_size):
        yield (X[i * self.batch_size:(i + 1) * self.batch_size],
               y[i * self.batch_size:(i + 1) * self.batch_size])

Developer: liushuchun, project: machinelearning, lines: 14, source: utils.py
Example 8: build_array

def build_array(lines, vocab, max_len, is_source):
    lines = [vocab[line] for line in lines]
    if not is_source:
        # Target sequences are wrapped in <bos>/<eos> markers.
        lines = [[vocab.bos] + line + [vocab.eos] for line in lines]
    array = nd.array([pad(line, max_len, vocab.pad) for line in lines])
    valid_len = (array != vocab.pad).sum(axis=1)
    return array, valid_len

Developer: tsintian, project: d2l-en, lines: 7, source: nmt.py
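build_array relies on a pad helper defined elsewhere in the same file. A sketch matching its call signature (truncate to max_len, otherwise fill with the padding token), consistent with how d2l defines it:

def pad(line, max_len, padding_token):
    if len(line) > max_len:
        return line[:max_len]
    return line + [padding_token] * (max_len - len(line))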
Example 9: data_iter_random

def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None):
    """Sample mini-batches in a random order from sequential data."""
    # Subtract 1 because the labels Y are the inputs X shifted by one step.
    num_examples = (len(corpus_indices) - 1) // num_steps
    epoch_size = num_examples // batch_size
    example_indices = list(range(num_examples))
    random.shuffle(example_indices)

    def _data(pos):
        return corpus_indices[pos: pos + num_steps]

    for i in range(epoch_size):
        i = i * batch_size
        batch_indices = example_indices[i: i + batch_size]
        X = nd.array(
            [_data(j * num_steps) for j in batch_indices], ctx=ctx)
        Y = nd.array(
            [_data(j * num_steps + 1) for j in batch_indices], ctx=ctx)
        yield X, Y

Developer: wisemaker, project: gluon-tutorials-zh, lines: 16, source: utils.py
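A usage sketch on a toy index sequence (30 indices, so 4 examples of 6 steps, giving 2 batches of 2):

corpus = list(range(30))
for X, Y in data_iter_random(corpus, batch_size=2, num_steps=6):
    print(X.shape, Y.shape)  # (2, 6) (2, 6), with Y shifted one step from X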
Example 10: data_iter

def data_iter(batch_size, num_examples, features, labels):
    """Iterate over the data set in random mini-batches."""
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        j = nd.array(indices[i: min(i + batch_size, num_examples)])
        yield features.take(j), labels.take(j)

Developer: noticeable, project: gluon-tutorials-zh, lines: 7, source: utils.py
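A usage sketch on synthetic data; note that nd.array is used here to turn a Python index list into something NDArray.take accepts:

features = nd.random.normal(shape=(10, 2))
labels = nd.arange(10)
for X, y in data_iter(4, 10, features, labels):
    print(X.shape, y)  # batches of 4, 4, and 2 examples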
Example 11: run

def run(self, inputs, **kwargs):
    """Run model inference and return the result.

    Parameters
    ----------
    inputs : numpy array
        input to run a layer on

    Returns
    -------
    params : numpy array
        result obtained after running the inference on mxnet
    """
    # Pick the context; only CPU is supported for these tests.
    if self.device == 'CPU':
        ctx = mx.cpu()
    else:
        raise NotImplementedError("ONNX tests are run only for CPU context.")
    # Run inference.
    net_inputs = [nd.array(input_data, ctx=ctx) for input_data in inputs]
    net_outputs = self.net(*net_inputs)
    results = []
    results.extend([o for o in net_outputs.asnumpy()])
    result = np.array(results)
    return [result]

Developer: UniKrau, project: incubator-mxnet, lines: 27, source: gluon_backend_rep.py
Example 12: data_iter

def data_iter(batch_size, num_examples, X, y):
    """Iterate over the data set in random mini-batches."""
    idx = list(range(num_examples))
    random.shuffle(idx)
    for i in range(0, num_examples, batch_size):
        j = nd.array(idx[i: min(i + batch_size, num_examples)])
        yield X.take(j), y.take(j)

Developer: z01nl1o02, project: tests, lines: 7, source: utils.py
Example 13: forward

def forward(self, words1, words2, words3):  # pylint: disable=arguments-differ
    """Implement forward computation.

    Parameters
    ----------
    words1 : NDArray
        Word indices.
    words2 : NDArray
        Word indices.
    words3 : NDArray
        Word indices.

    Returns
    -------
    predicted_indices : NDArray
        Predicted indices of shape (batch_size, k)
    """
    pred_idxs = self.analogy(words1, words2, words3)
    if self.exclude_question_words:
        orig_context = pred_idxs.context
        pred_idxs = pred_idxs.asnumpy().tolist()
        # Drop the three question words from each row of candidates,
        # then keep the top k of what remains.
        pred_idxs = [[
            idx for idx in row if idx != w1 and idx != w2 and idx != w3
        ] for row, w1, w2, w3 in zip(pred_idxs, words1, words2, words3)]
        pred_idxs = [p[:self.k] for p in pred_idxs]
        pred_idxs = nd.array(pred_idxs, ctx=orig_context)
    return pred_idxs

Developer: arfu2016, project: gluon-nlp, lines: 29, source: evaluation.py
Example 14: __setitem__

def __setitem__(self, tokens, new_embedding):
    """Updates embedding vectors for tokens.

    If self.allow_extend is True, vectors for previously unknown tokens can be introduced.

    Parameters
    ----------
    tokens : hashable object or a list or tuple of hashable objects
        A token or a list of tokens whose embedding vector are to be updated.
    new_embedding : mxnet.ndarray.NDArray
        An NDArray to be assigned to the embedding vectors of `tokens`. Its length must be equal
        to the number of `tokens` and its width must be equal to the dimension of embedding of
        the glossary. If `tokens` is a singleton, it must be 1-D or 2-D. If `tokens` is a list
        of multiple strings, it must be 2-D.
    """
    if self.allow_extend and self._idx_to_vec is None:
        # Initialize self._idx_to_vec
        assert C.UNK_IDX == 0
        self._idx_to_vec = self._init_unknown_vec(shape=(1, new_embedding.shape[-1]))

    tokens = self._check_vector_update(tokens, new_embedding)

    if self.allow_extend:
        # Add new / previously unknown tokens
        for token in filter(lambda t: t not in self._token_to_idx, tokens):
            idx = len(self._token_to_idx)
            self._token_to_idx[token] = idx
            self._idx_to_token.append(token)

        num_extended = len(self._token_to_idx) - self.idx_to_vec.shape[0]
        if num_extended == 1:
            warnings.warn(
                'When adding new tokens via TokenEmbedding.__setitem__ '
                'the internal embedding matrix needs to be reallocated. '
                'Users are therefore encouraged to batch their updates '
                '(i.e. add multiple new tokens at a time).')

        # Extend shape of idx_to_vec
        idx_to_vec = nd.zeros(shape=(len(self._token_to_idx),
                                     self.idx_to_vec.shape[1]))
        idx_to_vec[:self.idx_to_vec.shape[0]] = self._idx_to_vec
        self._idx_to_vec = idx_to_vec

    indices = []
    for token in tokens:
        if token in self._token_to_idx:
            indices.append(self._token_to_idx[token])
        else:
            if self.unknown_token:
                raise KeyError(('Token "{}" is unknown. To update the embedding vector for an'
                                ' unknown token, please explicitly include "{}" as the '
                                '`unknown_token` in `tokens`. This is to avoid unintended '
                                'updates.').format(token, self._idx_to_token[C.UNK_IDX]))
            else:
                raise KeyError(('Token "{}" is unknown. Updating the embedding vector for an '
                                'unknown token is not allowed because `unknown_token` is not '
                                'specified.').format(token))

    self._idx_to_vec[nd.array(indices)] = new_embedding

Developer: hridaydutta123, project: gluon-nlp, lines: 59, source: token_embedding.py
Example 15: evals

def evals(net, adata, alabel, batch_size):
    hidden = net.begin_state(func=mx.nd.zeros, batch_size=batch_size, ctx=mx.cpu())
    dataLoader = DataLoader(adata, alabel)
    tl = 0  # running total of the loss
    for data, label in dataLoader.dataIter(batch_size):
        label = nd.array(label)
        #label = nd.ones(shape=(5,batch_size)) * label
        #label = label.reshape((-1,))
        dd = nd.array(data.reshape((batch_size, 5, 11)).swapaxes(0, 1))
        #hidden = detach(hidden)
        output, hidden = net(dd, hidden)
        output = output.reshape((5, batch_size, 1))
        output = nd.sum(output, axis=0) / 5  # average over the 5 time steps
        lv = loss(output, label)
        tl += nd.sum(lv).asscalar()
    return tl / len(adata)

Developer: EGOISTK21, project: iFantasy, lines: 17, source: models.py
Example 16: data_iter

def data_iter(batch_size, features, labels):
    """Iterate through a data set."""
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        j = nd.array(indices[i: min(i + batch_size, num_examples)])
        yield features.take(j), labels.take(j)

Developer: tsintian, project: d2l-zh, lines: 8, source: utils.py
Example 17: try_gpu

def try_gpu():
    """If GPU is available, return mx.gpu(0); else return mx.cpu()."""
    try:
        ctx = mx.gpu()
        _ = nd.array([0], ctx=ctx)  # forces a real allocation on the GPU
    except mx.base.MXNetError:  # the original used a bare `except:`
        ctx = mx.cpu()
    return ctx

Developer: liushuchun, project: machinelearning, lines: 8, source: utils.py
Example 18: try_gpu

def try_gpu():
    """If GPU is available, return mx.gpu(0); else return mx.cpu()."""
    try:
        ctx = mx.gpu()
        _ = nd.array([0], ctx=ctx)
    except mx.base.MXNetError:
        ctx = mx.cpu()
    return ctx

Developer: tsintian, project: d2l-zh, lines: 8, source: utils.py
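Either variant is typically called once at startup; the returned context is then threaded through array creation and model initialization:

ctx = try_gpu()
x = nd.array([1, 2, 3], ctx=ctx)
print(x.context)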
Example 19: grad_clipping

def grad_clipping(params, theta, ctx):
    """Clip the gradient."""
    # Accumulate the global L2 norm across all parameter gradients.
    norm = nd.array([0], ctx)
    for param in params:
        norm += (param.grad ** 2).sum()
    norm = norm.sqrt().asscalar()
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm

Developer: tsintian, project: d2l-en, lines: 9, source: train.py
Example 20: grad_clipping

def grad_clipping(params, theta, ctx):
    """Gradient clipping."""
    if theta is not None:
        norm = nd.array([0.0], ctx)
        for p in params:
            norm += nd.sum(p.grad ** 2)
        norm = nd.sqrt(norm).asscalar()
        if norm > theta:
            for p in params:
                p.grad[:] *= theta / norm

Developer: cuipeng, project: gluon-tutorials-zh, lines: 10, source: utils.py
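A hedged end-to-end sketch of clipping inside a manual training step, using a toy parameter and an assumed theta of 1.0:

from mxnet import autograd

ctx = mx.cpu()
params = [nd.random.normal(shape=(4, 4), ctx=ctx)]
for p in params:
    p.attach_grad()  # allocate gradient storage
with autograd.record():
    l = (params[0] * params[0]).sum()
l.backward()
grad_clipping(params, theta=1.0, ctx=ctx)  # rescales grads if global norm > 1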
Note: the mxnet.nd.array function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.