This article collects typical usage examples of the Python function mxnet.nd.zeros. If you are wondering what exactly nd.zeros does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.
The article presents 20 code examples of the zeros function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
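Before the examples, a minimal sketch of the basic call, written for this article rather than taken from any of the projects below: nd.zeros allocates an NDArray of the given shape filled with zeros; dtype and ctx (the target device) are optional keyword arguments.

from mxnet import nd
import mxnet as mx

# A 2x3 float32 array of zeros on the CPU (the default context).
a = nd.zeros(shape=(2, 3))

# dtype and ctx can be set explicitly.
b = nd.zeros(shape=(4,), dtype='int32', ctx=mx.cpu())
print(a.shape, b.dtype)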
Example 1: sample
def sample(prefix, num_chars, temperature=1.0):
#####################################
# Initialize the string that we'll return to the supplied prefix
#####################################
string = prefix
#####################################
# Prepare the prefix as a sequence of one-hots for ingestion by RNN
#####################################
prefix_numerical = [character_dict[char] for char in prefix]
input = one_hots(prefix_numerical)
#####################################
# Set the initial state of the hidden representation ($h_0$) to the zero vector
#####################################
h = nd.zeros(shape=(1, num_hidden), ctx=ctx)
c = nd.zeros(shape=(1, num_hidden), ctx=ctx)
#####################################
# For num_chars iterations,
# 1) feed in the current input
    # 2) sample the next character from the output distribution
# 3) add sampled character to the decoded string
# 4) prepare the sampled character as a one_hot (to be the next input)
#####################################
for i in range(num_chars):
outputs, h, c = lstm_rnn(input, h, c, temperature=temperature)
choice = np.random.choice(vocab_size, p=outputs[-1][0].asnumpy())
string += character_list[choice]
input = one_hots([choice])
return string
Developer: HaoranYi, Project: gitProj, Lines: 31, Source: gru.py
Example 2: __init__
def __init__(self, dataset, ctx, labels=None, shape=None, label_shape=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if labels is not None:
llen = 0
for cond in labels:
llen += (dataset._label == cond).sum()
self._length = llen
else:
self._length = len(dataset)
if shape is None:
shape = dataset._data.shape[1:]
if label_shape is None:
label_shape = dataset._label.shape[1:]
self._data = nd.zeros([self._length] + list(shape), dtype='float32', ctx=ctx)
self._label = nd.zeros([self._length] + list(label_shape), dtype='int32', ctx=ctx)
uniques = set()
i = 0
for dat, dlab in dataset:
lab = dlab.item()
if labels is None or np.any([lab == cond for cond in labels]):
self._data[i] = dat
self._label[i] = lab
i += 1
uniques.add(lab)
self.classes = list(uniques)
Developer: chr5tphr, Project: ecGAN, Lines: 29, Source: data.py
Example 3: get_parameters
def get_parameters():
# parameters for INPUT gate
W_xi = nd.random_normal(scale=config.std, shape=(config.input_dim, config.hidden_dim))
W_hi = nd.random_normal(scale=config.std, shape=(config.hidden_dim, config.hidden_dim))
b_i = nd.zeros(shape=config.hidden_dim)
# parameters for FORGET gate
W_xf = nd.random_normal(scale=config.std, shape=(config.input_dim, config.hidden_dim))
W_hf = nd.random_normal(scale=config.std, shape=(config.hidden_dim, config.hidden_dim))
b_f = nd.zeros(shape=config.hidden_dim)
# parameters for OUTPUT gate
W_xo = nd.random_normal(scale=config.std, shape=(config.input_dim, config.hidden_dim))
W_ho = nd.random_normal(scale=config.std, shape=(config.hidden_dim, config.hidden_dim))
b_o = nd.zeros(shape=config.hidden_dim)
# parameters for memory cell
W_xc = nd.random_normal(scale=config.std, shape=(config.input_dim, config.hidden_dim))
W_hc = nd.random_normal(scale=config.std, shape=(config.hidden_dim, config.hidden_dim))
b_c = nd.zeros(shape=config.hidden_dim)
# output layer
W_hy = nd.random_normal(scale=config.std, shape=(config.hidden_dim, config.output_dim))
b_y = nd.zeros(shape=config.output_dim)
parameters = [W_xi, W_hi, b_i,
W_xf, W_hf, b_f,
W_xo, W_ho, b_o,
W_xc, W_hc, b_c,
W_hy, b_y]
for parameter in parameters:
parameter.attach_grad()
return parameters
Developer: dolphinsUnderMoon, Project: HoloXon, Lines: 35, Source: lstm.py
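For context, here is a minimal sketch of the LSTM step these parameters would drive. It is not part of the original lstm.py; the function name lstm_step and the use of plain nd.dot/nd.sigmoid/nd.tanh are our assumptions, with the parameter list unpacked in the order get_parameters() returns it. The initial H and C would typically be zero states created with nd.zeros, as in Example 1.

from mxnet import nd

def lstm_step(X, H, C, parameters):
    # Unpack in the order get_parameters() returns the parameters.
    [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o,
     W_xc, W_hc, b_c, W_hy, b_y] = parameters
    I = nd.sigmoid(nd.dot(X, W_xi) + nd.dot(H, W_hi) + b_i)     # input gate
    F = nd.sigmoid(nd.dot(X, W_xf) + nd.dot(H, W_hf) + b_f)     # forget gate
    O = nd.sigmoid(nd.dot(X, W_xo) + nd.dot(H, W_ho) + b_o)     # output gate
    C_tilde = nd.tanh(nd.dot(X, W_xc) + nd.dot(H, W_hc) + b_c)  # candidate cell
    C = F * C + I * C_tilde                                     # new cell state
    H = O * nd.tanh(C)                                          # new hidden state
    Y = nd.dot(H, W_hy) + b_y                                   # output layer
    return Y, H, C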
Example 4: get_parameters
def get_parameters():
# parameters for UPDATE gate
W_xz = nd.random_normal(scale=config.std, shape=(config.input_dim, config.hidden_dim))
W_hz = nd.random_normal(scale=config.std, shape=(config.hidden_dim, config.hidden_dim))
b_z = nd.zeros(shape=config.hidden_dim)
# parameters for RESET gate
W_xr = nd.random_normal(scale=config.std, shape=(config.input_dim, config.hidden_dim))
W_hr = nd.random_normal(scale=config.std, shape=(config.hidden_dim, config.hidden_dim))
b_r = nd.zeros(shape=config.hidden_dim)
# parameters for candidate hidden state
W_xh = nd.random_normal(scale=config.std, shape=(config.input_dim, config.hidden_dim))
W_hh = nd.random_normal(scale=config.std, shape=(config.hidden_dim, config.hidden_dim))
b_h = nd.zeros(shape=config.hidden_dim)
# output layer
W_hy = nd.random_normal(scale=config.std, shape=(config.hidden_dim, config.output_dim))
b_y = nd.zeros(shape=config.output_dim)
parameters = [W_xz, W_hz, b_z,
W_xr, W_hr, b_r,
W_xh, W_hh, b_h,
W_hy, b_y]
for parameter in parameters:
parameter.attach_grad()
return parameters
Developer: dolphinsUnderMoon, Project: HoloXon, Lines: 29, Source: gru.py
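As with the LSTM above, a minimal sketch of the matching GRU step (again our own illustration, not from the original gru.py), unpacking the parameter list in the order get_parameters() returns it:

from mxnet import nd

def gru_step(X, H, parameters):
    [W_xz, W_hz, b_z, W_xr, W_hr, b_r,
     W_xh, W_hh, b_h, W_hy, b_y] = parameters
    Z = nd.sigmoid(nd.dot(X, W_xz) + nd.dot(H, W_hz) + b_z)         # update gate
    R = nd.sigmoid(nd.dot(X, W_xr) + nd.dot(H, W_hr) + b_r)         # reset gate
    H_tilde = nd.tanh(nd.dot(X, W_xh) + nd.dot(R * H, W_hh) + b_h)  # candidate state
    H = Z * H + (1 - Z) * H_tilde                                   # new hidden state
    Y = nd.dot(H, W_hy) + b_y                                       # output layer
    return Y, H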
Example 5: forward
def forward(self,X,lrp_aware=False):
'''
Realizes the forward pass of an input through the convolution layer.
Parameters
----------
X : mxnet.ndarray.ndarray.NDArray
a network input, shaped (N,H,W,D), with
N = batch size
            H, W, D = input size in height, width, depth
lrp_aware : bool
            controls whether the forward pass is computed with awareness of multiple upcoming
            LRP calls. This sacrifices speed in the forward pass but saves time if several LRP
            calls follow for the current X, e.g. with different parameter settings or for multiple
            target classes.
Returns
-------
Y : mxnet.ndarray.ndarray.NDArray
the layer outputs.
'''
self.lrp_aware = lrp_aware
self.X = X
N,H,W,D = X.shape
hf, wf, df, nf = self.W.shape
hstride, wstride = self.stride
numfilters = self.n
#assume the given pooling and stride parameters are carefully chosen.
Hout = (H - hf) // hstride + 1
Wout = (W - wf) // wstride + 1
#initialize pooled output
self.Y = nd.zeros((N,Hout,Wout,numfilters), ctx=self.ctx, dtype=self.dtype)
if self.lrp_aware:
self.Z = nd.zeros((N, Hout, Wout, hf, wf, df, nf), ctx=self.ctx, dtype=self.dtype) #initialize container for precomputed forward messages
for i in range(Hout):
for j in range(Wout):
self.Z[:,i,j,...] = nd.expand_dims(self.W, axis=0) * nd.expand_dims(self.X[:, i*hstride:i*hstride+hf , j*wstride:j*wstride+wf , :], axis=4) # N, hf, wf, df, nf
self.Y[:,i,j,:] = self.Z[:,i,j,...].sum(axis=(1,2,3)) + self.B
else:
for i in range(Hout):
for j in range(Wout):
                    self.Y[:,i,j,:] = nd.sum(nd.expand_dims(X[:, i*hstride:i*hstride+hf, j*wstride:j*wstride+wf, :].transpose((1,2,3,0)), 4) * nd.expand_dims(self.W, 3), axis=(0,1,2)) + self.B
return self.Y
Developer: sebastian-lapuschkin, Project: lrp_toolbox, Lines: 52, Source: convolution.py
Example 6: train_and_predict_rnn
def train_and_predict_rnn(rnn, is_random_iter, num_epochs, num_steps,
num_hiddens, lr, clipping_theta, batch_size,
vocab_size, pred_period, pred_len, prefixes,
get_params, get_inputs, ctx, corpus_indices,
idx_to_char, char_to_idx, is_lstm=False):
"""Train an RNN model and predict the next item in the sequence."""
if is_random_iter:
data_iter = data_iter_random
else:
data_iter = data_iter_consecutive
params = get_params()
loss = gloss.SoftmaxCrossEntropyLoss()
for epoch in range(1, num_epochs + 1):
if not is_random_iter:
state_h = nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx)
if is_lstm:
state_c = nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx)
train_l_sum = nd.array([0], ctx=ctx)
train_l_cnt = 0
for X, Y in data_iter(corpus_indices, batch_size, num_steps, ctx):
if is_random_iter:
state_h = nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx)
if is_lstm:
state_c = nd.zeros(shape=(batch_size, num_hiddens),
ctx=ctx)
else:
state_h = state_h.detach()
if is_lstm:
state_c = state_c.detach()
with autograd.record():
if is_lstm:
outputs, state_h, state_c = rnn(
get_inputs(X, vocab_size), state_h, state_c, *params)
else:
outputs, state_h = rnn(
get_inputs(X, vocab_size), state_h, *params)
y = Y.T.reshape((-1,))
outputs = nd.concat(*outputs, dim=0)
l = loss(outputs, y)
l.backward()
grad_clipping(params, clipping_theta, ctx)
sgd(params, lr, 1)
train_l_sum = train_l_sum + l.sum()
train_l_cnt += l.size
if epoch % pred_period == 0:
print("\nepoch %d, perplexity %f"
% (epoch, (train_l_sum / train_l_cnt).exp().asscalar()))
for prefix in prefixes:
print(' - ', predict_rnn(
rnn, prefix, pred_len, params, num_hiddens, vocab_size,
ctx, idx_to_char, char_to_idx, get_inputs, is_lstm))
Developer: wisemaker, Project: gluon-tutorials-zh, Lines: 52, Source: utils.py
Example 7: train_ch7
def train_ch7(trainer_fn, states, hyperparams, features, labels, batch_size=10,
num_epochs=2):
"""Train a linear regression model."""
net, loss = linreg, squared_loss
w, b = nd.random.normal(scale=0.01, shape=(features.shape[1], 1)), nd.zeros(1)
w.attach_grad()
b.attach_grad()
def eval_loss():
return loss(net(features, w, b), labels).mean().asscalar()
ls = [eval_loss()]
data_iter = gdata.DataLoader(
gdata.ArrayDataset(features, labels), batch_size, shuffle=True)
for _ in range(num_epochs):
start = time.time()
for batch_i, (X, y) in enumerate(data_iter):
with autograd.record():
l = loss(net(X, w, b), y).mean()
l.backward()
trainer_fn([w, b], states, hyperparams)
if (batch_i + 1) * batch_size % 100 == 0:
ls.append(eval_loss())
print('loss: %f, %f sec per epoch' % (ls[-1], time.time() - start))
set_figsize()
plt.plot(np.linspace(0, num_epochs, len(ls)), ls)
plt.xlabel('epoch')
plt.ylabel('loss')
Developer: tsintian, Project: d2l-zh, Lines: 28, Source: utils.py
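train_ch7 expects trainer_fn(params, states, hyperparams) to update the parameters in place. A minimal compatible trainer for illustration, assuming plain mini-batch SGD with the learning rate under the 'lr' key (the name sgd_trainer is ours):

def sgd_trainer(params, states, hyperparams):
    # Stateless mini-batch SGD; `states` is unused here.
    for p in params:
        p[:] = p - hyperparams['lr'] * p.grad

# Hypothetical call, assuming `features` and `labels` already exist:
# train_ch7(sgd_trainer, None, {'lr': 0.02}, features, labels)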
Example 8: __setitem__
def __setitem__(self, tokens, new_embedding):
"""Updates embedding vectors for tokens.
If self.allow_extend is True, vectors for previously unknown tokens can be introduced.
Parameters
----------
tokens : hashable object or a list or tuple of hashable objects
A token or a list of tokens whose embedding vector are to be updated.
new_embedding : mxnet.ndarray.NDArray
An NDArray to be assigned to the embedding vectors of `tokens`. Its length must be equal
to the number of `tokens` and its width must be equal to the dimension of embedding of
the glossary. If `tokens` is a singleton, it must be 1-D or 2-D. If `tokens` is a list
of multiple strings, it must be 2-D.
"""
if self.allow_extend and self._idx_to_vec is None:
# Initialize self._idx_to_vec
assert C.UNK_IDX == 0
self._idx_to_vec = self._init_unknown_vec(shape=(1, new_embedding.shape[-1]))
tokens = self._check_vector_update(tokens, new_embedding)
if self.allow_extend:
# Add new / previously unknown tokens
for token in filter(lambda t: t not in self._token_to_idx, tokens):
idx = len(self._token_to_idx)
self._token_to_idx[token] = idx
self._idx_to_token.append(token)
num_extended = len(self._token_to_idx) - self.idx_to_vec.shape[0]
if num_extended == 1:
warnings.warn(
'When adding new tokens via TokenEmbedding.__setitem__ '
'the internal embedding matrix needs to be reallocated. '
'Users are therefore encouraged to batch their updates '
'(i.e. add multiple new tokens at a time).')
# Extend shape of idx_to_vec
idx_to_vec = nd.zeros(shape=(len(self._token_to_idx),
self.idx_to_vec.shape[1]))
idx_to_vec[:self.idx_to_vec.shape[0]] = self._idx_to_vec
self._idx_to_vec = idx_to_vec
indices = []
for token in tokens:
if token in self._token_to_idx:
indices.append(self._token_to_idx[token])
else:
if self.unknown_token:
raise KeyError(('Token "{}" is unknown. To update the embedding vector for an'
' unknown token, please explicitly include "{}" as the '
'`unknown_token` in `tokens`. This is to avoid unintended '
'updates.').format(token, self._idx_to_token[C.UNK_IDX]))
else:
raise KeyError(('Token "{}" is unknown. Updating the embedding vector for an '
'unknown token is not allowed because `unknown_token` is not '
'specified.').format(token))
self._idx_to_vec[nd.array(indices)] = new_embedding
Developer: hridaydutta123, Project: gluon-nlp, Lines: 59, Source: token_embedding.py
Example 9: test_ndarray2numpy
def test_ndarray2numpy(self):
m = gluon.nn.Embedding(14000, 128)
m.initialize()
ind = nd.zeros((700000, 128))
x = m(ind)
x.shape
test = x.asnumpy()
assert (x.shape == test.shape)
Developer: dpom, Project: incubator-mxnet, Lines: 8, Source: test_large_array.py
Example 10: corr2d
def corr2d(X, K):
"""Compute 2D cross-correlation."""
h, w = K.shape
Y = nd.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j] = (X[i: i + h, j: j + w] * K).sum()
return Y
Developer: tsintian, Project: d2l-zh, Lines: 8, Source: utils.py
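A quick sanity check of corr2d on a toy input of our own (the kernel slides over every 2x2 window and sums the element-wise products):

from mxnet import nd

X = nd.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
K = nd.array([[0, 1], [2, 3]])
print(corr2d(X, K))
# [[19. 25.]
#  [37. 43.]]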
Example 11: getfake
def getfake(samples, dimensions, epsilon):
wfake = nd.random_normal(shape=(dimensions)) # fake weight vector for separation
bfake = nd.random_normal(shape=(1)) # fake bias
wfake = wfake / nd.norm(wfake) # rescale to unit length
    # making some linearly separable data, simply by choosing the labels accordingly
X = nd.zeros(shape=(samples, dimensions))
Y = nd.zeros(shape=(samples))
i = 0
while (i < samples):
tmp = nd.random_normal(shape=(1, dimensions))
margin = nd.dot(tmp, wfake) + bfake
if (nd.norm(tmp).asscalar() < 3) & (abs(margin.asscalar()) > epsilon):
X[i, :] = tmp
Y[i] = 2 * (margin > 0) - 1
i += 1
return X, Y
Developer: sharmasx, Project: mlheaven, Lines: 18, Source: mxnet_samples.py
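A hypothetical call: draw 100 separable 2-D points whose margin against the fake hyperplane is at least 0.1.

X, Y = getfake(samples=100, dimensions=2, epsilon=0.1)
print(X.shape, Y.shape)  # (100, 2) (100,)
print(Y[:5])             # labels are in {-1, 1}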
Example 12: plotscore
def plotscore(w, b):
    # plot contour lines of the decision score w^T x + b over a 2-D grid
xgrid = np.arange(-3, 3, 0.02)
ygrid = np.arange(-3, 3, 0.02)
xx, yy = np.meshgrid(xgrid, ygrid)
zz = nd.zeros(shape=(xgrid.size, ygrid.size, 2))
zz[:, :, 0] = nd.array(xx)
zz[:, :, 1] = nd.array(yy)
vv = nd.dot(zz, w) + b
CS = plt.contour(xgrid, ygrid, vv.asnumpy())
plt.clabel(CS, inline=1, fontsize=10)
Developer: sharmasx, Project: mlheaven, Lines: 10, Source: mxnet_samples.py
Example 13: transform_mnist
def transform_mnist(data, label):
# transform a batch of examples
if resize:
n = data.shape[0]
new_data = nd.zeros((n, resize, resize, data.shape[3]))
for i in range(n):
new_data[i] = image.imresize(data[i], resize, resize)
data = new_data
    # change data from batch x height x width x channel to batch x channel x height x width
return nd.transpose(data.astype('float32'), (0, 3, 1, 2)) / 255, label.astype('float32')
Developer: liushuchun, Project: machinelearning, Lines: 10, Source: utils.py
Example 14: transform_mnist
def transform_mnist(data, label):
# transform a batch of examples
    if resize:  # resize the images
        n = data.shape[0]  # number of samples; shape goes from n x 28 x 28 x 1 to n x resize x resize x 1
        new_data = nd.zeros((n, resize, resize, data.shape[3]))  # data.shape[3] is the number of channels
for i in range(n):
new_data[i] = image.imresize(data[i], resize, resize)
data = new_data
    # change data from batch x height x width x channel (axes 0, 1, 2, 3)
    # to batch x channel x height x width (axes 0, 3, 1, 2)
return nd.transpose(data.astype('float32'), (0,3,1,2))/255, label.astype('float32')
Developer: dyz-zju, Project: MVision, Lines: 10, Source: utils.py
Example 15: predict_rnn
def predict_rnn(rnn, prefix, num_chars, params, hidden_dim, ctx, idx_to_char,
char_to_idx, get_inputs, is_lstm=False):
"""Predict the next chars given the prefix."""
prefix = prefix.lower()
state_h = nd.zeros(shape=(1, hidden_dim), ctx=ctx)
if is_lstm:
state_c = nd.zeros(shape=(1, hidden_dim), ctx=ctx)
output = [char_to_idx[prefix[0]]]
for i in range(num_chars + len(prefix)):
X = nd.array([output[-1]], ctx=ctx)
if is_lstm:
Y, state_h, state_c = rnn(get_inputs(X), state_h, state_c, *params)
else:
Y, state_h = rnn(get_inputs(X), state_h, *params)
if i < len(prefix)-1:
next_input = char_to_idx[prefix[i+1]]
else:
next_input = int(Y[0].argmax(axis=1).asscalar())
output.append(next_input)
return ''.join([idx_to_char[i] for i in output])
Developer: noticeable, Project: gluon-tutorials-zh, Lines: 20, Source: utils.py
Example 16: sample
def sample(prefix, num_chars, temperature=1.0):
string = prefix
prefix_numerical = [character_dict[char] for char in prefix]
input = one_hots(prefix_numerical)
    sample_state = nd.zeros(shape=(1, num_hidden), ctx=ctx)
for i in range(num_chars):
outputs, sample_state = simple_rnn(input, sample_state, temperature=
temperature)
        choice = np.random.choice(77, p=outputs[-1][0].asnumpy())  # 77 = vocabulary size
        string += character_list[choice]
        input = one_hots([choice])
    return string
Developer: HaoranYi, Project: gitProj, Lines: 11, Source: rnn.py
Example 17: smooth
def smooth(label, classes, eta=0.1):
if isinstance(label, nd.NDArray):
label = [label]
smoothed = []
for l in label:
ind = l.astype('int')
res = nd.zeros((ind.shape[0], classes), ctx = l.context)
res += eta/classes
res[nd.arange(ind.shape[0], ctx = l.context), ind] = 1 - eta + eta/classes
smoothed.append(res)
return smoothed
Developer: xiayongtao, Project: gluon-cv, Lines: 11, Source: train_imagenet_nasnet.py
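A small worked example of the smoothing arithmetic, using our own numbers: with classes=4 and eta=0.1, the true class gets 1 - eta + eta/classes = 0.925 and every other class gets eta/classes = 0.025, so each row still sums to 1.

from mxnet import nd

labels = nd.array([0, 2, 1])
smoothed = smooth(labels, classes=4, eta=0.1)
print(smoothed[0])
# [[0.925 0.025 0.025 0.025]
#  [0.025 0.025 0.925 0.025]
#  [0.025 0.925 0.025 0.025]]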
Example 18: forward
def forward(self, cls_pred, box_pred, cls_target, box_target):
"""Compute loss in entire batch across devices."""
# require results across different devices at this time
cls_pred, box_pred, cls_target, box_target = [_as_list(x) \
for x in (cls_pred, box_pred, cls_target, box_target)]
# cross device reduction to obtain positive samples in entire batch
num_pos = []
for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
pos_samples = (ct > 0)
num_pos.append(pos_samples.sum())
num_pos_all = sum([p.asscalar() for p in num_pos])
if num_pos_all < 1:
# no positive samples found, return dummy losses
return nd.zeros((1,)), nd.zeros((1,)), nd.zeros((1,))
# compute element-wise cross entropy loss and sort, then perform negative mining
cls_losses = []
box_losses = []
sum_losses = []
for cp, bp, ct, bt in zip(*[cls_pred, box_pred, cls_target, box_target]):
pred = nd.log_softmax(cp, axis=-1)
pos = ct > 0
cls_loss = -nd.pick(pred, ct, axis=-1, keepdims=False)
rank = (cls_loss * (pos - 1)).argsort(axis=1).argsort(axis=1)
hard_negative = rank < (pos.sum(axis=1) * self._negative_mining_ratio).expand_dims(-1)
# mask out if not positive or negative
cls_loss = nd.where((pos + hard_negative) > 0, cls_loss, nd.zeros_like(cls_loss))
cls_losses.append(nd.sum(cls_loss, axis=0, exclude=True) / num_pos_all)
bp = _reshape_like(nd, bp, bt)
box_loss = nd.abs(bp - bt)
box_loss = nd.where(box_loss > self._rho, box_loss - 0.5 * self._rho,
(0.5 / self._rho) * nd.square(box_loss))
# box loss only apply to positive samples
box_loss = box_loss * pos.expand_dims(axis=-1)
box_losses.append(nd.sum(box_loss, axis=0, exclude=True) / num_pos_all)
sum_losses.append(cls_losses[-1] + self._lambd * box_losses[-1])
return sum_losses, cls_losses, box_losses
Developer: mohamedelsiesyibra, Project: gluon-cv, Lines: 39, Source: loss.py
Example 19: _build_vocab
def _build_vocab(data_name, train_dataset, test_dataset):
all_token = []
max_len = 0
for i, line in enumerate(train_dataset):
train_dataset[i][0] = _clean_str(line[0], data_name)
line = train_dataset[i][0].split()
max_len = max_len if max_len > len(line) else len(line)
all_token.extend(line)
for i, line in enumerate(test_dataset):
test_dataset[i][0] = _clean_str(line[0], data_name)
line = test_dataset[i][0].split()
max_len = max_len if max_len > len(line) else len(line)
all_token.extend(line)
vocab = nlp.Vocab(nlp.data.count_tokens(all_token))
vocab.set_embedding(nlp.embedding.create('Word2Vec', source='GoogleNews-vectors-negative300'))
for word in vocab.embedding._idx_to_token:
if (vocab.embedding[word] == nd.zeros(300)).sum() == 300:
vocab.embedding[word] = nd.random.normal(-1.0, 1.0, 300)
vocab.embedding['<unk>'] = nd.zeros(300)
vocab.embedding['<pad>'] = nd.zeros(300)
vocab.embedding['<bos>'] = nd.zeros(300)
vocab.embedding['<eos>'] = nd.zeros(300)
print('maximum length (in tokens): ', max_len)
return vocab, max_len
Developer: hridaydutta123, Project: gluon-nlp, Lines: 24, Source: process_data.py
Example 20: set_embedding
def set_embedding(self, *embeddings):
"""Attaches one or more embeddings to the indexed text tokens.
Parameters
----------
embeddings : None or tuple of :class:`gluonnlp.embedding.TokenEmbedding` instances
The embedding to be attached to the indexed tokens. If a tuple of multiple embeddings
are provided, their embedding vectors will be concatenated for the same token.
"""
if len(embeddings) == 1 and embeddings[0] is None:
self._embedding = None
return
for embs in embeddings:
assert isinstance(embs, emb.TokenEmbedding), \
'The argument `embeddings` must be an instance or a list of instances of ' \
'`gluonnlp.embedding.TokenEmbedding`.'
assert all([embs.unknown_token for embs in embeddings]) or \
all([not embs.unknown_token for embs in embeddings]), \
'Either all or none of the TokenEmbeddings must have an ' \
'unknown_token set.'
new_embedding = emb.TokenEmbedding(self.unknown_token, allow_extend=False)
new_embedding._token_to_idx = self.token_to_idx
new_embedding._idx_to_token = self.idx_to_token
new_vec_len = sum(embs.idx_to_vec.shape[1] for embs in embeddings
if embs and embs.idx_to_vec is not None)
new_idx_to_vec = nd.zeros(shape=(len(self), new_vec_len))
col_start = 0
# Concatenate all the embedding vectors in embedding.
for embs in embeddings:
if embs and embs.idx_to_vec is not None:
col_end = col_start + embs.idx_to_vec.shape[1]
                # Concatenate vectors of the unknown token.
new_idx_to_vec[0, col_start:col_end] = embs.idx_to_vec[0]
new_idx_to_vec[1:, col_start:col_end] = embs[self._idx_to_token[1:]]
col_start = col_end
new_embedding._idx_to_vec = new_idx_to_vec
self._embedding = new_embedding
Developer: hridaydutta123, Project: gluon-nlp, Lines: 45, Source: vocab.py
Note: the mxnet.nd.zeros examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code. Do not reproduce this article without permission.