Python ndarray.array Function Code Examples


This article collects and summarizes typical usage examples of the mxnet.ndarray.array function in Python. If you have been wondering what exactly array does, how to call it, or what real uses of it look like, the curated code examples below should help.



The following presents 20 code examples of the array function, drawn from open-source projects and ordered by popularity by default.
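
Before diving in, here is a minimal sketch of nd.array itself: it copies a Python list or NumPy array into an MXNet NDArray, with optional ctx and dtype arguments. The excerpts below also generally assume imports along these lines.

import numpy as np
from mxnet import ndarray as nd

a = nd.array([[1, 2, 3], [4, 5, 6]])      # from a nested list (float32 by default)
b = nd.array(np.arange(6).reshape(2, 3))  # from a NumPy array (dtype is inherited)
c = nd.array([1, 2, 3], dtype='int32')    # with an explicit dtype
print(a.shape, b.dtype, c.asnumpy())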

Example 1: test_word_embedding_similarity_evaluation_models

def test_word_embedding_similarity_evaluation_models(similarity_function):
    try:
        from scipy import stats
    except ImportError:
        raise ImportError('This testcase requires scipy.')

    dataset = nlp.data.WordSim353()

    counter = nlp.data.utils.Counter(w for wpair in dataset for w in wpair[:2])
    vocab = nlp.vocab.Vocab(counter)
    vocab.set_embedding(
        nlp.embedding.create('fasttext', source='wiki.simple',
                             embedding_root='tests/data/embedding'))

    data = [[vocab[d[0]], vocab[d[1]], d[2]] for d in dataset]
    words1, words2, scores = zip(*data)

    evaluator = nlp.embedding.evaluation.WordEmbeddingSimilarity(
        vocab.embedding.idx_to_vec,
        similarity_function=similarity_function)
    evaluator.initialize()

    words1, words2 = nd.array(words1), nd.array(words2)
    pred_similarity = evaluator(words1, words2)

    sr = stats.spearmanr(pred_similarity.asnumpy(), np.array(scores))
    assert np.isclose(0.6076485693769645, sr.correlation)
Developer: hridaydutta123 | Project: gluon-nlp | Lines: 27 | Source: test_vocab_embed.py


Example 2: main

def main(ctx):
    calcEngine = CALC()
    
    tmp = np.asarray([k for k in range(6)])
    matA = nd.array(np.reshape(tmp, (2, 3))).as_in_context(ctx)

    tmp = np.asarray([k * 10 for k in range(6)])
    matB = nd.array(np.reshape(tmp, (2, 3))).as_in_context(ctx)

    
    num = 1000
    
    if 1:
        t0 = time.time()
        for k in range(num):
            matD = calcEngine.calc_sum(matA, matB)
        t1 = time.time() 
        print('dll: time cost {}ms'.format(float(t1 - t0) * 1000 / num))
        print(matD)

    if 1:
        t0 = time.time()
        for k in range(num):
            matC = calc_sum(matA, matB)
        t1 = time.time() 
        print('py: time cost {}ms'.format(float(t1 - t0) * 1000 / num))
        print(matC)
Developer: z01nl1o02 | Project: tests | Lines: 27 | Source: test.py
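
Example 2 benchmarks the DLL-backed calcEngine.calc_sum against a module-level calc_sum that the excerpt does not show. A plausible stand-in, assuming the pure-Python path is just an elementwise NDArray sum (an assumption, not the project's actual code):

def calc_sum(matA, matB):
    # elementwise sum; runs on whatever context the inputs live on
    return matA + matB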


Example 3: _preprocess

 def _preprocess(self, data):
     input_shape = self.signature['inputs'][0]['data_shape']
     height, width = input_shape[2:]
     img_arr = image.read(data[0])
     img_arr = image.resize(img_arr, width, height)
     img_arr = image.color_normalize(img_arr, nd.array([127.5]), nd.array([127.5]))
     img_arr = image.transform_shape(img_arr)
     return [img_arr]
Developer: codealphago | Project: mxnet-model-server | Lines: 8 | Source: pixel2pixel_service.py


Example 4: train

def train(input_variable, target_variable, encoder, decoder, teacher_forcing_ratio,
          encoder_optimizer, decoder_optimizer, criterion, max_length, ctx):
    with autograd.record():
        loss = F.zeros((1,), ctx=ctx)

        encoder_hidden = encoder.initHidden(ctx)

        input_length = input_variable.shape[0]
        target_length = target_variable.shape[0]

        encoder_outputs, encoder_hidden = encoder(
                input_variable.expand_dims(0), encoder_hidden)

        if input_length < max_length:
            encoder_outputs = F.concat(encoder_outputs.flatten(),
                F.zeros((max_length - input_length, encoder.hidden_size), ctx=ctx), dim=0)
        else:
            encoder_outputs = encoder_outputs.flatten()



        decoder_input = F.array([SOS_token], ctx=ctx)

        decoder_hidden = encoder_hidden

        use_teacher_forcing = random.random() < teacher_forcing_ratio

        if use_teacher_forcing:
            # Teacher forcing: Feed the target as the next input
            for di in range(target_length):
                decoder_output, decoder_hidden, decoder_attention = decoder(
                    decoder_input, decoder_hidden, encoder_outputs)

                loss = F.add(loss, criterion(decoder_output, target_variable[di]))
                print(criterion(decoder_output, target_variable[di]))
                decoder_input = target_variable[di]  # Teacher forcing

        else:
            # Without teacher forcing: use its own predictions as the next input
            for di in range(target_length):
                decoder_output, decoder_hidden, decoder_attention = decoder(
                    decoder_input, decoder_hidden, encoder_outputs)
                topi = decoder_output.argmax(axis=1)

                decoder_input = F.array([topi.asscalar()], ctx=ctx)

                loss = F.add(loss, criterion(decoder_output, target_variable[di]))

                if topi.asscalar() == EOS_token:
                    break

        loss.backward()

    encoder_optimizer.step(1)
    decoder_optimizer.step(1)

    return loss.asscalar()/target_length
Developer: ZiyueHuang | Project: MXSeq2Seq | Lines: 57 | Source: seq2seq.py


Example 5: next

 def next(self):
     if self._fetcher.iter_next():
         tic = time.time()
         data_batch = self._fetcher.get()
         print('Waited for {} seconds'.format(time.time() - tic))
     else:
         raise StopIteration
     
     return DataBatch(data=[array(data_batch[0])], label=[array(data_batch[1])])
Developer: zgsxwsdxg | Project: ademxapp | Lines: 9 | Source: data.py


Example 6: _score_sentence

 def _score_sentence(self, feats, tags):
     # Gives the score of a provided tag sequence
     score = nd.array([0])
     tags = nd.concat(nd.array([self.tag2idx[START_TAG]]), *tags, dim=0)
     for i, feat in enumerate(feats):
         score = score + \
             self.transitions[to_scalar(tags[i+1]), to_scalar(tags[i])] + feat[to_scalar(tags[i+1])]
     score = score + self.transitions[self.tag2idx[STOP_TAG],
                                      to_scalar(tags[int(tags.shape[0]-1)])]
     return score
Developer: CoderHHX | Project: incubator-mxnet | Lines: 10 | Source: lstm_crf.py
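
Example 6 leans on a to_scalar helper that the excerpt omits. In MXNet's lstm_crf example it is essentially a one-liner; a sketch along those lines so the snippet reads standalone:

def to_scalar(x):
    # turn a 1-element NDArray into a plain Python int, usable as an index
    return int(x.asscalar())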


Example 7: data_iter

def data_iter():
    # generate random indices
    idx = list(range(num_examples))
    random.shuffle(idx)  # shuffle the example order
    for i in range(0, num_examples, batch_size):  # 1000 examples, fetched 10 at a time
        j = nd.array(idx[i: min(i + batch_size, num_examples)])
        yield nd.take(X, j), nd.take(y, j)  # gather the rows of X and y at indices j
Developer: gonglixue | Project: PRML_Python | Lines: 7 | Source: LinearRegression.py
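
A quick way to exercise the generator from Example 7, assuming X, y, num_examples, and batch_size are defined as in the surrounding script:

for data, label in data_iter():
    # each iteration yields one mini-batch of at most batch_size rows
    print(data.shape, label.shape)
    break  # inspect only the first batch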


Example 8: data_generator

def data_generator(batch_size):
    index = list(range(config.training_size))
    random.shuffle(index)

    for i in range(0, config.training_size, batch_size):
        j = nd.array(index[i:min(i + batch_size, config.training_size)])
        yield nd.take(X, j), nd.take(y, j)
Developer: dolphinsUnderMoon | Project: HoloXon | Lines: 7 | Source: linear_regression.py


Example 9: _forward_alg

    def _forward_alg(self, feats):
        # Do the forward algorithm to compute the partition function
        alphas = [[-10000.] * self.tagset_size]
        alphas[0][self.tag2idx[START_TAG]] = 0.
        alphas = nd.array(alphas)

        # Iterate through the sentence
        for feat in feats:
            alphas_t = []  # The forward variables at this timestep
            for next_tag in range(self.tagset_size):
                # broadcast the emission score: it is the same regardless of
                # the previous tag
                emit_score = feat[next_tag].reshape((1, -1))
                # the ith entry of trans_score is the score of transitioning to
                # next_tag from i
                trans_score = self.transitions[next_tag].reshape((1, -1))
                # The ith entry of next_tag_var is the value for the
                # edge (i -> next_tag) before we do log-sum-exp
                next_tag_var = alphas + trans_score + emit_score
                # The forward variable for this tag is log-sum-exp of all the
                # scores.
                alphas_t.append(log_sum_exp(next_tag_var))
            alphas = nd.concat(*alphas_t, dim=0).reshape((1, -1))
        terminal_var = alphas + self.transitions[self.tag2idx[STOP_TAG]]
        alpha = log_sum_exp(terminal_var)
        return alpha
Developer: CoderHHX | Project: incubator-mxnet | Lines: 26 | Source: lstm_crf.py
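
Example 9, like Example 6, also depends on a log_sum_exp helper defined elsewhere in lstm_crf.py. A numerically stable sketch consistent with how the forward algorithm applies it to a (1, tagset_size) row:

def log_sum_exp(vec):
    # shift by the max before exponentiating so exp() cannot overflow
    max_score = nd.max(vec).asscalar()
    return nd.log(nd.sum(nd.exp(vec - max_score))) + max_score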


Example 10: data_iter

def data_iter():
    # generate a random index ordering
    idx = list(range(num_examples))
    random.shuffle(idx)  # shuffle
    for i in range(0, num_examples, batch_size):  # 0, 10, 20, ...
        j = nd.array(idx[i:min(i + batch_size, num_examples)])  # draw up to 10 random examples
        yield nd.take(X, j), nd.take(y, j)  # examples and labels; yield turns this into a generator
Developer: dyz-zju | Project: MVision | Lines: 7 | Source: 0_linear_regression_dis2_with_bis.py


Example 11: test_out_grads

def test_out_grads():
    x = nd.ones((3, 5))
    dx = nd.zeros_like(x)
    mark_variables([x], [dx])
    da = None
    db = nd.array([1,2,3,4,5])
    dc = nd.array([5,4,3,2,1])

    with train_section():
        a, b, c = nd.split(x, axis=0, num_outputs=3, squeeze_axis=True)
        backward([a, b, c], [da, db, dc])

    assert (dx.asnumpy() == np.array(
        [[1,1,1,1,1],
         [1,2,3,4,5],
         [5,4,3,2,1]])).all()
Developer: CoderHHX | Project: incubator-mxnet | Lines: 16 | Source: test_contrib_autograd.py


Example 12: calculate_avg_q

def calculate_avg_q(samples, qnet):
    total_q = 0.0
    for i in range(len(samples)):
        state = nd.array(samples[i:i + 1], ctx=qnet.ctx) / float(255.0)
        total_q += qnet.forward(is_train=False, data=state)[0].asnumpy().max(axis=1).sum()
    avg_q_score = total_q / float(len(samples))
    return avg_q_score
Developer: CoderHHX | Project: incubator-mxnet | Lines: 7 | Source: dqn_run_test.py


Example 13: forward

 def forward(self, x):
     if self.scale_factor == 0:
         warnings.warn("Scale factor cannot be 0.")
         return x
     if isinstance(x, np.ndarray):
         return nd.array(x/self.scale_factor)
     return x / self.scale_factor
Developer: luobao-intel | Project: incubator-mxnet | Lines: 7 | Source: transforms.py


Example 14: SGD

def SGD(sym, data_inputs, X, Y, X_test, Y_test, total_iter_num,
        lr=None,
        lr_scheduler=None, prior_precision=1,
        out_grad_f=None,
        initializer=None,
        minibatch_size=100, dev=mx.gpu()):
    if out_grad_f is None:
        label_key = list(set(data_inputs.keys()) - set(['data']))[0]
    exe, params, params_grad, _ = get_executor(sym, dev, data_inputs, initializer)
    optimizer = mx.optimizer.create('sgd', learning_rate=lr,
                                    rescale_grad=X.shape[0] / minibatch_size,
                                    lr_scheduler=lr_scheduler,
                                    wd=prior_precision)
    updater = mx.optimizer.get_updater(optimizer)
    start = time.time()
    for i in range(total_iter_num):
        indices = numpy.random.randint(X.shape[0], size=minibatch_size)
        X_batch = X[indices]
        Y_batch = Y[indices]
        exe.arg_dict['data'][:] = X_batch
        if out_grad_f is None:
            exe.arg_dict[label_key][:] = Y_batch
            exe.forward(is_train=True)
            exe.backward()
        else:
            exe.forward(is_train=True)
            exe.backward(out_grad_f(exe.outputs, nd.array(Y_batch, ctx=dev)))
        for k in params:
            updater(k, params_grad[k], params[k])
        if (i + 1) % 500 == 0:
            end = time.time()
            print("Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start))
            sample_test_acc(exe, X=X_test, Y=Y_test, label_num=10, minibatch_size=100)
            start = time.time()
    return exe, params, params_grad
Developer: CoderHHX | Project: incubator-mxnet | Lines: 35 | Source: algos.py


Example 15: get_image

 def get_image(self,X):
     B,C,H,W = self.shape
     X = np.reshape(X,(28,28))
     X = X[:,:,np.newaxis]
     X = np.tile(X,(1,1,3))
     if H > X.shape[0] or W > X.shape[1]:
         raise RuntimeError
     if H < X.shape[0] or W < X.shape[1]:
         if self.fortrain:
             X, _ = mx.image.random_crop(nd.array(X),(H,W))
         else:
             X,_ = mx.image.center_crop(nd.array(X),(H,W))
         X = np.transpose(X.asnumpy(),(2,0,1))
     else:
         #print "data augment is off"
         X = np.transpose(X,(2,0,1))
     return X
Developer: z01nl1o02 | Project: tests | Lines: 17 | Source: demo.py


Example 16: SGLD

def SGLD(sym, X, Y, X_test, Y_test, total_iter_num,
         data_inputs=None,
         learning_rate=None,
         lr_scheduler=None, prior_precision=1,
         out_grad_f=None,
         initializer=None,
         minibatch_size=100, thin_interval=100, burn_in_iter_num=1000, task='classification',
         dev=mx.gpu()):
    if out_grad_f is None:
        label_key = list(set(data_inputs.keys()) - set(['data']))[0]
    exe, params, params_grad, _ = get_executor(sym, dev, data_inputs, initializer)
    optimizer = mx.optimizer.create('sgld', learning_rate=learning_rate,
                                    rescale_grad=X.shape[0] / minibatch_size,
                                    lr_scheduler=lr_scheduler,
                                    wd=prior_precision)
    updater = mx.optimizer.get_updater(optimizer)
    sample_pool = []
    start = time.time()
    for i in range(total_iter_num):
        indices = numpy.random.randint(X.shape[0], size=minibatch_size)
        X_batch = X[indices]
        Y_batch = Y[indices]
        exe.arg_dict['data'][:] = X_batch
        if out_grad_f is None:
            exe.arg_dict[label_key][:] = Y_batch
            exe.forward(is_train=True)
            exe.backward()
        else:
            exe.forward(is_train=True)
            exe.backward(out_grad_f(exe.outputs, nd.array(Y_batch, ctx=dev)))
        for k in params:
            updater(k, params_grad[k], params[k])
            print(k, nd.norm(params_grad[k]).asnumpy())
        if i < burn_in_iter_num:
            continue
        else:
            if 0 == (i - burn_in_iter_num) % thin_interval:
                if optimizer.lr_scheduler is not None:
                    lr = optimizer.lr_scheduler(optimizer.num_update)
                else:
                    lr = learning_rate
                sample_pool.append([lr, copy_param(exe)])
        if (i + 1) % 100000 == 0:
            end = time.time()
            if task == 'classification':
                print "Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start)
                test_correct, test_total, test_acc = \
                    sample_test_acc(exe, sample_pool=sample_pool, X=X_test, Y=Y_test, label_num=10,
                                    minibatch_size=minibatch_size)
                print "Test %d/%d=%f" % (test_correct, test_total, test_acc)
            else:
                print "Current Iter Num: %d" % (i + 1), "Time Spent: %f" % (end - start), "MSE:",
                print sample_test_regression(exe=exe, sample_pool=sample_pool,
                                             X=X_test,
                                             Y=Y_test, minibatch_size=minibatch_size,
                                             save_path='regression_SGLD.txt')
            start = time.time()
    return exe, sample_pool
Developer: sxjscience | Project: mxnet | Lines: 58 | Source: algos.py


Example 17: grad_clipping

def grad_clipping(params, theta, ctx):
    if theta is not None:
        norm = nd.array([0.0], ctx)
        for p in params:
            norm += nd.sum(p.grad * p.grad)
        norm = nd.sqrt(norm).asscalar()
        if norm > theta:
            for p in params:
                p.grad[:] *= theta / norm
Developer: z01nl1o02 | Project: tests | Lines: 9 | Source: main.py
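
Example 17 rescales every gradient in place once their global L2 norm exceeds theta. A minimal sketch of where the call fits in a from-scratch training step (net, X_batch, lr, and ctx are placeholders, and params is assumed to be a list of NDArrays on which attach_grad() was called):

from mxnet import autograd

with autograd.record():
    loss = net(X_batch)          # hypothetical forward pass producing the loss
loss.backward()
grad_clipping(params, 5.0, ctx)  # clip before applying the update
for p in params:
    p[:] = p - lr * p.grad       # plain SGD step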


Example 18: test_normalize

def test_normalize():
    data_in = np.random.uniform(0, 255, (300, 300, 3)).astype(dtype=np.uint8)
    data_in = transforms.ToTensor()(nd.array(data_in, dtype='uint8'))
    out_nd = transforms.Normalize(mean=(0, 1, 2), std=(3, 2, 1))(data_in)
    data_expected = data_in.asnumpy()
    # ToTensor yields CHW, so the first axis indexes channels
    data_expected[0] = data_expected[0] / 3.0
    data_expected[1] = (data_expected[1] - 1.0) / 2.0
    data_expected[2] = data_expected[2] - 2.0
    assert_almost_equal(data_expected, out_nd.asnumpy())
Developer: bhuWenDongchao | Project: incubator-mxnet | Lines: 9 | Source: test_gluon_data_vision.py


Example 19: test_word_embedding_analogy_evaluation_models

def test_word_embedding_analogy_evaluation_models(analogy_function):
    dataset = nlp.data.GoogleAnalogyTestSet()
    dataset = [d for i, d in enumerate(dataset) if i < 10]

    embedding = nlp.embedding.create('fasttext', source='wiki.simple',
                                     embedding_root='tests/data/embedding')
    counter = nlp.data.utils.Counter(embedding.idx_to_token)
    vocab = nlp.vocab.Vocab(counter)
    vocab.set_embedding(embedding)

    dataset_coded = [[vocab[d[0]], vocab[d[1]], vocab[d[2]], vocab[d[3]]]
                     for d in dataset]
    dataset_coded_nd = nd.array(dataset_coded)

    for k in [1, 3]:
        for exclude_question_words in [True, False]:
            evaluator = nlp.embedding.evaluation.WordEmbeddingAnalogy(
                idx_to_vec=vocab.embedding.idx_to_vec,
                analogy_function=analogy_function, k=k,
                exclude_question_words=exclude_question_words)
            evaluator.initialize()

            words1 = dataset_coded_nd[:, 0]
            words2 = dataset_coded_nd[:, 1]
            words3 = dataset_coded_nd[:, 2]
            pred_idxs = evaluator(words1, words2, words3)

            # If we don't exclude inputs most predictions should be wrong
            words4 = dataset_coded_nd[:, 3]
            accuracy = nd.mean(pred_idxs[:, 0] == nd.array(words4))
            accuracy = accuracy.asscalar()
            if not exclude_question_words:
                assert accuracy <= 0.1

                # Instead the model would predict W3 most of the time
                accuracy_w3 = nd.mean(pred_idxs[:, 0] == nd.array(words3))
                assert accuracy_w3.asscalar() >= 0.89

            else:
                # The wiki.simple vectors don't perform very well
                assert accuracy >= 0.29

            # Assert output shape
            assert pred_idxs.shape[1] == k
Developer: hridaydutta123 | Project: gluon-nlp | Lines: 44 | Source: test_vocab_embed.py


Example 20: _csv_labelled_dataset

 def _csv_labelled_dataset(self, root, skip_rows=0):
     with open(self._train_csv, "r") as traincsv:
         for line in islice(csv.reader(traincsv), skip_rows, None):
             filename = os.path.join(root, line[0])
             label = line[1].strip()
             if label not in self.synsets:
                 self.synsets.append(label)
             if self._format not in filename:
                 filename = filename+self._format
             self.items.append((filename, nd.array([self.synsets.index(label)]).reshape((1,))))
Developer: luobao-intel | Project: incubator-mxnet | Lines: 10 | Source: datasets.py
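
Example 20 walks a CSV in which each row pairs a file name with a string label, growing self.synsets so that every distinct label maps to an integer index. A hypothetical layout of such a file (names invented for illustration):

dog_barking_0.wav,dog
cat_meowing_0.wav,cat
dog_barking_1.wav,dog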



Note: the mxnet.ndarray.array function examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.

