This article collects typical usage examples of the Python function tensorflow.sparse_tensor_dense_matmul. If you have been wondering what sparse_tensor_dense_matmul does, how to call it, or want to see it used in context, the curated snippets below should help.
Twenty code examples of sparse_tensor_dense_matmul are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
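For orientation, here is a minimal, self-contained sketch (TF 1.x API; the names and data are illustrative, not taken from the examples below) of what tf.sparse_tensor_dense_matmul computes: the product of a tf.SparseTensor and a dense matrix, without ever densifying the sparse operand.

import numpy as np
import tensorflow as tf  # TF 1.x API assumed throughout this article

# A 3x3 sparse matrix with two non-zero entries.
sp = tf.SparseTensor(indices=[[0, 0], [2, 1]], values=[1.0, 2.0], dense_shape=[3, 3])
dense = tf.constant(np.arange(9, dtype=np.float32).reshape(3, 3))

# Equivalent to tf.matmul(tf.sparse_tensor_to_dense(sp), dense),
# but the dense form of `sp` is never materialized.
product = tf.sparse_tensor_dense_matmul(sp, dense)

with tf.Session() as sess:
    print(sess.run(product))  # a dense (3, 3) result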
Example 1: _inference
def _inference(self, x, dropout):
    with tf.name_scope('gconv1'):
        N, M = x.get_shape()  # N: number of samples, M: number of features
        M = int(M)
        # Filter
        W = self._weight_variable([self.K, self.F])
        def filter(xt, k):
            xt = tf.transpose(xt)  # N x M
            xt = tf.reshape(xt, [-1, 1])  # NM x 1
            w = tf.slice(W, [k, 0], [1, -1])  # 1 x F
            y = tf.matmul(xt, w)  # NM x F
            return tf.reshape(y, [-1, M, self.F])  # N x M x F
        xt0 = tf.transpose(x)  # M x N
        y = filter(xt0, 0)
        if self.K > 1:
            xt1 = tf.sparse_tensor_dense_matmul(self.L, xt0)
            y += filter(xt1, 1)
        for k in range(2, self.K):
            xt2 = 2 * tf.sparse_tensor_dense_matmul(self.L, xt1) - xt0  # M x N
            y += filter(xt2, k)
            xt0, xt1 = xt1, xt2
        # Bias and non-linearity
        # b = self._bias_variable([1, 1, self.F])
        b = self._bias_variable([1, M, self.F])
        y += b  # N x M x F
        y = tf.nn.relu(y)
    with tf.name_scope('fc1'):
        W = self._weight_variable([self.F*M, NCLASSES])
        b = self._bias_variable([NCLASSES])
        y = tf.reshape(y, [-1, self.F*M])
        y = tf.matmul(y, W) + b
    return y
Author: hyzcn | Project: cnn_graph | Lines: 32 | Source: models.py
Example 2: _build_fm
def _build_fm(self):
    """Construct the factorization machine part for the model.
    This is a traditional 2-order FM module.

    Returns:
        obj: prediction score made by factorization machine.
    """
    with tf.variable_scope("fm_part") as scope:
        x = tf.SparseTensor(
            self.iterator.fm_feat_indices,
            self.iterator.fm_feat_values,
            self.iterator.fm_feat_shape,
        )
        xx = tf.SparseTensor(
            self.iterator.fm_feat_indices,
            tf.pow(self.iterator.fm_feat_values, 2),
            self.iterator.fm_feat_shape,
        )
        fm_output = 0.5 * tf.reduce_sum(
            tf.pow(tf.sparse_tensor_dense_matmul(x, self.embedding), 2)
            - tf.sparse_tensor_dense_matmul(xx, tf.pow(self.embedding, 2)),
            1,
            keep_dims=True,
        )
        return fm_output
Author: David-Li-L | Project: recommenders | Lines: 25 | Source: xDeepFM.py
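The 0.5 * reduce_sum(...) line is the classic FM identity: the sum of second-order interactions, sum over i<j of x_i * x_j * <v_i, v_j>, equals 0.5 * sum over f of [(sum_i x_i v_if)^2 - sum_i x_i^2 v_if^2], which is why two sparse matmuls suffice instead of a loop over feature pairs. A small NumPy check of the identity (hypothetical dense data):

import numpy as np

rng = np.random.RandomState(0)
x = rng.rand(5)      # one sample with 5 features
V = rng.rand(5, 3)   # embedding matrix with 3 latent factors

pairwise = sum(x[i] * x[j] * V[i].dot(V[j])
               for i in range(5) for j in range(i + 1, 5))
fm_trick = 0.5 * np.sum(x.dot(V) ** 2 - (x ** 2).dot(V ** 2))
assert np.allclose(pairwise, fm_trick)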
Example 3: _call
def _call(self, inputs):
    x = inputs
    x = dropout_sparse(x, 1 - self.dropout, self.features_nonzero)
    x = tf.sparse_tensor_dense_matmul(x, self.vars['weights'])
    x = tf.sparse_tensor_dense_matmul(self.adj, x)
    outputs = self.act(x)
    return outputs
Author: burakbayramli | Project: classnotes | Lines: 7 | Source: util.py
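Read together, the two sparse matmuls form one graph-convolution layer, outputs = act(A · X · W): the sparse feature matrix is first projected by the dense weights, then the sparse normalized adjacency aggregates each node over its neighbors. A dense NumPy equivalent for intuition (hypothetical matrices; act taken to be ReLU):

import numpy as np

adj = np.full((4, 4), 0.25)   # stands in for a normalized adjacency matrix
X = np.random.rand(4, 8)      # node features
W = np.random.rand(8, 16)     # layer weights

outputs = np.maximum(adj @ X @ W, 0.0)  # shape (4, 16)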
Example 4: chebyshev5
def chebyshev5(self, x, L, Fout, K, regularization=False):
    N, M, Fin = x.get_shape()
    N, M, Fin = int(N), int(M), int(Fin)
    # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
    L = scipy.sparse.csr_matrix(L)
    L = graph.rescale_L(L, lmax=2)
    L = L.tocoo()
    indices = np.column_stack((L.row, L.col))
    L = tf.SparseTensor(indices, L.data, L.shape)
    L = tf.sparse_reorder(L)
    # Transform to Chebyshev basis
    x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
    x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
    x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
    def concat(x, x_):
        x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
        return tf.concat(0, [x, x_])  # K x M x Fin*N
    if K > 1:
        x1 = tf.sparse_tensor_dense_matmul(L, x0)
        x = concat(x, x1)
    for k in range(2, K):
        x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
        x = concat(x, x2)
        x0, x1 = x1, x2
    x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
    x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
    x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
    # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
    W = self._weight_variable([Fin*K, Fout], regularization=regularization)
    x = tf.matmul(x, W)  # N*M x Fout
    return tf.reshape(x, [N, M, Fout])  # N x M x Fout
Author: parisots | Project: gcn_metric_learning | Lines: 31 | Source: models_siamese.py
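The loop relies on the Chebyshev recurrence T_k(L)x = 2L·T_{k-1}(L)x − T_{k-2}(L)x, so a K-th order filter costs only K−1 sparse-dense matmuls. The same recurrence in NumPy/SciPy (a sketch; L_hat stands in for the rescaled Laplacian):

import numpy as np
import scipy.sparse

M, Fin, K = 100, 8, 5
L_hat = scipy.sparse.random(M, M, density=0.05, random_state=0)
x = np.random.rand(M, Fin)

basis = [x]                    # T_0(L)x = x
if K > 1:
    basis.append(L_hat @ x)    # T_1(L)x = L x
for k in range(2, K):
    basis.append(2 * (L_hat @ basis[-1]) - basis[-2])
stacked = np.stack(basis)      # K x M x Fin, analogous to the K x M x Fin*N tensor above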
Example 5: _call
def _call(self, inputs):
    x = inputs
    # if self.dropout > 0:
    x = dropout_sparse(x, 1 - self.dropout, self.features_nonzero, dtype=self.dtype)
    x = tf.sparse_tensor_dense_matmul(tf.cast(x, tf.float32), tf.cast(self.vars['weights'], tf.float32))
    x = tf.sparse_tensor_dense_matmul(tf.cast(self.adj, tf.float32), tf.cast(x, tf.float32))
    outputs = tf.cast(self.act(x), self.dtype)
    return outputs
Author: habedi | Project: link-prediction | Lines: 8 | Source: layers.py
Example 6: build_model
def build_model(self):
    dense_masker01 = tf.sparse_tensor_to_dense(self.mask)
    dense_masker02 = tf.sparse_tensor_to_dense(self.mask1)
    dense_masker03 = tf.sparse_tensor_to_dense(self.mask2)
    with tf.name_scope('encoding'):
        encoding = tf.add(tf.sparse_tensor_dense_matmul(self.X, self.W), self.b, name='raw_values')
        encoded_values = self.enc_func(encoding, name='encoded_values') - self.enc_func(self.b)
        encoding1 = tf.add(tf.sparse_tensor_dense_matmul(self.X1, self.W), self.b, name='raw_values1')
        encoded_values1 = self.enc_func(encoding1, name='encoded_values1') - self.enc_func(self.b)
        encoding2 = tf.add(tf.sparse_tensor_dense_matmul(self.X2, self.W), self.b, name='raw_values2')
        encoded_values2 = self.enc_func(encoding2, name='encoded_values2') - self.enc_func(self.b)
    with tf.name_scope('decoding'):
        decoding = tf.nn.xw_plus_b(encoded_values, self.W_prime, self.b_prime)
        decoded_values = self.dec_func(decoding, name='decoded_values')
        decoding1 = tf.nn.xw_plus_b(encoded_values1, self.W_prime, self.b_prime)
        decoded_values1 = self.dec_func(decoding1, name='decoded_values1')
        decoding2 = tf.nn.xw_plus_b(encoded_values2, self.W_prime, self.b_prime)
        decoded_values2 = self.dec_func(decoding2, name='decoded_values2')
    masked_decoded_values = tf.multiply(dense_masker01, decoded_values)
    with tf.name_scope('training_process'):
        diff01 = tf.squared_difference(tf.sparse_tensor_to_dense(self.Y), decoded_values)
        diff02 = tf.squared_difference(tf.sparse_tensor_to_dense(self.Y1), decoded_values1)
        diff03 = tf.squared_difference(tf.sparse_tensor_to_dense(self.Y2), decoded_values2)
        L_R = tf.reduce_sum(tf.multiply(dense_masker01, diff01)) \
            + tf.reduce_sum(tf.multiply(dense_masker02, diff02)) \
            + tf.reduce_sum(tf.multiply(dense_masker03, diff03))
        L_T = tf.reduce_sum(tf.log(1 + tf.exp(
            tf.reduce_sum(tf.multiply(encoded_values, encoded_values2), 1)
            - tf.reduce_sum(tf.multiply(encoded_values, encoded_values1), 1))))
        error = L_R + self.alpha_enc * L_T
    reg = 0
    for param in self.params.items():
        reg += tf.nn.l2_loss(param[1]) * self.lambda_w
    loss = error + reg
    model_params = [p for p in self.params.values()]
    train_step = self._optimize(loss, model_params)
    tf.summary.scalar('error', error)
    tf.summary.scalar('loss', loss)
    for param in self.params.items():
        tf.summary.histogram(param[0], param[1])
    merged_summary = tf.summary.merge_all()
    return encoded_values, decoded_values, masked_decoded_values, error, loss, train_step, merged_summary
Author: WendyLNU | Project: rnn_recsys | Lines: 57 | Source: CDAE.py
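Besides the masked reconstruction term L_R, the L_T term is a pairwise ranking loss of the softplus form log(1 + exp(s2 − s1)), where each score is an inner product between encodings; minimizing it pushes the X1 encoding's score above the X2 encoding's. A NumPy sketch of just that term (hypothetical batch of encodings):

import numpy as np

def ranking_loss(h, h1, h2):
    # mirrors L_T above, with h, h1, h2 playing the roles of
    # encoded_values, encoded_values1, encoded_values2
    s1 = np.sum(h * h1, axis=1)
    s2 = np.sum(h * h2, axis=1)
    return np.sum(np.log1p(np.exp(s2 - s1)))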
Example 7: _build_fm
def _build_fm(self, hparams):
    with tf.variable_scope("fm_part") as scope:
        x = tf.SparseTensor(self.iterator.fm_feat_indices,
                            self.iterator.fm_feat_values,
                            self.iterator.fm_feat_shape)
        xx = tf.SparseTensor(self.iterator.fm_feat_indices,
                             tf.pow(self.iterator.fm_feat_values, 2),
                             self.iterator.fm_feat_shape)
        fm_output = 0.5 * tf.reduce_sum(
            tf.pow(tf.sparse_tensor_dense_matmul(x, self.embedding), 2) - \
            tf.sparse_tensor_dense_matmul(xx,
                                          tf.pow(self.embedding, 2)), 1,
            keep_dims=True)
        return fm_output
Author: zeroToAll | Project: tensorflow_practice | Lines: 14 | Source: CIN.py
Example 8: _build_linear
def _build_linear(self):
    """Construct the linear part for the model.
    This is a linear regression.

    Returns:
        obj: prediction score made by linear regression.
    """
    with tf.variable_scope("linear_part", initializer=self.initializer) as scope:
        w = tf.get_variable(
            name="w", shape=[self.hparams.FEATURE_COUNT, 1], dtype=tf.float32
        )
        b = tf.get_variable(
            name="b",
            shape=[1],
            dtype=tf.float32,
            initializer=tf.zeros_initializer(),
        )
        x = tf.SparseTensor(
            self.iterator.fm_feat_indices,
            self.iterator.fm_feat_values,
            self.iterator.fm_feat_shape,
        )
        linear_output = tf.add(tf.sparse_tensor_dense_matmul(x, w), b)
        self.layer_params.append(w)
        self.layer_params.append(b)
        tf.summary.histogram("linear_part/w", w)
        tf.summary.histogram("linear_part/b", b)
        return linear_output
Author: David-Li-L | Project: recommenders | Lines: 28 | Source: xDeepFM.py
Example 9: __call__
def __call__(self, inputs, states, scope=None):
    with tf.variable_scope(scope or type(self).__name__):
        # this is the mode-3 matricization of W :)
        big_tensor = random_sparse_tensor(
            [self._num_units,
             self._num_inputs * self._num_units],
            self.sparsity, name='W_3')
        u = tf.get_variable('U', [self._num_units, self._num_units])
        v = tf.get_variable('V', [self._num_units, self._num_inputs])
        b = tf.get_variable('b', [self._num_units],
                            initializer=tf.constant_initializer(0.0))
        # make and flatten the outer product
        # have to do this with some unfortunate reshaping
        outer_prod = tf.matmul(
            tf.reshape(states, [-1, self._num_units, 1]),
            tf.reshape(inputs, [-1, 1, self._num_inputs]))
        outer_prod = tf.reshape(
            outer_prod,
            [-1, self._num_units * self._num_inputs])
        tensor_prod = tf.sparse_tensor_dense_matmul(
            big_tensor, outer_prod, adjoint_b=True)
        tensor_prod = tf.transpose(tensor_prod)
        hidden_act = tf.matmul(states, u)
        input_act = tf.matmul(inputs, v)
        linears = tensor_prod + hidden_act
        linears += input_act
        linears += b
        output = self._nonlinearity(linears)
        return output, output
Author: PFCM | Project: rnns | Lines: 29 | Source: simple_tensor_rnn.py
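Note adjoint_b=True: tf.sparse_tensor_dense_matmul requires the sparse operand as its first argument, so instead of computing outer_prod · W3^T directly the code evaluates (W3 · outer_prod^T)^T and transposes back. A minimal sketch of that trick (hypothetical shapes):

import tensorflow as tf

# W3 is a sparse [4, 6] matrix; batch is dense [2, 6].
W3 = tf.SparseTensor(indices=[[0, 0], [3, 5]], values=[1.0, -1.0], dense_shape=[4, 6])
batch = tf.ones([2, 6])

# batch @ dense(W3).T, written as (W3 @ batch.T).T because the
# sparse operand must come first:
prod = tf.transpose(tf.sparse_tensor_dense_matmul(W3, batch, adjoint_b=True))  # [2, 4]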
Example 10: build_model
def build_model(self):
    dense_masker = tf.sparse_tensor_to_dense(self.mask)
    with tf.name_scope('encoding'):
        encoding = tf.add(tf.sparse_tensor_dense_matmul(self.X, self.W), self.b, name='raw_values')
        encoded_values = self.enc_func(encoding, name='encoded_values')
    with tf.name_scope('decoding'):
        decoding = tf.nn.xw_plus_b(encoded_values, self.W_prime, self.b_prime)
        decoded_values = self.dec_func(decoding, name='decoded_values')
    masked_decoded_values = tf.multiply(dense_masker, decoded_values)
    with tf.name_scope('training_process'):
        diff = tf.squared_difference(tf.sparse_tensor_to_dense(self.Y, default_value=0), decoded_values)
        error = tf.reduce_sum(tf.multiply(dense_masker, diff))
    reg = 0
    for param in self.params.items():
        reg += tf.nn.l2_loss(param[1]) * self.lambda_w
    loss = error + reg
    model_params = [p for p in self.params.values()]
    train_step = self._optimize(loss, model_params)
    tf.summary.scalar('error', error)
    tf.summary.scalar('loss', loss)
    for param in self.params.items():
        tf.summary.histogram(param[0], param[1])
    # tf.summary.histogram('predictions', decoded_values)
    merged_summary = tf.summary.merge_all()
    return encoded_values, decoded_values, masked_decoded_values, error, loss, train_step, merged_summary
Author: WendyLNU | Project: rnn_recsys | Lines: 33 | Source: DAE.py
Example 11: dot
def dot(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense)."""
    if sparse:
        res = tf.sparse_tensor_dense_matmul(x, y)
    else:
        res = tf.matmul(x, y)
    return res
Author: Eilene | Project: gcn | Lines: 7 | Source: layers.py
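A possible call site for this wrapper (tensors here are illustrative, and the dot function above is assumed to be in scope): the flag simply selects between the two kernels, since tf.matmul cannot accept a tf.SparseTensor directly.

import tensorflow as tf

dense_x = tf.ones([4, 3])
sparse_x = tf.SparseTensor(indices=[[0, 0], [2, 2]], values=[1.0, 2.0], dense_shape=[4, 3])
w = tf.ones([3, 5])

out_dense = dot(dense_x, w)                 # plain tf.matmul
out_sparse = dot(sparse_x, w, sparse=True)  # tf.sparse_tensor_dense_matmul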
Example 12: __init__
def __init__(self, input_dim=None, output_dim=1, init_path=None, opt_algo='gd', learning_rate=1e-2, l2_weight=0,
             random_seed=None):
    Model.__init__(self)
    init_vars = [('w', [input_dim, output_dim], 'xavier', dtype),
                 ('b', [output_dim], 'zero', dtype)]
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = tf.sparse_placeholder(dtype)
        self.y = tf.placeholder(dtype)
        self.vars = utils.init_var_map(init_vars, init_path)  # initialize the variables w and b
        w = self.vars['w']
        b = self.vars['b']
        xw = tf.sparse_tensor_dense_matmul(self.X, w)
        logits = tf.reshape(xw + b, [-1])
        self.y_prob = tf.sigmoid(logits)
        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y, logits=logits)) + \
            l2_weight * tf.nn.l2_loss(xw)
        self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
Author: zgcgreat | Project: WSDM | Lines: 28 | Source: models.py
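At run time, a tf.sparse_placeholder such as self.X is fed a tf.SparseTensorValue. A hedged sketch with hypothetical data (the model and label names are illustrative):

import numpy as np
import tensorflow as tf

input_dim = 10  # must match the input_dim the model was built with
feed = tf.SparseTensorValue(
    indices=np.array([[0, 1], [0, 7], [1, 3]], dtype=np.int64),  # (row, feature) of non-zeros
    values=np.array([1.0, 1.0, 1.0], dtype=np.float32),
    dense_shape=np.array([2, input_dim], dtype=np.int64))
# y_prob = model.sess.run(model.y_prob, feed_dict={model.X: feed, model.y: [1.0, 0.0]})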
Example 13: __init__
def __init__(self, field_sizes=None, embed_size=10, filter_sizes=None, layer_acts=None, drop_out=None,
             init_path=None, opt_algo='gd', learning_rate=1e-2, random_seed=None):
    Model.__init__(self)
    init_vars = []
    num_inputs = len(field_sizes)
    for i in range(num_inputs):
        init_vars.append(('embed_%d' % i, [field_sizes[i], embed_size], 'xavier', dtype))
    init_vars.append(('f1', [embed_size, filter_sizes[0], 1, 2], 'xavier', dtype))
    init_vars.append(('f2', [embed_size, filter_sizes[1], 2, 2], 'xavier', dtype))
    init_vars.append(('w1', [2 * 3 * embed_size, 1], 'xavier', dtype))
    init_vars.append(('b1', [1], 'zero', dtype))
    self.graph = tf.Graph()
    with self.graph.as_default():
        if random_seed is not None:
            tf.set_random_seed(random_seed)
        self.X = [tf.sparse_placeholder(dtype) for i in range(num_inputs)]
        self.y = tf.placeholder(dtype)
        self.keep_prob_train = 1 - np.array(drop_out)
        self.keep_prob_test = np.ones_like(drop_out)
        self.layer_keeps = tf.placeholder(dtype)
        self.vars = utils.init_var_map(init_vars, init_path)
        w0 = [self.vars['embed_%d' % i] for i in range(num_inputs)]
        xw = tf.concat([tf.sparse_tensor_dense_matmul(self.X[i], w0[i]) for i in range(num_inputs)], 1)
        l = xw
        l = tf.transpose(tf.reshape(l, [-1, num_inputs, embed_size, 1]), [0, 2, 1, 3])
        f1 = self.vars['f1']
        l = tf.nn.conv2d(l, f1, [1, 1, 1, 1], 'SAME')
        l = tf.transpose(
            utils.max_pool_4d(
                tf.transpose(l, [0, 1, 3, 2]),
                int(num_inputs / 2)),
            [0, 1, 3, 2])
        f2 = self.vars['f2']
        l = tf.nn.conv2d(l, f2, [1, 1, 1, 1], 'SAME')
        l = tf.transpose(
            utils.max_pool_4d(
                tf.transpose(l, [0, 1, 3, 2]), 3),
            [0, 1, 3, 2])
        l = tf.nn.dropout(
            utils.activate(
                tf.reshape(l, [-1, embed_size * 3 * 2]),
                layer_acts[0]),
            self.layer_keeps[0])
        w1 = self.vars['w1']
        b1 = self.vars['b1']
        l = tf.matmul(l, w1) + b1
        l = tf.squeeze(l)
        self.y_prob = tf.sigmoid(l)
        self.loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=l, labels=self.y))
        self.optimizer = utils.get_optimizer(opt_algo, learning_rate, self.loss)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        tf.global_variables_initializer().run(session=self.sess)
Author: chenxingqiang | Project: ML_CIA | Lines: 60 | Source: models.py
Example 14: affine_loss
def affine_loss(output, M, weight):
    loss_affine = 0.0
    output_t = output / 255.
    for Vc in tf.unstack(output_t, axis=-1):
        Vc_ravel = tf.reshape(tf.transpose(Vc), [-1])
        loss_affine += tf.matmul(tf.expand_dims(Vc_ravel, 0), tf.sparse_tensor_dense_matmul(M, tf.expand_dims(Vc_ravel, -1)))
    return loss_affine * weight
Author: 4ever911 | Project: deep-photo-styletransfer-tf | Lines: 8 | Source: photo_style.py
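Each iteration of the loop computes the quadratic form v^T M v of one flattened color channel v against the sparse matting-Laplacian matrix M. A per-channel NumPy equivalent (a sketch with hypothetical sizes; the identity matrix stands in for M):

import numpy as np
import scipy.sparse

n = 16                                      # pixels in one channel
M = scipy.sparse.identity(n, format='csr')  # stands in for the matting Laplacian
v = np.random.rand(n)                       # one flattened, transposed channel / 255.

loss_channel = v @ (M @ v)                  # the v^T M v term accumulated above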
Example 15: tensor_mul
def tensor_mul(lin_op, value_map):
    a = tensor(lin_op.data, value_map)
    b = tensor(lin_op.args[0], value_map)
    if is_sparse(a):
        return tf.sparse_tensor_dense_matmul(a, b)
    elif is_scalar(a) or is_scalar(b):
        return tf.mul(a, b)
    else:
        return tf.matmul(a, b)
Author: mwytock | Project: cvxflow | Lines: 9 | Source: cvxpy_expr.py
Example 16: adjoint_tensor_mul
def adjoint_tensor_mul(lin_op, value):
    a = tensor(lin_op.data)
    b = value
    if is_sparse(a):
        c = tf.sparse_tensor_dense_matmul(a, b, adjoint_a=True)
    elif is_scalar(a) or is_scalar(b):
        c = tf.mul(tf.transpose(a), b)
    else:
        c = tf.matmul(a, b, transpose_a=True)
    return adjoint_tensor(lin_op.args[0], c)
Author: mwytock | Project: cvxflow | Lines: 12 | Source: cvxpy_expr.py
Example 17: __init__
def __init__(self, config):
    self._weights_indices = tf.placeholder(tf.int64)
    self._weights_values = tf.placeholder(config.data_type)
    self._weights_shape = tf.placeholder(tf.int64)
    self._b = tf.placeholder(config.data_type)
    self._initial_x = tf.placeholder(config.data_type)
    weights = tf.SparseTensor(self.weights_indices, self.weights_values, self.weights_shape)
    x = self.initial_x
    for i in range(config.num_iterations):
        # Jacobi iteration
        x = self.b - tf.sparse_tensor_dense_matmul(weights, x)
    self._final_x = x
Author: priyathamkat | Project: Kroma | Lines: 13 | Source: Kroma.py
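The update x ← b − Wx is a fixed-point iteration whose stationary point solves (I + W)x = b; it converges when the spectral radius of W is below 1. A NumPy sketch under that assumption:

import numpy as np

n = 50
W = 0.01 * np.random.rand(n, n)  # small entries keep the spectral radius well below 1
b = np.random.rand(n)

x = np.zeros(n)
for _ in range(100):
    x = b - W @ x                # same update as the TF graph above
assert np.allclose((np.eye(n) + W) @ x, b)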
Example 18: testShapeInference
def testShapeInference(self):
    x = np.random.rand(10, 10)
    x[np.abs(x) < 0.5] = 0  # Make it sparse
    y = np.random.randn(10, 20)
    x_indices = np.vstack(np.where(x)).astype(np.int64).T
    x_values = x[np.where(x)]
    x_shape = x.shape
    x_st = tf.SparseTensor(x_indices, x_values, x_shape)
    result = tf.sparse_tensor_dense_matmul(x_st, y)
    self.assertEqual(result.get_shape(), (10, 20))
    x_shape_unknown = tf.placeholder(dtype=tf.int64, shape=None)
    x_st_shape_unknown = tf.SparseTensor(x_indices, x_values, x_shape_unknown)
    result_left_shape_unknown = tf.sparse_tensor_dense_matmul(
        x_st_shape_unknown, y)
    self.assertEqual(
        result_left_shape_unknown.get_shape().as_list(), [None, 20])
    x_shape_inconsistent = [10, 15]
    x_st_shape_inconsistent = tf.SparseTensor(
        x_indices, x_values, x_shape_inconsistent)
    with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
        tf.sparse_tensor_dense_matmul(x_st_shape_inconsistent, y)
Author: ComeOnGetMe | Project: tensorflow | Lines: 23 | Source: sparse_tensor_dense_matmul_op_test.py
Example 19: _testGradients
def _testGradients(self, adjoint_a, adjoint_b, name, np_dtype, use_gpu=False):
    n, k, m = np.random.randint(1, 10, size=3)
    sp_t = self._randomTensor([n, k], np_dtype, adjoint=adjoint_a, sparse=True)
    dense_t = self._randomTensor([k, m], np_dtype, adjoint=adjoint_b)
    matmul = tf.sparse_tensor_dense_matmul(
        sp_t, dense_t, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name=name)
    with self.test_session(use_gpu=use_gpu):
        dense_t_shape = [m, k] if adjoint_b else [k, m]
        err = tf.test.compute_gradient_error(dense_t, dense_t_shape, matmul,
                                             [n, m])
        print("%s gradient err = %s" % (name, err))
        self.assertLess(err, 1e-3)
Author: 13683116633 | Project: tensorflow | Lines: 14 | Source: sparse_tensor_dense_matmul_grad_test.py
Example 20: project_biases
def project_biases(tf_features, n_features):
    """
    Projects the biases from the feature space to calculate bias per actor
    :param tf_features:
    :param n_features:
    :return:
    """
    tf_feature_biases = tf.Variable(tf.zeros([n_features, 1]))

    # The reduce sum is to perform a rank reduction
    tf_projected_biases = tf.reduce_sum(
        tf.sparse_tensor_dense_matmul(tf_features, tf_feature_biases),
        axis=1
    )

    return tf_feature_biases, tf_projected_biases
Author: CloudBreadPaPa | Project: tensorrec | Lines: 16 | Source: recommendation_graphs.py
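With multi-hot indicator features, the sparse matmul gathers and sums each row's feature biases; the reduce_sum then only collapses the trailing singleton dimension ([n, 1] to [n]). A toy NumPy check (hypothetical data):

import numpy as np

features = np.array([[1, 0, 0],    # each row: one actor's feature indicators
                     [0, 1, 1]], dtype=np.float32)
feature_biases = np.array([[0.5], [-0.2], [0.1]], dtype=np.float32)

projected = (features @ feature_biases).sum(axis=1)  # -> [0.5, -0.1]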
Note: the tensorflow.sparse_tensor_dense_matmul examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are taken from open-source projects contributed by their original authors; copyright remains with those authors, and any use or redistribution is subject to each project's license. Please do not republish without permission.