This article collects typical usage examples of Python's tensorflow.gather function. If you have been wondering what tf.gather does, how to call it, or what real-world usage looks like, the curated code examples below should help.
Twenty code examples of the gather function are presented, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
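Before the examples themselves, here is a minimal sketch of what tf.gather does, assuming TensorFlow 1.x graph mode (the tensor values are purely illustrative): it selects slices of params along axis 0 according to indices.

import tensorflow as tf

params = tf.constant([[1.0, 2.0],
                      [3.0, 4.0],
                      [5.0, 6.0]])   # shape [3, 2]
indices = tf.constant([2, 0])        # rows to select
rows = tf.gather(params, indices)    # shape [2, 2]

with tf.Session() as sess:
    print(sess.run(rows))            # [[5. 6.] [1. 2.]]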
Example 1: _recurrence
def _recurrence(node_h, node_c, idx_var):
    # Gather the children of the current node, then their hidden/cell states.
    node_info = tf.gather(treestr, idx_var)
    child_h = tf.gather(node_h, node_info)
    child_c = tf.gather(node_c, node_info)

    # Project the concatenated child hidden states into the five LSTM gates.
    flat_ = tf.reshape(child_h, [-1])
    tmp = tf.matmul(tf.expand_dims(flat_, 0), cW)
    u, o, i, fl, fr = tf.split(1, 5, tmp)  # pre-1.0 API: tf.split(axis, num, value)

    i = tf.nn.sigmoid(i + bi)
    o = tf.nn.sigmoid(o + bo)
    u = tf.nn.tanh(u + bu)
    fl = tf.nn.sigmoid(fl + bf)  # forget gate for the left child
    fr = tf.nn.sigmoid(fr + bf)  # forget gate for the right child

    f = tf.concat(0, [fl, fr])   # pre-1.0 API: tf.concat(axis, values)
    c = i * u + tf.reduce_sum(f * child_c, [0])
    h = o * tf.nn.tanh(c)

    # Append the new node state and move to the next node.
    node_h = tf.concat(0, [node_h, h])
    node_c = tf.concat(0, [node_c, c])
    idx_var = tf.add(idx_var, 1)
    return node_h, node_c, idx_var
Developer: Chelz, Project: RecursiveNN, Lines: 27, Source: tf_tree_lstm.py
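Note that Example 1 predates TensorFlow 1.0, where tf.split and tf.concat took the axis as their first argument. A hedged sketch of the equivalent calls under the TF >= 1.0 signatures; tmp here is a stand-in zero tensor, not the projection from the example:

import tensorflow as tf

tmp = tf.zeros([1, 10])  # stand-in for the [1, 5 * hidden] projection in Example 1
u, o, i, fl, fr = tf.split(tmp, num_or_size_splits=5, axis=1)  # was: tf.split(1, 5, tmp)
f = tf.concat([fl, fr], axis=0)                                # was: tf.concat(0, [fl, fr])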
Example 2: f
def f(X):
    """
    prob: n probabilities
    box: nx4 boxes
    Returns: n boolean, the selection
    """
    prob, box = X
    output_shape = tf.shape(prob)
    # filter by score threshold
    ids = tf.reshape(tf.where(prob > cfg.TEST.RESULT_SCORE_THRESH), [-1])
    prob = tf.gather(prob, ids)
    box = tf.gather(box, ids)

    # NMS within each class
    selection = tf.image.non_max_suppression(
        box, prob, cfg.TEST.RESULTS_PER_IM, cfg.TEST.FRCNN_NMS_THRESH)
    selection = tf.to_int32(tf.gather(ids, selection))

    # sort available in TF>1.4.0
    # sorted_selection = tf.contrib.framework.sort(selection, direction='ASCENDING')
    sorted_selection = -tf.nn.top_k(-selection, k=tf.size(selection))[0]

    mask = tf.sparse_to_dense(
        sparse_indices=sorted_selection,
        output_shape=output_shape,
        sparse_values=True,
        default_value=False)
    return mask
Developer: tobyma, Project: tensorpack, Lines: 26, Source: model.py
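The negation trick in Example 2 is worth isolating: tf.nn.top_k returns values in descending order, so applying it to a negated tensor and negating the result yields an ascending sort without tf.contrib.framework.sort. A minimal sketch with illustrative values:

import tensorflow as tf

x = tf.constant([3, 1, 2])
ascending = -tf.nn.top_k(-x, k=tf.size(x))[0]

with tf.Session() as sess:
    print(sess.run(ascending))  # [1 2 3]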
Example 3: modular_layer
def modular_layer(inputs, modules: ModulePool, parallel_count: int, context: ModularContext):
    with tf.variable_scope(None, 'modular_layer'):
        inputs = context.begin_modular(inputs)

        flat_inputs = tf.layers.flatten(inputs)
        logits = tf.layers.dense(flat_inputs, modules.module_count * parallel_count)
        logits = tf.reshape(logits, [-1, parallel_count, modules.module_count])
        ctrl = tfd.Categorical(logits)

        initializer = tf.random_uniform_initializer(maxval=modules.module_count, dtype=tf.int32)
        shape = [context.dataset_size, parallel_count]
        best_selection_persistent = tf.get_variable('best_selection', shape, tf.int32, initializer)

        if context.mode == ModularMode.E_STEP:
            # 1 x batch_size x 1
            best_selection = tf.gather(best_selection_persistent, context.data_indices)[tf.newaxis]
            # sample_size x batch_size x 1
            sampled_selection = tf.reshape(ctrl.sample(), [context.sample_size, -1, parallel_count])
            selection = tf.concat([best_selection, sampled_selection[1:]], axis=0)
            selection = tf.reshape(selection, [-1, parallel_count])
        elif context.mode == ModularMode.M_STEP:
            selection = tf.gather(best_selection_persistent, context.data_indices)
        elif context.mode == ModularMode.EVALUATION:
            selection = ctrl.mode()
        else:
            raise ValueError('Invalid modular mode')

        attrs = ModularLayerAttributes(selection, best_selection_persistent, ctrl)
        context.layers.append(attrs)

        return run_modules(inputs, selection, modules.module_fnc, modules.output_shape)
Developer: timediv, Project: libmodular, Lines: 31, Source: layers.py
Example 4: scheduled_sample_count
def scheduled_sample_count(ground_truth_x,
                           generated_x,
                           batch_size,
                           scheduled_sample_var):
    """Sample batch with specified mix of groundtruth and generated data points.

    Args:
      ground_truth_x: tensor of ground-truth data points.
      generated_x: tensor of generated data points.
      batch_size: batch size
      scheduled_sample_var: number of ground-truth examples to include in batch.

    Returns:
      New batch with num_ground_truth sampled from ground_truth_x and the rest
      from generated_x.
    """
    num_ground_truth = scheduled_sample_var
    idx = tf.random_shuffle(tf.range(batch_size))
    ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
    generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size))

    ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
    generated_examps = tf.gather(generated_x, generated_idx)

    output = tf.dynamic_stitch([ground_truth_idx, generated_idx],
                               [ground_truth_examps, generated_examps])
    # if batch size is known set it.
    if isinstance(batch_size, int):
        output.set_shape([batch_size] + common_layers.shape_list(output)[1:])
    return output
Developer: qixiuai, Project: tensor2tensor, Lines: 29, Source: common_video.py
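The gather/stitch round trip in Example 4 can be seen in isolation: tf.dynamic_stitch places each data value back at the position named by its index list, undoing the shuffle. A minimal sketch with made-up values:

import tensorflow as tf

ground_truth_idx = tf.constant([0, 2])
generated_idx = tf.constant([1, 3])
ground_truth_examps = tf.constant([10.0, 30.0])
generated_examps = tf.constant([21.0, 41.0])
output = tf.dynamic_stitch([ground_truth_idx, generated_idx],
                           [ground_truth_examps, generated_examps])

with tf.Session() as sess:
    print(sess.run(output))  # [10. 21. 30. 41.]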
Example 5: build_predict
def build_predict(self, Xnew, task_ind):
    """
    We need to assume the task_ind starts from 0
    """
    Fmean, Fvar = 0, 0
    for i in np.arange(self.rank):
        for j in np.arange(self.num_latent_list[i]):
            lat_id = np.sum(self.num_latent_list[:i], dtype=np.int64) + j
            if self.whiten_list[lat_id]:  # need to compute fmean and fvar by the weights
                fmean, fvar = conditionals.gaussian_gp_predict_whitened(
                    Xnew, self.Z[lat_id], self.kern_list[i],
                    self.q_mu_list[lat_id], self.q_sqrt_list[lat_id], 1)
            else:
                fmean, fvar = conditionals.gaussian_gp_predict(
                    Xnew, self.Z[lat_id], self.kern_list[i],
                    self.q_mu_list[lat_id], self.q_sqrt_list[lat_id], 1)
            W_ij = tf.gather(self.W, task_ind)[lat_id]
            Fmean += (fmean + self.mean_function_list[lat_id](Xnew)) * W_ij
            Fvar += fvar * tf.square(W_ij)

    if self.tsk:
        for i in np.arange(self.num_tasks):
            lat_id = np.sum(self.num_latent_list, dtype=np.int64) + i
            if self.whiten_list[lat_id]:  # need to compute fmean and fvar by the weights
                fmean, fvar = conditionals.gaussian_gp_predict_whitened(
                    Xnew, self.Z[lat_id], self.tskern_list[i],
                    self.q_mu_list[lat_id], self.q_sqrt_list[lat_id], 1)
            else:
                fmean, fvar = conditionals.gaussian_gp_predict(
                    Xnew, self.Z[lat_id], self.tskern_list[i],
                    self.q_mu_list[lat_id], self.q_sqrt_list[lat_id], 1)
            switch = tf.cast(tf.equal(tf.to_int64(i), task_ind), tf.float64)
            W_ij = tf.gather(self.Kappa, i)[0] * switch
            Fmean += (fmean + self.mean_function_list[lat_id](Xnew)) * W_ij
            Fvar += fvar * tf.square(W_ij)

    return Fmean, Fvar
Developer: dashan-emr, Project: GPflow, Lines: 35, Source: lmc_tsk.py
Example 6: append
def append(self, transitions, rows=None):
    """Append a batch of transitions to rows of the memory.

    Args:
      transitions: Tuple of transition quantities with batch dimension.
      rows: Episodes to append to, defaults to all.

    Returns:
      Operation.
    """
    rows = tf.range(self._capacity) if rows is None else rows
    assert rows.shape.ndims == 1
    assert_capacity = tf.assert_less(
        rows, self._capacity,
        message='capacity exceeded')
    with tf.control_dependencies([assert_capacity]):
        assert_max_length = tf.assert_less(
            tf.gather(self._length, rows), self._max_length,
            message='max length exceeded')
    append_ops = []
    with tf.control_dependencies([assert_max_length]):
        for buffer_, elements in zip(self._buffers, transitions):
            timestep = tf.gather(self._length, rows)
            indices = tf.stack([rows, timestep], 1)
            append_ops.append(tf.scatter_nd_update(buffer_, indices, elements))
    with tf.control_dependencies(append_ops):
        episode_mask = tf.reduce_sum(tf.one_hot(
            rows, self._capacity, dtype=tf.int32), 0)
        return self._length.assign_add(episode_mask)
Developer: AndrewMeadows, Project: bullet3, Lines: 29, Source: memory.py
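The (row, timestep) indices built with tf.stack in Example 6 drive a scatter; the read-side counterpart is tf.gather_nd, which looks up one element per [row, timestep] pair. A small sketch with a hypothetical buffer, not the memory class above:

import tensorflow as tf

buffer_ = tf.reshape(tf.range(12, dtype=tf.float32), [3, 4])  # [capacity, max_length]
rows = tf.constant([0, 2])
timestep = tf.constant([1, 3])
indices = tf.stack([rows, timestep], 1)  # [[0, 1], [2, 3]]
values = tf.gather_nd(buffer_, indices)  # buffer_[0, 1] and buffer_[2, 3]

with tf.Session() as sess:
    print(sess.run(values))  # [ 1. 11.]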
Example 7: make_minibatch
def make_minibatch(self, valid_anchors):
    with tf.variable_scope('rpn_minibatch'):
        # in labels (shape is [N, ]): 1 is positive, 0 is negative, -1 is ignored
        labels, anchor_matched_gtboxes, object_mask = \
            self.rpn_find_positive_negative_samples(valid_anchors)  # [num_of_valid_anchors, ]

        positive_indices = tf.reshape(tf.where(tf.equal(labels, 1.0)), [-1])  # use labels is same as object_mask
        num_of_positives = tf.minimum(tf.shape(positive_indices)[0],
                                      tf.cast(self.rpn_mini_batch_size * self.rpn_positives_ratio, tf.int32))
        # num of positives <= minibatch_size * 0.5
        positive_indices = tf.random_shuffle(positive_indices)
        positive_indices = tf.slice(positive_indices, begin=[0], size=[num_of_positives])
        # positive_anchors = tf.gather(self.anchors, positive_indices)

        negative_indices = tf.reshape(tf.where(tf.equal(labels, 0.0)), [-1])
        num_of_negatives = tf.minimum(self.rpn_mini_batch_size - num_of_positives,
                                      tf.shape(negative_indices)[0])
        negative_indices = tf.random_shuffle(negative_indices)
        negative_indices = tf.slice(negative_indices, begin=[0], size=[num_of_negatives])
        # negative_anchors = tf.gather(self.anchors, negative_indices)

        minibatch_indices = tf.concat([positive_indices, negative_indices], axis=0)
        minibatch_indices = tf.random_shuffle(minibatch_indices)

        minibatch_anchor_matched_gtboxes = tf.gather(anchor_matched_gtboxes, minibatch_indices)
        object_mask = tf.gather(object_mask, minibatch_indices)
        labels = tf.cast(tf.gather(labels, minibatch_indices), tf.int32)
        labels_one_hot = tf.one_hot(labels, depth=2)
        return minibatch_indices, minibatch_anchor_matched_gtboxes, object_mask, labels_one_hot
Developer: mbossX, Project: RRPN_FPN_Tensorflow, Lines: 33, Source: build_rpn.py
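The sampling idiom in Example 7 (where, then shuffle, then slice, then gather) generalizes to any "pick k random rows matching a condition" task. A self-contained sketch with toy labels:

import tensorflow as tf

labels = tf.constant([1.0, 0.0, 1.0, 1.0, 0.0])
positive_indices = tf.reshape(tf.where(tf.equal(labels, 1.0)), [-1])
positive_indices = tf.random_shuffle(positive_indices)
k = tf.minimum(tf.shape(positive_indices)[0], 2)
sampled_indices = tf.slice(positive_indices, begin=[0], size=[k])
sampled_labels = tf.gather(labels, sampled_indices)

with tf.Session() as sess:
    print(sess.run(sampled_labels))  # [1. 1.]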
Example 8: c_body
def c_body(c, pa):
    # Zeroing predictions below threshold
    with tf.variable_scope('bboxes_c_select', reuse=True):
        c_scores = b_scores[:, c]
        c_fmask = tf.cast(tf.greater(c_scores, confidence_threshold), scores.dtype)
        c_scores = c_scores * c_fmask
        c_bboxes = b_bboxes * tf.expand_dims(c_fmask, axis=-1)

    # Apply NMS
    with tf.variable_scope('bboxes_c_nms', reuse=True):
        c_indices = tf.image.non_max_suppression(c_bboxes, c_scores, top_k, nms_threshold)
        size = tf.size(c_indices)
        c_batch_ = tf.to_float(b) * tf.ones(shape=[top_k, 1], dtype=tf.float32)  # len(indices) x 1
        c_labels = tf.to_float(c) * tf.ones(shape=[top_k, 1], dtype=tf.float32)  # len(indices) x 1
        extra_size = top_k - size
        c_scores = tf.expand_dims(tf.gather(c_scores, c_indices), axis=-1)  # len(indices) x 1
        empty_c_scores = tf.zeros([extra_size, 1], dtype=tf.float32)
        c_scores = tf.concat([c_scores, empty_c_scores], axis=0)
        c_bboxes = tf.gather(c_bboxes, c_indices)  # len(indices) x 4
        empty_c_bboxes = tf.zeros([extra_size, 4], dtype=tf.float32)
        c_bboxes = tf.concat([c_bboxes, empty_c_bboxes], axis=0)
        c_predictions = tf.concat([c_batch_, c_labels, c_scores, c_bboxes], axis=1)  # len(indices) x 7
    return c + 1, pa.write(index=c - 1, value=c_predictions)
Developer: undeadinu, Project: training_toolbox_tensorflow, Lines: 25, Source: ssd_base.py
Example 9: _ProcessSingleScale
def _ProcessSingleScale(scale_index,
                        boxes,
                        features,
                        scales,
                        scores,
                        reuse=True):
    """Resize the image and run feature extraction and keypoint selection.

    This function will be passed into tf.while_loop() and be called
    repeatedly. The input boxes are collected from the previous iteration
    [0: scale_index - 1]. We get the current scale by
    image_scales[scale_index], and run image resizing, feature extraction and
    keypoint selection. Then we will get a new set of selected_boxes for
    current scale. In the end, we concat the previous boxes with current
    selected_boxes as the output.

    Args:
      scale_index: A valid index in the image_scales.
      boxes: Box tensor with the shape of [N, 4].
      features: Feature tensor with the shape of [N, depth].
      scales: Scale tensor with the shape of [N].
      scores: Attention score tensor with the shape of [N].
      reuse: Whether or not the layer and its variables should be reused.

    Returns:
      scale_index: The next scale index for processing.
      boxes: Concatenated box tensor with the shape of [K, 4]. K >= N.
      features: Concatenated feature tensor with the shape of [K, depth].
      scales: Concatenated scale tensor with the shape of [K].
      scores: Concatenated attention score tensor with the shape of [K].
    """
    scale = tf.gather(image_scales, scale_index)
    new_image_size = tf.to_int32(tf.round(original_image_shape_float * scale))
    resized_image = tf.image.resize_bilinear(image_tensor, new_image_size)

    attention, feature_map = model_fn(
        resized_image, normalized_image=True, reuse=reuse)

    rf_boxes = CalculateReceptiveBoxes(
        tf.shape(feature_map)[1],
        tf.shape(feature_map)[2], rf, stride, padding)
    # Re-project back to the original image space.
    rf_boxes = tf.divide(rf_boxes, scale)
    attention = tf.reshape(attention, [-1])
    feature_map = tf.reshape(feature_map, [-1, feature_depth])

    # Use attention score to select feature vectors.
    indices = tf.reshape(tf.where(attention >= abs_thres), [-1])
    selected_boxes = tf.gather(rf_boxes, indices)
    selected_features = tf.gather(feature_map, indices)
    selected_scores = tf.gather(attention, indices)
    selected_scales = tf.ones_like(selected_scores, tf.float32) / scale

    # Concat with the previous result from different scales.
    boxes = tf.concat([boxes, selected_boxes], 0)
    features = tf.concat([features, selected_features], 0)
    scales = tf.concat([scales, selected_scales], 0)
    scores = tf.concat([scores, selected_scores], 0)
    return scale_index + 1, boxes, features, scales, scores
Developer: 812864539, Project: models, Lines: 60, Source: feature_extractor.py
Example 10: build_loss
def build_loss(self, logits, labels, lambs):
    # put a sig function on logits and then transpose
    logits = tf.transpose(framwork.sig_func(logits))
    # according to the labels, erase rows which are not in labels
    labels_unique = tf.constant(range(self.image_classes), dtype=tf.int32)
    labels_num = self.image_classes
    logits = tf.gather(logits, indices=labels_unique)
    lambs = tf.gather(lambs, indices=labels_unique)
    # set the value of each row to True when it occurs in labels
    template = tf.tile(tf.expand_dims(labels_unique, dim=1), [1, self.batch_size])
    labels_expand = tf.tile(tf.expand_dims(labels, dim=0), [labels_num, 1])
    indict_logic = tf.equal(labels_expand, template)
    # split the tensor along rows
    logit_list = tf.split(0, labels_num, logits)
    indict_logic_list = tf.split(0, labels_num, indict_logic)
    lambda_list = tf.split(0, self.image_classes, lambs)
    # loss_list = list()
    # for i in range(self.image_classes):
    #     loss_list.append(framwork.loss_func(logit_list[i], indict_logic_list[i], lambda_list[i]))
    loss_list = map(framwork.loss_func, logit_list, indict_logic_list, lambda_list)
    loss = tf.add_n(loss_list)
    tensors_dict = {'labels_unique': labels_unique, 'template': template, 'logits_sig_trans': logits,
                    'loss': loss, 'indict_logic': indict_logic}
    self.tensors_names.extend(tensors_dict.keys())
    self.net_tensors.update(tensors_dict)
Developer: chengyang317, Project: information_pursuit, Lines: 25, Source: infor_net.py
Example 11: mf_binary_likelihood
def mf_binary_likelihood(U, V, nzr, nzc, nzz, noise_prec, alpha, n, m, k, fix_entries=FIX_TRIANGLE):
    with tf.name_scope("priors"):
        U_prior = tf.reduce_sum(bf.dists.gaussian_log_density(U, stddev=alpha), name="U_prior")
        V_prior = tf.reduce_sum(bf.dists.gaussian_log_density(V, stddev=alpha), name="V_prior")

    if fix_entries == FIX_IDENTITY:
        mask = np.float32(np.vstack((np.eye(k), np.ones((m - k, k)))))
        V = V * mask
    elif fix_entries == FIX_TRIANGLE:
        mask = np.float32(np.tril(np.ones((m, k))))
        V = V * mask
    else:
        pass

    with tf.name_scope("model"):
        Us = tf.gather(U, nzr, name="Us")
        # tf.histogram_summary("Us", Us)
        Vs = tf.gather(V, nzc, name="Vs")
        # tf.histogram_summary("Vs", Vs)
        Rs = tf.reduce_sum(tf.mul(Us, Vs), reduction_indices=1, name="Rs")
        # tf.histogram_summary("rs", Rs)
        probs, _ = bf.transforms.logit(Rs * noise_prec)
        # tf.histogram_summary("probs", probs)
        ll = tf.reduce_sum(bf.dists.bernoulli_log_density(nzz, probs), name="ll")
        joint_logprob = U_prior + V_prior + ll
        return joint_logprob
Developer: BenJamesbabala, Project: bayesflow, Lines: 30, Source: matrix_factorization.py
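The pattern in Example 11 of gathering embedding rows at the nonzero coordinates is the core of sparse matrix factorization: instead of forming the full U V^T, only the observed entries are scored. A minimal sketch with random embeddings; the names mirror the example but the values are arbitrary:

import tensorflow as tf

U = tf.random_normal([5, 3])    # row embeddings
V = tf.random_normal([4, 3])    # column embeddings
nzr = tf.constant([0, 2, 4])    # row index of each observed entry
nzc = tf.constant([1, 1, 3])    # column index of each observed entry
Us = tf.gather(U, nzr)          # [3, 3]
Vs = tf.gather(V, nzc)          # [3, 3]
Rs = tf.reduce_sum(Us * Vs, 1)  # one dot product per observed entry

with tf.Session() as sess:
    print(sess.run(Rs))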
Example 12: build_network
def build_network(self):
    net_tensors = self.net_tensors
    with self.net_graph.as_default(), tf.device(self.net_device):
        logits = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, self.image_classes))
        labels = tf.placeholder(dtype=tf.int32, shape=(self.batch_size,))
        lambs = tf.placeholder(dtype=tf.float32, shape=(self.image_classes,))
        # put a sig function on logits and then transpose
        logits = tf.transpose(framwork.sig_func(logits))
        # according to the labels, erase rows which are not in labels
        labels_unique = tf.constant(range(self.image_classes), dtype=tf.int32)
        labels_num = self.image_classes
        logits = tf.gather(logits, indices=labels_unique)
        lambs = tf.gather(lambs, indices=labels_unique)
        # set the value of each row to True when it occurs in labels
        templete = tf.tile(tf.expand_dims(labels_unique, dim=1), [1, self.batch_size])
        labels_expand = tf.tile(tf.expand_dims(labels, dim=0), [labels_num, 1])
        indict_logic = tf.equal(labels_expand, templete)
        # split the tensor along rows
        logit_list = tf.split(0, labels_num, logits)
        indict_logic_list = tf.split(0, labels_num, indict_logic)
        lamb_list = tf.split(0, self.image_classes, lambs)
        logit_list = [tf.squeeze(item) for item in logit_list]
        indict_logic_list = [tf.squeeze(item) for item in indict_logic_list]
        left_right_tuples = list()
        for i in range(self.image_classes):
            left_right_tuples.append(framwork.lamb_func(logit_list[i], indict_logic_list[i], lamb=lamb_list[i]))
        # func = framwork.lamb_func()
        # left_right_tuples = map(func, logit_list, indict_logic_list, lamb_list)
        net_tensors.update({'left_right_tuples': left_right_tuples, 'logits': logits, 'labels': labels,
                            'lambs': lambs})
Developer: chengyang317, Project: information_pursuit, Lines: 31, Source: infor_net.py
Example 13: testIndexedSlices
def testIndexedSlices(self):
    for v1_first in [True, False]:
        with self.test_session():
            v1 = tf.Variable(np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
            v1_at_1 = tf.IndexedSlices(
                control_flow_ops.with_dependencies([v1.initializer], v1.ref()),
                tf.constant([1]))
            v2 = tf.Variable(np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(np.float32))
            v2_at_1 = tf.IndexedSlices(
                control_flow_ops.with_dependencies([v2.initializer], v2.ref()),
                tf.constant([1]))

            st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
            g1 = tf.gather(st1.values, st1.indices)
            g2 = tf.gather(st2.values, st2.indices)

            # v1 is not initialized.
            with self.assertRaisesOpError("Attempting to use uninitialized value"):
                v1.eval()
            # v2 is not initialized.
            with self.assertRaisesOpError("Attempting to use uninitialized value"):
                v2.eval()

            if v1_first:
                # Getting g1 initializes v2.
                self.assertAllClose([[10.0, 11.0]], g1.eval())
                self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]], v2.eval())
            else:
                # Getting g2 initializes v1.
                self.assertAllClose([[10.1, 11.1]], g2.eval())
                self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v1.eval())
Developer: peace195, Project: tensorflow, Lines: 33, Source: control_flow_ops_py_test.py
Example 14: proposal_layer
def proposal_layer(rpn_cls_prob, rpn_bbox_pred, im_info, cfg_key, _feat_stride, anchors, num_anchors):
    if type(cfg_key) == bytes:
        cfg_key = cfg_key.decode('utf-8')
    pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH

    # Get the scores and bounding boxes
    scores = rpn_cls_prob[:, :, :, num_anchors:]
    scores = tf.reshape(scores, shape=(-1,))
    rpn_bbox_pred = tf.reshape(rpn_bbox_pred, shape=(-1, 4))

    proposals = bbox_transform_inv_tf(anchors, rpn_bbox_pred)
    proposals = clip_boxes_tf(proposals, im_info[:2])

    # Non-maximal suppression
    indices = tf.image.non_max_suppression(proposals, scores,
                                           max_output_size=post_nms_topN,
                                           iou_threshold=nms_thresh)

    boxes = tf.gather(proposals, indices)
    boxes = tf.to_float(boxes)
    scores = tf.gather(scores, indices)
    scores = tf.reshape(scores, shape=(-1, 1))

    # Only support single image as input
    batch_inds = tf.zeros((tf.shape(indices)[0], 1), dtype=tf.float32)
    blob = tf.concat([batch_inds, boxes], 1)
    return blob, scores
Developer: guanlongzhao, Project: dehaze, Lines: 28, Source: proposal_layer.py
Example 15: full_loss_op
def full_loss_op(self, logits, labels):
    """Adds loss ops to the computational graph.

    Hint: Use sparse_softmax_cross_entropy_with_logits
    Hint: Remember to add l2_loss (see tf.nn.l2_loss)

    Args:
        logits: tensor(num_nodes, output_size)
        labels: python list, len = num_nodes
    Returns:
        loss: tensor 0-D
    """
    if self.full_loss is None:
        loss = None
        # YOUR CODE HERE
        l2_loss = self.config.l2 * tf.add_n(tf.get_collection("l2_loss"))
        idx = tf.where(tf.less(self.labelholder, 2))
        logits = tf.gather(logits, idx)
        labels = tf.gather(labels, idx)
        objective_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
        loss = objective_loss + l2_loss
        tf.summary.scalar("loss_l2", l2_loss)
        tf.summary.scalar("loss_objective", tf.reduce_sum(objective_loss))
        tf.summary.scalar("loss_total", loss)
        self.full_loss = loss
        # END YOUR CODE
    return self.full_loss
Developer: kingtaurus, Project: cs224d, Lines: 26, Source: rnn_tensorarray.py
Example 16: get_mention_emb
def get_mention_emb(self, text_emb, text_outputs, mention_starts, mention_ends):
    mention_emb_list = []

    mention_start_emb = tf.gather(text_outputs, mention_starts)  # [num_mentions, emb]
    mention_emb_list.append(mention_start_emb)

    mention_end_emb = tf.gather(text_outputs, mention_ends)  # [num_mentions, emb]
    mention_emb_list.append(mention_end_emb)

    mention_width = 1 + mention_ends - mention_starts  # [num_mentions]
    if self.config["use_features"]:
        mention_width_index = mention_width - 1  # [num_mentions]
        mention_width_emb = tf.gather(
            tf.get_variable("mention_width_embeddings",
                            [self.config["max_mention_width"], self.config["feature_size"]]),
            mention_width_index)  # [num_mentions, emb]
        mention_width_emb = tf.nn.dropout(mention_width_emb, self.dropout)
        mention_emb_list.append(mention_width_emb)

    if self.config["model_heads"]:
        mention_indices = tf.expand_dims(tf.range(self.config["max_mention_width"]), 0) + tf.expand_dims(mention_starts, 1)  # [num_mentions, max_mention_width]
        mention_indices = tf.minimum(util.shape(text_outputs, 0) - 1, mention_indices)  # [num_mentions, max_mention_width]
        mention_text_emb = tf.gather(text_emb, mention_indices)  # [num_mentions, max_mention_width, emb]
        self.head_scores = util.projection(text_outputs, 1)  # [num_words, 1]
        mention_head_scores = tf.gather(self.head_scores, mention_indices)  # [num_mentions, max_mention_width, 1]
        mention_mask = tf.expand_dims(tf.sequence_mask(mention_width, self.config["max_mention_width"], dtype=tf.float32), 2)  # [num_mentions, max_mention_width, 1]
        mention_attention = tf.nn.softmax(mention_head_scores + tf.log(mention_mask), dim=1)  # [num_mentions, max_mention_width, 1]
        mention_head_emb = tf.reduce_sum(mention_attention * mention_text_emb, 1)  # [num_mentions, emb]
        mention_emb_list.append(mention_head_emb)

    mention_emb = tf.concat(mention_emb_list, 1)  # [num_mentions, emb]
    return mention_emb
Developer: qq547276542, Project: e2e-coref, Lines: 29, Source: coref_model.py
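As mention_indices in Example 16 shows, tf.gather also accepts a 2-D index matrix: the result shape is indices.shape + params.shape[1:]. A minimal sketch with made-up values:

import tensorflow as tf

text_emb = tf.reshape(tf.range(10, dtype=tf.float32), [5, 2])  # [num_words, emb]
mention_indices = tf.constant([[0, 1, 2],
                               [2, 3, 4]])                     # [num_mentions, width]
mention_text_emb = tf.gather(text_emb, mention_indices)        # [num_mentions, width, emb]

with tf.Session() as sess:
    print(sess.run(tf.shape(mention_text_emb)))  # [2 3 2]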
Example 17: _log_unnormalized_prob
def _log_unnormalized_prob(self, x):
    mean = tf.squeeze(tf.gather(x, [0], axis=-1), axis=-1)
    precision = self._maybe_assert_valid_sample(
        tf.squeeze(tf.gather(x, [1], axis=-1), axis=-1))
    return (tf.math.xlogy(self.concentration - 0.5, precision)
            - self.rate * precision
            - 0.5 * self._lambda * precision * tf.square(mean - self.loc))
Developer: imito, Project: odin, Lines: 7, Source: normal_gamma.py
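The axis argument used in Example 17 (available since TF 1.3) lets tf.gather slice along the trailing dimension, here splitting packed (mean, precision) pairs. A minimal sketch with made-up values:

import tensorflow as tf

x = tf.constant([[1.0, 10.0],
                 [2.0, 20.0]])  # [..., 2] packed (mean, precision) pairs
mean = tf.squeeze(tf.gather(x, [0], axis=-1), axis=-1)       # [1. 2.]
precision = tf.squeeze(tf.gather(x, [1], axis=-1), axis=-1)  # [10. 20.]

with tf.Session() as sess:
    print(sess.run([mean, precision]))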
Example 18: build_eval_graph
def build_eval_graph(self):
    analogy_a = tf.placeholder(dtype=tf.int32)
    analogy_b = tf.placeholder(dtype=tf.int32)
    analogy_c = tf.placeholder(dtype=tf.int32)

    norm_w_embed = tf.nn.l2_normalize(self._w_embed_in, 1)
    a_embed = tf.gather(norm_w_embed, analogy_a)
    b_embed = tf.gather(norm_w_embed, analogy_b)
    c_embed = tf.gather(norm_w_embed, analogy_c)

    target = c_embed + (b_embed - a_embed)
    cosine_analogy_dist = tf.matmul(target, norm_w_embed, transpose_b=True)
    _, analogy_indices = tf.nn.top_k(cosine_analogy_dist, word_config.top_k_analogy)

    near_word = tf.placeholder(dtype=tf.int32)
    near_embed = tf.gather(norm_w_embed, near_word)
    cosine_near_dist = tf.matmul(near_embed, norm_w_embed, transpose_b=True)
    near_val, near_ind = tf.nn.top_k(cosine_near_dist, min(1000, self._vocab_size))

    self._analogy_a = analogy_a
    self._analogy_b = analogy_b
    self._analogy_c = analogy_c
    self._analogy_indices = analogy_indices
    self._near_word = near_word
    self._near_val = near_val
    self._near_ind = near_ind
Developer: ioanachelu, Project: word2vec, Lines: 29, Source: word2vec.py
Example 19: _tf_linear_interp1d
def _tf_linear_interp1d(x_to_interpolate, fn_x, fn_y):
    """Tensorflow implementation of 1d linear interpolation.

    Args:
      x_to_interpolate: tf.float32 Tensor of shape (num_examples,) over which 1d
        linear interpolation is performed.
      fn_x: Monotonically-increasing, non-repeating tf.float32 Tensor of shape
        (length,) used as the domain to approximate a function.
      fn_y: tf.float32 Tensor of shape (length,) used as the range to approximate
        a function.

    Returns:
      tf.float32 Tensor of shape (num_examples,)
    """
    x_pad = tf.concat([fn_x[:1] - 1, fn_x, fn_x[-1:] + 1], axis=0)
    y_pad = tf.concat([fn_y[:1], fn_y, fn_y[-1:]], axis=0)
    interval_idx = _find_interval_containing_new_value(x_pad, x_to_interpolate)

    # Interpolate
    alpha = (
        (x_to_interpolate - tf.gather(x_pad, interval_idx)) /
        (tf.gather(x_pad, interval_idx + 1) - tf.gather(x_pad, interval_idx)))
    interpolation = ((1 - alpha) * tf.gather(y_pad, interval_idx) +
                     alpha * tf.gather(y_pad, interval_idx + 1))
    return interpolation
Developer: Exscotticus, Project: models, Lines: 26, Source: calibration_builder.py
Example 20: sparse_dot_product0
def sparse_dot_product0(emb, tuples, use_matmul=True, output_type='real'):
    """
    Compute the dot product of complex vectors.
    It uses complex vectors but tensorflow does not optimize in the complex
    space (or there is a bug in the gradient propagation with complex
    numbers...)
    :param emb: embeddings
    :param tuples: indices at which we compute dot products
    :return: scores (dot products)
    """
    n_t = tuples.get_shape()[0].value
    rk = emb.get_shape()[1].value
    emb_sel_a = tf.gather(emb, tuples[:, 0])
    emb_sel_b = tf.gather(emb, tuples[:, 1])
    if use_matmul:
        pred_cplx = tf.squeeze(tf.batch_matmul(
            tf.reshape(emb_sel_a, [n_t, rk, 1]),
            tf.reshape(emb_sel_b, [n_t, rk, 1]), adj_x=True))
    else:
        pred_cplx = tf.reduce_sum(tf.mul(tf.conj(emb_sel_a), emb_sel_b), 1)
    if output_type == 'complex':
        return pred_cplx
    elif output_type == 'real':
        return tf.real(pred_cplx) + tf.imag(pred_cplx)
    elif output_type == 'abs':  # magnitude of the complex score
        return tf.abs(pred_cplx)
    elif output_type == 'angle':
        raise NotImplementedError('No argument or inverse-tanh function for complex number in Tensorflow')
    else:
        raise NotImplementedError()
Developer: Peratham, Project: factorix, Lines: 29, Source: hermitian.py
Note: The tensorflow.gather examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution and use should follow each project's license. Please do not repost without permission.