• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

Python projector.visualize_embeddings函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings函数的典型用法代码示例。如果您正苦于以下问题:Python visualize_embeddings函数的具体用法?Python visualize_embeddings怎么用?Python visualize_embeddings使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了visualize_embeddings函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: set_model

    def set_model(self, model):
        """Register the model's embedding layers with the TensorBoard projector.

        Does nothing unless ``self.embeddings_freq`` is truthy. Creates a
        Saver, collects the embedding weight tensors, records checkpoint
        paths in ``self.embeddings_logs`` and writes the projector config.
        """
        if not self.embeddings_freq:
            return

        self.saver = tf.train.Saver()

        elayers = find_embedding_layers(model.layers)
        # Fall back to every embedding layer when no explicit names given.
        layer_names = self.embeddings_layer_names or [l.name for l in elayers]

        embeddings = {l.name: l.weights[0]
                      for l in elayers if l.name in layer_names}

        # A single metadata string applies to every embedding; otherwise use
        # whatever mapping (possibly None) the caller supplied.
        if isinstance(self.embeddings_metadata, str):
            metadata = {name: self.embeddings_metadata for name in embeddings}
        else:
            metadata = self.embeddings_metadata

        config = projector.ProjectorConfig()
        self.embeddings_logs = []

        for name, tensor in embeddings.items():
            entry = config.embeddings.add()
            entry.tensor_name = tensor.name
            self.embeddings_logs.append(
                os.path.join(self.log_dir, name + '.ckpt'))
            if name in metadata:
                entry.metadata_path = metadata[name]

        projector.visualize_embeddings(self.writer, config)
开发者ID:NthTensor,项目名称:keras_experiments,代码行数:35,代码来源:tensorboard_embedding.py


示例2: visualize_char

def visualize_char(model, path="/home/aegis/igor/LM4paper/tests/textchar.txt", ):
    """Export per-character embeddings from *model* for TensorBoard.

    Args:
        model: language model exposing ``hps.emb_char_size`` and
            ``get_char_embedding(char)``.
        path: text file with one character per line.
    """
    # FIX: close the vocabulary file deterministically (the handle was
    # previously leaked via a bare open()).
    with open(path, 'r') as f:
        chars = f.read().splitlines()

    embedding = np.empty(shape=(len(chars), model.hps.emb_char_size), dtype=np.float32)
    for i, char in enumerate(chars):
        embedding[i] = model.get_char_embedding(char)
    print(embedding)
    print(embedding.shape)

    logdir = "/data/visualog/char/"
    metadata = os.path.join(logdir, "metadata.tsv")

    # One character per row; row order matches the embedding matrix.
    with open(metadata, "w") as metadata_file:
        for c in chars:
            metadata_file.write("%s\n" % c)

    tf.reset_default_graph()
    with tf.Session() as sess:
        # Start from a dummy variable and resize it via validate_shape=False.
        X = tf.Variable([0.0], name='embedding')
        place = tf.placeholder(tf.float32, shape=embedding.shape)
        set_x = tf.assign(X, place, validate_shape=False)
        sess.run(tf.global_variables_initializer())
        sess.run(set_x, feed_dict={place: embedding})

        saver = tf.train.Saver([X])
        saver.save(sess, os.path.join(logdir, 'char.ckpt'))

        config = projector.ProjectorConfig()
        # One can add multiple embeddings; renamed so it no longer shadows
        # the numpy `embedding` matrix above.
        proj_entry = config.embeddings.add()
        proj_entry.tensor_name = X.name
        # Link this tensor to its metadata file (e.g. labels).
        proj_entry.metadata_path = metadata
        # Saves a config file that TensorBoard will read during startup.
        projector.visualize_embeddings(tf.summary.FileWriter(logdir), config)
开发者ID:IgorWang,项目名称:RNNLM,代码行数:35,代码来源:visualize.py


示例3: test

	def test(self, step, number=400):  # 256 self.batch_size
		"""Evaluate accuracy on a test batch and log summaries/run metadata.

		NOTE(review): depends on module-level globals (visualize_cluster,
		PATH_TO_SPRITE_IMAGE, width, hight, LOG_DIR) not visible here —
		confirm they are defined where this class lives.
		"""
		session = sess = self.session
		config = projector.ProjectorConfig()
		if visualize_cluster:
			embedding = config.embeddings.add()  # You can add multiple embeddings. Here just one.
			embedding.tensor_name = self.last_layer.name  # last_dense
			# embedding.tensor_path
			# embedding.tensor_shape
			embedding.sprite.image_path = PATH_TO_SPRITE_IMAGE
			# help(embedding.sprite)
			embedding.sprite.single_image_dim.extend([width, hight])  # if mnist   thumbnail
			# embedding.single_image_dim.extend([28, 28]) # if mnist   thumbnail
			# Link this tensor to its metadata file (e.g. labels).
			embedding.metadata_path = os.path.join(LOG_DIR, 'metadata.tsv')
			# Saves a configuration file that TensorBoard will read during startup.
			projector.visualize_embeddings(self.summary_writer, config)

		# Full-trace run options so TensorBoard can show per-op timing/memory.
		run_metadata = tf.RunMetadata()
		run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
		# Calculate accuracy for 256 mnist test images

		test_images, test_labels = self.next_batch(number, session, test=True)

		# keep_prob=1 and train_phase=False: evaluation mode, no dropout.
		feed_dict = {self.x: test_images, self.y: test_labels, self.keep_prob: 1., self.train_phase: False}
		# accuracy,summary= self.session.run([self.accuracy, self.summaries], feed_dict=feed_dict)
		accuracy, summary = session.run([self.accuracy, self.summaries], feed_dict, run_options, run_metadata)
		print('\t' * 3 + "Test Accuracy: ", accuracy)
		self.summary_writer.add_run_metadata(run_metadata, 'step #%03d' % step)
		self.summary_writer.add_summary(summary, global_step=step)
开发者ID:duydb2,项目名称:tensorflow-speech-recognition,代码行数:29,代码来源:net.py


示例4: visualize

    def visualize(self, visual_fld, num_visualize):
        """Export the top `num_visualize` word embeddings for the projector.

        Run "'tensorboard --logdir='visualization'" to see the embeddings.
        """
        # Write the num_visualize most frequent words as metadata labels.
        word2vec_utils.most_common_words(visual_fld, num_visualize)

        saver = tf.train.Saver()
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            checkpoint = tf.train.get_checkpoint_state(
                os.path.dirname('checkpoints/checkpoint'))

            # Restore trained weights when a checkpoint is present.
            if checkpoint and checkpoint.model_checkpoint_path:
                saver.restore(session, checkpoint.model_checkpoint_path)

            matrix = session.run(self.embed_matrix)

            # The projector reads from a dedicated variable, so copy the
            # slice we want to display into a fresh one.
            subset_var = tf.Variable(matrix[:num_visualize], name='embedding')
            session.run(subset_var.initializer)

            config = projector.ProjectorConfig()
            writer = tf.summary.FileWriter(visual_fld)

            entry = config.embeddings.add()
            entry.tensor_name = subset_var.name
            # Metadata: the first NUM_VISUALIZE words of the vocab.
            entry.metadata_path = 'vocab_' + str(num_visualize) + '.tsv'

            # Write the config file TensorBoard reads during startup.
            projector.visualize_embeddings(writer, config)
            tf.train.Saver([subset_var]).save(
                session, os.path.join(visual_fld, 'model.ckpt'), 1)
开发者ID:XJTUeducation,项目名称:stanford-tensorflow-tutorials,代码行数:35,代码来源:04_word2vec_visualize.py


示例5: generate_embeddings

def generate_embeddings():
    """Embed MNIST test images and write a TensorBoard projector config."""
    # Load the dataset.
    dataset = input_data.read_data_sets(FLAGS.data_dir,
                                        one_hot=True,
                                        fake_data=FLAGS.fake_data)
    sess = tf.InteractiveSession()

    # Keep the embedding on the CPU to conserve memory and processing power.
    with tf.device("/cpu:0"):
        tf.Variable(
            tf.stack(dataset.test.images[:FLAGS.max_steps], axis=0),
            trainable=False,
            name='embedding')

    tf.global_variables_initializer().run()

    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(FLAGS.log_dir + '/projector', sess.graph)

    # Projector setup (requires tensorflow >= 0.12.0RC0).
    config = projector.ProjectorConfig()
    entry = config.embeddings.add()
    entry.tensor_name = 'embedding:0'
    entry.metadata_path = os.path.join(FLAGS.log_dir + '/projector/metadata.tsv')
    entry.sprite.image_path = os.path.join(FLAGS.data_dir + '/mnist_10k_sprite.png')
    # Width and height of a single sprite thumbnail.
    entry.sprite.single_image_dim.extend([28, 28])
    projector.visualize_embeddings(writer, config)

    saver.save(
        sess,
        os.path.join(FLAGS.log_dir, 'projector/a_model.ckpt'),
        global_step=FLAGS.max_steps)
开发者ID:pramitchoudhary,项目名称:Experiments,代码行数:31,代码来源:tensorboard_embedding_visualization.py


示例6: write_embeddings

    def write_embeddings(self, Wv, name="WordVectors"):
        """Checkpoint *Wv* and emit a projector config pointing at it.

        Args:
          Wv: (numpy.ndarray) |V| x d matrix of word embeddings
          name: variable name under which the matrix is saved
        """
        with tf.Graph().as_default(), tf.Session() as session:
            # Materialize the embeddings as a TF variable and checkpoint it.
            var = tf.Variable(Wv, name=name, dtype=tf.float32)
            session.run(tf.global_variables_initializer())
            tf.train.Saver().save(session, self.CHECKPOINT_FILE, 0)

            # Point the projector at the variable and the vocab file.
            writer = tf.summary.FileWriter(self.LOGDIR)
            config = projector.ProjectorConfig()
            entry = config.embeddings.add()
            entry.tensor_name = var.name
            entry.metadata_path = self.VOCAB_FILE_BASE
            projector.visualize_embeddings(writer, config)

        print(("Saved {s0:d} x {s1:d} embedding matrix '{name}'"
               " to LOGDIR='{logdir}'").format(
                   s0=Wv.shape[0], s1=Wv.shape[1],
                   name=name, logdir=self.LOGDIR))

        print("To view, run:")
        print("\n  tensorboard --logdir=\"{logdir}\"\n".format(logdir=self.LOGDIR))
        print("and navigate to the \"Embeddings\" tab in the web interface.")
开发者ID:divyag9,项目名称:2018-summer-main,代码行数:32,代码来源:tf_embed_viz.py


示例7: save_embeddings

def save_embeddings(model: adagram.VectorModel, output: Path, words: List[str]):
    """Save normalized sense vectors plus labels for TensorBoard's projector.

    Args:
        model: trained AdaGram model.
        output: directory that receives checkpoint, labels and projector config.
        words: words whose sense vectors are exported.
    """
    labels = []
    senses = []
    for word in words:
        for sense, _ in model.word_sense_probs(word):
            labels.append('{} #{}'.format(word, sense))
            v = model.sense_vector(word, sense)
            # Unit-normalize so the projector shows directions, not magnitudes.
            senses.append(v / np.linalg.norm(v))
    output.mkdir(exist_ok=True)
    labels_path = output.joinpath('labels.tsv')
    labels_path.write_text('\n'.join(labels))
    senses = np.array(senses)

    with tf.Session() as session:
        embedding_var = tf.Variable(senses, trainable=False, name='senses')
        session.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.save(session, str(output.joinpath('model.ckpt')))

        # FIX: tf.train.SummaryWriter was removed in TF 1.0;
        # tf.summary.FileWriter is the drop-in replacement (used by the
        # rest of this file).
        summary_writer = tf.summary.FileWriter(str(output))
        config = projector.ProjectorConfig()
        embedding = config.embeddings.add()
        embedding.tensor_name = embedding_var.name
        embedding.metadata_path = str(labels_path)
        projector.visualize_embeddings(summary_writer, config)
开发者ID:lopuhin,项目名称:WSI,代码行数:25,代码来源:tf_embeddings.py


示例8: save_tb_embeddings

def save_tb_embeddings(embeddings_filename):
    """Export pickled GAN embeddings plus a sprite image to the TB log dir.

    Args:
        embeddings_filename: pickle file containing a dict with keys
            'images' (sprite source images) and 'zs' (latent vectors).
    """
    # FIX: close the pickle file deterministically (handle was leaked before).
    with open(embeddings_filename, 'rb') as f:
        embeddings = pickle.load(f)

    images = embeddings['images']
    zs = embeddings['zs']

    # overwrite Tensorboard log dir if necessary
    if os.path.exists(TB_DIR):
        shutil.rmtree(TB_DIR)
    os.makedirs(TB_DIR)

    # create grid image
    img_width, img_height = save_sprite_image(images)

    with tf.device('cpu:0'):
        # create embedding var
        embedding_var = tf.Variable(initial_value=zs)

        # save projector config
        summary_writer = tf.summary.FileWriter(TB_DIR)
        config = projector.ProjectorConfig()
        embedding = config.embeddings.add()
        embedding.tensor_name = embedding_var.name
        embedding.sprite.image_path = SPRITE_IMAGE_FILENAME
        embedding.sprite.single_image_dim.extend([img_width, img_height])
        projector.visualize_embeddings(summary_writer, config)

        # save embeddings
        # FIX: close the session when done instead of leaking it.
        with tf.Session() as sess:
            sess.run(embedding_var.initializer)
            saver = tf.train.Saver([embedding_var])
            saver.save(sess, os.path.join(TB_DIR, 'model.ckpt'))
开发者ID:Dasona,项目名称:DIGITS,代码行数:33,代码来源:gan_embeddings.py


示例9: main

def main(vectors_loc, out_loc, name="spaCy_vectors"):
    """Export a spaCy model's vectors + metadata for the TB projector."""
    meta_file = "{}.tsv".format(name)
    out_meta_file = path.join(out_loc, meta_file)

    print('Loading spaCy vectors model: {}'.format(vectors_loc))
    model = spacy.load(vectors_loc)
    print('Finding lexemes with vectors attached: {}'.format(vectors_loc))
    # Keep only vocabulary strings that actually carry a vector.
    queries = [
        w for w in tqdm.tqdm(model.vocab.strings,
                             total=len(model.vocab.strings), leave=False)
        if model.vocab.has_vector(w)
    ]
    vector_count = len(queries)

    print('Building Tensorboard Projector metadata for ({}) vectors: {}'.format(vector_count, out_meta_file))

    # One row per exported word vector.
    tf_vectors_variable = numpy.zeros((vector_count, model.vocab.vectors.shape[1]))

    # Tab-separated metadata describing each vector.
    # Reference: https://www.tensorflow.org/programmers_guide/embedding#metadata
    with open(out_meta_file, 'wb') as file_metadata:
        # Header row naming the columns.
        file_metadata.write("Text\tFrequency\n".encode('utf-8'))
        for vec_index, text in enumerate(
                tqdm.tqdm(queries, total=len(queries), leave=False)):
            # https://github.com/tensorflow/tensorflow/issues/9094
            text = '<Space>' if text.lstrip() == '' else text
            lex = model.vocab[text]

            # Fill the matrix row and the matching metadata line.
            tf_vectors_variable[vec_index] = model.vocab.get_vector(text)
            file_metadata.write("{}\t{}\n".format(text, math.exp(lex.prob) * vector_count).encode('utf-8'))

    print('Running Tensorflow Session...')
    sess = tf.InteractiveSession()
    tf.Variable(tf_vectors_variable, trainable=False, name=name)
    tf.global_variables_initializer().run()
    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(out_loc, sess.graph)

    # Link the embeddings into the config.
    config = ProjectorConfig()
    embed = config.embeddings.add()
    embed.tensor_name = name
    embed.metadata_path = meta_file

    # Hand the configured embeddings/metadata to the projector.
    visualize_embeddings(writer, config)

    print('Saving Tensorboard Session...')
    saver.save(sess, path.join(out_loc, '{}.ckpt'.format(name)))
    print('Done. Run `tensorboard --logdir={0}` to view in Tensorboard'.format(out_loc))
开发者ID:IndicoDataSolutions,项目名称:spaCy,代码行数:54,代码来源:vectors_tensorboard.py


示例10: _add_emb_vis

 def _add_emb_vis(self, embedding_var):
   """Set up word-embedding visualization in TensorBoard.

   As described at https://www.tensorflow.org/get_started/embedding_viz:
   write the vocab metadata file, then a projector config pointing to it.
   """
   train_dir = os.path.join(FLAGS.log_root, "train")
   metadata_path = os.path.join(train_dir, "vocab_metadata.tsv")
   # Dump the vocabulary so the projector can label each point.
   self._vocab.write_metadata(metadata_path)
   writer = tf.summary.FileWriter(train_dir)
   config = projector.ProjectorConfig()
   entry = config.embeddings.add()
   entry.tensor_name = embedding_var.name
   entry.metadata_path = metadata_path
   projector.visualize_embeddings(writer, config)
开发者ID:sra4077,项目名称:RLSeq2Seq,代码行数:13,代码来源:model.py


示例11: testVisualizeEmbeddings

  def testVisualizeEmbeddings(self):
    """visualize_embeddings round-trips a ProjectorConfig through disk."""
    # Build a dummy configuration with a single embedding entry.
    config = projector_config_pb2.ProjectorConfig()
    config.model_checkpoint_path = 'test'
    emb1 = config.embeddings.add()
    emb1.tensor_name = 'tensor1'
    emb1.metadata_path = 'metadata1'

    # Save it to a temporary dir through the public API.
    temp_dir = self.get_temp_dir()
    self.addCleanup(shutil.rmtree, temp_dir)
    projector.visualize_embeddings(writer_lib.FileWriter(temp_dir), config)

    # Parse the written pbtxt back and check it equals the original.
    with gfile.GFile(os.path.join(temp_dir, 'projector_config.pbtxt')) as f:
      restored = projector_config_pb2.ProjectorConfig()
      text_format.Parse(f.read(), restored)
      self.assertEqual(config, restored)
开发者ID:AliMiraftab,项目名称:tensorflow,代码行数:19,代码来源:projector_api_test.py


示例12: __get_tensorboard_writer

    def __get_tensorboard_writer(self, path):
        """Build a FileWriter for *path* with projector config for both
        embedding matrices attached."""
        writer = tf.summary.FileWriter(path, graph=self.graph, filename_suffix=".bot")

        # Register the field and content-label embedding matrices.
        conf = projector.ProjectorConfig()
        field_entry = conf.embeddings.add()
        content_label_entry = conf.embeddings.add()

        field_entry.tensor_name = self.field_embedding_matrix.name
        content_label_entry.tensor_name = self.content_label_embedding_matrix.name

        # Metadata vocab files live one directory above the summaries.
        field_entry.metadata_path = os.path.join("..", "Metadata/fields.vocab")
        content_label_entry.metadata_path = os.path.join("..", "Metadata/content_labels.vocab")

        # Persist the projector configuration next to the summaries.
        projector.visualize_embeddings(writer, conf)

        return writer
开发者ID:ChenglongChen,项目名称:natural-language-summary-generation-from-structured-data,代码行数:21,代码来源:Model.py


示例13: run_latent

def run_latent(model, n_images, data_out_path, sprite=True):
	"""Sample latent vectors from a GAN, checkpoint them, and configure the
	TensorBoard projector (optionally with a sprite of generated images).

	NOTE(review): relies on module-level helpers get_checkpoint,
	show_generated and write_sprite_image not visible here — confirm
	they are defined in this file.
	"""
	tensorboard_path = os.path.join(data_out_path, 'tensorboard')
	saver = tf.train.Saver()
	with tf.Session() as session:

		# Initializer and restoring model.
		session.run(tf.global_variables_initializer())
		check = get_checkpoint(data_out_path)
		saver.restore(session, check)

		# Inputs for tensorboard.
		tf_data = tf.Variable(tf.zeros((n_images, model.z_dim)), name='tf_data')
		input_sample = tf.placeholder(tf.float32, shape=(n_images, model.z_dim))
		set_tf_data = tf.assign(tf_data, input_sample, validate_shape=False)

		if sprite:
			# Sample images.
			gen_samples, sample_z = show_generated(session=session, z_input=model.z_input, z_dim=model.z_dim, output_fake=model.output_gen, n_images=n_images, show=False)
			# Generate sprite of images.
			write_sprite_image(filename=os.path.join(data_out_path, 'gen_sprite.png'), data=gen_samples)
		else:
			# No sprite: just draw uniform latent samples.
			sample_z = np.random.uniform(low=-1., high=1., size=(n_images, model.z_dim))

		# Variable for embedding.
		saver_latent = tf.train.Saver([tf_data])
		session.run(set_tf_data, feed_dict={input_sample: sample_z})
		saver_latent.save(sess=session, save_path=os.path.join(tensorboard_path, 'tf_data.ckpt'))

		# Tensorflow embedding.
		config = projector.ProjectorConfig()
		embedding = config.embeddings.add()
		embedding.tensor_name = tf_data.name
		if sprite:
			embedding.metadata_path = os.path.join(data_out_path, 'metadata.tsv')
			embedding.sprite.image_path = os.path.join(data_out_path, 'gen_sprite.png')
		embedding.sprite.single_image_dim.extend([model.image_height, model.image_width])
		projector.visualize_embeddings(tf.summary.FileWriter(tensorboard_path), config)
开发者ID:AdalbertoCq,项目名称:Pathology-GAN,代码行数:38,代码来源:tools.py


示例14: visualize_embeddings

    def visualize_embeddings(self, sess, tensor, name):
        """
        Visualizes an embedding vector in TensorBoard.

        :param sess: Tensorflow session object (currently unused here)
        :param tensor: The embedding tensor to be visualized (currently
            unused; only `name` is written into the projector config)
        :param name: Name of the tensor
        """
        # FIX: use the stdlib `os` module directly. `tf.os.*` only resolved
        # because TF 1.x accidentally re-exported its own imports; it is not
        # a public API. Imported locally so this block stays self-contained.
        import os

        # make directory if not exist
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)

        # summary writer
        summary_writer = tf.summary.FileWriter(self.save_dir, graph=tf.get_default_graph())

        # embedding visualizer
        config = projector.ProjectorConfig()
        emb = config.embeddings.add()
        emb.tensor_name = name  # tensor
        emb.metadata_path = os.path.join(self.DEFAULT_META_DATA_DIR, self.meta_file)  # metadata file
        print(os.path.abspath(emb.metadata_path))
        projector.visualize_embeddings(summary_writer, config)
开发者ID:BenJamesbabala,项目名称:deep-atrous-ner,代码行数:23,代码来源:base_data_loader.py


示例15: embedding_view

def embedding_view(EMB, y, EMB_C, sess, opt):
    """Write sentence embeddings plus class centers to the TB projector.

    Args:
        EMB: sentence embedding vectors.
        y: integer class id per sentence embedding.
        EMB_C: per-class center embedding vectors.
        sess: active TensorFlow session.
        opt: options object providing log_path, class_name, num_class.
    """
    # L2-normalize so the projector shows directions rather than norms.
    EMB = [(x / np.linalg.norm(x)).tolist() for x in EMB]
    EMB_C = [(x / np.linalg.norm(x)).tolist() for x in EMB_C]

    embedding_var = tf.Variable(EMB + EMB_C, name='Embedding_of_sentence')
    sess.run(embedding_var.initializer)
    EB_summary_writer = tf.summary.FileWriter(opt.log_path)
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    # FIX: the projector entry must name the variable's tensor, otherwise
    # TensorBoard cannot associate the checkpoint with this config (the
    # original never set tensor_name).
    embedding.tensor_name = embedding_var.name
    embedding.metadata_path = os.path.join(opt.log_path, 'metadata.tsv')
    projector.visualize_embeddings(EB_summary_writer, config)
    saver = tf.train.Saver([embedding_var])
    saver.save(sess, os.path.join(opt.log_path, 'model2.ckpt'), 1)

    # Labels: one row per sentence, then one per class center, matching the
    # row order of EMB + EMB_C above. Use a context manager so the file is
    # flushed/closed deterministically.
    with open(os.path.join(opt.log_path, 'metadata.tsv'), 'w') as metadata_file:
        metadata_file.write('ClassID\tClass\n')
        for i in range(len(y)):
            metadata_file.write('%06d\t%s\n' % (y[i], opt.class_name[y[i]]))
        for i in range(opt.num_class):
            metadata_file.write('%06d\t%s\n' % (i, "class_" + opt.class_name[i]))
    print("embedding created")
开发者ID:niurouli,项目名称:SWEM,代码行数:24,代码来源:utils.py


示例16: visualize_embeddings

    def visualize_embeddings(self) -> None:
        """Insert visualization of embeddings in TensorBoard.

        Builds a projector config covering the embeddings of every
        `EmbeddedFactorSequence` named in `main.visualize_embeddings`.
        """
        config = projector.ProjectorConfig()

        for sequence in self.model.visualize_embeddings:
            pairs = zip(sequence.vocabularies, sequence.embedding_matrices)
            for i, (vocabulary, emb_matrix) in enumerate(pairs):

                # TODO when vocabularies will have name parameter, change it
                wordlist_path = self.get_path(
                    "seq.{}-{}.tsv".format(sequence.name, i))
                vocabulary.save_wordlist(wordlist_path)

                entry = config.embeddings.add()
                # pylint: disable=unsubscriptable-object
                entry.tensor_name = emb_matrix.name
                entry.metadata_path = wordlist_path
                # pylint: enable=unsubscriptable-object

        projector.visualize_embeddings(
            tf.summary.FileWriter(self.model.output), config)
开发者ID:ufal,项目名称:neuralmonkey,代码行数:24,代码来源:experiment.py


示例17: open

# NOTE(review): flat script section — assumes a parsed `args` namespace
# (.samples, .projector, .prefix) and a loaded word2vec-style `model`
# are defined earlier in the file; confirm before running standalone.
w2v_samples = np.zeros((args.samples, model.vector_size))
# Collect the first args.samples vectors; write one word per metadata row.
with open('{}/{}_metadata.tsv'.format(args.projector, args.prefix), 'w+') as file_metadata:
    for i, word in enumerate(model.wv.index2word[:args.samples]):
        w2v_samples[i] = model[word]
        file_metadata.write(word + '\n')

# define the model without training
sess = tf.InteractiveSession()

with tf.device("/cpu:0"):
    embedding = tf.Variable(w2v_samples, trainable=False, name='{}_embedding'.format(args.prefix))

tf.global_variables_initializer().run()

saver = tf.train.Saver()
writer = tf.summary.FileWriter(args.projector, sess.graph)

# adding into projector
config = projector.ProjectorConfig()
embed = config.embeddings.add()
embed.tensor_name = '{}_embedding'.format(args.prefix)
embed.metadata_path = './{}_metadata.tsv'.format(args.prefix)

# Specify the width and height of a single thumbnail.
projector.visualize_embeddings(writer, config)

saver.save(sess, '{}/{}_model.ckpt'.format(args.projector, args.prefix), global_step=args.samples)

print('Start tensorboard with: \'tensorboard --logdir=\"projector\"\'\n'
      'and check http://localhost:6006/#embeddings to view your embedding')
开发者ID:chrisapril,项目名称:GermanWordEmbeddings,代码行数:30,代码来源:tfvisualize.py


示例18: PCA

    # NOTE(review): indented fragment — appears to run inside a main() or
    # __main__ guard not visible here; relies on an `args` namespace with
    # .log_dir, .vector and .labels. Confirm the enclosing scope.
    log_dir = args.log_dir
    if args.log_dir is None:
        log_dir = os.path.join(os.getcwd(), "log_dir")

    # Load data
    data_frame = pd.read_csv(args.vector, index_col=False, header=None, sep=' ')
    metadata = args.labels

    # Generating PCA and
    # reducing to 50 components before handing the data to the projector.
    pca = PCA(n_components=50, random_state=123, svd_solver='auto')
    df_pca = pd.DataFrame(pca.fit_transform(data_frame))
    df_pca = df_pca.values

    # Start tensorflow variable setup
    tf_data = tf.Variable(df_pca)

    # Start TF session
    with tf.Session() as sess:
        saver = tf.train.Saver([tf_data])
        sess.run(tf_data.initializer)
        saver.save(sess, os.path.join(log_dir, 'tf_data.ckpt'))
        config = projector.ProjectorConfig()
        embedding = config.embeddings.add()
        embedding.tensor_name = tf_data.name

        # Link this tensor to its metadata(Labels) file
        embedding.metadata_path = metadata

        # Saves a config file that TensorBoard will read during startup.
        projector.visualize_embeddings(tf.summary.FileWriter(log_dir), config)
开发者ID:pegulhane,项目名称:DNN,代码行数:30,代码来源:embedding_visualization.py


示例19: main

def main(args):
    """Train the estimator (optionally on a TPU), then embed the test set
    and configure the TensorBoard projector for the result.

    NOTE(review): relies on module-level model_fn, train_input_fn,
    predict_input_fn and PREDICT_BATCH_SIZE — confirm they are defined
    elsewhere in this file.
    """
    # pass the args as params so the model_fn can use
    # the TPU specific args
    params = vars(args)

    if args.use_tpu:
        # additional configs required for using TPUs
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(args.tpu)
        tpu_config = tf.contrib.tpu.TPUConfig(
            num_shards=8, # using Cloud TPU v2-8
            iterations_per_loop=args.save_checkpoints_steps)

        # use the TPU version of RunConfig
        config = tf.contrib.tpu.RunConfig(
            cluster=tpu_cluster_resolver,
            model_dir=args.model_dir,
            tpu_config=tpu_config,
            save_checkpoints_steps=args.save_checkpoints_steps,
            save_summary_steps=100)

        # TPUEstimator
        estimator = tf.contrib.tpu.TPUEstimator(
            model_fn=model_fn,
            config=config,
            params=params,
            train_batch_size=args.train_batch_size,
            # Calling TPUEstimator.predict requires setting predict_bath_size.
            predict_batch_size=PREDICT_BATCH_SIZE,
            eval_batch_size=32,
            export_to_tpu=False)
    else:
        config = tf.estimator.RunConfig(model_dir=args.model_dir)

        estimator = tf.estimator.Estimator(
            model_fn,
            config=config,
            params=params)

    estimator.train(train_input_fn, max_steps=args.max_steps)

    # After training, apply the learned embedding to the test data and visualize with tensorboard Projector.
    embeddings = next(estimator.predict(predict_input_fn, yield_single_examples=False))['embeddings']

    # Put the embeddings into a variable to be visualized.
    embedding_var = tf.Variable(embeddings, name='test_embeddings')

    # Labels do not pass through the estimator.predict call, so we get it separately.
    _, (_, labels) = tf.keras.datasets.mnist.load_data()
    labels = labels[:PREDICT_BATCH_SIZE]

    # Write the metadata file for the projector.
    metadata_path = os.path.join(estimator.model_dir, 'metadata.tsv')
    with tf.gfile.GFile(metadata_path, 'w') as f:
        f.write('index\tlabel\n')
        for i, label in enumerate(labels):
            f.write('{}\t{}\n'.format(i, label))

    # Configure the projector.
    projector_config = projector.ProjectorConfig()
    embedding_config = projector_config.embeddings.add()
    embedding_config.tensor_name = embedding_var.name

    # The metadata_path is relative to the summary_writer's log_dir.
    embedding_config.metadata_path = 'metadata.tsv'

    summary_writer = tf.summary.FileWriter(estimator.model_dir)

    projector.visualize_embeddings(summary_writer, projector_config)

    # Start a session to actually write the embeddings into a new checkpoint.
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.save(sess, os.path.join(estimator.model_dir, 'model.ckpt'), args.max_steps+1)
开发者ID:zhang01GA,项目名称:cloudml-samples,代码行数:74,代码来源:trainer.py


示例20: range

# NOTE(review): flat script tail — assumes `session`, `mnist`, `optimiser`,
# `merged_summary_operation`, `total_batches`, the input placeholders
# (x_input, y_input, dropout_bool), `no_embedding_data` and
# `embedding_variable` are defined earlier in the file.
train_summary_writer = tf.summary.FileWriter('/tmp/train', session.graph)

test_images, test_labels = mnist.test.images, mnist.test.labels

# Train and log merged summaries once per batch.
for batch_no in range(total_batches):
    image_batch = mnist.train.next_batch(100)
    _, merged_summary = session.run([optimiser, merged_summary_operation], feed_dict={
        x_input: image_batch[0],
        y_input: image_batch[1],
        dropout_bool: True
    })
    train_summary_writer.add_summary(merged_summary, batch_no)

work_dir = ''  # change path
metadata_path = '/tmp/train/metadata.tsv'

# Write one label index per embedded test example for the projector.
with open(metadata_path, 'w') as metadata_file:
    for i in range(no_embedding_data):
        metadata_file.write('{}\n'.format(
            np.nonzero(mnist.test.labels[::1])[1:][0][i]))

# Configure the TensorBoard embedding projector and checkpoint the session.
from tensorflow.contrib.tensorboard.plugins import projector
projector_config = projector.ProjectorConfig()
embedding_projection = projector_config.embeddings.add()
embedding_projection.tensor_name = embedding_variable.name
embedding_projection.metadata_path = metadata_path
embedding_projection.sprite.image_path = os.path.join(work_dir + '/mnist_10k_sprite.png')
embedding_projection.sprite.single_image_dim.extend([28, 28])
projector.visualize_embeddings(train_summary_writer, projector_config)
tf.train.Saver().save(session, '/tmp/train/model.ckpt', global_step=1)
开发者ID:lokeshsoni,项目名称:Deep-Learning-for-Computer-Vision,代码行数:30,代码来源:1_embedding_vis.py



注:本文中的tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap