
Python pyplot.imsave Function Code Examples


This article collects typical usage examples of the matplotlib.pyplot.imsave function in Python. If you are wondering exactly how imsave is called, what its arguments do, or what real-world uses look like, the curated code examples below may help.



The rest of this article presents 20 code examples of the imsave function, sorted by popularity by default.
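Before the numbered examples, here is a minimal, self-contained sketch of the basic call pattern; the file names, array shapes, and colormap are illustrative placeholders rather than values taken from any of the cited projects.

import numpy as np
import matplotlib.pyplot as plt

# A 2D array is passed through a colormap; vmin/vmax set the color scale.
# imsave writes one pixel per array element and creates no figure or axes.
data = np.random.rand(64, 64)
plt.imsave("example_gray.png", data, cmap='gray', vmin=0.0, vmax=1.0)

# An (H, W, 3) or (H, W, 4) array is treated as RGB(A) and saved as-is;
# cmap, vmin and vmax are ignored in that case.
rgb = np.random.rand(64, 64, 3)
plt.imsave("example_rgb.png", rgb)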

Example 1: create_concentration_image

 def create_concentration_image(self, image_data):
     cmap = cm.get_cmap()
     cmap._init()
     is_comparison_run = isinstance(self.scenario_run, models.ComparisonScenarioRun)
     if not is_comparison_run or self.scenario_run.comparison_mode == "Absolute":
         alphas = np.abs([min(n, 1.0) for n in np.linspace(0, 2, cmap.N)])
         vmax = np.max(image_data)
         vmin = np.min(image_data)
     else:
         results_max = np.max(image_data)
         results_min = np.min(image_data)
         if np.abs(results_max) > np.abs(results_min):
             vmax = results_max
             vmin = -results_max
         else:
             vmax = -results_min
             vmin = results_min
         results_range = vmax - vmin
         value_array = np.linspace(vmin, vmax, cmap.N)
         alphas = np.array([min(np.abs(v) / results_range * 2, 1.0) for v in value_array])
     cmap._lut[:-3, -1] = alphas
     if is_comparison_run:
         output_directory = self.scenario_run.output_directory_1
     else:
         output_directory = self.scenario_run.output_directory
     plt.imsave(fname=os.path.join(output_directory, "concentrations.png"),
                arr=image_data, format='png', vmax=vmax, vmin=vmin)
Developer: nathan-rice, Project: ctools-backend, Lines of code: 27, Source file: raster.py


Example 2: input_image_setup

def input_image_setup(img_name, img2_name):
	'''	Takes an image as input and creates a "rule map". When creating the
	image, keep in mind: red = grid, green = branching, blue = radial, where a
	black pixel defines a center. '''
	#TODO: Document
	import matplotlib.image as mpimg
	import matplotlib.pyplot as plt
	import procedural_city_generation
	import os
	#TODO:translate	
	
	img = mpimg.imread(img_name)
	img2 = mpimg.imread(img2_name)
	
	import matplotlib.pyplot as plt
	path=os.path.dirname(procedural_city_generation.__file__)
	print path
	plt.imsave(path+"/temp/diffused.png",img2,cmap='gray')
	with open(path+"/temp/isdiffused.txt",'w') as f:
		f.write("False")
	
	
	img*=255
	img2*=255
	return img, img2
Developer: CodeMason, Project: procedural_city_generation, Lines of code: 25, Source file: input_image_setup.py


Example 3: plot_brights

def plot_brights(ax, path, star, regionList, goal=False):
    '''
    Components of this routine:
        Projected brightness map
         
    Please note that this has been modified for use in diagnostic plots, 
    there should really be a way to specify a windowNumber for real data
    '''
    currentWindow = 0

    ###########################
    # Make the brightness map #
    ###########################
    img = make_bright_image(star, regionList, currentWindow, goal=goal)
    
    plt.imsave(path + "temp.jpg", img, cmap='hot', vmin=0.85, vmax=1.15)
    plt.imshow(img, cmap='hot')
    #Create the plot
    bmap = Basemap(projection='moll', lon_0 = 0, ax=ax)
    bmap.warpimage(path + "temp.jpg", ax=ax)
    
    if goal:
        ax.set_title("Desired Map")
    else:
        ax.set_title("Average Map")
Developer: rapidsnow, Project: Eclipse-Mapping, Lines of code: 25, Source file: plots_scratch.py


Example 4: main

def main():
    """
    Args: save_path output_dir
    """
    args = sys.argv
    save_dir = args[1]
    output_dir = args[2]

    layer_list = [
        'conv1/Conv/Conv2D',
        'conv2/Conv/Conv2D',
        'conv3/Conv/Conv2D',
        'conv4/Conv/Conv2D',
        'conv5/Conv/Conv2D',
        'conv5/Conv_1/Conv2D',
        'conv6/Conv/Conv2D'
    ]
    channels = [16, 32, 64, 64, 128, 256, 2]

    sess = tf.Session()

    with sess.as_default():
        maximize_output_multi = layers.prepare_graph(movie.build_net, save_dir)

        for i, layer in enumerate(layer_list):
            folder_name = layer.replace('/', '_')
            directory = os.path.join(output_dir, folder_name)
            create_dir(directory)
            for channel in range(channels[i]):
                result = maximize_output_multi(layer, channel, octave_n=4, iter_n=100, step=5.0, seed=123)
                plt.imsave(os.path.join(directory, str(channel) + '.png'), result)
Developer: vlpolyansky, Project: video-cnn, Lines of code: 31, Source file: movie_save_all_layers.py


Example 5: toFilenameAndTiff

 def toFilenameAndTiff(outputDirPath, kv):
     key, image=kv
     fname = outputDirPath+'/w_'+str(key)+'.tif'
     if(len(image.shape)==3):
         image=image.T
         image=np.swapaxes(image,1,2)
     imsave(fname, image)
Developer: genialwang, Project: lambda-image, Lines of code: 7, Source file: images.py


Example 6: draw_tile

def draw_tile(metadata, config, target_path):
    decoder = config.build_decoder()
    decoder_layers = nn.layers.get_all_layers(decoder.l_out)
    print "  decoder layer output shapes:"
    nparams = len(nn.layers.get_all_params(decoder.l_out))
    nn.layers.set_all_param_values(decoder.l_out, metadata['param_values'][-nparams:])

    for layer in decoder_layers:
        name = layer.__class__.__name__
        print "    %s %s" % (string.ljust(name, 32), nn.layers.get_output_shape(layer))

    mesh = np.linspace(0.001, 0.999, 20)
    z = np.zeros((400, 2), dtype='float32')
    for i in xrange(20):
        for j in xrange(20):
            z[20 * i + j, :] = np.array([norm.ppf(mesh[i]), norm.ppf(mesh[j])])

    sample = theano.function([decoder.l_z.input_var], nn.layers.get_output(decoder_layers[-1]))

    digits = sample(z)

    tile = np.zeros((20 * 28, 20 * 28), dtype='float32')

    for i in xrange(20):
        for j in xrange(20):
            d = np.reshape(digits[20 * i + j, :], (28, 28))
            tile[i * 28:(i + 1) * 28, j * 28:(j + 1) * 28] = d

    plt.imsave(target_path + 'tile.png', tile, cmap=matplotlib.cm.Greys)
Developer: IraKorshunova, Project: vae, Lines of code: 29, Source file: draw.py


Example 7: segment

def segment(sourceImage, DstImage):
    import sys
    sys.path.insert(0,'/home/joe/github/caffe-with_crop/python')
    import numpy as np
    from PIL import Image
    import matplotlib.pyplot as plt
    import caffe
    # caffe.set_mode_gpu()
    # caffe.set_device(0)
    # load image, switch to BGR, subtract mean, and make dims C x H x W for Caffe
    im = Image.open(sourceImage)
    in_ = np.array(im, dtype=np.float32)
    in_ = in_[:,:,::-1]
    in_ -= np.array((104.00698793,116.66876762,122.67891434))
    in_ = in_.transpose((2,0,1))

    # load net
    net = caffe.Net('/home/joe/github/caffe-with_crop/examples/fcn-32s-pascal-context/deploy.prototxt', '/home/joe/github/caffe-with_crop/examples/fcn-32s-pascal-context/fcn-32s-pascalcontext.caffemodel', caffe.TEST)
    # shape for input (data blob is N x C x H x W), set data
    net.blobs['data'].reshape(1, *in_.shape)
    net.blobs['data'].data[...] = in_
    # run net and take argmax for prediction
    net.forward()
    out = net.blobs['score'].data[0].argmax(axis=0)
#    plt.imshow(out)
    plt.imsave(DstImage, out)
Developer: joe8767, Project: fcn-web, Lines of code: 26, Source file: application.py


Example 8: showEigFace

 def showEigFace(self, idx=0):
     eigface = np.float32(self.eigenfaces[idx])
     print 'Eigface', eigface
     print 'SHAPE:', eigface.shape
     im = np.reshape(eigface, self.imsize)
     picName = 'eigFaceImage' + str(idx) + '.png'
     plt.imsave(picName, im, cmap=pylab.gray())
Developer: jztein, Project: eigface, Lines of code: 7, Source file: getEigenface.py


Example 9: main

def main():
    logging.basicConfig(level=logging.INFO)

    imagename = "1_27_s.bmp"
    unaryfilename = "1_27_s.c_unary.txt"

    logging.info("Read image.")
    img = utility.readimg(imagename)

    logging.info("Load unaries.")
    unaries = utility.loadunaryfile(os.path.join("data", unaryfilename))

    # Calculate energy
    unaries = -np.log(unaries)
    numlabels = unaries.shape[2]

    w = 100000
    l = 0.5
    pd1 = PD1(img, unaries, numlabels, w, l)
    pd1.segment()
    img = pd1.get_labeled_image()

    logging.info("Save image.")
    plt.imsave("img_out", img)

    plt.imshow(img)
    plt.show()
Developer: JeGa, Project: PD1, Lines of code: 27, Source file: pd1.py


Example 10: save

	def save(self, face_img, face_id, pose, landmark, name):
		# save the face image
		pose_bin_id = self.get_pose_bin_id(pose.yaw, pose.pitch)
		#print "Yaw=%d Pitch=%d Bin Name=%d" % (int(pose.yaw), int(pose.pitch), pose_bin_id)
		save_path = self.bin2path(pose_bin_id) + '/' + name + '.png'
		#print "saving image to path:", save_path
		plt.imsave(save_path, face_img)
		# save the data file
		# this data file could be optimized, by sorting the face_id
		print "saving image data file to path:", self.data_file_path()

		# here will change what we want to store
		# basically, we need the following data for ranking
		# 1. yaw, pitch and roll: yaw and pitch should match first, 
		# and then roll.
		# 2. distance between eyes: to estimate the resolution
		# 3. and also I think I need the name, but this could be provided by
		# using the image file name
		data_file_handler = open(self.data_file_path(), 'a')
		print >>data_file_handler, "%d"%pose_bin_id,
		print >>data_file_handler, "%s"%name,
		print >>data_file_handler, "%d %d %d"%(pose.yaw, pose.pitch, pose.roll),
		for point in landmark.all_points:
			print >>data_file_handler, point,
		print >>data_file_handler, ""
		return save_path
Developer: kumasento, Project: ImageProcFinal_Faceswap, Lines of code: 26, Source file: faceposebin.py


Example 11: detect

def detect():
    try:
        image = request.files.get('file')
        recog_face = bool(int(request.files.get('face').file.read()))
        files = {'file': StringIO(image.file.read())}
        objs = requests.post('http://{}/object'.format(OBJ_SERVER),
                             files=files)
        objs = json.loads(objs.text)

        print recog_face
        if recog_face:
            img = None
            for bb in objs:
                if bb['label'] == 'person':
                    if img is None:
                        image.file.seek(0)
                        img = io.imread(StringIO(image.file.read()))
                    x1, y1, x2, y2 = bb['bbox']
                    person_img = img[y1:y2, x1:x2]

                    # detect face
                    s = StringIO()
                    plt.imsave(s, person_img)
                    s.seek(0)
                    faces = requests.post('http://{}/face'.format(FACE_SERVER),
                                          files={'file': s})
                    bb['face'] = json.loads(faces.text)

        return json.dumps(objs)

    except Exception as e:
        print str(type(e)), e
Developer: mitmul, Project: cvmodules, Lines of code: 32, Source file: server.py


Example 12: save_results

def save_results(true_labels, predicted_labels, clf_name, classification_dir):
  cm_int = confusion_matrix (true_labels, predicted_labels);
  cm_float = cm_int/np.apply_along_axis(np.sum, 1, cm_int).astype('float');
  report = classification_report(true_labels, predicted_labels, np.arange(0,len(class_names)), class_names);
  # Save results
  plt.imsave(classification_dir + '/' + clf_name + "_cm.png", cm_float ,  cmap=cmt.gray)
  float_cm_file = classification_dir +'/' + clf_name + "_float_cm.txt"
  fos = open(float_cm_file, 'w');
  np.savetxt(fos,cm_float);
  fos.close();
  int_cm_file = classification_dir +'/' + clf_name + "_int_cm.txt"
  fos = open(int_cm_file, 'w');
  np.savetxt(fos,cm_int);
  fos.close();
  report_file = classification_dir +'/' + clf_name + "_report.txt"
  fos = open(report_file, 'w');
  fos.write(report);
  fos.close();
  labels_file = classification_dir +'/' + clf_name + "_labels.txt"
  fos = open(labels_file, 'w');
  np.savetxt(fos,np.column_stack((true_labels,predicted_labels)));
  fos.close();

  p, r, f1, s = precision_recall_fscore_support(true_labels, predicted_labels,labels=np.arange(0,len(class_names)))
  prf1s_file = classification_dir +'/' + clf_name + "_prf1s.txt"
  fos = open(prf1s_file, 'w');
  np.savetxt(fos,np.column_stack((p, r, f1, s)));
  fos.close();
Developer: mirestrepo, Project: voxels-at-lems, Lines of code: 28, Source file: classify_no_object.py


Example 13: plot2

def plot2(fn,p,wa,vmin,vmax,ups):
    
    # Check matrix dimensions are the same
    (m,n)=p.shape
    if (m,n)!=wa.shape:
        print "Matrix dimension mismatch"

    # Set up output array and scaling constant
    o=np.zeros((m*ups,n*ups,3))
    vsca=1.0/(vmax-vmin)

    # Assemble the output array
    for i in range(m):
        iu=i*ups
        for j in range(n):
            ju=j*ups
            if wa[i,j]==1:
                o[iu:iu+ups,ju:ju+ups,0]=1
                o[iu:iu+ups,ju:ju+ups,1]=1
                o[iu:iu+ups,ju:ju+ups,2]=1
            else:
                (re,gr,bl)=palette2(fscale(p[i,j],vmin,vsca))
                o[iu:iu+ups,ju:ju+ups,0]=re
                o[iu:iu+ups,ju:ju+ups,1]=gr
                o[iu:iu+ups,ju:ju+ups,2]=bl

    # Save the image
    plt.imsave(fn,o)
Developer: dustinvtran, Project: am205-fall-2014, Lines of code: 28, Source file: custom_plot.py


Example 14: main

def main():
    parser = argparse.ArgumentParser(
        description="Photometric Stereo",
    )
    parser.add_argument(
        "--lightning",
        nargs="?",
        help="Filename of JSON file containing lightning information",
    )
    parser.add_argument(
        "--mask",
        nargs="?",
        help="Filename of an image containing a mask of the object",
    )
    parser.add_argument(
        "image",
        nargs="*",
        help="Images filenames",
    )
    parser.add_argument(
        "--generate-map",
        action='store_true',
        help="Generate a map.png file which represends the colors of the "
             "normal mapping.",
    )
    args = parser.parse_args()

    if args.generate_map:
        normals = generateNormalMap()
        plt.imsave('map.png', normals)
        return

    if not len(args.image) >= 3:
        print("Please specify 3+ image files.")
        return

    if args.lightning:
        normals = photometricStereo(args.lightning, args.image)
        if False:
            try:
                with open('data.pkl', 'rb') as fhdl:
                    normals = pickle.load(fhdl)
            except:
                
                with open('data.pkl', 'wb') as fhdl:
                    pickle.dump(normals, fhdl)
    else:
        normals = photometricStereoWithoutLightning(args.image)

    if args.mask:
        mask = getImage(args.mask)
        mask = mask.T
        print(normals.shape, mask.shape)
        normals[mask<(mask.max() - mask.min())/2.] = np.nan

    color = colorizeNormals(normals)
    plt.imsave('out.png', color)
    mesh.write3dNormals(normals, 'out-3dn.stl')
    surface = mesh.surfaceFromNormals(normals)
    mesh.writeMesh(surface, normals, 'out-mesh.stl')
Developer: patricksnape, Project: pms, Lines of code: 60, Source file: pms.py


Example 15: prep_image

def prep_image(url,idx,dataset,datadir,width=224,filetype='jpg',verbose=False):
    '''
    Check to see image file has been downloaded at current size.  If it has not,
    download and resize image. Saves file to datadir/images/[dataset]_[idx]_w[width].[filetype]
    e.g. datadir/images/train_10001_w256.bmp

    args:
        url: url of image source
        idx: image row index
        dataset: string 'train' or 'test' or other identifier
        datadir: data directory
        width: desired width of image. Will be resized to width squared
    returns:
        rawim: scaled and cropped image
    '''
    outpath = datadir + 'images/' + dataset + '_' +  str(idx) + '_w' + str(width) + '.' + filetype

    if not os.path.isfile(outpath):
        if verbose:
            print "downloading image #%s..." %str(idx)
        try:
            rawim = download_and_resize(url,width)
            plt.imsave(outpath,rawim)
            return rawim
        except:
            print "unable to download image #%i from url %s..." %(idx,url)
            return None
    else:
        if verbose:
            print "Image %i already downloaded. Loading from file..." % idx
        rawim = plt.imread(outpath)
        return rawim
Developer: LucyWang2014, Project: NLP_Product_Classification, Lines of code: 32, Source file: download_images_to_directory.py


Example 16: _post_step

 def _post_step(self, t):
     file_name = os.path.join(self._dir_name, self._file_fmt.format(t))
     spins = np.array([[self._ising.s(i, j)
                        for j in range(self._ising.N())]
                       for i in range(self._ising.N())])
     plt.imsave(file_name, spins)
     return True
Developer: gjbex, Project: training-material, Lines of code: 7, Source file: runner.py


Example 17: post_image

    def post_image(self, im, folder, filename='sources'):
        """
        Post an image to S3 for this pull request

        Parameters
        ----------
        im : array
            The image as a 2D array (grayscale) or 3D array (RGB)

        name : str
            The folder name to put file in
        """
        from matplotlib.pyplot import imsave, cm

        im = asarray(im)
        imfile = io.BytesIO()
        if im.ndim == 3:
            imsave(imfile, im, format="png")
        else:
            imsave(imfile, im, format="png", cmap=cm.gray)

        k = Key(self.bucket)
        k.key = 'neurofinder/images/' + str(
            self.id) + '/' + folder + '/' + filename + '.png'
        k.set_contents_from_string(imfile.getvalue())
Developer: sundeepteki, Project: neurofinder, Lines of code: 25, Source file: job.py


Example 18: get_top_nearest_neigbors

def get_top_nearest_neigbors(num_generated, nearneig, real_features_hdf5, gen_features_hdf5, maximum=False, random_select=False, save_path=None):

    real_img_hdf5 = real_features_hdf5.replace('_features_', '_images_')
    gen_img_hdf5 = gen_features_hdf5.replace('_features_', '_images_')

    real_features_file = h5py.File(real_features_hdf5, 'r')
    gen_features_file = h5py.File(gen_features_hdf5, 'r')
    real_img_file = h5py.File(real_img_hdf5, 'r')
    gen_img_file = h5py.File(gen_img_hdf5, 'r')

    real_features = real_features_file['features']
    gen_features = gen_features_file['features']
    real_img = real_img_file['images']
    gen_img = gen_img_file['images']

    with tf.Session() as sess:
        real_features = tf.constant(np.array(real_features), dtype=tf.float32)
        gen_features = tf.constant(np.array(gen_features), dtype=tf.float32)

        # Get Nearest Neighbors for all generated images.
        gen_real_distances = tf.sqrt(tf.abs(euclidean_distance(gen_features, real_features)))
        neg = tf.negative(gen_real_distances)
        neg_s_distances, s_indices = tf.math.top_k(input=neg, k=nearneig, sorted=True)
        s_distances = tf.negative(neg_s_distances)


        # Getting the top smallest distances between Generated and Real images.
        neg_s_distances1, s_indices1 = tf.math.top_k(input=neg, k=1, sorted=True)
        neg_s_distances1 = tf.transpose(neg_s_distances1)
        if not random_select:
            if maximum:
                neg_s_distances1 = tf.negative(neg_s_distances1)
            neg_s_distances1, s_indices1 = tf.math.top_k(input=neg_s_distances1, k=num_generated, sorted=True)
            s_indices1 = tf.transpose(s_indices1)
            s_indices1 = s_indices1.eval()
        else:
            lin = list(range(int(gen_real_distances.shape[0])))
            random.shuffle(lin)
            s_indices1 = np.zeros((num_generated,1), dtype=np.int8)
            s_indices1[:, 0] = lin[:num_generated]
            
        s_indices = s_indices.eval()
        s_distances = s_distances.eval()
        # For the images with top smallest distances, show nearest neighbors.
        height, width, channels = real_img.shape[1:]
        neighbors = dict()
        grid = np.zeros((num_generated*height, (nearneig+1)*width, channels))
        for i, ind in enumerate(s_indices1):
            ind = ind[0]
            total = gen_img[ind]
            neighbors[ind] = list() 
            for j in range(nearneig):
                neighbors[ind].append((s_indices[ind,j], s_distances[ind,j]))
                real = real_img[s_indices[ind,j]]/255.
                total = np.concatenate([total, real], axis=1)
            grid[i*height:(i+1)*height, :, :] = total
        plt.imshow(grid)
        if save_path is not None:
            plt.imsave(save_path, grid)
        return neighbors
Developer: AdalbertoCq, Project: Pathology-GAN, Lines of code: 60, Source file: tools.py


Example 19: draw_homotopy

def draw_homotopy(metadata, config, target_path, idx1, idx2):
    if not os.path.isfile(target_path + 'mu.npy'):
        encode(metadata, config, target_path)

    mu = np.load(target_path + 'mu.npy')
    x1, y1 = config.x_train[idx1, :], config.y_train[idx1]
    x2, y2 = config.x_train[idx2, :], config.y_train[idx2]
    mu1, mu2 = mu[idx1, :], mu[idx2, :]

    decoder = config.build_decoder()
    decoder_layers = nn.layers.get_all_layers(decoder.l_out)
    nparams = len(nn.layers.get_all_params(decoder.l_out))
    nn.layers.set_all_param_values(decoder.l_out, metadata['param_values'][-nparams:])
    print '  Decoder'
    for layer in decoder_layers:
        name = layer.__class__.__name__
        print "    %s %s" % (string.ljust(name, 32), nn.layers.get_output_shape(layer))

    decode = theano.function([decoder.l_z.input_var], nn.layers.get_output(decoder.l_out))

    p_range = np.arange(1, 0, -0.05)
    tile = np.reshape(x1, (28, 28))

    for p in p_range:
        zp = p * mu1 + (1 - p) * mu2
        zp = zp[np.newaxis, :]
        xp_hat = decode(zp)

        xp_hat = np.reshape(xp_hat, (28, 28))
        tile = np.hstack((tile, xp_hat))

    tile = np.hstack((tile, np.reshape(x2, (28, 28))))

    plt.imsave(target_path + 'homotopy_%s-%s.png' % (str(y1), str(y2)), tile, cmap=matplotlib.cm.Greys)
Developer: IraKorshunova, Project: vae, Lines of code: 34, Source file: draw.py


Example 20: test_imsave

def test_imsave(fmt):
    if fmt in ["jpg", "jpeg", "tiff"]:
        pytest.importorskip("PIL")
    has_alpha = fmt not in ["jpg", "jpeg"]

    # The goal here is that the user can specify an output logical DPI
    # for the image, but this will not actually add any extra pixels
    # to the image, it will merely be used for metadata purposes.

    # So we do the traditional case (dpi == 1), and the new case (dpi
    # == 100) and read the resulting PNG files back in and make sure
    # the data is 100% identical.
    np.random.seed(1)
    # The height of 1856 pixels was selected because going through creating an
    # actual dpi=100 figure to save the image to a Pillow-provided format would
    # cause a rounding error resulting in a final image of shape 1855.
    data = np.random.rand(1856, 2)

    buff_dpi1 = io.BytesIO()
    plt.imsave(buff_dpi1, data, format=fmt, dpi=1)

    buff_dpi100 = io.BytesIO()
    plt.imsave(buff_dpi100, data, format=fmt, dpi=100)

    buff_dpi1.seek(0)
    arr_dpi1 = plt.imread(buff_dpi1, format=fmt)

    buff_dpi100.seek(0)
    arr_dpi100 = plt.imread(buff_dpi100, format=fmt)

    assert arr_dpi1.shape == (1856, 2, 3 + has_alpha)
    assert arr_dpi100.shape == (1856, 2, 3 + has_alpha)

    assert_array_equal(arr_dpi1, arr_dpi100)
Developer: QuLogic, Project: matplotlib, Lines of code: 34, Source file: test_image.py



Note: The matplotlib.pyplot.imsave examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please refer to each project's License before redistributing or using the code; do not reproduce without permission.

