Python cbook.is_numlike Function Code Examples


This article collects typical usage examples of the matplotlib.cbook.is_numlike function in Python. If you are wondering what is_numlike is for, how it is used, or what real-world calls to it look like, the hand-picked code examples below may help.



A total of 20 code examples of the is_numlike function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
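
Before the examples, here is a minimal usage sketch (not taken from any of the listed projects) of what cbook.is_numlike does: it simply reports whether a value behaves like a number. The function was deprecated in matplotlib 3.0 and removed in later releases, so the fallback below is our own approximation for newer installations rather than part of the matplotlib API.

import numbers
import numpy as np

try:
    from matplotlib.cbook import is_numlike
except ImportError:
    # Fallback assumption: mimic the old helper with an isinstance check.
    def is_numlike(obj):
        """Return True if *obj* looks like a number."""
        return isinstance(obj, (numbers.Number, np.number))

print(is_numlike(3.14))           # True
print(is_numlike(np.float64(1)))  # True
print(is_numlike("3.14"))         # False
print(is_numlike([1, 2, 3]))      # False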

Example 1: is_numlike

 def is_numlike(x):
     """
     The Matplotlib datalim, autoscaling, locators etc work with
     scalars which are the units converted to floats given the
     current unit.  The converter may be passed these floats, or
     arrays of them, even when units are set.
     """
     if iterable(x):
         for thisx in x:
             return is_numlike(thisx)
     else:
         return is_numlike(x)
Developer: adnanb59, Project: matplotlib, Lines: 12, Source: units.py


Example 2: is_numlike

 def is_numlike(x):
     """
     The matplotlib datalim, autoscaling, locators etc work with
     scalars which are the units converted to floats given the
     current unit.  The converter may be passed these floats, or
     arrays of them, even when units are set.  Derived conversion
     interfaces may opt to pass plain-ol unitless numbers through
     the conversion interface and this is a helper function for
     them.
     """
     if iterable(x):
         for thisx in x:
             return is_numlike(thisx)
     else:
         return is_numlike(x)
Developer: Eric89GXL, Project: nitime, Lines: 15, Source: _mpl_units.py
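
Both snippets above are the ConversionInterface.is_numlike helper; the inner is_numlike call resolves to matplotlib.cbook.is_numlike imported at module level in those files (that is why they appear in this collection), so the method does not recurse into itself, and for an iterable only the first element is inspected. Below is a small standalone sketch of the same pattern, assuming an older matplotlib where cbook.iterable and cbook.is_numlike are still available; the wrapper name is our own.

from matplotlib.cbook import iterable, is_numlike

def first_element_is_numlike(x):
    # For an iterable, test only the first element; an empty iterable
    # falls through and returns None.
    if iterable(x):
        for thisx in x:
            return is_numlike(thisx)
    else:
        return is_numlike(x)

print(first_element_is_numlike([1.0, 2.0, 3.0]))  # True
print(first_element_is_numlike("abc"))            # False ('a' is not num-like)
print(first_element_is_numlike(2.5))              # True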


Example 3: ScanDir

def ScanDir(folder='.',keys=[],pattern=r".*\.h5",return_dict=False,req={}):
    out={}
    for f in os.listdir(folder):
        if re.match(pattern,f) is not None:
            try:
                isreq=(len(req)==0)
                if not isreq:
                    isreq=True
                    fd=GetAttr("{0}/{1}".format(folder,f))
                    for k in req.keys():
                        try:
                            if is_numlike(req[k]):
                                isreq=isreq and (abs(req[k]-fd[k])<1e-9)
                            else:
                                isreq=isreq and (req[k]==fd[k])
                        except KeyError:
                            isreq=False
                if isreq:
                    out[folder+'/'+f]=dict(GetAttr("{0}/{1}".format(folder,f)))
                    s=f
                    if len(keys):
                        s="{0}: ".format(f)
                        if keys=='*':
                            keys=out[folder+'/'+f].keys()
                        for k in keys:
                            try:
                                s="{0} {1}:{2} /".format(s,k,out[folder+'/'+f][k])
                            except KeyError:
                                s="{0} None /".format(s)
                    print(s)
            except IOError:
                print('Could not open \"'+f+'\".')
    if return_dict:
        return out
Developer: EPFL-LQM, Project: gpvmc, Lines: 34, Source: vmc.py


Example 4: _calculate_global

    def _calculate_global(self, data):
        # Calculate breaks if x is not categorical
        binwidth = self.params['binwidth']
        self.breaks = self.params['breaks']
        right = self.params['right']
        x = data['x'].values

        # For categorical data we set labels and x-vals
        if is_categorical(x):
            labels = self.params['labels']
            if labels is None:
                labels = sorted(set(x))
            self.labels = labels
            self.length = len(self.labels)

        # For non-categorical data we set breaks
        if not (is_categorical(x) or self.breaks):
            # Check that x is numerical
            if not cbook.is_numlike(x[0]):
                raise GgplotError("Cannot recognise the type of x")
            if binwidth is None:
                _bin_count = 30
                self._print_warning(_MSG_BINWIDTH)
            else:
                _bin_count = int(np.ceil(np.ptp(x))) / binwidth
            _, self.breaks = pd.cut(x, bins=_bin_count, labels=False,
                                        right=right, retbins=True)
            self.length = len(self.breaks)
Developer: aaronlin, Project: ggplot, Lines: 28, Source: stat_bin.py


Example 5: getname_val

 def getname_val(identifier):
     'return the name and column data for identifier'
     if is_string_like(identifier):
         return identifier, r[identifier]
     elif is_numlike(identifier):
         name = r.dtype.names[int(identifier)]
         return name, r[name]
     else:
         raise TypeError('identifier must be a string or integer')
Developer: VinInn, Project: pyTools, Lines: 9, Source: read_csv.py


Example 6: is_known_scalar

def is_known_scalar(value):
    """
    Return True if value is a type we expect in a dataframe
    """
    def _is_datetime_or_timedelta(value):
        # Using pandas.Series helps catch python, numpy and pandas
        # versions of these types
        return pd.Series(value).dtype.kind in ('M', 'm')

    return not cbook.iterable(value) and (cbook.is_numlike(value) or
                                          _is_datetime_or_timedelta(value))
Developer: jwhendy, Project: plotnine, Lines: 11, Source: layer.py
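
A few illustrative calls to is_known_scalar as defined above; the values are our own, and the snippet assumes the pandas/cbook imports from the plotnine code, i.e. a matplotlib old enough to still ship cbook.iterable and cbook.is_numlike.

import datetime

print(is_known_scalar(1.5))                            # True  - num-like
print(is_known_scalar(datetime.timedelta(days=1)))     # True  - timedelta
print(is_known_scalar(datetime.datetime(2020, 1, 1)))  # True  - datetime
print(is_known_scalar([1, 2, 3]))                      # False - iterable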


Example 7: getname_val

 def getname_val(identifier):
     'return the name and column data for identifier'
     if is_string_like(identifier):
         print "Identifier " + identifier + " is a string"
         col_name = identifier.strip().lower().replace(' ', '_')
         col_name = ''.join([c for c in col_name if c not in delete])
         return identifier, r[col_name]
     elif is_numlike(identifier):
         name = r.dtype.names[int(identifier)]
         return name, r[name]
     else:
         raise TypeError('identifier must be a string or integer')
Developer: ctb, Project: Advanced_iPlant, Lines: 12, Source: main.py


Example 8: _convert_numcompatible

 def _convert_numcompatible(self, c):
     """Convert c to a form usable by arithmetic operations"""
     #the compatible dataset to be returned, initialize it to zeros.
     comp = {'x':np.zeros_like(self._x),
           'y':np.zeros_like(self._x),
           'dy':np.zeros_like(self._x),
           'dx':np.zeros_like(self._x)}
     # if c is a DataSet:
     if isinstance(c, AliasedVectorAttributes):
         if self.shape() != c.shape(): # they are of incompatible size, fail.
             raise ValueError('incompatible length')
         # if the size of them is compatible, check if the abscissae are
         # compatible.
         xtol = min(self._xtolerance, c._xtolerance) # use the strictest
         if max(np.abs(self._x - c._x)) < xtol:
             try:
                 comp['x'] = c._x
                 comp['y'] = c._y
                 comp['dy'] = c._dy
                 comp['dx'] = c._dx
             except AttributeError:
                 pass # this is not a fatal error
         else:
             raise ValueError('incompatible abscissae')
     elif isinstance(c, ErrorValue):
         comp['x'] = self._x
         comp['y'] += c.val
         comp['dy'] += c.err
     elif isinstance(c, tuple): # if c is a tuple
         try:
             #the fields of comp were initialized to zero np arrays!
             comp['x'] += c[0]
             comp['y'] += c[1]
             comp['dy'] += c[2]
             comp['dx'] += c[3]
         except IndexError:
             pass # this is not fatal either
     else:
         if is_numlike(c):
             try:
                 comp['x'] = self._x
                 comp['y'] += c # leave this job to numpy.ndarray.__iadd__()
             except:
                 raise DataSetError('Incompatible size')
         else:
             raise DataSetError('Incompatible type')
     return comp
Developer: awacha, Project: sastool, Lines: 47, Source: dataset.py


Example 9: from_any

def from_any(size, fraction_ref=None):
    """
    Creates Fixed unit when the first argument is a float, or a
    Fraction unit if that is a string that ends with %. The second
    argument is only meaningful when Fraction unit is created.

      >>> a = Size.from_any(1.2) # => Size.Fixed(1.2)
      >>> Size.from_any("50%", a) # => Size.Fraction(0.5, a)

    """
    if cbook.is_numlike(size):
        return Fixed(size)
    elif cbook.is_string_like(size):
        if size[-1] == "%":
            return Fraction(float(size[:-1])/100., fraction_ref)

    raise ValueError("Unknown format")
Developer: AlexSzatmary, Project: matplotlib, Lines: 17, Source: axes_size.py


Example 10: draw_networkx_edges


#......... (part of the code omitted) .........
                           for c in edge_color]):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
                             for c in edge_color]):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must consist of either color names or numbers')
    else:
        if cb.is_string_like(edge_color) or len(edge_color) == 1:
            edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
        else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number or edges')

    edge_collection = LineCollection(edge_pos,
                                     colors=edge_colors,
                                     linewidths=lw,
                                     antialiaseds=(1,),
                                     linestyle=style,
                                     transOffset = ax.transData,
                                     )

    edge_collection.set_zorder(1)  # edges go behind nodes
    edge_collection.set_label(label)
    ax.add_collection(edge_collection)

    # Note: there was a bug in mpl regarding the handling of alpha values for
    # each line in a LineCollection.  It was fixed in matplotlib in r7184 and
    # r7189 (June 6 2009).  We should then not set the alpha value globally,
    # since the user can instead provide per-edge alphas now.  Only set it
    # globally if provided as a scalar.
    if cb.is_numlike(alpha):
        edge_collection.set_alpha(alpha)

    if edge_colors is None:
        if edge_cmap is not None:
            assert(isinstance(edge_cmap, Colormap))
        edge_collection.set_array(numpy.asarray(edge_color))
        edge_collection.set_cmap(edge_cmap)
        if edge_vmin is not None or edge_vmax is not None:
            edge_collection.set_clim(edge_vmin, edge_vmax)
        else:
            edge_collection.autoscale()

    arrow_collection = None

    if G.is_directed() and arrows:

        # a directed graph hack
        # draw thick line segments at head end of edge
        # waiting for someone else to implement arrows that will work
        arrow_colors = edge_colors
        a_pos = []
        p = 1.0-0.25  # make head segment 25 percent of edge length
        for src, dst in edge_pos:
            x1, y1 = src
            x2, y2 = dst
            dx = x2-x1   # x offset
            dy = y2-y1   # y offset
            d = numpy.sqrt(float(dx**2 + dy**2))  # length of edge
            if d == 0:   # source and target at same position
                continue
            if dx == 0:  # vertical edge
                xa = x2
Developer: chrisnatali, Project: networkx, Lines: 67, Source: nx_pylab.py


Example 11: __init__

    def __init__(
        self,
        fig,
        rect,
        nrows_ncols,
        ngrids=None,
        direction="row",
        axes_pad=0.02,
        add_all=True,
        share_all=False,
        aspect=True,
        label_mode="L",
        cbar_mode=None,
        cbar_location="right",
        cbar_pad=None,
        cbar_size="5%",
        cbar_set_cax=True,
        axes_class=None,
    ):
        """
        Build an :class:`ImageGrid` instance with a grid nrows*ncols
        :class:`~matplotlib.axes.Axes` in
        :class:`~matplotlib.figure.Figure` *fig* with
        *rect=[left, bottom, width, height]* (in
        :class:`~matplotlib.figure.Figure` coordinates) or
        the subplot position code (e.g., "121").

        Optional keyword arguments:

          ================  ========  =========================================
          Keyword           Default   Description
          ================  ========  =========================================
          direction         "row"     [ "row" | "column" ]
          axes_pad          0.02      float| pad between axes given in inches
          add_all           True      [ True | False ]
          share_all         False     [ True | False ]
          aspect            True      [ True | False ]
          label_mode        "L"       [ "L" | "1" | "all" ]
          cbar_mode         None      [ "each" | "single" ]
          cbar_location     "right"   [ "right" | "top" ]
          cbar_pad          None
          cbar_size         "5%"
          cbar_set_cax      True      [ True | False ]
          axes_class        None      a type object which must be a subclass
                                      of :class:`~matplotlib.axes.Axes`
          ================  ========  =========================================

        *cbar_set_cax* : if True, each axes in the grid has a cax
          attribute that is bound to the associated cbar_axes.
        """
        self._nrows, self._ncols = nrows_ncols

        if ngrids is None:
            ngrids = self._nrows * self._ncols
        else:
            if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
                raise Exception("")

        self.ngrids = ngrids

        self._axes_pad = axes_pad

        self._colorbar_mode = cbar_mode
        self._colorbar_location = cbar_location
        if cbar_pad is None:
            self._colorbar_pad = axes_pad
        else:
            self._colorbar_pad = cbar_pad

        self._colorbar_size = cbar_size

        self._init_axes_pad(axes_pad)

        if direction not in ["column", "row"]:
            raise Exception("")

        self._direction = direction

        if axes_class is None:
            axes_class = self._defaultLocatableAxesClass
            axes_class_args = {}
        else:
            if isinstance(axes_class, maxes.Axes):
                axes_class_args = {}
            else:
                axes_class, axes_class_args = axes_class

        self.axes_all = []
        self.axes_column = [[] for i in range(self._ncols)]
        self.axes_row = [[] for i in range(self._nrows)]

        self.cbar_axes = []

        h = []
        v = []
        if cbook.is_string_like(rect) or cbook.is_numlike(rect):
            self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v, aspect=aspect)
        elif len(rect) == 3:
            kw = dict(horizontal=h, vertical=v, aspect=aspect)
            self._divider = SubplotDivider(fig, *rect, **kw)
#......... (part of the code omitted) .........
Developer: radford, Project: matplotlib, Lines: 101, Source: axes_grid.py


Example 12: draw_networkx_edges


#......... (part of the code omitted) .........
            # list of color letters such as ['k','r','k',...]
            edge_colors = tuple([colorConverter.to_rgba(c,alpha) 
                                 for c in edge_color])
        elif np.alltrue([not cb.is_string_like(c) 
                           for c in edge_color]):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if np.alltrue([cb.iterable(c) and len(c) in (3,4)
                             for c in edge_color]):
                edge_colors = tuple(edge_color)
                alpha=None
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must consist of either color names or numbers')
    else:
        if len(edge_color)==1:
            edge_colors = ( colorConverter.to_rgba(edge_color, alpha), )
        else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number or edges')
    edge_collection = LineCollection(edge_pos,
                                     colors       = edge_colors,
                                     linewidths   = lw,
                                     antialiaseds = (1,),
                                     linestyle    = style,     
                                     transOffset = ax.transData,             
                                     )

    # Note: there was a bug in mpl regarding the handling of alpha values for
    # each line in a LineCollection.  It was fixed in matplotlib in r7184 and
    # r7189 (June 6 2009).  We should then not set the alpha value globally,
    # since the user can instead provide per-edge alphas now.  Only set it
    # globally if provided as a scalar.
    if cb.is_numlike(alpha):
        edge_collection.set_alpha(alpha)

    # need 0.87.7 or greater for edge colormaps.  No checks done, this will
    # just not work with an older mpl
    if edge_colors is None:
        if edge_cmap is not None: assert(isinstance(edge_cmap, Colormap))
        edge_collection.set_array(np.asarray(edge_color))
        edge_collection.set_cmap(edge_cmap)
        if edge_vmin is not None or edge_vmax is not None:
            edge_collection.set_clim(edge_vmin, edge_vmax)
        else:
            edge_collection.autoscale()
        pylab.sci(edge_collection)

    arrow_collection=None

    if G.is_directed() and arrows:

        # a directed graph hack
        # draw thick line segments at head end of edge
        # waiting for someone else to implement arrows that will work 
        arrow_colors = ( colorConverter.to_rgba('k', alpha), )
        a_pos=[]
        p=1.0-0.25 # make head segment 25 percent of edge length
        for src,dst in edge_pos:
            x1,y1=src
            x2,y2=dst
            dx=x2-x1 # x offset
            dy=y2-y1 # y offset
            d=np.sqrt(float(dx**2+dy**2)) # length of edge
            if d==0: # source and target at same position
                continue
Developer: EhsanTadayon, Project: brainx, Lines: 67, Source: nxplot.py


Example 13: kepcotrendsc


#......... (part of the code omitted) .........
	# test log file
	logfile = kepmsg.test(logfile)

	# clobber output file
	if clobber:
		status = kepio.clobber(outfile,logfile,verbose)
	if kepio.fileexists(outfile):
		message = 'ERROR -- KEPCOTREND: ' + outfile + ' exists. Use --clobber'
		status = kepmsg.err(logfile,message,verbose)

	# open input file
	if status == 0:
		instr, status = kepio.openfits(infile,'readonly',logfile,verbose)
		tstart, tstop, bjdref, cadence, status = kepio.timekeys(instr,
			infile,logfile,verbose,status)

	# fudge non-compliant FITS keywords with no values
	if status == 0:
		instr = kepkey.emptykeys(instr,file,logfile,verbose)

	if status == 0:
		if not kepio.fileexists(bvfile):
			message = 'ERROR -- KEPCOTREND: ' + bvfile + ' does not exist.'
			status = kepmsg.err(logfile,message,verbose)

	#lsq_sq - nonlinear least squares fitting and simplex_abs have been
	#removed from the options in PyRAF but they are still in the code!
	if status == 0:
		if fitmethod not in ['llsq','matrix','lst_sq','simplex_abs','simplex']:
			message = 'Fit method must either: llsq, matrix, lst_sq or simplex'
			status = kepmsg.err(logfile,message,verbose)

	if status == 0:
		if not is_numlike(fitpower) and fitpower is not None:
			message = 'Fit power must be an real number or None'
			status = kepmsg.err(logfile,message,verbose)



	if status == 0:
		if fitpower is None:
			fitpower = 1.

	# input data
	if status == 0:
		short = False
		try:
			test = str(instr[0].header['FILEVER'])
			version = 2
		except KeyError:
			version = 1

		table = instr[1].data
		if version == 1:
			if str(instr[1].header['DATATYPE']) == 'long cadence':
				#print 'Light curve was taken in Lond Cadence mode!'
				quarter = str(instr[1].header['QUARTER'])
				module = str(instr[1].header['MODULE'])
				output = str(instr[1].header['OUTPUT'])
				channel = str(instr[1].header['CHANNEL'])

				lc_cad_o = table.field('cadence_number')
				lc_date_o = table.field('barytime')
				lc_flux_o = table.field('ap_raw_flux') / 1625.3468 #convert to e-/s
				lc_err_o = table.field('ap_raw_err') / 1625.3468 #convert to e-/s
			elif str(instr[1].header['DATATYPE']) == 'short cadence':
Developer: mrtommyb, Project: PyKE, Lines: 67, Source: kepcotrend.py


Example 14: _calculate

    def _calculate(self, data):
        x = data.pop('x')
        right = self.params['right']

        # y values are not needed
        try:
            del data['y']
        except KeyError:
            pass
        else:
            self._print_warning(_MSG_YVALUE)

        if len(x) > 0 and isinstance(x.get(0), datetime.date):
            def convert(d):
                d = datetime.datetime.combine(d, datetime.datetime.min.time())
                return time.mktime(d.timetuple())
            x = x.apply(convert)
        elif len(x) > 0 and isinstance(x.get(0), datetime.datetime):
            x = x.apply(lambda d: time.mktime(d.timetuple()))
        elif len(x) > 0 and isinstance(x.get(0), datetime.time):
            raise GgplotError("Cannot recognise the type of x")

        # If weight not mapped to, use one (no weight)
        try:
            weights = data.pop('weight')
        except KeyError:
            weights = np.ones(len(x))
        else:
            weights = make_iterable_ntimes(weights, len(x))

        if is_categorical(x.values):
            x_assignments = x
            x = self.labels
            width = make_iterable_ntimes(self.params['width'], self.length)
        elif cbook.is_numlike(x.iloc[0]):
            x_assignments = pd.cut(x, bins=self.breaks, labels=False,
                                           right=right)
            width = np.diff(self.breaks)
            x = [self.breaks[i] + width[i] / 2
                 for i in range(len(self.breaks)-1)]
        else:
            raise GgplotError("Cannot recognise the type of x")

        # Create a dataframe with two columns:
        #   - the bins to which each x is assigned
        #   - the weights of each x value
        # Then create a weighted frequency table
        _df = pd.DataFrame({'assignments': x_assignments,
                            'weights': weights
                            })
        _wfreq_table = pd.pivot_table(_df, values='weights',
                                      rows=['assignments'], aggfunc=np.sum)

        # For numerical x values, empty bins have no value
        # in the computed frequency table. We need to add the zeros and
        # since frequency table is a Series object, we need to keep it ordered
        try:
            empty_bins = set(self.labels) - set(x_assignments)
        except:
            empty_bins = set(range(len(width))) - set(x_assignments)
        _wfreq_table = _wfreq_table.to_dict()
        for _b in empty_bins:
            _wfreq_table[_b] = 0
        _wfreq_table = pd.Series(_wfreq_table).sort_index()

        y = list(_wfreq_table)
        new_data = pd.DataFrame({'x': x, 'y': y, 'width': width})

        # Copy the other aesthetics into the new dataframe
        n = len(x)
        for ae in data:
            new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
        return new_data
Developer: 2dpodcast, Project: ggplot, Lines: 73, Source: stat_bin.py


Example 15: run


#......... (part of the code omitted) .........

        Hfile = np.load(Hfilename)
        H = Hfile['H']; xe = Hfile['xe']; ye = Hfile['ye']
        #levels = np.linspace(0, np.nanmax(H), 11)

    # which time index to plot
    # a number or 'mean' or 'none' (for coast and cross)
    if whichtype == 'D2':
        itind = 100 # 100 
    elif (whichtype == 'cross') or ('coast' in whichtype):
        itind = 'none'
    # Choose consistent levels to plot
    locator = ticker.MaxNLocator(11)
    locator.create_dummy_axis()
    # don't use highest max since everything is washed out then
    # pdb.set_trace()
    # 12000 for mean interannual-summer, 20000 for mean, interannual-winter, 1400 for 100 seasonal
    # 1800 for 100 interannual-winter, 1800 for 100 interannual-summer
    if whichtype == 'D2':
        if itind == 30:
            locator.set_bounds(0, 10)
        elif itind == 100: 
            locator.set_bounds(0, 160) 
        elif itind == 150: 
            locator.set_bounds(0, 450) 
        elif itind == 300: 
            locator.set_bounds(0, 2200) 
        elif itind == 600: 
            locator.set_bounds(0, 8000) 
        elif itind == 900: 
            locator.set_bounds(0, 15000) 
        # locator.set_bounds(0, 0.2*np.nanmax(H[:,:,:,itind]))
        #locator.set_bounds(0, 0.75*np.nanmax(np.nanmax(H[:,:,:,itind], axis=1), axis=1).mean())
        levels = locator()
    elif 'coast' in whichtype and whichdir == 'back':
        hist, bin_edges = np.histogram(H.flat, bins=100) # find # of occurrences of histogram bin values
        n = np.cumsum(hist)
        Hmax = bin_edges[find(n<(n.max()-n.min())*.7+n.min())[-1]] # take the 80% of histogram occurrences as the max instead of actual max since too high
        locator.set_bounds(0, 1) 
        levels = locator()
        extend = 'max'
        H = H/Hmax
    else:
        extend = 'neither'


    # Set up overall plot, now that everything is calculated
    fig, axarr = plot_setup(whichtime, grid) # depends on which plot we're doing

    # Loop through calculation files to calculate overall histograms
    # pdb.set_trace()
    for i in xrange(H.shape[0]): # Files has multiple entries, 1 for each subplot

        # Do subplot
        # pdb.set_trace()
        # which time index to plot?
        #itind = 100
        if cbook.is_numlike(itind): # plot a particular time
            mappable = plot_stuff(xe, ye, H[i,:,:,itind], cmap, grid, shelf_depth, axarr.flatten()[i], levels=levels)
        elif itind=='mean': # plot the mean over time
            mappable = plot_stuff(xe, ye, np.nansum(H[i,:,:,:], axis=-1)/np.sum(~np.isnan(H[i,:,:,:]), axis=-1), cmap, grid, shelf_depth, axarr.flatten()[i], levels=levels)
        elif itind=='none': # just plot what is there
            if 'levels' in locals():
                mappable = plot_stuff(xe, ye, H[i,:,:].T, cmap, grid, shelf_depth, axarr.flatten()[i], extend=extend, levels=levels)
            else:
                mappable = plot_stuff(xe, ye, H[i,:,:].T, cmap, grid, shelf_depth, axarr.flatten()[i], extend=extend)
        #axarr.flatten()[i].set_title(np.nanmax(H[i,:,:,itind]))
        # Add coastline area if applicable
        if 'coast' in whichtype:
            coastloc = whichtype.split('coast')[-1]
            pts = np.load('calcs/' + coastloc + 'pts.npz')[coastloc]
            axarr.flatten()[i].plot(pts[:,0], pts[:,1], color='0.0', lw=3)
            # verts = np.vstack((pts[:,0], pts[:,1]))
            # # Form path
            # path = Path(verts.T)
            # if not path.contains_point(np.vstack((xp[jd,it],yp[jd,it]))):

        # Overlay mean wind arrows
        if addwind:
            # Right now is just for cross, interannual, winter
            year = years[i]
            # year = File.split('/')[-1].split('-')[0]
            season = whichtime.split('-')[-1]
            wind = np.load('../txla_plots/calcs/wind_stress/1st/jfm/' + str(year) + season +  '.npz')
            x = wind['x']; y = wind['y']; u = wind['u']; v = wind['v']
            q = axarr.flatten()[i].quiver(x, y, u, v, color = '0.3',
                        pivot='middle', zorder=1e35, width=0.003)
                        # scale=1.0/scale, pivot='middle', zorder=1e35, width=0.003)

            # if year == 2008:
            #     plt.quiverkey(q, 0.85, 0.07, 0.1, label=r'0.1 N m$^{2}$', coordinates='axes')



    # Add colorbar
    plot_colorbar(fig, mappable, whichtype, whichdir=whichdir, whichtime=whichtime)
    # pdb.set_trace()

    # save and close
    plot_finish(fig, whichtype, whichtime, shelf_depth, itind, r, numdays)
Developer: kthyng, Project: shelf_transport, Lines: 101, Source: make_plots.py


Example 16: draw_networkx_edges


#......... (part of the code omitted) .........
            if np.alltrue([cb.iterable(c) and len(c) in (3, 4)
                          for c in edge_color]):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must contain color names or numbers')
    else:
        if is_string_like(edge_color) or len(edge_color) == 1:
            edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
        else:
            msg = 'edge_color must be a color or list of one color per edge'
            raise ValueError(msg)

    if (not G.is_directed() or not arrows):
        edge_collection = LineCollection(edge_pos,
                                         colors=edge_colors,
                                         linewidths=lw,
                                         antialiaseds=(1,),
                                         linestyle=style,
                                         transOffset=ax.transData,
                                         )

        edge_collection.set_zorder(1)  # edges go behind nodes
        edge_collection.set_label(label)
        ax.add_collection(edge_collection)

        # Note: there was a bug in mpl regarding the handling of alpha values
        # for each line in a LineCollection. It was fixed in matplotlib by
        # r7184 and r7189 (June 6 2009). We should then not set the alpha
        # value globally, since the user can instead provide per-edge alphas
        # now.  Only set it globally if provided as a scalar.
        if cb.is_numlike(alpha):
            edge_collection.set_alpha(alpha)

        if edge_colors is None:
            if edge_cmap is not None:
                assert(isinstance(edge_cmap, Colormap))
            edge_collection.set_array(np.asarray(edge_color))
            edge_collection.set_cmap(edge_cmap)
            if edge_vmin is not None or edge_vmax is not None:
                edge_collection.set_clim(edge_vmin, edge_vmax)
            else:
                edge_collection.autoscale()
        return edge_collection

    arrow_collection = None

    if G.is_directed() and arrows:
        # Note: Waiting for someone to implement arrow to intersection with
        # marker.  Meanwhile, this works well for polygons with more than 4
        # sides and circle.

        def to_marker_edge(marker_size, marker):
            if marker in "s^>v<d":  # `large` markers need extra space
                return np.sqrt(2 * marker_size) / 2
            else:
                return np.sqrt(marker_size) / 2

        # Draw arrows with `matplotlib.patches.FancyarrowPatch`
        arrow_collection = []
        mutation_scale = arrowsize  # scale factor of arrow head
        arrow_colors = edge_colors
        if arrow_colors is None:
            if edge_cmap is not None:
Developer: dtrckd, Project: networkx, Lines: 67, Source: nx_pylab.py


Example 17: find_best_d

def find_best_d(ftsfile,d_plt_ini,center=None,caltype=None,obj=None):
# ctrxy: array([x,y]) - pixel location of the object center

    ssw=ftsfile['SSWD4']
    slw=ftsfile['SLWC3']
    sswidx=where(ssw.data['wave'] < fmax_slw)
    sswidx=sswidx[0][1:]
    ssw=ssw.data[sswidx]
    ssw=ssw
    slwidx=where(slw.data['wave'] > fmin_ssw)
    slwidx=slwidx[0][0:-1]
    slw=slw.data[slwidx]

    if not is_numlike(center):
        ctrxy=array([128,128])
    else:
        hdr=ftsfile['SLWC3'].header
        dummywcs=make_dummy_wcs(hdr['RA'],hdr['DEC'])
        xyradec=array([center])
        ctrxy_tmp=dummywcs.wcs_world2pix(xyradec,0)
        ctrxy=copy(ctrxy_tmp[0])

    xs=ssw['wave']
    xl=slw['wave']

    if caltype == 'point':
        ssw_cal=zeros(len(cps['SSWD4'].data['pointConv']))+1.
        slw_cal=zeros(len(cps['SLWC3'].data['pointConv']))+1.
        
    if caltype == 'extended':
        ssw_cal=cps['SSWD4'].data['pointConv'].copy()
        slw_cal=cps['SLWC3'].data['pointConv'].copy()

    ssw_cal=ssw_cal[sswidx]
    slw_cal=slw_cal[slwidx]

    if (obj == 'm83' or obj == 'M83') or \
            (obj == 'm82' or obj == 'M82') or \
            (obj == 'lmc-n159' or obj == 'LMC-N159'):
        scal=cps['SSWD4'].data['pointConv'].copy()
        lcal=cps['SLWC3'].data['pointConv'].copy()
        scal=scal[sswidx]
        lcal=lcal[slwidx]
        ssw['flux'][:]=ssw['flux'].copy()*scal
        ssw['error'][:]=ssw['error'].copy()*scal
        slw['flux'][:]=slw['flux'].copy()*lcal
        slw['error'][:]=slw['error'].copy()*lcal
        sim_func=sim_gauss
    else:
        sim_func=sim_planet


    ssw_wnidx=(wn_ssw[0].data*30. < fmax_slw+5.) & \
        (wn_ssw[0].data*30. > fmin_ssw-5.)
    slw_wnidx=(wn_slw[0].data*30. < fmax_slw+5.) & \
        (wn_slw[0].data*30. > fmin_ssw-5.)
    slw_wnidx[-7]=False
    wn_sfit=wn_ssw[0].data[ssw_wnidx]
    wn_lfit=wn_slw[0].data[slw_wnidx]

    ssw_b=beam_ssw[0].data[ssw_wnidx,:,:].copy()
    slw_b=beam_slw[0].data[slw_wnidx,:,:].copy()
    sum_ssw_b=sum(sum(ssw_b,axis=1),axis=1)
    sum_slw_b=sum(sum(slw_b,axis=1),axis=1)

    refidx=1e9
    chiparam=[]
    chierr=[]
    d_plt_out=None
    d_plt=arange(26)*2.
    dplt=d_plt_ini+d_plt-20.
    dplt=dplt[where(dplt >= 0.)]
    for di in range(len(dplt)):
        d_input=dplt[di]
        planet_mod=sim_func(d_input,ctrxy)*img_mask
        planet_mod=planet_mod.copy()/planet_mod.max()
        planet_area=sum(planet_mod)
        sum_ssw_bp=[]
        sum_slw_bp=[]
        for bi in range(len(sum_ssw_b)):
            sum_ssw_bp.append(sum(ssw_b[bi,:,:]*planet_mod))
        for bi in range(len(sum_slw_b)):
            sum_slw_bp.append(sum(slw_b[bi,:,:]*planet_mod))
        sum_ssw_bp=array(sum_ssw_bp)
        sum_slw_bp=array(sum_slw_bp)
        f_sum_sbp=interpolate.interp1d(wn_sfit*30.,planet_area/sum_ssw_bp, \
                                           bounds_error=False, kind=3, \
                                           fill_value=planet_area/sum_ssw_bp[0])
        f_sum_lbp=interpolate.interp1d(wn_lfit*30.,planet_area/sum_slw_bp, \
                                           bounds_error=False, kind=3, \
                                           fill_value=planet_area/sum_slw_bp[0])
        ssw_corrf=ssw['flux']*ssw_cal*f_sum_sbp(xs)
        slw_corrf=slw['flux']*slw_cal*f_sum_lbp(xl)
        param=sum((slw_corrf-ssw_corrf)**2./100./ \
                      2./((slw['error']*slw_cal*f_sum_lbp(xl))**2.+ \
                              (ssw['error']*ssw_cal*f_sum_sbp(xs))**2.))
        err=sqrt(sum((slw['error']*slw_cal*f_sum_lbp(xl))**2.+ \
                    (ssw['error']*ssw_cal*f_sum_sbp(xs))**2.))
        chiparam.append(param)
        chierr.append(err)
#......... (part of the code omitted) .........
Developer: ivvv, Project: herspy, Lines: 101, Source: sect_etaomega.py


Example 18: hist


#......... (part of the code omitted) .........
            xmin = min(xmin, xi.min())
            xmax = max(xmax, xi.max())
        range = (xmin, xmax)

    #hist_kwargs = dict(range=range, normed=bool(normed))
    # We will handle the normed kwarg within mpl until we
    # get to the point of requiring numpy >= 1.5.
    hist_kwargs = dict(range=range)
    if np.__version__ < "1.3": # version 1.1 and 1.2
        hist_kwargs['new'] = True

    n = []
    for i in range(nx):
        # this will automatically overwrite bins,
        # so that each histogram uses the same bins
        m, bins = np.histogram(x[i], bins, weights=w[i], **hist_kwargs)
        if normed:
            db = np.diff(bins)
            m = (m.astype(float) / db) / m.sum()
        n.append(m)
    if normed and db.std() > 0.01 * db.mean():
        warnings.warn("""
        This release fixes a normalization bug in the NumPy histogram
        function prior to version 1.5, occurring with non-uniform
        bin widths. The returned and plotted value is now a density:
            n / (N * bin width),
        where n is the bin count and N the total number of points.
        """)



    if cumulative:
        slc = slice(None)
        if cbook.is_numlike(cumulative) and cumulative < 0:
            slc = slice(None,None,-1)

        if normed:
            n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
        else:
            n = [m[slc].cumsum()[slc] for m in n]

    patches = []

    if histtype.startswith('bar'):
        totwidth = np.diff(bins)

        if rwidth is not None:
            dr = min(1.0, max(0.0, rwidth))
        elif len(n)>1:
            dr = 0.8
        else:
            dr = 1.0

        if histtype=='bar':
            width = dr*totwidth/nx
            dw = width

            if nx > 1:
                boffset = -0.5*dr*totwidth*(1.0-1.0/nx)
            else:
                boffset = 0.0
            stacked = False
        elif histtype=='barstacked':
            width = dr*totwidth
            boffset, dw = 0.0, 0.0
            stacked = True
Developer: Moanwar, Project: cmssw, Lines: 67, Source: mpl_axes_hist_fix.py


Example 19: _calculate

    def _calculate(self, data):
        x = data.pop('x')
        breaks = self.params['breaks']
        right = self.params['right']
        binwidth = self.params['binwidth']

        # y values are not needed
        try:
            del data['y']
        except KeyError:
            pass
        else:
            self._print_warning(_MSG_YVALUE)

        # If weight not mapped to, use one (no weight)
        try:
            weights = data.pop('weight')
        except KeyError:
            weights = np.ones(len(x))
        else:
            weights = make_iterable_ntimes(weights, len(x))

        categorical = is_categorical(x.values)
        if categorical:
            x_assignments = x
            x = sorted(set(x))
            width = make_iterable_ntimes(self.params['width'], len(x))
        elif cbook.is_numlike(x.iloc[0]):
            if breaks is None and binwidth is None:
                _bin_count = 30
                self._print_warning(_MSG_BINWIDTH)
            if binwidth:
                _bin_count = int(np.ceil(np.ptp(x))) / binwidth

            # Breaks have a higher precedence and,
            # pandas accepts either the breaks or the number of bins
            _bins_info = breaks or _bin_count
            x_assignments, breaks = pd.cut(x, bins=_bins_info, labels=False,
                                           right=right, retbins=True)
            width = np.diff(breaks)
            x = [breaks[i] + width[i] / 2
                 for i in range(len(breaks)-1)]
        else:
            raise GgplotError("Cannot recognise the type of x")

        # Create a dataframe with two columns:
        #   - the bins to which each x is assigned
        #   - the weights of each x value
        # Then create a weighted frequency table
        _df = pd.DataFrame({'assignments': x_assignments,
                            'weights': weights
                            })
        _wfreq_table = pd.pivot_table(_df, values='weights',
                                      rows=['assignments'], aggfunc=np.sum)

        # For numerical x values, empty bins have no value
        # in the computed frequency table. We need to add the zeros and
        # since frequency table is a Series object, we need to keep it ordered
#......... (rest of the code omitted) .........
