
Python pylab.log Function Code Examples


This article collects typical usage examples of the pylab.log function in Python. If you are wondering what the log function does, how it is called, or what real-world uses of it look like, the curated code examples below may help.



The following presents 20 code examples of the log function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
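
Before the project-level examples below, a minimal, self-contained sketch may help: pylab.log is numpy's elementwise natural logarithm re-exported through the pylab namespace (the array and values here are purely illustrative).

import pylab as pl

x = pl.array([1.0, pl.e, 10.0])
print(pl.log(x))                      # elementwise natural log: [0., 1., 2.302...]
print(pl.log(100.0) / pl.log(10.0))   # change of base gives log10(100) = 2.0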

Example 1: obs

 def obs(f=vars['rate_stoch'],
         age_indices=age_indices,
         age_weights=age_weights,
         value=pl.log(dm.value_per_1(d)),
         tau=se**-2, data=d):
     f_i = dismod3.utils.rate_for_range(f, age_indices, age_weights)
     return mc.normal_like(value, pl.log(f_i), tau)
Developer: aflaxman, Project: gbd, Lines of code: 7, Source file: log_normal_model.py


Example 2: likelihood

	def likelihood(self, x0, X, Y, U):
		"""returns the log likelihood of the states `X` and observations `Y` 
		under the current model p(X,Y|M)
		
		Parameters
		----------
		x0 : matrix
			initial state
		X : list of matrix
			state sequence
		Y : list of matrix
			observation sequence
		U : list of matrix
			input sequence
		
		Notes
		----------
		This calculates 
			p(X,Y|M) = p(x0)\prod_{t=1}^Tp(y_t|x_t)\prod_{t=1}^Tp(x_t|x_{t-1})
		using the model currently defined in self.
		"""
		l1 = pb.sum([pb.log(self.observation_dist(x,y)) for (x,y) in zip(X,Y)])
		l2 = pb.sum([
			pb.log(self.transition_dist(x,u,xdash)) for (x,u,xdash) in zip(X[:-1],U[:-1],X[1:])])
		l3 = self.init_dist(x0)
		l = l1 + l2 + l3
		assert not pb.isinf(l).any(), (l1,l2,l3)
		return l
Developer: mikedewar, Project: pyLDS, Lines of code: 28, Source file: LDS.py


Example 3: query

def query(q,DBs,Ms,n,l=1000,minVal=0.0,maxVal=1.0):
    #parameters
    P1=.01779
    P2=.0000156
    rho = log(P1)/log(P2)
    sims = len(Ms)#number of multi-runs for whp
    k = len(Ms[0])#number of random matrix projections per run
    
    candidates = set()
    #first iterate over the runs
    for s in xrange(sims):
        #next iterate over the n^rho nearby points
        hashVal = decodeGt24(q,Ms[s],minVal,maxVal)
        if DBs[s].has_key(hashVal):
        
            for c in DBs[s][hashVal]:
                candidates.add(c)
                
        for r in xrange(int(n**rho+.5)):
            hashVal = decodeGt24(q,Ms[s],minVal,maxVal,True)
            
            if DBs[s].has_key(hashVal):
                for c in DBs[s][hashVal]:
                    candidates.add(c)
            if len(candidates)>2*l:return candidates
            
    return candidates
Developer: leecarraher, Project: CardinalityShiftClustering, Lines of code: 27, Source file: knn.py
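
A note on Example 3: rho = log(P1)/log(P2) sets how many extra hash probes the query loop makes, roughly n**rho per run. A small hedged sketch with the constants from the snippet (the dataset size n is an assumption for illustration):

from pylab import log

P1, P2 = .01779, .0000156
rho = log(P1) / log(P2)           # about 0.364 for these constants
n = 100000                        # assumed dataset size
print(int(n ** rho + .5))         # roughly 66 nearby probes per run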


Example 4: add_thermodynamic_constraints

def add_thermodynamic_constraints(cpl, dG0_f, c_range=(1e-6, 1e-2), T=default_T, bounds=None):   
    """
        For any compound that does not have an explicit bound set by the 'bounds' argument,
        create a bound using the 'margin' variables (the last to columns of A).
    """
    
    Nc = dG0_f.shape[0]

    if bounds != None and len(bounds) != Nc:
        raise Exception("The concentration bounds list must be the same length as the number of compounds")
    if bounds == None:
        bounds = [(None, None)] * Nc
    
    for c in xrange(Nc):
        if pylab.isnan(dG0_f[c, 0]):
            continue # unknown dG0_f - cannot bound this compound's concentration at all

        b_low = bounds[c][0] or c_range[0]
        b_high = bounds[c][1] or c_range[1]

        # lower bound: dG0_f + R*T*ln(Cmin) <= x_i
        cpl.variables.set_lower_bounds('c%d' % c, dG0_f[c, 0] + R*T*pylab.log(b_low))

        # upper bound: x_i <= dG0_f + R*T*ln(Cmax)
        cpl.variables.set_upper_bounds('c%d' % c, dG0_f[c, 0] + R*T*pylab.log(b_high))
Developer: issfangks, Project: milo-lab, Lines of code: 25, Source file: feasibility.py
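
The two bounds in Example 4 simply shift dG0_f by R*T*log(C) evaluated at the concentration limits. A hedged numeric sketch of the resulting window (the values of R, in kJ/(mol*K), and T are assumptions for illustration, not taken from the project):

import pylab

R, T = 8.31e-3, 298.15                        # assumed: R in kJ/(mol*K), T in Kelvin
c_range = (1e-6, 1e-2)
lower_shift = R * T * pylab.log(c_range[0])   # about -34.2 kJ/mol
upper_shift = R * T * pylab.log(c_range[1])   # about -11.4 kJ/mol
print(upper_shift - lower_shift)              # allowed window is ~22.8 kJ/mol wide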


Example 5: covariate_constraint

    def covariate_constraint(mu=vars['mu_age'], alpha=vars['alpha'], beta=vars['beta'],
                             U_all=U_all,
                             X_sex_max=X_sex_max,
                             X_sex_min=X_sex_min,
                             lower=pl.log(model.parameters[name]['level_bounds']['lower']),
                             upper=pl.log(model.parameters[name]['level_bounds']['upper'])):
        log_mu_max = pl.log(mu.max())
        log_mu_min = pl.log(mu.min())

        alpha = pl.array([float(x) for x in alpha])
        if len(alpha) > 0:
            for U_i in U_all:
                log_mu_max += max(0, alpha[U_i].max())
                log_mu_min += min(0, alpha[U_i].min())

        # this estimate is too crude, and is causing problems
        #if len(beta) > 0:
        #    log_mu_max += pl.sum(pl.maximum(X_max*beta, X_min*beta))
        #    log_mu_min += pl.sum(pl.minimum(X_max*beta, X_min*beta))

        # but leaving out the sex effect results in strange problems, too
        log_mu_max += X_sex_max*float(beta[sex_index])
        log_mu_min += X_sex_min*float(beta[sex_index])

        lower_violation = min(0., log_mu_min - lower)
        upper_violation = max(0., log_mu_max - upper)
        return mc.normal_like([lower_violation, upper_violation], 0., 1.e-6**-2)
Developer: aflaxman, Project: gbd, Lines of code: 27, Source file: expert_prior_model.py


Example 6: _make_log_freq_map

    def _make_log_freq_map(self):
        """
        ::

            For the given ncoef (bands-per-octave) and nfft, calculate the center frequencies
            and bandwidths of linear and log-scaled frequency axes for a constant-Q transform.
        """
        fp = self.feature_params
        bpo = float(self.nbpo) # Bands per octave
        self._fftN = float(self.nfft)
        hi_edge = float( self.hi )
        lo_edge = float( self.lo )
        f_ratio = 2.0**( 1.0 / bpo ) # Constant-Q bandwidth
        self._cqtN = float( P.floor(P.log(hi_edge/lo_edge)/P.log(f_ratio)) )
        self._dctN = self._cqtN
        self._outN = float(self.nfft/2+1)
        if self._cqtN<1: print "warning: cqtN not positive definite"
        mxnorm = P.empty(self._cqtN) # Normalization coefficients        
        fftfrqs = self._fftfrqs #P.array([i * self.sample_rate / float(self._fftN) for i in P.arange(self._outN)])
        logfrqs=P.array([lo_edge * P.exp(P.log(2.0)*i/bpo) for i in P.arange(self._cqtN)])
        logfbws=P.array([max(logfrqs[i] * (f_ratio - 1.0), self.sample_rate / float(self._fftN)) 
                         for i in P.arange(self._cqtN)])
        #self._fftfrqs = fftfrqs
        self._logfrqs = logfrqs
        self._logfbws = logfbws
        self._make_cqt()
Developer: BinRoot, Project: BregmanToolkit, Lines of code: 26, Source file: features_base.py


Example 7: projectCl

def projectCl(lvec,P,D,dNdz,z,growthFac=None):
    """
    project C_l's given a power spectrum class P (Camb or
    BBKS) and Distance class D together

    arguments:
    lvec: vector of l values
    P: p.pk p.k contains the power spectrum, e.g. pt.Camb instance
    D: frw.Distance instance
    dNdz,z, growthFac: vectors suitable for trapezoid z-integration

    presently it crashes if z=0.0 is included, start from a small z value
    """
    lvec = M.asarray(lvec)
    dNdz2 = M.asarray(dNdz)**2
    z = M.asarray(z)
    da1 = 1./D.rtc(z)/D.h #comoving Da in h^1Mpc

    dNdz2vc = dNdz2/D.vc(z)/D.h**3 # comovin volume in (h^-1Mpc)^3
    #`use growth factor if given
    if growthFac:
        dNdz2vc = dNdz2vc*(growthFac**2)
    lk = M.log(P.k)
    pk = P.pk
    
##     return M.asarray([utils.trapz(utils.splineResample(pk,lk,
##                      M.log(l*da1))*dNdz2vc,z) for l in lvec])
    return M.asarray([utils.trapz(utils.interpolateLin(pk,lk,
                     M.log(l*da1))*dNdz2vc,z) for l in lvec])
Developer: astrofanlee, Project: project_TL, Lines of code: 29, Source file: proj.py


Example 8: duxbury_icdf

def duxbury_icdf(X,L,s):
	"""
	Returns the inverse duxbury cdf evaluated at X.
	The duxbury CDF is 1 - exp( -(L^2)*exp( - (s/x)^2 ) )
	"""

	return (-s*s/pylab.log( -pylab.log(1-X)/(L*L)) )**(0.5)
Developer: ashivni, Project: FuseNetwork, Lines of code: 7, Source file: statUtils.py
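
Where the expression in Example 8 comes from: solving X = 1 - exp(-(L^2)*exp(-(s/x)^2)) for x gives x = (-s^2 / log(-log(1-X)/L^2))^(1/2). A hedged round-trip check with arbitrary illustrative parameters:

import pylab

def duxbury_cdf(x, L, s):
    # forward CDF, as stated in the Example 8 docstring
    return 1 - pylab.exp(-(L * L) * pylab.exp(-(s / x) ** 2))

L, s, x = 10.0, 2.0, 1.5            # arbitrary illustrative values
X = duxbury_cdf(x, L, s)
x_back = (-s * s / pylab.log(-pylab.log(1 - X) / (L * L))) ** 0.5
print(x, x_back)                    # the two should agree up to rounding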


Example 9: weibull_lsq

def weibull_lsq(data):
	"""
	Returns the weibull parameters estimated by using 
	the least square method for the given data.
	The weibull CDF is 1 - exp(-(x/l)^k).
	One should be aware of the fact that this approach weighs 
	the extreme (small or large) observations more than the 
	bulk.
	"""

	# Evaluate the emperical CDF at the observations
	# and rescale to convert into emperical probability
	n = len(data)
	print type(data)
	print type(empCDF(data,data))	
	ecdf = empCDF(data,data)*n/(1.0  + n)	

	# Make the array of "infered" variables and independent variables
	y = pylab.log(-pylab.log(1-ecdf))
	x = pylab.log(data)

	# estimate regression coefficients of y = a*x + b
	a, b = lsqReg(x,y)

	# Extract the weibull parameters
	k = a 
	l = pylab.exp(-b/k)

	return k, l
Developer: ashivni, Project: FuseNetwork, Lines of code: 29, Source file: statUtils.py
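
The estimates k = a and l = exp(-b/k) in Example 9 follow from linearizing the Weibull CDF: log(-log(1-F)) = k*log(x) - k*log(l), so the regression slope is k and the intercept is -k*log(l). Below is a hedged sketch that replaces the project's empCDF/lsqReg helpers with plain pylab calls (the "true" parameters are made up):

import pylab

k_true, l_true, n = 1.7, 3.0, 5000                        # assumed true parameters
u = pylab.rand(n)
data = l_true * (-pylab.log(1 - u)) ** (1.0 / k_true)     # inverse-transform Weibull samples

data.sort()
ecdf = pylab.arange(1, n + 1) / (n + 1.0)                 # empirical CDF, rescaled as in the snippet
y = pylab.log(-pylab.log(1 - ecdf))
x = pylab.log(data)
a, b = pylab.polyfit(x, y, 1)                             # slope, intercept
k_hat, l_hat = a, pylab.exp(-b / a)
print(k_hat, l_hat)                                       # should come out close to 1.7 and 3.0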


Example 10: obs

def obs(pi=pi):
    return (
        pop_A_prev * pop_A_N * pl.log(pi)
        + (1 - pop_A_prev) * pop_A_N * pl.log(1 - pi)
        + pop_B_prev * pop_B_N * pl.log(pi)
        + (1 - pop_B_prev) * pop_B_N * pl.log(1 - pi)
    )
Developer: aflaxman, Project: gbd, Lines of code: 7, Source file: binomial_model.py


Example 11: calcAUC

def calcAUC(data, y0, lag, mgr, asym, time):
    """
    Calculate the area under the curve of the logistic function
    using its integrated formula
    [ A( [A-y0] log[ exp( [4m(l-t)/A]+2 )+1 ]) / 4m ] + At
    """

    # First check that max growth rate is not zero
    # If so, calculate using the data instead of the equation
    if mgr == 0:
        auc = calcAUCData(data, time)
    else:
        timeS = time[0]
        timeE = time[-1]
        t1 = asym - y0
        #try:
        t2_s = py.log(py.exp((4 * mgr * (lag - timeS) / asym) + 2) + 1)
        t2_e = py.log(py.exp((4 * mgr * (lag - timeE) / asym) + 2) + 1)
        #except RuntimeWarning as rw:
            # Exponent is too large, setting to 10^3
        #    newexp = 1000
        #    t2_s = py.log(newexp + 1)
        #    t2_e = py.log(newexp + 1)
        t3 = 4 * mgr
        t4_s = asym * timeS
        t4_e = asym * timeE

        start = (asym * (t1 * t2_s) / t3) + t4_s
        end = (asym * (t1 * t2_e) / t3) + t4_e
        auc = end - start

    if py.absolute(auc) == float('Inf'):
        x = py.diff(time)
        auc = py.sum(x * data[1:])
    return auc
Developer: dacuevas, Project: PMAnalyzer, Lines of code: 35, Source file: GrowthCurve.py
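
Differentiating the antiderivative used in Example 11 shows which curve is being integrated: y(t) = y0 + (asym - y0)/(1 + exp(4*mgr*(lag - t)/asym + 2)). A hedged numerical cross-check with made-up parameters:

import pylab as py

y0, lag, mgr, asym = 0.05, 2.0, 0.4, 1.2       # illustrative parameters
t = py.linspace(0.0, 24.0, 2000)
y = y0 + (asym - y0) / (1 + py.exp(4 * mgr * (lag - t) / asym + 2))

def F(ti):
    # the antiderivative, written exactly as in calcAUC
    t2 = py.log(py.exp((4 * mgr * (lag - ti) / asym) + 2) + 1)
    return (asym * ((asym - y0) * t2) / (4 * mgr)) + asym * ti

print(F(t[-1]) - F(t[0]))          # closed-form AUC
print(py.trapz(y, t))              # numerical check; should agree closely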


Example 12: setup

def setup(dm, key, data_list, rate_stoch):
    """ Generate the PyMC variables for a log-normal model of
    a function of age

    Parameters
    ----------
    dm : dismod3.DiseaseModel
      the object containing all the data, priors, and additional
      information (like input and output age-mesh)
      
    key : str
      the name of the key for everything about this model (priors,
      initial values, estimations)

    data_list : list of data dicts
      the observed data to use in the beta-binomial liklihood function

    rate_stoch : pymc.Stochastic
      a PyMC stochastic (or deterministic) object, with
      len(rate_stoch.value) == len(dm.get_estimation_age_mesh()).

    Results
    -------
    vars : dict
      Return a dictionary of all the relevant PyMC objects for the
      log-normal model.  vars['rate_stoch'] is of particular
      relevance, for details see the beta_binomial_model
    """
    vars = {}
    est_mesh = dm.get_estimate_age_mesh()
    vars['rate_stoch'] = rate_stoch

    # set up priors and observed data
    prior_str = dm.get_priors(key)
    dismod3.utils.generate_prior_potentials(vars, prior_str, est_mesh)

    vars['observed_rates'] = []
    for d in data_list:
        age_indices = dismod3.utils.indices_for_range(est_mesh, d['age_start'], d['age_end'])
        age_weights = d.get('age_weights', pl.ones(len(age_indices)) / len(age_indices))

        lb, ub = dm.bounds_per_1(d)
        se = (pl.log(ub) - pl.log(lb)) / (2. * 1.96)
        if pl.isnan(se) or se <= 0.:
            se = 1.

        print 'data %d: log(value) = %f, se = %f' % (d['id'], pl.log(dm.value_per_1(d)), se)
        
        @mc.observed
        @mc.stochastic(name='obs_%d' % d['id'])
        def obs(f=vars['rate_stoch'],
                age_indices=age_indices,
                age_weights=age_weights,
                value=pl.log(dm.value_per_1(d)),
                tau=se**-2, data=d):
            f_i = dismod3.utils.rate_for_range(f, age_indices, age_weights)
            return mc.normal_like(value, pl.log(f_i), tau)
        vars['observed_rates'].append(obs)
        
    return vars
Developer: aflaxman, Project: gbd, Lines of code: 60, Source file: log_normal_model.py
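
One line in Example 12 worth spelling out: se = (log(ub) - log(lb)) / (2 * 1.96) treats [lb, ub] as a 95% interval on the natural-log scale, whose full width is 2*1.96 standard errors. A hedged one-off with made-up bounds:

import pylab as pl

lb, ub = 0.02, 0.08                            # made-up bounds per 1 population
se = (pl.log(ub) - pl.log(lb)) / (2. * 1.96)
print(se)                                      # ~0.354, since log(0.08/0.02) = log(4) = 1.386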


Example 13: add_localized_dGf_constraints

    def add_localized_dGf_constraints(self, cid2dG0_f, cid2bounds, c_range, T=300):
        self.T = T
        for (rid, sparse) in self.reactions:
            dG0_r = 0
            for (cid, coeff) in sparse.iteritems():
                if cid in cid2dG0_f:
                    dG0_r += coeff * cid2dG0_f[cid]
                else:
                    dG0_r = None
                    break

                (curr_c_min, curr_c_max) = cid2bounds.get(cid, (None, None))
                if curr_c_min == None:
                    curr_c_min = c_range[0]
                if curr_c_max == None:
                    curr_c_max = c_range[1]

                if coeff < 0:
                    dG0_r += coeff * common.R * T * pylab.log(curr_c_max)
                else:
                    dG0_r += coeff * common.R * T * pylab.log(curr_c_min)

            if dG0_r != None and dG0_r > 0:
                # this reaction is a localized bottleneck, add a constraint that its flux = 0
                constraint_name = rid + "_irreversible"
                self.cpl.linear_constraints.add(names=[constraint_name], senses="E", rhs=[0])
                self.cpl.linear_constraints.set_coefficients(constraint_name, rid, 1)
Developer: issfangks, Project: milo-lab, Lines of code: 27, Source file: yeast_stoichiometric_lp.py


Example 14: make_pCr_problem

def make_pCr_problem(S, dG0_f,
                     c_mid=1e-3,
                     ratio=3.0,
                     T=default_T,
                     bounds=None,
                     log_stream=None):
    """Creates a Cplex problem for finding the pCr.
    
    Simply sets up all the constraints. Does not set the objective.
    
    Args:
        S: stoichiometric matrix.
        dG0_f: deltaG0'-formation values for all compounds (in kJ/mol) (1 x compounds)
        c_mid: the default concentration to center the pCr on.
        ratio: the ratio between the distance of the upper bound from c_mid
            and the lower bound from c_mid (in logarithmic scale)
        bounds: the concentration bounds for metabolites.
        log_stream: where to write Cplex logs to.
    
    Returns:
        A cplex.Cplex object for the problem.
    """
    Nc = S.shape[1]
    if Nc != dG0_f.shape[0]:
        raise Exception("The S matrix has %d columns, while the dG0_f vector has %d" % (Nc, dG0_f.shape[0]))
    if bounds and len(bounds) != Nc:
        raise Exception("The concentration bounds list must be the same length as the number of compounds")

    cpl = create_cplex(S, dG0_f, log_stream)
    
    # Add pC variable.
    cpl.variables.add(names=['pC'], lb=[0], ub=[1e6])
    
    # Add variables for concentration bounds for each metabolite.
    for c in xrange(Nc):
        if pylab.isnan(dG0_f[c, 0]):
            continue # unknown dG0_f - cannot bound this compound's concentration at all

        # dG at the center concentration.
        dG_f_mid = dG0_f[c, 0] + R*T*pylab.log(c_mid)
        if bounds == None or bounds[c][0] == None:
            # lower bound: x_i + r/(1+r) * R*T*ln(10)*pC >= dG0_f + R*T*ln(Cmid) 
            cpl.linear_constraints.add(senses='G', names=['c%d_lower' % c], rhs=[dG_f_mid])
            cpl.linear_constraints.set_coefficients('c%d_lower' % c, 'c%d' % c, 1)
            cpl.linear_constraints.set_coefficients('c%d_lower' % c, 'pC', R*T*pylab.log(10) * ratio / (ratio + 1.0))
        else:
            # this compound has a specific lower bound on its activity
            cpl.variables.set_lower_bounds('c%d' % c, dG0_f[c, 0] + R*T*pylab.log(bounds[c][0]))

        if bounds == None or bounds[c][1] == None:
            # upper bound: x_i - 1/(1+r) * R*T*ln(10)*pC <= dG0_f + R*T*ln(Cmid)
            cpl.linear_constraints.add(senses='L', names=['c%d_upper' % c], rhs=[dG_f_mid])
            cpl.linear_constraints.set_coefficients('c%d_upper' % c, 'c%d' % c, 1)
            cpl.linear_constraints.set_coefficients('c%d_upper' % c, 'pC', -R*T*pylab.log(10) / (ratio + 1.0))
        else:
            # this compound has a specific upper bound on its activity
            cpl.variables.set_upper_bounds('c%d' % c, dG0_f[c, 0] + R*T*pylab.log(bounds[c][1]))

    return cpl
Developer: issfangks, Project: milo-lab, Lines of code: 59, Source file: feasibility.py


Example 15: tune_alpha

    def tune_alpha(self, drug_name, alphas=None, N=80, l1_ratio=0.5,
                   n_folds=10, show=True, shuffle=False, alpha_range=[-2.8,0.1]):
        """Interactive tuning of the model (alpha).

        This is much faster than :meth:`plot_cindex` but much slower than
        ElasticNetCV

        .. plot::
            :include-source:

            from gdsctools import *
            ic = IC50(gdsctools_data("IC50_v5.csv.gz"))
            gf = GenomicFeatures(gdsctools_data("genomic_features_v5.csv.gz"))

            en = GDSCElasticNet(ic, gf)

            en.tune_alpha(1047, N=40, l1_ratio=0.1)

        """
        if alphas is None:
            # logspace returns a vector in natural space that guarantees a
            # uniform spacing in a log space (log10 or ln)
            # -2.8 to 0.5 means alpha from 1.58e-3 to 3.16
            # This is equivalent to log(1.58e-3)=-6.45 to log(3.16)=1.15 in ln
            # scale
            a1, a2 = alpha_range
            alphas = pylab.logspace(a1, a2, N)

        # Let us now do a CV across difference alphas
        all_scores = []
        for alpha in alphas:
            scores = self.fit(drug_name, alpha, l1_ratio=l1_ratio,
                              n_folds=n_folds, shuffle=shuffle)
            all_scores.append(scores)

        # We can now plot the results that is the mean scores + error enveloppe
        df = pd.DataFrame(all_scores)

        # we also identify the max correlation and corresponding alpha
        maximum = df.mean(axis=1).max()
        alpha_best = alphas[df.mean(axis=1).argmax()]

        if show is True:
            mu = df.mean(axis=1)
            sigma = df.var(axis=1)
            pylab.clf()
            pylab.errorbar(pylab.log(alphas), mu, yerr=sigma, color="gray")
            pylab.plot(pylab.log(alphas), mu, 'or')
            pylab.axvline(pylab.log(alpha_best), lw=4, alpha=0.5, color='g')
            pylab.title("Mean scores (pearson) across alphas for Kfold=%s" % n_folds)
            pylab.xlabel("ln(alpha)")
            pylab.ylabel("mean score (pearson)")
            pylab.grid()

        results = {"alpha_best":alpha_best, "ln_alpha":pylab.log(alpha_best),
            "maximum_Rp":maximum}
        return results
Developer: CancerRxGene, Project: gdsctools, Lines of code: 57, Source file: regression.py
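
The logspace comment in Example 15 amounts to this: pylab.logspace(a1, a2, N) is uniform in log10, so pylab.log of the result is uniformly spaced as well (just scaled by log(10)). A short hedged sketch:

import pylab

alphas = pylab.logspace(-2.8, 0.1, 5)       # the method's default alpha_range
print(alphas)                               # from 10**-2.8 ~ 1.58e-3 up to 10**0.1 ~ 1.26
print(pylab.diff(pylab.log(alphas)))        # constant steps: uniform in ln-space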


Example 16: plot_risetimes

def plot_risetimes(a, b, **kwargs):

    # plt.ion()
    # if kwargs is not None:
    #     for key, value in kwargs.iteritems():
    #         if key == 'file_list':
    #             file_list = value
    #         if key == 'scan_line':
    #             scan_line = value
    # varray = plt.array(get_value_from_cfg(file_list, scan_line))

    n_files = a.shape[-1]
    cmap = plt.get_cmap('jet')
    c = [cmap(i) for i in plt.linspace(0, 1, n_files)]

    fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
    [ax.set_color_cycle(c) for ax in (ax1, ax2)]

    r = []
    for i in xrange(n_files):
        x, y = a[:,i], b[:,i]
        # xo, yo = x, y #, get_envelope(x, y)
        xo, yo = get_envelope(x, y)
        p = plt.polyfit(xo, np.log(yo), 1)

        # Right way to fit... a la Nicolas - the fit expert!
        l = ax1.plot(x, plt.log(plt.absolute(y)))
        lcolor = l[-1].get_color()
        ax1.plot(xo, plt.log(yo), color=lcolor, marker='o', mec=None)
        ax1.plot(x, p[1] + x * p[0], color=lcolor, ls='--', lw=3)

        l = ax2.plot(x, y)
        lcolor = l[-1].get_color()
        ax2.plot(xo, yo, 'o', color=lcolor)
        xi = plt.linspace(plt.amin(x), plt.amax(x))
        yi = plt.exp(p[1] + p[0] * xi)
        ax2.plot(xi, yi, color=lcolor, ls='--', lw=3)

        print p[1], p[0], 1 / p[0]
        # plt.draw()
        # ax1.cla()
        # ax2.cla()

        r.append(1/p[0])

    ax2.set_ylim(0, 1000)
    plt.figure(2)
    plt.plot(r, lw=3, c='purple')
    # plt.gca().set_ylim(0, 10000)

    # ax3 = plt.subplot(111)
    # ax3.semilogy(x, y)
    # ax3.semilogy(xo, yo)

    return r
Developer: like2000, Project: Pyheana, Lines of code: 55, Source file: plot_risetimes.py


Example 17: run

    def run(self):
        """
        create an inifile from the parameters and run camb on it
        and store the results in k,pk
        """
        self.printIniFile()
        os.system(self.cambPath + "/camb " + self.iniName)
        self.k, self.pk = utils.readColumns(self.cp.output_root + "_matterpower.dat")

        self.logk, self.logpk = M.log(self.k), M.log(self.pk)
        self.pkSplineCoeff = SS.cspline1d(self.logpk)
Developer: jizhi, Project: project_TL, Lines of code: 11, Source file: pt.py


Example 18: log_mat

def log_mat(X):
    res = zeros(X.shape) * complex(0,0)
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            if (X[i,j] > 0):
                res[i,j] = complex(log(X[i,j]), 0)
            elif (X[i,j] < 0):
                res[i,j] = complex(log(-X[i,j]), pi)
            else:
                res[i,j] = nan
    return res
Developer: issfangks, Project: milo-lab, Lines of code: 11, Source file: log_matrix.py
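
Example 18 extends the real log to negative entries via log(-x) + i*pi, i.e. the principal branch of the complex logarithm. A hedged check against numpy's own complex log (the value of x is illustrative):

from pylab import log, pi, exp
import numpy as np

x = -2.5
manual = complex(log(-x), pi)         # log|x| + i*pi, as in log_mat
print(manual, np.log(complex(x)))     # both give (0.916... + 3.14159...j)
print(exp(manual))                    # recovers -2.5 (up to a tiny imaginary residue)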


Example 19: mStar

def mStar(m,nu):
    """
    Returns M* based on an array of nu(M)'s.
    M* is defined to be the mass at which nu(M) = 1.
    Used for concentration distribution.
    """
    closest = N.where(nu < 1.)[0][-1] #nu increases with M

    logmstar = M.log(m[closest]) + M.log(m[closest+1]/m[closest])/M.log(nu[closest+1]/nu[closest])*\
               M.fabs(M.log(nu[closest]))
    return M.exp(logmstar)
Developer: astrofanlee, Project: project_TL, Lines of code: 11, Source file: halo.py
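
mStar in Example 19 is linear interpolation in log-log space between the two masses that bracket nu = 1. A hedged sketch with a fabricated power-law nu(M), for which the interpolation is exact:

import pylab as M
import numpy as N

m = M.logspace(10, 16, 50)                    # illustrative mass grid
nu = (m / 3e12) ** 0.3                        # fabricated nu(M), increasing in M; crosses 1 at 3e12
closest = N.where(nu < 1.)[0][-1]
logmstar = M.log(m[closest]) + M.log(m[closest + 1] / m[closest]) / \
           M.log(nu[closest + 1] / nu[closest]) * M.fabs(M.log(nu[closest]))
print(M.exp(logmstar))                        # ~3e12, the mass where nu(M) = 1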


Example 20: runEisensteinHu

    def runEisensteinHu(self, sig8):
        """
        use (much faster, but somewhat less accurate) Eisenstein & Hu
        code.

        Not tested recently.
        """
        #Output EHu file
        f = file('ehu.in','w')

        #f.write((str(self.cp.omega_baryon + self.cp.omega_cdm))+', '+str(self.cp.omega_lambda)+', '+\
        #        str(self.cp.omega_neutrino)+', '+str(self.cp.omega_baryon)+'\n')

        h = self.cp.hubble/100.
        om0 = (self.cp.ombh2 + self.cp.omch2)/h**2
        f.write(str(om0)+', '+str(1.-om0)+', '+ str(self.cp.omega_neutrino)+', '+str(self.cp.ombh2/h**2)+'\n')
        f.write(str(h)+', '+str(self.cp.temp_cmb)+', '+str(self.cp.massless_neutrinos)+'\n')
        f.write(str(self.cp.transfer_redshift[0])+'\n')
        f.write(str(self.cp.transfer_kmax)+', '+str(self.cp.transfer_k_per_logint)+'\n')
        f.write('1\n')
        tilt = self.cp.scalar_spectral_index[0]
        f.write(str(tilt)+'\n')
        f.write('0\n')

        f.close()

        # run EHu code
        os.system('../ehu/power < ehu.in > ehu.crap')

        # read into c.k, c.pk
        eh = N.loadtxt('trans.dat')
        self.k = eh[:,0]*1.
        #print self.k
        self.logk = M.log(self.k)
        self.trans = eh[:,1]*1.
        if tilt == 1.:
            delH =  1.94e-5*(self.cp.omega_cdm + self.cp.omega_baryon)**(-0.785)
            delta = delH**2*(3000.0*self.k/(self.cp.hubble/100.))**4*self.trans**2
        else:
            delH =  1.94e-5*self.cp.omega_cdm**(-0.785 - 0.05*M.log(tilt))\
                   * M.exp(-0.95*(tilt - 1.) - 0.169*(tilt - 1)**2)
            delta = delH**2*(3000.0*self.k/(self.cp.hubble/100.))**(3 + tilt)*self.trans**2

        # Just an approximate normalization; really need sig8.
            
        self.pk = (2.*M.pi**2 * delta/self.k**3)*(self.cp.hubble/100.)**3
        if self.cp.transfer_redshift[0] > 0.:
            ps = PowerSpectrum(self.cp)
            sig8use = sig8*ps.d1(self.cp.transfer_redshift[0])/ps.d1(0.)
        else:
            sig8use = sig8
        normalizePk(self,sig8use) # sets c.logpk, too

        return
Developer: ARepp, Project: Fisher, Lines of code: 54, Source file: pt.py



Note: The pylab.log function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult the corresponding project's license before redistributing or reusing the code, and do not republish without permission.

