This article collects typical usage examples of Python's math.log function. If you have been wondering how math.log is used in practice, how to call it, or what real-world examples look like, the hand-picked code samples below may help.
A total of 20 code examples of the log function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: _get_value_log
def _get_value_log(self, x, mu, v):
    """log basic 2"""
    try:
        return loggamma(x+v) - loggamma(x+1) - loggamma(v) + v*log(v) - v*log(v+mu) + x*log(mu) - x*log(v+mu)
    except ValueError:
        #print('_get_value_log ValueError', x, mu, v, file=sys.stderr)
        return 1
Author: jovesus | Project: reg-gen | Lines: 7 | Source: neg_bin.py
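The return expression is the log of a negative binomial probability mass with mean mu and dispersion v. As a sanity check (not part of reg-gen; the values below are arbitrary, and importing loggamma from scipy.special is an assumption about where the project gets it), it can be compared against scipy.stats.nbinom.logpmf with n = v and p = v/(v+mu):

from math import log
from scipy.special import loggamma
from scipy.stats import nbinom

x, mu, v = 7, 5.0, 2.0  # arbitrary illustrative values
manual = (loggamma(x+v) - loggamma(x+1) - loggamma(v)
          + v*log(v) - v*log(v+mu) + x*log(mu) - x*log(v+mu))
print(manual, nbinom.logpmf(x, v, v/(v+mu)))  # the two values should agree to floating-point precision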
Example 2: make_dictionary
def make_dictionary(file_array):
    cufflinks_dict={}
    for i in range(0,len(file_array)):
        if file_array[i] != '':
            prelim_info_list=[]
            each_gene_list=file_array[i].split("\t")
            try:
                ## Preliminary info
                entry_name=each_gene_list[0]
                Gene_ID=each_gene_list[3]
                Gene_Name=each_gene_list[4]
                tss_id=each_gene_list[5]
                locus=each_gene_list[6]
                length=each_gene_list[7]
                coverage=each_gene_list[8]
                FPKM=each_gene_list[9]
                log2_FPKM=math.log(float(FPKM)+1)/math.log(2)
                if entry_name not in cufflinks_dict:
                    cufflinks_dict[entry_name]=entry_name+"\t"+Gene_ID+"\t"+Gene_Name+"\t"+tss_id+"\t"+locus+"\t"+str("%.4f" % log2_FPKM)
                else:
                    pass
            except:
                pass
    return cufflinks_dict
Author: patidarr | Project: ngs_pipeline | Lines: 30 | Source: transformlog2_FPKM.py
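Dividing by math.log(2) converts the natural logarithm into a base-2 logarithm, so the transform above is a log2(FPKM + 1). A small standalone illustration (the FPKM value is arbitrary):

import math

fpkm = 7.0
a = math.log(fpkm + 1) / math.log(2)
b = math.log(fpkm + 1, 2)
c = math.log2(fpkm + 1)
print(a, b, c)  # all three give the same log2-transformed value, up to floating-point rounding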
Example 3: getFitness
def getFitness(self, tagList):
    tagList = list(tagList)
    # add start symbols and end symbols
    for i in range(self.N - 1):
        tagList.insert(0, '^')
        tagList.append('$')
    # initialize the variables
    answer = float(0.0)
    # calculate numerator & denominator
    length = len(tagList)
    # print "----- before calculation -----"
    for start in range(length - self.N + 1):
        tmp = []
        for index in range(self.N):
            tmp.append(tagList[start+index])
        gramTuple = tuple(tmp) # now gramTuple is the tuple for this NGRAM (self).
        gramTupleProb = self.getProb(gramTuple)
        answer += math.log(gramTupleProb)
        if start != 0:
            prefixGramTuple = self.getPrefixGram(gramTuple)
            prefixGramTupleProb = self.prefixNGRAM.getProb(prefixGramTuple)
            answer -= math.log(prefixGramTupleProb)
    # print "numerator = %f, denominator = %f, answer = %f" % (numerator, denominator, answer)
    # print "----- after calculation -----"
    # special case
    return answer
Author: sycLin | Project: NLP-Term-Project | Lines: 32 | Source: main.py
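Subtracting the log-probability of the (N-1)-gram prefix turns each joint n-gram term into a conditional one, since log(p_gram) - log(p_prefix) = log(p_gram / p_prefix). A one-line illustration with made-up probabilities:

import math

p_gram, p_prefix = 0.002, 0.01  # made-up probabilities
print(math.log(p_gram) - math.log(p_prefix))  # equals...
print(math.log(p_gram / p_prefix))            # ...this, up to rounding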
Example 4: compute_disp_ntaps
def compute_disp_ntaps(dm,bw,freq):
    NTLIMIT=65536*2
    #
    # Dt calculations are in MHz, rather than Hz
    # crazy astronomers....
    mbw = bw/1.0e6
    mfreq = freq/1.0e6
    f_lower = mfreq-(mbw/2)
    f_upper = mfreq+(mbw/2)
    # Compute smear time
    Dt = dm/2.41e-4 * (1.0/(f_lower*f_lower)-1.0/(f_upper*f_upper))
    # ntaps is now bandwidth*smeartime
    ntaps = bw*Dt
    if (ntaps < 32):
        ntaps = 32
    # special "flag" from command-line invoker to get around a bug
    # in Gnu Radio involving the FFT filter implementation
    # we can *never* increase the size of an FFT filter at runtime
    # but can decrease it. So there's a special "startup" flag (dm=1500.0)
    # that causes us to return the NTLIMIT number of taps
    #
    if (dm >= 1500.0):
        ntaps = NTLIMIT
    if (ntaps > NTLIMIT):
        ntaps = NTLIMIT
    ntaps = int(math.log(ntaps) / math.log(2))
    ntaps = int(math.pow(2,ntaps+1))
    return(int(ntaps))
Author: patchvonbraun | Project: IRA | Lines: 31 | Source: iraconfig.py
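The last two lines round the tap count up to the next power of two: math.log(ntaps)/math.log(2) is log2(ntaps), its integer part is the exponent of the largest power of two not exceeding ntaps, and 2**(exponent+1) is the next one up. A quick standalone check (arbitrary values, deliberately not exact powers of two to avoid floating-point edge cases):

import math

for ntaps in (33, 100, 5000):
    exponent = int(math.log(ntaps) / math.log(2))  # floor of log2(ntaps)
    rounded = int(math.pow(2, exponent + 1))       # next power of two above ntaps
    print(ntaps, "->", rounded)                    # 33 -> 64, 100 -> 128, 5000 -> 8192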
Example 5: TF_IDF
def TF_IDF():
    print('Doing TF_IDF', file=sys.stderr)
    global TFIDF, docWeight, index
    if os.path.isfile('TFIDF.dat') and os.path.isfile('docWeight.dat') and os.path.isfile('index.dat'):
        f = open('TFIDF.dat', 'rb')
        TFIDF = pickle.load(f)
        f.close()
        f = open('docWeight.dat', 'rb')
        docWeight = pickle.load(f)
        f.close()
        f = open('index.dat', 'rb')
        index = pickle.load(f)
        f.close()
    else:
        print('.dat not exist, generating', file=sys.stderr)
        TFIDF = {}
        docCnt = len(docSize)
        avgSize = 0
        index = [[] for i in range(docCnt)]
        for i in range(docCnt):
            avgSize += docSize[i]
        avgSize /= docCnt
        docWeight = [0 for i in range(docCnt)]
        para_b = 0.7 # tuning
        d = [(1 - para_b + para_b*docSize[i]/avgSize) for i in range(docCnt)]
        for i in invIndexUnigram: # word id
            IDF = math.log( docCnt / len(invIndexUnigram[i]) )
            TFIDF[i] = {}
            for j in invIndexUnigram[i]: # doc id
                v = (invIndexUnigram[i][j] / d[j]) * IDF
                TFIDF[i][j] = v
                docWeight[j] += v * v
                index[j].append(i)
        for i in invIndexBigram: # word id
            IDF = math.log( docCnt / len(invIndexBigram[i]) )
            TFIDF[i] = {}
            for j in invIndexBigram[i]: # doc id
                v = (invIndexBigram[i][j] / d[j]) * IDF
                TFIDF[i][j] = v
                docWeight[j] += v * v
                index[j].append(i)
        f = open('TFIDF.dat', 'wb')
        pickle.dump(TFIDF, f)
        f.close()
        f = open('docWeight.dat', 'wb')
        pickle.dump(docWeight, f)
        f.close()
        f = open('index.dat', 'wb')
        pickle.dump(index, f)
        f.close()
    printTime()
Author: fei6409 | Project: IR | Lines: 60 | Source: main.py
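The weighting here resembles a BM25-style TF-IDF: each term frequency is divided by a document-length normalization factor d[j], then scaled by IDF = math.log(docCnt / document_frequency), which rewards rare terms. A tiny standalone illustration with made-up counts:

import math

docCnt = 1000
for document_frequency in (1, 10, 100, 1000):
    print(document_frequency, math.log(docCnt / document_frequency))
# the rarer the term (small document frequency), the larger its IDF weight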
Example 6: could_be_prime
def could_be_prime(n):
    '''Performs some trials to compute whether n could be prime. Run time is O(N^3 / (log N)^2) for N bits.
    Returns whether it is possible for n to be prime (True or False).
    '''
    if n < 2:
        return False
    if n == 2:
        return True
    if not n & 1:
        return False
    product = ONE
    log_n = int(math.log(n)) + 1
    bound = int(math.log(n) / (LOG_2 * math.log(math.log(n))**2)) + 1
    if bound * log_n >= n:
        bound = 1
        log_n = int(sqrt(n))
    prime_bound = 0
    prime = 3
    for _ in xrange(bound):
        p = []
        prime_bound += log_n
        while prime <= prime_bound:
            p.append(prime)
            prime = next_prime(prime)
        if p != []:
            p = prod(p)
            product = (product * p) % n
    return gcd(n, product) == 1
Author: elhb | Project: borgstrom | Lines: 32 | Source: pyecm.py
Example 7: mdl
def mdl (g):
    """
    the Minimum Description Length calculator for Bayesian network g
    """
    n = len (g.V) # the variable count
    N = len (g.data) # the sample number
    logn = math.log (n, 2) # value of log (n)
    logN = math.log (N, 2) # value of log (N)
    complexity = sum([logn * len(g.getParentOf(v)) + logN / 2 * product (g.getParentOf(v).cards()) * (v.card - 1)
                      for v in g.V])
    logll = 0 #log likelihood
    for v in g.V:
        for parentVals in g.getParentOf (v).allAssignments ():
            for val in v.values:
                # assignment of the parent
                parentAssignments = dict(zip(map(lambda p: p.var, g.getParentOf (v)), parentVals))
                assignments = parentAssignments.copy () #including the child value in the assignment
                assignments[v.var] = val
                #the empirical count of the given assignments of parent
                parentN = g.N (**parentAssignments)
                #the empirical count of the given assignments of parent and child
                childN = g.N (**assignments)
                if childN != 0:
                    logll += (childN * math.log (childN / parentN, 2))
                else:
                    pass #nothing happens
    return -logll + complexity
Author: xiaohan2012 | Project: structure-learning | Lines: 35 | Source: scores.py
Example 8: __init__
def __init__(self, ref_file, max_n=100, verbose=False):
    '''
    Read the reference file and store wordcounts as class variables:
    - a dictionary mapping words to their log probabilities
    - a dictionary mapping character patterns (e.g. 'abccda' for 'dotted')
      to a list of words and their log probabilities, sorted by probability
    '''
    self.max_n = max_n
    self.verbose = verbose
    if self.verbose:
        print 'processing reference file...'
    # Get words and word probabilities from text and put in dictionary
    self.vectorizer = CountVectorizer(token_pattern=r'(?u)\b[a-zA-Z]+\b')
    wordcounts = self.__get_wordcounts(ref_file)
    self.word_dict = {word:math.log(count+1.0) for count, word in wordcounts}
    # Also put words and probabilities into the dictionary keyed by pattern
    self.words_by_pattern = {}
    for count, word in wordcounts:
        pattern = self.__word_to_pattern(word)
        prob = math.log(count+1.0)
        if pattern in self.words_by_pattern:
            self.words_by_pattern[pattern].append((prob, word))
        else:
            self.words_by_pattern[pattern] = [(prob, word)]
    # Initial null solution
    self.solution = None
    if self.verbose:
        print '...done\n'
Author: pbs929 | Project: decipher | Lines: 31 | Source: decipher.py
Example 9: __next__
def __next__(self):
    rv = self.value
    #------------------------------------------------------------------------
    # need to round or we might succumb to the dreaded python rounding
    # error (eg 0.99999 < 0 when multiplying 1/24.0 by 24)
    #------------------------------------------------------------------------
    if round(self.pos, 8) >= round(self.length_cur, 8):
        self.value = 1.0
        rv = 1.0
        self.pos = 0
        self.length_cur = Pattern.value(self.length)
        amp_cur = Pattern.value(self.amp)
        rate_start = 1.0
        rate_end = 1.0 + amp_cur
        steps = TICKS_PER_BEAT * self.length_cur
        self.dv = math.exp(math.log(rate_end / rate_start) / steps)
    self.pos += 1.0 / TICKS_PER_BEAT
    self.value = self.value * self.dv
    #------------------------------------------------------------------------
    # subtract
    #------------------------------------------------------------------------
    rv = math.log(rv, 2)
    print("warp: %f" % rv)
    return rv
Author: ideoforms | Project: isobar | Lines: 26 | Source: warp.py
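The math.exp(math.log(...)/steps) line computes the per-tick growth ratio dv = (rate_end/rate_start)**(1/steps), i.e. the constant factor that, applied `steps` times, scales the value by rate_end/rate_start overall. A standalone sketch with arbitrary numbers:

import math

rate_start, rate_end, steps = 1.0, 1.5, 24  # arbitrary ramp parameters
dv = math.exp(math.log(rate_end / rate_start) / steps)
print(dv ** steps)  # ~1.5, the total ratio after `steps` multiplications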
Example 10: make_non_differential_constellation
def make_non_differential_constellation(m, gray_coded):
    side = int(pow(m, 0.5))
    if (not isinstance(m, int) or m < 4 or not is_power_of_four(m)):
        raise ValueError("m must be a power of 4 integer.")
    # Each symbol holds k bits.
    k = int(log(m) / log(2.0))
    if gray_coded:
        # Number rows and columns using gray codes.
        gcs = gray_code(side)
        # Get inverse gray codes.
        i_gcs = mod_codes.invert_code(gcs)
    else:
        i_gcs = range(0, side)
    # The distance between points is found.
    step = 2.0/(side-1)
    gc_to_x = [-1 + i_gcs[gc]*step for gc in range(0, side)]
    # First k/2 bits determine x position.
    # Following k/2 bits determine y position.
    const_map = []
    for i in range(m):
        y = gc_to_x[get_bits(i, 0, k/2)]
        x = gc_to_x[get_bits(i, k/2, k/2)]
        const_map.append(complex(x,y))
    return const_map
Author: ychang | Project: gr-gtlib | Lines: 25 | Source: qam.py
Example 11: predict_class
def predict_class(prediction, prob_other, class_doc_stats, class_prob, word_list, word_dict):
    prob_values = []
    new_prob_values = []
    for class_name in class_prob:
        prob_values.append((class_name, class_prob[class_name]))
    inpfile = open("stopWords.txt", "r")
    line = inpfile.readline()
    stopWords = []
    while line:
        stopWord = line.strip()
        stopWords.append(stopWord)
        line = inpfile.readline()
    inpfile.close()
    for val in prob_values:
        prob = math.log(val[1], 2)
        class_name = val[0]
        for word in word_list:
            word = word.lower()
            # val = re.search(r"^[a-zA-Z][a-zA-Z0-9]*[a-zA-Z]+[a-zA-Z0-9]*$", word)
            # if (word in stopWords):
            #     continue
            if word in word_dict:
                prob = prob + math.log(Decimal(word_dict[word][class_name]), 2)
            else:
                prob = prob + math.log(Decimal(prob_other[class_name]), 2)
        new_prob_values.append((class_name, prob))
    prob_values = new_prob_values
    prob_values.sort(key=lambda tup: tup[1], reverse=True)
    return prob_values, prob_values[0][0]
Author: mnandinic | Project: ReviewAnalyzer | Lines: 29 | Source: nbtestLatest.py
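Summing math.log(..., 2) terms instead of multiplying raw probabilities is what keeps this Naive Bayes scoring numerically stable: a product of many small probabilities underflows to zero, while the log-sum stays finite. A self-contained illustration:

import math

probs = [1e-5] * 100          # many small per-word probabilities
product = 1.0
for p in probs:
    product *= p              # underflows to 0.0
log_score = sum(math.log(p, 2) for p in probs)
print(product, log_score)     # 0.0 vs roughly -1661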
Example 12: deviation_score
def deviation_score(percentage, lower_bound, upper_bound):
    if percentage < lower_bound:
        return math.log(lower_bound - percentage, lower_bound) * 100
    elif percentage > upper_bound:
        return math.log(percentage - upper_bound, 100 - upper_bound) * 100
    else:
        return 0
Author: ProgramFOX | Project: GibberishClassifier-Python | Lines: 7 | Source: gibberishclassifier.py
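A short usage sketch (assuming `import math` and the function above; the numbers are arbitrary): the score is 0 inside the [lower_bound, upper_bound] range and grows logarithmically with the distance outside it.

print(deviation_score(50, 20, 80))  # 0, inside the range
print(deviation_score(10, 20, 80))  # positive, below the lower bound
print(deviation_score(95, 20, 80))  # positive, above the upper bound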
Example 13: relate
def relate(size, base):
    if size == 0:
        return base
    size = float(size)
    base = float(base)
    if abs(size - base) < 0.1:
        return 0
    sign = -1 if size < base else 1
    endp = 0 if size < base else 36
    diff = (abs(base - size) * 3) + ((36 - size) / 100)
    logb = abs(base - endp)
    if logb == 1.0:
        logb = 1.1
    try:
        result = sign * math.log(diff, logb)
    except ValueError:
        if diff < 0:
            # Size is both very large and close to base
            return 0
        if logb == 0:
            logb = 1e-6
        if diff == 0:
            diff = 1e-6
        result = sign * math.log(diff, logb)
    return result
Author: JimmXinu | Project: calibre | Lines: 25 | Source: flatcss.py
Example 14: estimDiv
def estimDiv(c, psmc, r, t):
    """Estimate divergence using eq 12
    """
    N0 = 0
    if psmc:
        if not r:
            # parse psmc
            f = open(psmc, 'r')
            line = f.readline().split("-eN ")
            t = [float(i.split()[0]) for i in line[1:]]
            t.insert(0, 0.0)
            r = [float(i.split()[1]) for i in line[1:]]
            N0 = float(line[0].split()[1]) / float(line[0].split()[4])
            r.insert(0, 1.0)
        i = 0
        nc = 1.0
        while (1-nc*exp(-(t[i+1]-t[i])/r[i])) < c:
            nc *= exp(-(t[i+1]-t[i])/r[i])
            i += 1
            #print("i:{}, t[i]:{}, t[i+1]:{}, r[i]:{}, nc:{}".format(i, t[i], t[i+1], r[i], nc))
        j = i
        print("nc = {}, 1-nc = {}".format(nc, 1-nc))
        T_hat = -r[j]*log((1-c) / nc) + t[j]
    else:
        T_hat = -log(1-c) # assumes constant popsize
    return(r, t, N0, T_hat)
Author: stsmall | Project: An_funestus | Lines: 26 | Source: divTime.py
Example 15: command_line
def command_line(veb, ra, ov, pr):
    l = len(sys.argv)
    for i in xrange(1, l):
        if not is_switch(sys.argv[i]):
            break
    for j in xrange(i, l): # Start with the first non-switch
        if j != i: # Pretty printing
            print
        response = sys.argv[j]
        if valid_input(response):
            response = response.replace('^', '**')
            try:
                n = eval(response)
                int(n)
            except (SyntaxError, TypeError, ValueError):
                help()
        else:
            help()
        print 'Factoring %d:' % n
        if n < 0:
            print -1
            n = -n
        if n == 0:
            print '0 does not have a well-defined factorization.'
            continue
        elif n == 1:
            print 1
            continue
        if ov == DUMMY:
            ov = 2*math.log(math.log(n))
        for factor in factors(n, veb, ra, ov, pr):
            print factor
Author: elhb | Project: borgstrom | Lines: 35 | Source: pyecm.py
Example 16: solve
def solve(Xs_info, C, aa, f_cost, epsilon):
    """
    Args:
        Xs_info: map from distribution to tuple of type (probe cost, p_ij)
        C: the cost budget
        aa: the values taken on by the distribution
        epsilon: the epsilon
    """
    # Compute interval lengths. Note that I_i = [a_j, a_{j + 1}].
    assert len(set(aa)) == len(aa), \
        'The values taken on by the distribution must be distinct'
    assert all(itertools.imap(lambda a: a >= 0, aa)), \
        'The values taken on by the distribution must be nonnegative'
    ls = [t - s for s, t in zip(aa, aa[1:])]
    cs, ps = zip(*Xs_info.itervalues())
    z, ys, problem = lp(cs, C, ls, ps)
    print problem
    problem.solve()
    print 'z = {}, ys = {}'.format(z.value(), [y.value() for y in ys])
    # The subset S corresponding to i s.t. y_i = 1 is feasible
    s0 = [x for x, y in zip(Xs_info.iterkeys(), ys) if y.value() == 1]
    # new cost budget C(log log m + log 1/e)
    m = len(aa)
    C_relaxed = C * (math.log(math.log(m)) + math.log(1.0 / epsilon))
    X_cost = {k: c for k, (c, _) in Xs_info.iteritems()}
    return minimum_element.minimum_element(X_cost, C_relaxed, f_cost, S_0=s0)
Author: napping | Project: linear-programming-benchmark | Lines: 33 | Source: cip_greedy.py
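As the comment in the code notes, the relaxed budget is C(log log m + log 1/epsilon), which grows only doubly-logarithmically in the number of distribution values m. A quick numeric illustration with arbitrary C and epsilon:

import math

C, epsilon = 10.0, 0.1
for m in (10, 100, 1000, 10**6):
    print(m, C * (math.log(math.log(m)) + math.log(1.0 / epsilon)))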
Example 17: __call__
def __call__(self, state, scope, pos, paramTypes, datum, classModel):
    ll = 0.0
    if isinstance(datum, list) or isinstance(datum, tuple):
        if len(datum) != len(classModel):
            raise PFARuntimeException("datum and classModel misaligned", self.errcodeBase + 0, self.name, pos)
        for i, x in enumerate(datum):
            mu = classModel[i]["mean"]
            vari = classModel[i]["variance"]
            if vari <= 0.0:
                raise PFARuntimeException("variance less than or equal to zero", self.errcodeBase + 1, self.name, pos)
            ll += -0.5*math.log(2.*math.pi * vari)
            ll += -0.5*((x - mu)**2 / vari)
        return ll
    else:
        datumkeys = datum.keys()
        modelkeys = classModel.keys()
        if set(datumkeys) != set(modelkeys):
            raise PFARuntimeException("datum and classModel misaligned", self.errcodeBase + 0, self.name, pos)
        for feature in datumkeys:
            x = datum[feature]
            mu = classModel[feature]["mean"]
            vari = classModel[feature]["variance"]
            if vari <= 0.0:
                raise PFARuntimeException("variance less than or equal to zero", self.errcodeBase + 1, self.name, pos)
            ll += -0.5*math.log(2.*math.pi * vari)
            ll += -0.5*((x - mu)**2 / vari)
        return ll
Author: nkhuyu | Project: hadrian | Lines: 27 | Source: naive.py
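Each pair of terms added to ll is the log-density of a univariate normal distribution, so it can be cross-checked against scipy.stats.norm.logpdf (not used by the original code; the numbers below are arbitrary):

import math
from scipy.stats import norm

x, mu, vari = 1.3, 0.5, 2.0
manual = -0.5*math.log(2.*math.pi*vari) - 0.5*((x - mu)**2 / vari)
print(manual, norm.logpdf(x, loc=mu, scale=math.sqrt(vari)))  # the two should match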
Example 18: lp
def lp(cs, C, ls, ps):
    """
    Args:
        cs: a list containing the cost of probing `X_1, ..., X_n`
        C: the cost budget
        ls: a list of the lengths of the intervals `I_1, ..., I_m`. Each
            element of the list contains the length of the corresponding
            interval.
        ps: a list of functions, each of which takes one argument
            `j` and returns `Pr[X_i >= a_j]`
    Returns:
        a triple of type `(pulp.LpVariable, list of pulp.LpVariable,
        pulp.LpProblem)` with values of `(z, list of y_i,
        unsolved linear program)`.
    """
    assert len(ps) == len(cs)
    n = len(ps)
    m = len(ls)
    problem = pulp.LpProblem('Step 1', pulp.LpMinimize)
    z = pulp.LpVariable('z', cat='Integer')
    ys = [pulp.LpVariable('y' + str(i), lowBound=0, upBound=1, cat='Integer')
          for i in xrange(n)]
    problem += z
    for j in xrange(1, m + 1):
        aa = (math.log(1.0 / p(j)) for p in ps)
        problem += pulp.lpDot(ys, aa) <= math.log(ls[j - 1]) - z, 'j=' + str(j)
    problem += pulp.lpDot(cs, ys) <= C, 'cost'
    return z, ys, problem
Author: napping | Project: linear-programming-benchmark | Lines: 32 | Source: cip_greedy.py
Example 19: optimize_hyperparameters
def optimize_hyperparameters(self, samples=5, step=3.0):
    old_hyper_parameters = [math.log(self._alpha_alpha), math.log(self._alpha_beta)]
    for ii in xrange(samples):
        log_likelihood_old = self.compute_likelihood(self._alpha_alpha, self._alpha_beta)
        log_likelihood_new = math.log(random.random()) + log_likelihood_old
        #print("OLD: %f\tNEW: %f at (%f, %f)" % (log_likelihood_old, log_likelihood_new, self._alpha_alpha, self._alpha_beta))
        l = [x - random.random() * step for x in old_hyper_parameters]
        r = [x + step for x in old_hyper_parameters]
        for jj in xrange(self._alpha_maximum_iteration):
            new_hyper_parameters = [l[x] + random.random() * (r[x] - l[x]) for x in xrange(len(old_hyper_parameters))]
            trial_alpha, trial_beta = [math.exp(x) for x in new_hyper_parameters]
            lp_test = self.compute_likelihood(trial_alpha, trial_beta)
            if lp_test > log_likelihood_new:
                self._alpha_alpha = math.exp(new_hyper_parameters[0])
                self._alpha_beta = math.exp(new_hyper_parameters[1])
                #self._alpha_sum = self._alpha_alpha * self._K
                #self._beta_sum = self._alpha_beta * self._number_of_language_types
                old_hyper_parameters = [math.log(self._alpha_alpha), math.log(self._alpha_beta)]
                break
            else:
                for dd in xrange(len(new_hyper_parameters)):
                    if new_hyper_parameters[dd] < old_hyper_parameters[dd]:
                        l[dd] = new_hyper_parameters[dd]
                    else:
                        r[dd] = new_hyper_parameters[dd]
                    assert l[dd] <= old_hyper_parameters[dd]
                    assert r[dd] >= old_hyper_parameters[dd]
        print("\nNew hyperparameters (%i): %f %f" % (jj, self._alpha_alpha, self._alpha_beta))
Author: kzhai | Project: PyNaiveBayes | Lines: 33 | Source: monte_carlo.py
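The line math.log(random.random()) + log_likelihood_old is a log-space way of drawing a threshold uniformly between 0 and the current likelihood, as done in slice sampling (this is a reading of the code, not a statement from the project's documentation). A minimal illustration:

import math
import random

ll_old = -100.0                                # some current log-likelihood
threshold = math.log(random.random()) + ll_old  # as above, an exact 0.0 from random.random() would raise ValueError
print(threshold <= ll_old)                      # True: log(u) <= 0 for u in (0, 1)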
Example 20: energy
def energy(self):
    sum = 0.0
    sum -= di.norm.logpdf(self.data, loc=self.mu, scale=self.sigma).sum()
    # Now add in the priors...
    sum -= log(self.sigma)*(-0.5) - self.nu/2 * (self.mu-self.priormu)**2/self.sigma
    sum -= log(self.sigma)*(self.kappa+2)/(-2) - 0.5*self.priorsigma/self.sigma
    return sum
Author: binarybana | Project: samcnet | Lines: 7 | Source: tail.py
Note: the math.log examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.