Python math.log2 Function Code Examples


This article collects typical usage examples of Python's math.log2 function. If you are wondering how log2 is used in practice, how to call it, or what real-world examples look like, the curated code samples below should help.



The following presents 20 code examples of the log2 function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
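
Before the collected examples, here is a minimal sketch of the function itself (standard library only): math.log2(x) returns the base-2 logarithm of x, is exact for powers of two, and raises ValueError for non-positive input.

import math

print(math.log2(8))     # 3.0 -- exact for powers of two
print(math.log2(10))    # 3.321928094887362
print(math.log2(1))     # 0.0
# math.log2(0) and math.log2(-1) both raise ValueError: math domain error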

Example 1: log_probability

    def log_probability( self, sequence, transitions_weight = None, outputs_weight = 1 ):
        """
        Returns the log-probability of the given symbol sequence. If the
        sequence is labelled, then returns the joint log-probability of the
        symbol, state sequence. Otherwise, uses the forward algorithm to find
        the log-probability over all label sequences.

        :return: the log-probability of the sequence
        :rtype: float
        :param sequence: the sequence of symbols which must contain the TEXT
            property, and optionally the TAG property
        :type sequence:  Token
        """
        if transitions_weight is None:
            transitions_weight = 1

        sequence = self._transform( sequence )

        T = len( sequence )
        EPSILON = ''
        channelModel = 0
        sourceModel = 0

        if T > 0 and sequence[ 0 ][ _TAG ] is not None:
            last_state = sequence[ 0 ][ _TAG ]

            if last_state != EPSILON:
                if transitions_weight:
                    sourceModel += transitions_weight * self._priors.logprob( last_state )

            else:
                if transitions_weight:
                    sourceModel += transitions_weight * math.log2( globalModelParameters.EpsilonTransition )

            channelModel += outputs_weight * self._output_logprob( last_state, sequence[ 0 ][ _TEXT ] )

            for t in range( 1, T ):
                state = sequence[ t ][ _TAG ]

                if last_state != EPSILON:
                    if state != EPSILON:
                        if transitions_weight:
                            sourceModel += transitions_weight * self._transitions[ last_state ].logprob( state )
                    else:
                        if transitions_weight:
                            sourceModel += transitions_weight * math.log2( globalModelParameters.EpsilonTransition )
                else:
                    # check if last_state is epsilon; if so then transition with probability of Epsilon
                    if transitions_weight:
                        sourceModel += transitions_weight * math.log2( globalModelParameters.EpsilonTransition )

                channelModel += outputs_weight * self._output_logprob( state, sequence[ t ][ _TEXT ] )

                last_state = state

            # FIXME changed exponentiation
            return { 'HMMtotal':  (sourceModel + channelModel),
                     'HMMchannel':  channelModel,
                     'HMMsource':  sourceModel,
                     'sequence': sequence}
Author: jcavalieri8619, Project: OCRerror_correct, Lines: 60, Source: HiddenMarkovModel.py
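
The method above accumulates base-2 log probabilities so that a product of many small transition and emission probabilities becomes a sum, which avoids floating-point underflow. A minimal standalone sketch of the same idea (the toy model and function name below are hypothetical, not part of the original project):

import math

def sequence_log2_prob(priors, transitions, emissions, states, symbols):
    """Joint log2-probability of a labelled sequence for a toy HMM.

    Multiplying many small probabilities underflows to 0.0; summing their
    base-2 logs keeps the computation stable, mirroring the example above.
    """
    score = math.log2(priors[states[0]]) + math.log2(emissions[states[0]][symbols[0]])
    for prev, cur, sym in zip(states, states[1:], symbols[1:]):
        score += math.log2(transitions[prev][cur])   # source model term
        score += math.log2(emissions[cur][sym])      # channel model term
    return score

# Toy model with hypothetical numbers:
priors = {"N": 0.6, "V": 0.4}
transitions = {"N": {"N": 0.3, "V": 0.7}, "V": {"N": 0.8, "V": 0.2}}
emissions = {"N": {"dog": 0.9, "barks": 0.1}, "V": {"dog": 0.2, "barks": 0.8}}
print(sequence_log2_prob(priors, transitions, emissions, ["N", "V"], ["dog", "barks"]))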


Example 2: run_simulation

def run_simulation(num_blocks_per_set, num_words_per_block, cache_size,
                   replacement_policy, num_addr_bits, word_addrs):

    num_blocks = cache_size // num_words_per_block
    num_sets = num_blocks // num_blocks_per_set

    # Ensure that the number of bits used to represent each address is always
    # large enough to represent the largest address
    num_addr_bits = max(num_addr_bits, int(math.log2(max(word_addrs))) + 1)

    num_offset_bits = int(math.log2(num_words_per_block))
    num_index_bits = int(math.log2(num_sets))
    num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits

    refs = get_addr_refs(
        word_addrs, num_addr_bits,
        num_offset_bits, num_index_bits, num_tag_bits)

    cache, ref_statuses = read_refs_into_cache(
        num_sets, num_blocks_per_set, num_index_bits,
        num_words_per_block, replacement_policy, refs)

    print()
    display_addr_refs(refs, ref_statuses)
    print()
    display_cache(cache)
    print()
Author: chubbymaggie, Project: cache-simulator, Lines: 27, Source: simulator.py
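
The simulator derives the address layout from log2: log2 of the words per block gives the offset width and log2 of the set count gives the index width, with the tag taking the remaining bits. A worked sketch with hypothetical cache parameters:

import math

# Hypothetical cache: 4-word blocks, 8 sets, 8-bit addresses.
num_words_per_block = 4
num_sets = 8
num_addr_bits = 8

num_offset_bits = int(math.log2(num_words_per_block))            # 2 bits select a word in a block
num_index_bits = int(math.log2(num_sets))                        # 3 bits select a set
num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits  # remaining 3 bits are the tag

addr = 0b10110110
offset = addr & ((1 << num_offset_bits) - 1)
index = (addr >> num_offset_bits) & ((1 << num_index_bits) - 1)
tag = addr >> (num_offset_bits + num_index_bits)
print(num_tag_bits, tag, index, offset)   # 3 5 5 2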


Example 3: calc_posprob

def calc_posprob(sentence,file1,file2):
    prob_p =math.log2(pos_count_sep/(pos_count_sep+neg_count_sep))

    voca=open(file1).read()
    vocab=voca.split()
    with open(file1) as f:
        vocab_len= sum(1 for _ in f)

    pos_word=open(file2).read()
    pos_words=pos_word.split()
    #with open(file1) as f:
    #    total_pos= sum(1 for _ in f)
    total_pos=sum_words(file2)

    for word in sentence:
        if word in vocab:
            if word in pos_words:
                index= pos_words.index(word)
                count= int(pos_words[index+1])
                prob_1= math.log2(count+1/(total_pos+vocab_len))
                prob_p= prob_p+prob_1
        else:
            prob_1 = math.log2(1/(total_pos+vocab_len))
            prob_p= prob_p+prob_1
    return prob_p
Author: sowmyabejjipuram, Project: NLP, Lines: 25, Source: naive_bayes_with_cross_validation.py
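
The example computes add-one (Laplace) smoothed word probabilities in log2 space. The smoothed estimate is conventionally (count + 1) / (total + vocabulary size), so the parentheses matter when writing it as a single expression; a hedged standalone sketch (function name hypothetical):

import math

def smoothed_log2_prob(count, total, vocab_size):
    """Add-one (Laplace) smoothed log2 probability of a word given a class."""
    return math.log2((count + 1) / (total + vocab_size))

# Toy corpus: the word appeared 3 times among 100 positive tokens,
# with a vocabulary of 50 distinct words.
print(smoothed_log2_prob(3, 100, 50))   # log2(4/150) ~= -5.229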


Example 4: clog

def clog(pagerank):
    vector = list(sorted(pagerank, reverse=True))
    k = [math.log2(i) for i in range(1, len(vector) + 1)]
    y = [math.log2(i) for i in vector]
    A = np.vstack([k, np.ones(len(k))]).T
    m, c = np.linalg.lstsq(A, y)[0]
    return m
Author: KoIIdun, Project: PageRank-gradient, Lines: 7, Source: tests.py
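
Fitting a straight line to log2(rank) versus log2(value) estimates the exponent of a power-law decay; the slope returned by the least-squares fit is that exponent. A minimal sketch with synthetic data (assumes NumPy; rcond is passed explicitly to avoid the deprecation warning in recent NumPy versions):

import math
import numpy as np

# Synthetic PageRank-like vector that decays as rank**-0.8 (hypothetical data).
vector = [r ** -0.8 for r in range(1, 101)]

k = [math.log2(r) for r in range(1, len(vector) + 1)]    # log2 rank
y = [math.log2(v) for v in vector]                        # log2 value
A = np.vstack([k, np.ones(len(k))]).T
slope, intercept = np.linalg.lstsq(A, y, rcond=None)[0]
print(round(slope, 3))   # ~ -0.8, the power-law exponent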


Example 5: test_tag2

    def test_tag2(self):
        tagset = {"D", "N", "V"}
        trans = {
            ("<s>", "<s>"): {"D": 1.0},
            ("<s>", "D"): {"N": 1.0},
            ("D", "N"): {"V": 0.8, "N": 0.2},
            ("N", "N"): {"V": 1.0},
            ("N", "V"): {"</s>": 1.0},
        }
        out = {"D": {"the": 1.0}, "N": {"dog": 0.4, "barks": 0.6}, "V": {"dog": 0.1, "barks": 0.9}}
        hmm = HMM(3, tagset, trans, out)
        tagger = ViterbiTagger(hmm)

        x = "the dog barks".split()
        y = tagger.tag(x)

        pi = {
            0: {("<s>", "<s>"): (log2(1.0), [])},
            1: {("<s>", "D"): (log2(1.0), ["D"])},
            2: {("D", "N"): (log2(0.4), ["D", "N"])},
            3: {
                ("N", "V"): (log2(0.8 * 0.4 * 0.9), ["D", "N", "V"]),
                ("N", "N"): (log2(0.2 * 0.4 * 0.6), ["D", "N", "N"]),
            },
        }
        self.assertEqualPi(tagger._pi, pi)

        self.assertEqual(y, "D N V".split())
Author: PLN-FaMAF, Project: PLN-2015, Lines: 28, Source: test_viterbi_tagger.py
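
The expected Viterbi scores rely on the identity log2(a*b) = log2(a) + log2(b), so a path score such as log2(0.8 * 0.4 * 0.9) is simply the sum of the individual log2 probabilities; a quick check:

from math import log2, isclose

assert isclose(log2(0.8 * 0.4 * 0.9), log2(0.8) + log2(0.4) + log2(0.9))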


Example 6: test_effective_window_size

    def test_effective_window_size(self):
        log_window_sizes = [math.log2(z) for z in self.window_sizes]
        plot = PointPlot()
        plot.new_plot("Effective Window Size", rows=1, num_curves=self.num_estimations+1)

        avg_err_bayes = self.get_errors(self.num_estimations)

        for i in range (0,len(self.window_sizes)):
            for k in range (0, self.num_estimations+1):
                self.print_values(k, self.window_sizes[i], avg_err_bayes[i][k], avg_err_bayes[i][0])

        for k in range(0, len(avg_err_bayes[0])): # which is numestimations+1
            k_array = avg_err_bayes[:,k]

            log_k_array =  [math.log2(y) for y in k_array]
            if k == 0:
                plot.add_data_to_plot(log_k_array,log_window_sizes,label = "Naive ("+str(k)+" Shifts)")
            else:
                plot.add_data_to_plot(log_k_array, log_window_sizes, label=str(k)+" Shifts")

        """naive = avg_err_bayes[0]
        avg_err_naive = [naive]* len(avg_err_bayes)


        plot.add_to_plot()"""

        plot.create_legend()
        plot.save_plot("effective_window_size_plot")
Author: alexlafleur, Project: LDStreamHMMLearn, Lines: 28, Source: effective_window_size.py


Example 7: test_EquationBC_mixedpoisson_matrix_fieldsplit

def test_EquationBC_mixedpoisson_matrix_fieldsplit(eq_type, mat_type, porder):

    # Mixed poisson with EquationBCs
    # aij with fieldsplit pc

    solver_parameters = {"mat_type": mat_type,
                         "ksp_type": "gmres",
                         "ksp_rtol": 1.e-10,
                         "ksp_atol": 1.e-10,
                         "ksp_max_it": 500000,
                         "pc_type": "fieldsplit",
                         "pc_fieldsplit_type": "schur",
                         "pc_fieldsplit_schur_fact_type": "full",
                         "fieldsplit_0_ksp_type": "gmres",
                         "fieldsplit_0_pc_type": "asm",
                         "fieldsplit_0_ksp_rtol": 1.e-12,
                         "fieldsplit_1_ksp_type": "gmres",
                         "fieldsplit_1_ksp_rtol": 1.e-12,
                         "fieldsplit_1_pc_type": "none"}
    err = []

    if eq_type == "linear":
        for i, mesh_num in enumerate([8, 16]):
            err.append(linear_poisson_mixed(solver_parameters, mesh_num, porder))
    elif eq_type == "nonlinear":
        for i, mesh_num in enumerate([8, 16]):
            err.append(nonlinear_poisson_mixed(solver_parameters, mesh_num, porder))

    assert(abs(math.log2(err[0][0]) - math.log2(err[1][0]) - (porder+1)) < 0.03)
Author: firedrakeproject, Project: firedrake, Lines: 29, Source: test_equation_bcs.py
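
The final assertion checks the observed convergence order: refining the mesh from 8 to 16 cells halves the spacing, so the error should shrink by roughly 2**(porder+1), and log2(err_coarse) - log2(err_fine) should be close to porder + 1. A numeric sketch with hypothetical error values:

import math

porder = 1
err_coarse = 2.0e-3   # error on the 8-cell mesh (hypothetical)
err_fine = 5.1e-4     # error on the 16-cell mesh (hypothetical)

observed_order = math.log2(err_coarse) - math.log2(err_fine)
print(observed_order)                        # ~1.97
assert abs(observed_order - (porder + 1)) < 0.05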


Example 8: __create_tournament_tree

    def __create_tournament_tree(self):
        '''
        Creates list for every rounds. Connects every list item between other
        items, that connections makes tournament tree.

        @return: list of interconnected list items
        '''
        tournament_rounds = []
        # create lists for every round
        for i in range(int(math.log2(self.competitors_count))):
            round_list = self._init_round_list(i)
            tournament_rounds.append(round_list)
        # make interconnections between rounds - tournament tree
        for i in range(int(math.log2(self.competitors_count - 1))):
            if len(tournament_rounds[- 1 - i]) > 1:
                for j in range(len(tournament_rounds[- 1 - i]) // 2):
                    k = (2 * j)
                    tournament_rounds[- 1 - i][k].next_round = \
                        tournament_rounds[- 1 - i - 1][j]
                    tournament_rounds[- 1 - i][k + 1].next_round = \
                        tournament_rounds[- 1 - i - 1][j]
                    tournament_rounds[- 1 - i - 1][j].previous_match1 = \
                        tournament_rounds[- 1 - i][k]
                    tournament_rounds[- 1 - i - 1][j].previous_match2 = \
                        tournament_rounds[- 1 - i][k + 1]
        # set current round variable to index for the first round
        self.__current_round = len(tournament_rounds) - 1
        # return all rounds
        return tournament_rounds
Author: adpro, Project: TournamentsTest, Lines: 29, Source: tournaments.py
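
For a bracket whose competitor count is a power of two, the number of rounds is exactly log2 of that count, which is why the method sizes its round lists with int(math.log2(self.competitors_count)); a quick check:

import math

for competitors in (2, 4, 8, 16, 32):
    rounds = int(math.log2(competitors))
    print(f"{competitors} competitors -> {rounds} rounds")   # e.g. 16 competitors -> 4 rounds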


Example 9: qsort_based_counter

def qsort_based_counter(a, b, x):
    len_x = len(x)
    result = [0] * len_x
    checked = {}
    len_a = len(a)
    len_b = len(b)
    if not len_a:
        return result
    qsort(a, 0, len_a, math.floor(math.log2(len_a)))
    qsort(b, 0, len_b, math.floor(math.log2(len_b)))
    print(a)
    print(b)
    # a = sorted(a)
    # b = sorted(b)
    for i in range(0, len(x)):
        if x[i] < a[0]:
            continue
        if x[i] in checked:
            result[i] = result[checked[x[i]]]
        else:
            a_idx = bisect.bisect_right(a, x[i])
            b_idx = bisect.bisect_left(b, x[i])
            result[i] = a_idx - b_idx
        checked[x[i]] = i
    return result
Author: Se7ge, Project: csc, Lines: 25, Source: 4413_2.py
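
The sort is handed floor(log2(n)) as a recursion-depth budget, in the spirit of introsort (which conventionally allows about 2*floor(log2(n)) levels before switching algorithms). Computing the budget on its own:

import math

def depth_limit(n):
    """Recursion-depth budget for an introsort-style quicksort (sketch)."""
    return math.floor(math.log2(n)) if n > 1 else 0

print([depth_limit(n) for n in (2, 10, 100, 1000)])   # [1, 3, 6, 9]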


Example 10: GetSampleLincData

def GetSampleLincData(sample, linc_exp):
	""" 
	Get the data for each linc for that sample. Get log2 fold change for FPKM compared to average and median of all samples for the linc.

	Args:
		sample = Sample from the input file name.
		linc_exp = Name of file containing the expression data for each linc in every sample.

	Returns:
		linc_dict = Dict containing signal of every SE position for the sample {(chr, (start, stop)): ((linc_id, linc_name), signal)}
	"""

	# Dict to hold data.
	linc_dict = {}

	with open(linc_exp) as f:

		# Get the sample index.
		header = f.readline().strip()
		sample_idx = GetSampleIdx(header, sample)

		for line in f:
			line = line.strip().split("\t")
			data = [float(x) for x in line[5:]]  # Convert all data to floats.
			linc_med = log2(float(median(data)))  # Get log2 median of list.
			linc_avg = log2(float(mean(data)))  # Get log2 average of the list.
			linc_val = log2(float(line[sample_idx]))  # Get log2 of the linc FPKM for the sample.
			linc_med_FC = linc_val - linc_med
			linc_avg_FC = linc_val - linc_avg

			# Grab data and add to the dict.
			chrom, start, stop, linc_id, linc_name  = line[0], int(line[1]), int(line[2]), line[3], line[4]
			linc_dict[(chrom, (start, stop))] = ((linc_id, linc_name), (linc_med_FC, linc_avg_FC))

	return linc_dict
Author: j-andrews7, Project: Pipelines, Lines: 35, Source: get_sample_linc_cnv_loads.py
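
Because log2(a) - log2(b) equals log2(a / b), subtracting log2 values yields the fold change directly: +1 is a doubling and -1 is a halving relative to the reference. A minimal sketch with hypothetical FPKM values:

from math import log2
from statistics import median

# Hypothetical FPKM values for one lincRNA across samples.
fpkms = [4.0, 8.0, 16.0, 32.0]
sample_fpkm = 64.0

log2_fc_vs_median = log2(sample_fpkm) - log2(median(fpkms))
print(log2_fc_vs_median)   # 2.415..., i.e. about 5.3-fold above the median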


Example 11: I

    def I(self, term, cluster):
        n = len(self.docVector)
        n00 = n10 = n11 = n01 = 0

        for id in self.docVector:
            if self.docCluster[id] == cluster:
                if term in self.docVector[id].dict.keys():
                    n11 += 1
                else:
                    n01 += 1
            else:
                if term in self.docVector[id].dict.keys():
                    n10 += 1
                else:
                    n00 += 1
        n1_ = n10 + n11
        n_1 = n01 + n11
        n0_ = n00 + n01
        n_0 = n00 + n10
        # #print('cluster : '+cluster.__str__())
        # #print('n00 = ',n00)
        # #print('n01 = ', n01)
        # #print('n10 = ',n10)
        # #print('n11 = ', n11)
        a1 =  n11 / n * log2(n * n11 / (n1_ * n_1)) if n11 != 0 else 0
        a2 = n01 / n * log2(n * n01 / (n0_ * n_1)) if n01 != 0 else 0
        a3 = n10 / n * log2(n * n10 / (n1_ * n_0)) if n10 != 0 else 0
        a4 = n00 / n * log2(n * n00 / (n0_ * n_0)) if n00 != 0 else 0
        return a1 +a2  + a3 + a4
Author: MJafarMashhadi, Project: MiniGoogle, Lines: 29, Source: K_means.py


Example 12: channelModel

def channelModel( candidate_object ):
    partitionProbData = [ ]

    for partition in candidate_object[ 'partitions' ]:


        partitionProbData.append( HMMmodel.log_probability( partition,
                                                        transitions_weight = globalModelParameters.TransitionWeight ) )


    partitionProbData.sort(key=lambda arg: arg['HMMtotal'],reverse=True)

    TopPartitionsProbData=partitionProbData[:globalModelParameters.NUM_PARTITIONS]



    candidate_object.pop( 'partitions' )

    candidate_object[ 'totalProb' ] = 0

    candidate_object[ 'channelProb' ] = round( math.log2(sum(map(lambda arg: 2**arg[ 'HMMtotal' ],TopPartitionsProbData),0)) ,
    3 )
    candidate_object[ 'langProb' ] = 0

    candidate_object[ 'HMMchannel' ] = round( math.log2(sum(map(lambda arg: 2**arg[ 'HMMchannel' ],TopPartitionsProbData),0)) ,
                                              3 )

    candidate_object[ 'HMMsource' ] = round( math.log2(sum(map(lambda arg: 2**arg[ 'HMMsource' ],TopPartitionsProbData),0)) , 3 )

    candidate_object[ 'maxPartition' ] = TopPartitionsProbData[0]['sequence']

    candidate_object['topPartitionsDict'] = TopPartitionsProbData

    return candidate_object
Author: jcavalieri8619, Project: OCRerror_correct, Lines: 34, Source: ErrorCorrector.py
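
To add probabilities that are stored as log2 scores, the function exponentiates each score back with 2**x, sums, and takes log2 again. When the scores are very negative, 2**x underflows to 0.0; a max-shifted variant of the same computation stays stable (a sketch, not part of the original project):

import math

def log2_sum(log2_values):
    """log2(sum(2**x for x in log2_values)), computed with a max-shift
    so that very negative log2 scores do not underflow to zero."""
    m = max(log2_values)
    return m + math.log2(sum(2 ** (x - m) for x in log2_values))

scores = [-1100.0, -1102.0, -1110.0]   # naive 2**x would underflow to 0.0
print(log2_sum(scores))                 # ~ -1099.68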


Example 13: score

    def score(self, text):    
        total_score = 0
        prev_word = None
        for current_word in text:
            current_score = 0
            #print('current word is {}'.format(current_word))
            if current_word in self.pos_features:
                current_score += math.log2(self.pos_features[current_word])
            #    #print('+1')

            if current_word in self.neg_features:
                current_score -= math.log2(self.neg_features[current_word])
            #    print('-1')

            if prev_word is not None:
            #    print('prev word is {}'.format(prev_word))
                if prev_word in self.inc_features:
                    current_score *= 1.5 
            #        print('*2')
                elif prev_word in self.dec_features:
                    current_score /= 1.5 
            #        print('/2')
                elif prev_word in self.inv_features:
                    current_score *= -1.0
            #        print('-')
            prev_word = current_word
            total_score += current_score

        return total_score
Author: Finaleblue, Project: Cooper_ece467, Lines: 29, Source: tag_based_analysis.py


Example 14: NaiveBayes

def NaiveBayes(class_list,variables_counter,path):
    test = pd.read_csv(path,sep='\t',names=['num','class','desc'],header = None)
    final_list = []
    desc_list = test['desc'].tolist()
    test_class = test['class']
    total_train_files = sum(class_count.values())
    i = 0
    for line in desc_list:
        class_prob.clear()

        if type(line) is not str:
            continue
        record = line.split()
        for word in record:
            for classes in class_list:
                prob_word_in_class = 0.0
                class_counter = complete_dict[classes]
                class_desc_overall_count = sum(class_counter.values())
                class_word_unique_count = len(variables_counter)
                word_count = class_counter.get(word,0)
                prob_word_in_class = ( ((math.log2((word_count+1)) - math.log2((class_desc_overall_count + class_word_unique_count)))))
                if class_prob.get(classes,0) != 0:
                    class_prob[classes] =  class_prob[classes] + prob_word_in_class
                else:
                    class_prob[classes] =  prob_word_in_class
        class_prob[classes] = class_prob[classes] + math.log2(class_count[classes]/total_train_files)
        #print(class_prob,max(class_prob,key=class_prob.get))
        final_list.append(max(class_prob,key=class_prob.get))            
    accuracy(final_list,test)       
Author: yagamiram, Project: Multinomial_Classifier_for_Text_Classification, Lines: 29, Source: NaiveBayes.py


Example 15: computeHash

def computeHash(inputFile):
    # Initialize a list for storing each transaction from the file
    try:
        transactionsList = open(inputFile, 'rt').read().split('\n')
    except FileNotFoundError:
        print("The file cannot be found. Please enter a valid name.")
        return

    # If there's a newline character at the end, account for it
    if len(transactionsList[len(transactionsList) - 1]) == 0:
        transactionsList = transactionsList[:len(transactionsList) - 1]

    nextLogOfTwo = math.log2(len(transactionsList))

    # If the number of transactions in the list is not a power of 2, then append the string 'null' into it until it is
    if not nextLogOfTwo.is_integer():
        # Find what the next log of two is
        nextLogOfTwo = math.ceil(math.log2(len(transactionsList)))
        targetNumOfList = int(math.pow(2, nextLogOfTwo))

        # And append 'null'
        for i in range(0, targetNumOfList - len(transactionsList), 1):
            transactionsList.append('null')
    else:
        nextLogOfTwo = int(nextLogOfTwo)
   
    # Encode each of the items in transactionsList to their corresponding representations in bytes
    for indexOfTrans in range(0, len(transactionsList), 1):
        transactionsList[indexOfTrans] = bytes(transactionsList[indexOfTrans], 'utf-8')
 
    hashes = []
    currLevelHash = list(transactionsList)
    nextLevelHash = []

    for j in range(0, len(currLevelHash), 1):
        hashOfEachElem = hashlib.sha256()
        hashOfEachElem.update(currLevelHash[j])

        nextLevelHash.append(hashOfEachElem)
    currLevelHash = nextLevelHash

    # Now start hashing and concatenating each pair of elements up till nextLogOfTwo
    for i in range(0, nextLogOfTwo, 1):
        nextLevelHash = []
        for j in range(0, len(currLevelHash) - 1, 2):
            hashOfFirstElem = currLevelHash[j].hexdigest()
            hashOfSecondElem = currLevelHash[j+1].hexdigest()

            bothElemsConcatenated = hashOfFirstElem + hashOfSecondElem
            hashOfBothElems = hashlib.sha256()
            hashOfBothElems.update(bytes(bothElemsConcatenated, 'utf-8'))

            nextLevelHash.append(hashOfBothElems)
        currLevelHash = nextLevelHash

    # Set hashes to be equal to currLevelHash
    hashes = currLevelHash

    # And return the hexdigest of the root hash
    return hashes[0].hexdigest()
Author: RylanSchaeffer, Project: ECS198-Cryptocurrency-Technologies, Lines: 60, Source: KoradiaSohamHW2Submission.py
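
log2 appears twice here: log2(n).is_integer() tests whether the transaction count is already a power of two, and math.ceil(math.log2(n)) gives the exponent of the next power of two to pad up to. The padding step in isolation (helper name hypothetical):

import math

def pad_to_power_of_two(items, filler='null'):
    """Pad a list with `filler` until its length is a power of two (sketch)."""
    n = len(items)
    if n == 0 or math.log2(n).is_integer():
        return list(items)
    target = 2 ** math.ceil(math.log2(n))   # next power of two
    return list(items) + [filler] * (target - n)

print(len(pad_to_power_of_two(['tx'] * 5)))   # 8
print(len(pad_to_power_of_two(['tx'] * 8)))   # 8 (already a power of two)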


Example 16: filter_mapping_bias

def filter_mapping_bias(genomeA, genomeB):
    '''Takes two dicts of hyrbid data mapped on each genome and
    returns a single dict of genomeA data with the biased genes
    removed. GenomeA is ideally the genome with the best assembly.'''

    unbiased = {}
    genesA = list(genomeA.keys())
    genesB = list(genomeB.keys())
    genes = list(set(genesA).intersection(genesB))
    genes.sort()

    for gene in genes:
        
        expAA = genomeA[gene][0]
        expAB = genomeA[gene][1]
        expBA = genomeB[gene][0]
        expBB = genomeB[gene][1]

        # Minimum cutoff of 20 reads per gene, half or more replicates significant
        if genomeA[gene][2] + genomeA[gene][3] > 20 and genomeA[gene][4] >= 0.5:

            valueA = log2(expAA/expAB)
            valueB = log2(expBA/expBB)

            # Cutoff for mapping bias (could play around with this)
            if abs(valueA-valueB) < 1.5:
                unbiased[gene] = valueA

            else: 
                unbiased[gene] = "NA"

        else: 
            unbiased[gene] = "NA"

    return(unbiased)
Author: rmagoglia, Project: Genomics, Lines: 35, Source: ASE_Parse.py


Example 17: __init__

	def __init__(self, Loader, Encoder, Parser, freq_threshold, vectors, candidates):
	
		print("Initializing MDL Learner for this round (loading data).")
		
		#Initialize
		self.language = Encoder.language
		self.Encoder = Encoder
		self.Loader = Loader
		self.Parser = Parser
		self.freq_threshold = freq_threshold
		self.tabu_start = False
		
		#Get fixed units costs per representation type
		self.type_cost = -math.log2(float(1.0/3.0))
		
		number_of_words = len(list(self.Encoder.word_dict.keys()))
		self.lex_cost = -math.log2(float(1.0/number_of_words))
		
		number_of_pos = len(list(self.Encoder.pos_dict.keys()))
		self.pos_cost = -math.log2(float(1.0/number_of_pos))
		
		number_of_domains = len(list(set(self.Encoder.domain_dict.values())))
		self.domain_cost = -math.log2(float(1.0/number_of_domains))
		
		#Load candidate constructions to use as grammar
		self.vectors = vectors
	
		#Reformat candidate to be equal length for numba
		self.candidates = self.Parser.format_grammar(candidates)
Author: jonathandunn, Project: c2xg, Lines: 29, Source: MDL_Learner.py
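
Each cost is the code length of a uniform choice: -log2(1/n) simplifies to log2(n), the number of bits needed to identify one of n equally likely alternatives. A tiny check with a hypothetical tagset size:

import math

number_of_pos = 32                                  # hypothetical tagset size
pos_cost = -math.log2(1.0 / number_of_pos)
assert pos_cost == math.log2(number_of_pos) == 5    # 5 bits to encode 1 of 32 tags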


Example 18: log_value

 def log_value(x):
     import math
     import numpy as np
     if type(x) == list or type(x) == np.ndarray:
         return [math.log2(x_i) for x_i in x if x_i != 0]
     else:
         return math.log2(x)
Author: alexlafleur, Project: LDStreamHMMLearn, Lines: 7, Source: util_math.py


Example 19: test_EquationBC_poisson_matfree

def test_EquationBC_poisson_matfree(eq_type, mat_type, porder, with_bbc):

    # Test standard poisson with EquationBCs
    # matfree

    solver_parameters = {'mat_type': mat_type,
                         'ksp_type': 'gmres',
                         'ksp_atol': 1e-10,
                         'ksp_rtol': 1e-10,
                         'ksp_max_it': 200000,
                         'ksp_divtol': 1e8}
    err = []

    if with_bbc:
        if eq_type == "linear":
            for mesh_num in [8, 16]:
                err.append(linear_poisson_bbc(solver_parameters, mesh_num, porder))
        elif eq_type == "nonlinear":
            for mesh_num in [8, 16]:
                err.append(nonlinear_poisson_bbc(solver_parameters, mesh_num, porder))
    else:
        if eq_type == "linear":
            for mesh_num in [8, 16]:
                err.append(linear_poisson(solver_parameters, mesh_num, porder))
        elif eq_type == "nonlinear":
            for mesh_num in [8, 16]:
                err.append(nonlinear_poisson(solver_parameters, mesh_num, porder))

    assert(abs(math.log2(err[0]) - math.log2(err[1]) - (porder+1)) < 0.01)
Author: firedrakeproject, Project: firedrake, Lines: 29, Source: test_equation_bcs.py


Example 20: compute_mutual_info

def compute_mutual_info(N00, N01, N11, N10):

    if N11 <= 0:
        return 0.

    if N10 <= 0:
        return 0.

    N0x = N01 + N00
    Nx0 = N10 + N00
    N1x = N10 + N11
    Nx1 = N01 + N11
    N = N00 + N01 + N11 + N10

    #print(N00, N01, N10, N11)

    term1 = (N*N11)/(N1x*Nx1)
    term2 = (N*N01)/(N0x*Nx1)
    term3 = (N*N10)/(N1x*Nx0)
    term4 = (N*N00)/(N0x*Nx0)

    w1 = N11 / N
    w2 = N01 / N
    w3 = N10 / N
    w4 = N00 / N

    score = w1 * math.log2(term1) + w2 * math.log2(term2) + w3 * math.log2(term3) + w4 * math.log2(term4)
    return score
Author: tigeroon, Project: bigdata_project, Lines: 28, Source: mutual_information.py
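
The four weighted terms are the standard mutual information of two binary indicators (term occurs / document in class) with base-2 logarithms, where N1x = N10 + N11, Nx1 = N01 + N11, and so on; note that the early returns guard only N11 and N10, so a zero N01 or N00 would still make math.log2 raise a domain error. In LaTeX notation:

I(U;C) = \frac{N_{11}}{N}\log_2\frac{N\,N_{11}}{N_{1\cdot}\,N_{\cdot 1}}
       + \frac{N_{01}}{N}\log_2\frac{N\,N_{01}}{N_{0\cdot}\,N_{\cdot 1}}
       + \frac{N_{10}}{N}\log_2\frac{N\,N_{10}}{N_{1\cdot}\,N_{\cdot 0}}
       + \frac{N_{00}}{N}\log_2\frac{N\,N_{00}}{N_{0\cdot}\,N_{\cdot 0}}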



Note: the math.log2 examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, and the copyright of the source code belongs to those authors; consult each project's license before redistributing or reusing the code. Do not republish without permission.

