
Python utils.normalize Function Code Examples


This article collects typical usage examples of the utils.normalize function in Python. If you have been wondering exactly what normalize does, how to call it, or what real-world uses of it look like, the hand-picked code examples below may help.



The following shows 20 code examples of the normalize function, sorted by popularity by default. You can upvote the examples you like or find useful; that feedback helps the system recommend better Python code examples.
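
The examples come from different projects, so each project's utils.normalize has its own signature and semantics: some rescale an image or feature vector, some turn a vector into a unit direction, and some normalize text. As a rough point of reference, a minimal sketch of the most common variant, min-max scaling of a NumPy array into [0, 1], might look like the following. This is an illustrative assumption, not the implementation used by any particular project below.

import numpy as np

def normalize(arr):
    # Hypothetical min-max normalization (an assumption, not any specific
    # project's implementation): rescale an array into [0, 1].
    arr = np.asarray(arr, dtype=float)
    lo, hi = arr.min(), arr.max()
    if hi == lo:
        return np.zeros_like(arr)  # constant input: avoid division by zero
    return (arr - lo) / (hi - lo)

print(normalize(np.array([2, 4, 6])))  # [0.  0.5 1. ]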

Example 1: evaluate_one_percent

    def evaluate_one_percent(self):
        """
        evaluate one percent of all fonts
        """

        if self.percent >= len(self.df_percents):
            self.got_results = True
            return False

        norm_user_img = utils.normalize(self.user_image)

        fonts   = self.df_percents[self.percent]['aws_bucket_key']
        indices = [(self.font_index_map[font]) for font in fonts]
        imgs    = self.char_array[indices]

        norm_imgs = np.zeros(imgs.shape)

        for i in range(imgs.shape[0]):
            norm_imgs[i] = utils.normalize(imgs[i])

        norm_imgs.shape = (norm_imgs.shape[0], norm_imgs.shape[1]*norm_imgs.shape[2])

        predictions = self.nn.predict_proba(norm_imgs)
        scores      = np.divide(predictions[:,0], np.max(predictions[:,1:], axis=1))

        self.scores.update(dict(zip(fonts, scores)))

        self.percent += 1

        if self.percent % 10 == 0:
            print '{0}% fonts evaluated'.format(self.percent)

        return True
Developer: emitc2h, Project: fontfinder, Lines of code: 33, Source file: engine.py


Example 2: add_doc_attributes

def add_doc_attributes(doc):
    doc_json = proposal_utils.doc_info(doc)
    properties = extract.get_properties(doc_json)

    for name, value in properties.items():
        logger.info("Adding %s attribute", name)
        published = doc.published or datetime.now()
        handle = normalize(name)

        try:
            attr = Attribute.objects.get(proposal=doc.proposal,
                                         handle=handle)
        except Attribute.DoesNotExist:
            attr = Attribute(proposal=doc.proposal,
                             name=name,
                             handle=normalize(name),
                             published=published)
            attr.set_value(value)
        else:
            # TODO: Either mark the old attribute as stale and create a
            # new one or create a record that the value has changed
            if published > attr.published:
                attr.clear_value()
                attr.set_value(value)

        attr.save()

    add_doc_events(doc, properties)

    return doc
Developer: Matt4d, Project: cornerwise, Lines of code: 30, Source file: tasks.py
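
In this example, normalize(name) turns a human-readable attribute name into a stable handle used to look the attribute up again. The cornerwise helper itself is not shown here; a plausible slug-style stand-in (a hypothetical definition, assumed for illustration) could be:

import re

def normalize(name):
    # Hypothetical handle normalization (an assumption, not the cornerwise
    # source): lowercase the attribute name and collapse runs of
    # non-alphanumeric characters into single underscores.
    return re.sub(r'[^a-z0-9]+', '_', name.lower()).strip('_')

print(normalize('Lot Area (sq. ft.)'))  # lot_area_sq_ft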


Example 3: album

    def album(self, album):
        title = normalize(album.name)

        # TODO album years
        #if Prefs["displayAlbumYear"] and album.getYear() != 0:
        #    title = "%s (%s)" % (title, album.getYear())

        cover_url = self.image(album.covers)

        track_count = None

        if album.discs:
            track_count = len(album.discs[0].tracks)

        return DirectoryObject(
            key=route_path('album', album.uri),
            #rating_key=album.uri,

            title=title,
            tagline=', '.join([normalize(ar.name) for ar in album.artists]),

            #track_count=track_count,

            art=cover_url,
            thumb=cover_url,
        )
Developer: fuzeman, Project: Spotify2.bundle, Lines of code: 26, Source file: objects.py


Example 4: triangle

def triangle(i, amp, phase=0):
    phase = normalize(phase, 360)
    i = normalize(i + phase/360.)
    if i < 0.5:
        return amp * 2 * i
    else:
        return amp * 2 * (1 - i)
Developer: iterati, Project: flamelfo, Lines of code: 7, Source file: lfos.py
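
Here normalize is called in two ways, normalize(phase, 360) and normalize(i + phase/360.), which suggests a wrap-around helper that maps a value into [0, period). A minimal sketch under that assumption (not the flamelfo source itself), together with a quick check of the triangle wave defined above:

def normalize(x, period=1.0):
    # Assumed wrap-around behaviour: map x into the half-open range [0, period).
    return x % period

print(normalize(370, 360))      # 10
print(triangle(0.25, amp=1.0))  # 0.5 on the rising edge
print(triangle(0.75, amp=1.0))  # 0.5 on the falling edge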


Example 5: _tick

    def _tick(self):
        if self.has_rock:
            # Try to drop at base.
            if self._drop_available():
                self.has_rock = False
                self.world.rock_collected()
                return

            # Call for a carrier to pick up.
            self._broadcast_come_message()

            # Head towards base if carriers not available.
            if not self.world.carriers:
                self.dx, self.dy = normalize(self.world.mars_base.x - self.x,
                                             self.world.mars_base.y - self.y)
            else:
                return
        else:
            # Pick up.
            rock = self._rock_available()
            if rock:
                self.has_rock = True
                self.world.remove_entity(rock)
                return

            # Head towards rock.
            rock = self._sense_rock()
            if rock:
                self.dx, self.dy = normalize(rock.x - self.x, rock.y - self.y)

        # Keep walkin'.
        while not self._can_move():
            self.dx, self.dy = self._get_new_direction()
        self._move()
Developer: mihneadb, Project: mars-explorer, Lines of code: 34, Source file: explorer.py
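
In this example, normalize(dx, dy) converts the vector from the explorer to its target into a unit direction to walk along. A plausible two-argument definition under that assumption (not the mars-explorer source):

import math

def normalize(dx, dy):
    # Assumed behaviour: return the unit-length direction for (dx, dy),
    # leaving the zero vector unchanged.
    length = math.hypot(dx, dy)
    if length == 0:
        return 0.0, 0.0
    return dx / length, dy / length

print(normalize(3, 4))  # (0.6, 0.8)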


Example 6: track

    def track(self, track, index=None):
        rating_key = track.uri

        if index is not None:
            rating_key = '%s::%s' % (track.uri, index)

        cover_url = self.image(track.album.covers)

        return TrackObject(
            items=[
                MediaObject(
                    parts=[PartObject(
                        key=self.client.track_url(track),
                        duration=int(track.duration)
                    )],
                    duration=int(track.duration),
                    container=Container.MP3,
                    audio_codec=AudioCodec.MP3
                )
            ],

            key=route_path('metadata', str(track.uri)),
            rating_key=quote(rating_key),

            title=normalize(track.name),
            album=normalize(track.album.name),
            artist=', '.join([normalize(ar.name) for ar in track.artists]),

            index=int(track.number),
            duration=int(track.duration),

            art=cover_url,
            thumb=cover_url
        )
Developer: fuzeman, Project: Spotify2.bundle, Lines of code: 34, Source file: objects.py


Example 7: square

def square(i, amp, phase=0):
    phase = normalize(phase, 360)
    i = normalize(i + phase/360.)
    if i < 0.5:
        return 0
    else:
        return amp
Developer: iterati, Project: flamelfo, Lines of code: 7, Source file: lfos.py


Example 8: SIM

def SIM(saliency_map1, saliency_map2):
    '''
    Similarity between two different saliency maps when viewed as distributions
    (SIM=1 means the distributions are identical).

    This similarity measure is also called **histogram intersection**.

    Parameters
    ----------
    saliency_map1 : real-valued matrix
        If the two maps are different in shape, saliency_map1 will be resized to match saliency_map2.
    saliency_map2 : real-valued matrix

    Returns
    -------
    SIM : float, between [0,1]
    '''
    map1 = np.array(saliency_map1, copy=False)
    map2 = np.array(saliency_map2, copy=False)
    if map1.shape != map2.shape:
        map1 = resize(map1, map2.shape, order=3, mode='nearest') # bi-cubic/nearest is what Matlab imresize() does by default
    # Normalize the two maps to have values between [0,1] and sum up to 1
    map1 = normalize(map1, method='range')
    map2 = normalize(map2, method='range')
    map1 = normalize(map1, method='sum')
    map2 = normalize(map2, method='sum')
    # Compute histogram intersection
    intersection = np.minimum(map1, map2)
    return np.sum(intersection)
Developer: herrlich10, Project: saliency, Lines of code: 29, Source file: metrics.py
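
SIM depends on a normalize helper that supports method='range' (rescale to [0, 1]) and method='sum' (divide by the total), as the comment in the snippet states. A minimal stand-in with those two modes (a sketch consistent with how it is called above, not necessarily the saliency project's exact implementation), plus a sanity check that a map compared with itself scores 1:

import numpy as np

def normalize(arr, method='range'):
    # Stand-in for utils.normalize as used by SIM (an assumption):
    # 'range' rescales to [0, 1]; 'sum' makes the entries sum to 1.
    arr = np.asarray(arr, dtype=float)
    if method == 'range':
        lo, hi = arr.min(), arr.max()
        return (arr - lo) / (hi - lo) if hi > lo else np.zeros_like(arr)
    if method == 'sum':
        total = arr.sum()
        return arr / total if total != 0 else arr
    raise ValueError('unknown method: %r' % method)

rng = np.random.default_rng(0)
saliency_map = rng.random((32, 32))
# Same shape on both sides, so the resize branch in SIM is never taken.
print(SIM(saliency_map, saliency_map))  # 1.0, up to floating-point rounding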


Example 9: write_xml

def write_xml(matching, f):
    """ `matching` contains a list of pairs (tagged_string, its_superstring)
        Tagged superstrings are written to the file `f'
    """
    for (tagged, raw) in matching:
        print >>f, '<aff>'
        #print tagged, raw
        i = 0
        tag_to_write = None
        for c in tagged:
            if not normalize(c):
                continue
            if len(c) > 1 and c[1] == '/': # closing tag
                f.write(c)
            elif len(c) > 1: # opening tag
                if tag_to_write:
                    f.write(tag_to_write)
                    tag_to_write = None
                tag_to_write = c
            else:
                while normalize(raw[i]) != normalize(c):
                    f.write(xml_escape(raw[i]))
                    i += 1

                if tag_to_write:
                    f.write(tag_to_write)
                    tag_to_write = None
                f.write(xml_escape(raw[i]))
                i += 1
            
        f.write(''.join(xml_escape(c) for c in raw[i:]))

        print >>f
        print >>f, '</aff>'
Developer: donvel, Project: affiliations, Lines of code: 34, Source file: match_text.py


Example 10: generate_smooth_normals

def generate_smooth_normals(vertices, faces):

    print "generating normals for", vertices.shape[0], "vertices"

    vertex_normals = [[] for _ in xrange(vertices.shape[0])]

    print len(vertex_normals), "vertices"

    normalize = lambda n: n / numpy.sqrt(numpy.sum(n ** 2))

    for i in range(faces.shape[0]):
        face_vertices = faces[i, :]
        v1, v2, v3 = [vertices[face_vertices[j], :] for j in range(3)]

        n = normalize(numpy.cross(v2 - v1, v3 - v1))
        for v in face_vertices:
            vertex_normals[v].append(n)

    normals = numpy.ones(vertices.shape)
    for i in range(normals.shape[0]):
        if len(vertex_normals[i]) == 0:
            print "WARNING: no normal for vertex", i
            continue
        avg_normal = numpy.mean(numpy.vstack(vertex_normals[i]), 0)
        normals[i, :] = normalize(avg_normal)

    return normals
Developer: oseiskar, Project: raytracer, Lines of code: 27, Source file: triangle_mesh.py


Example 11: normalize_dataset

def normalize_dataset(dataset):
    fiveMinuteMean = dataset['fiveMinuteMean']
    trafficVolume = dataset['trafficVolume']
    actualTravelTime = dataset['actualTravelTime']
    dataset['fiveMinuteMean'] = normalize(fiveMinuteMean, min(fiveMinuteMean), max(fiveMinuteMean))
    dataset['trafficVolume'] = normalize(trafficVolume, min(trafficVolume), max(trafficVolume))
    dataset['actualTravelTime'] = normalize(actualTravelTime, min(actualTravelTime), max(actualTravelTime))
Developer: ajanigyasi, Project: master, Lines of code: 7, Source file: lokrr.py
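
normalize_dataset passes each column together with its own minimum and maximum, so the underlying helper is presumably a three-argument min-max scaler. A sketch under that assumption (not the lokrr source), followed by a small check with a hypothetical dataset dict:

def normalize(values, min_value, max_value):
    # Assumed three-argument min-max scaling into [0, 1] using the given bounds.
    span = max_value - min_value
    if span == 0:
        return [0.0 for _ in values]  # constant column
    return [(v - min_value) / span for v in values]

dataset = {'fiveMinuteMean': [50, 75, 100],
           'trafficVolume': [10, 20, 30],
           'actualTravelTime': [300, 450, 600]}
normalize_dataset(dataset)
print(dataset['fiveMinuteMean'])  # [0.0, 0.5, 1.0]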


Example 12: get_left_elbow_yaw

def get_left_elbow_yaw(kinect_pos, shoulder_roll=None, shoulder_pitch=None, world=None):
    if world is None:
        world = get_robot_world(kinect_pos)
    if shoulder_roll is None:
        shoulder_roll = get_left_shoulder_roll(kinect_pos, world)
    if shoulder_pitch is None:
        shoulder_pitch = get_left_shoulder_pitch(kinect_pos, world)
    shoulder = kinect_pos[kinecthandler.joints_map[joints.SHOULDER_LEFT]]
    elbow = kinect_pos[kinecthandler.joints_map[joints.ELBOW_LEFT]]
    wrist = kinect_pos[kinecthandler.joints_map[joints.WRIST_LEFT]]
    pitch_matrix = np.matrix([[1, 0, 0],
                              [0, np.cos(shoulder_pitch), -np.sin(shoulder_pitch)],
                              [0, np.sin(shoulder_pitch), np.cos(shoulder_pitch)]])
    roll_matrix = np.matrix([[np.cos(shoulder_roll), 0, np.sin(shoulder_roll)],
                             [0, 1, 0],
                             [-np.sin(shoulder_roll), 0, np.cos(shoulder_roll)]])
    transform = world[0] * pitch_matrix * roll_matrix
    elbow_shoulder = utils.get_vector(shoulder, elbow, transform=transform)
    elbow_shoulder = utils.normalize(elbow_shoulder)
    modified_elbow = [elbow[0], elbow[1] + 2, elbow[2]]
    elbow_vertical = utils.get_vector(modified_elbow, elbow, transform=transform)
    elbow_wrist = utils.get_vector(wrist, elbow, transform=transform)
    elbow_wrist = utils.normalize([elbow_wrist[0], elbow_wrist[1]])
    cross_arm = np.cross(elbow_vertical, elbow_shoulder)
    cross_arm = utils.normalize([cross_arm[0], cross_arm[1]])
    # cross_arm = np.array([cross_arm[0], cross_arm[1]])
    # elbow_wrist = np.array([elbow_wrist[0], elbow_wrist[1]])
    sign = -1
    if elbow_wrist[1] > 0:
        sign = 1
    dot = utils.normalized_dot(elbow_wrist, cross_arm)
    return sign * (np.arccos(dot))
Developer: Angeall, Project: pyKinectNAO, Lines of code: 32, Source file: converter.py
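
This converter uses utils.normalize to turn difference vectors into unit vectors before taking cross and dot products, and utils.normalized_dot to feed np.arccos. Plausible definitions under those assumptions (not the pyKinectNAO source):

import numpy as np

def normalize(v):
    # Assumed behaviour: scale a vector to unit length; leave zero vectors as-is.
    v = np.asarray(v, dtype=float)
    n = np.linalg.norm(v)
    return v / n if n > 0 else v

def normalized_dot(a, b):
    # Dot product of the unit vectors, clipped so np.arccos stays defined.
    return float(np.clip(np.dot(normalize(a), normalize(b)), -1.0, 1.0))

print(normalized_dot([1, 0], [0, 1]))  # 0.0, i.e. arccos gives pi/2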


Example 13: random_rhyme

    def random_rhyme(self):
        c1 = 'a'
        c2 = 'a'
        while not rhymes_with(c1, c2, self.span):
            c1 = normalize(random.choice(self.words))
            c2 = normalize(random.choice(self.words))
        return (c1, c2)
Developer: donvel, Project: elektrybalt, Lines of code: 7, Source file: rhymes.py


Example 14: generate_features

    def generate_features(self):
        # prepare variables
        img_lab = rgb2lab(self._img)
        segments = slic(img_lab, n_segments=500, compactness=30.0, convert2lab=False)
        max_segments = segments.max() + 1

        # create x, y coordinate features
        shape = self._img.shape
        a = shape[0]
        b = shape[1]
        x_axis = np.linspace(0, b - 1, num=b)
        y_axis = np.linspace(0, a - 1, num=a)

        x_coordinate = np.tile(x_axis, (a, 1,))  # build the x-coordinate grid
        y_coordinate = np.tile(y_axis, (b, 1,))  # build the y-coordinate grid
        y_coordinate = np.transpose(y_coordinate)

        coordinate_segments_mean = np.zeros((max_segments, 2))

        # create Lab color features
        img_l = img_lab[:, :, 0]
        img_a = img_lab[:, :, 1]
        img_b = img_lab[:, :, 2]

        img_segments_mean = np.zeros((max_segments, 3))

        for i in xrange(max_segments):
            segments_i = segments == i

            coordinate_segments_mean[i, 0] = x_coordinate[segments_i].mean()
            coordinate_segments_mean[i, 1] = y_coordinate[segments_i].mean()

            img_segments_mean[i, 0] = img_l[segments_i].mean()
            img_segments_mean[i, 1] = img_a[segments_i].mean()
            img_segments_mean[i, 2] = img_b[segments_i].mean()

        # element distribution
        wc_ij = np.exp(-cdist(img_segments_mean, img_segments_mean) ** 2 / (2 * self._sigma_distribution ** 2))
        wc_ij = wc_ij / wc_ij.sum(axis=1)[:, None]
        mu_i = np.dot(wc_ij, coordinate_segments_mean)
        distribution = np.dot(wc_ij, np.linalg.norm(coordinate_segments_mean - mu_i, axis=1) ** 2)
        distribution = normalize(distribution)
        distribution = np.array([distribution]).T

        # element uniqueness feature
        wp_ij = np.exp(
            -cdist(coordinate_segments_mean, coordinate_segments_mean) ** 2 / (2 * self._sigma_uniqueness ** 2))
        wp_ij = wp_ij / wp_ij.sum(axis=1)[:, None]
        uniqueness = np.sum(cdist(img_segments_mean, img_segments_mean) ** 2 * wp_ij, axis=1)
        uniqueness = normalize(uniqueness)
        uniqueness = np.array([uniqueness]).T

        # save features and variables
        self.img_lab = img_lab
        self.segments = segments
        self.img_segments_mean = img_segments_mean
        self.coordinate_segments_mean = coordinate_segments_mean
        self.uniqueness = uniqueness
        self.distribution = distribution
Developer: lee88688, Project: saliency_method, Lines of code: 59, Source file: sf_method.py


Example 15: rhymes_with

def rhymes_with(c1, c2, span):
    w1 = normalize(c1)
    w2 = normalize(c2)
    if len(w1) < 3 or len(w2) < 3:
        return False
    if w1 in w2 or w2 in w1:
        return False
    return get_end(w1, span) == get_end(w2, span)
Developer: donvel, Project: elektrybalt, Lines of code: 8, Source file: rhymes.py
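
Examples 9, 13 and 15 apply normalize to words or single characters and treat an empty result as "nothing meaningful left" (write_xml skips a token when `not normalize(c)`). A hypothetical text normalizer consistent with that usage (an assumption, not the actual project source):

import re

def normalize(text):
    # Assumed behaviour: lowercase and strip everything that is not a word
    # character, so punctuation- or whitespace-only input becomes '' (falsy).
    return re.sub(r'\W+', '', text).lower()

print(normalize('Kot,'))      # 'kot'
print(repr(normalize(' ')))   # '' -> falsy, skipped by write_xml above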


Example 16: __computeNewMask__

	def __computeNewMask__(self,frame_feat,shape,models):
		assert(shape[0]*shape[1] == frame_feat.__len__())
		(fg_eigt,fg_mean) = models[0]
		fg_score = normalize(np.sum(np.dot(frame_feat-fg_mean,fg_eigt.transpose()),1))
		(bg_eigt,bg_mean) = models[1]
		bg_score = normalize(np.sum(np.dot(frame_feat-bg_mean,bg_eigt.transpose()),1))
		frames_mask =  (fg_score>bg_score+0.2).reshape((shape[0],shape[1]))
		return frames_mask;
Developer: sudhargk, Project: video-annotator, Lines of code: 8, Source file: eigen_based.py


Example 17: generate_training_sample

    def generate_training_sample(self):
        """
        A function to generate the training samples
        """
    
        random.seed(42)

        ## normalize the image
        norm_img = utils.normalize(self.user_image)
    
        ## Get image dimensions
        w,h = norm_img.shape
        assert w == h, 'Character image should be square'
    
        ## Obtain similar enough random fonts
        random_fonts = []

        n_fonts = self.char_array.shape[0]

        endloop = 0

        n_random = self.n_random

        while len(random_fonts) < n_random:

            rdn_img = self.char_array[random.randint(0, n_fonts)]

            rdn_norm_img = utils.normalize(rdn_img)
            pbp          = utils.pixbypix_similarity(rdn_norm_img, norm_img)
            if (pbp < 0.9999):
                random_fonts.append(np.ravel(rdn_norm_img))

            ## Bail out of the loop if not enough similar fonts are found
            if endloop > 20000:
                n_random = len(random_fonts)
                break

            endloop += 1

        print 'Found {0} fonts for the random sample'.format(n_random)

        ## Put together the different types of training samples    
        n_signal = n_random

        n_variations = n_signal//4

        variations = []
        variations += utils.scale_variations(norm_img, scale_factors=np.linspace(0.95, 0.99, n_variations))
        variations += utils.skew_variations(norm_img, vertical_shear=np.linspace(-0.02, 0.02, math.ceil(math.sqrt(n_variations))), horizontal_shear=np.linspace(-0.02, 0.02, math.ceil(math.sqrt(n_variations))))
        variations += utils.rotate_variations(norm_img, angles=np.linspace(-5,5, n_variations))
        variations += [norm_img]*n_variations

        signal = [np.ravel(var) for var in variations]

        self.X = np.stack(signal + random_fonts, axis=0)
        self.y = np.array([0]*len(signal) + range(1, n_random+1))
Developer: emitc2h, Project: fontfinder, Lines of code: 56, Source file: engine.py


Example 18: do_mstep_b

def do_mstep_b(d):
    result = np.zeros( [ number_of_topics ])
    for z in range(number_of_topics):
        s = 0
        for w_index in range(vocabulary_size):
            count = term_doc_matrix[d][w_index]
            s = s + count * topic_prob[d, w_index, z]
        result[z] = s
    normalize(result)
    return result
Developer: Huskyeder, Project: PLSA-1, Lines of code: 10, Source file: plsa_multi.py


Example 19: do_estep

def do_estep(d):
    result = np.zeros([vocabulary_size, number_of_topics])
    
    for w in range(vocabulary_size):
        prob = document_topic_prob[d, :] * topic_word_prob[:, w]
        if sum(prob) == 0.0:
            print 'exit'
        else:
            normalize(prob)
        result[w] = prob
    return result
Developer: Huskyeder, Project: PLSA-1, Lines of code: 11, Source file: plsa_multi.py


Example 20: do_mstep_a

def do_mstep_a(t):
    result = np.zeros([ vocabulary_size ])

    for w_index in range(vocabulary_size):
        s = 0
        for d_index in range(number_of_documents):
            count = term_doc_matrix[d_index][w_index]
            s = s + count * topic_prob[d_index, w_index, t]
        result[w_index] = s
    normalize(result)
    return result
Developer: Huskyeder, Project: PLSA-1, Lines of code: 11, Source file: plsa_multi.py
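
The three PLSA snippets (Examples 18-20) call normalize(result) and ignore the return value, so the helper presumably normalizes a NumPy vector in place so that it sums to 1. A sketch under that assumption (not necessarily the plsa_multi source):

import numpy as np

def normalize(vec):
    # Assumed in-place sum-normalization: scale the array so its entries sum
    # to 1, modifying it in place (the callers above discard the return value).
    total = vec.sum()
    if total > 0:
        vec /= total

p = np.array([2.0, 1.0, 1.0])
normalize(p)
print(p)  # [0.5  0.25 0.25]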



Note: The utils.normalize examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors; distribution and use should follow the corresponding project's license. Please do not republish without permission.

