
Python methods.normalize Function Code Examples


This article collects typical usage examples of the methods.normalize function in Python. If you have been wondering what normalize does, how to call it, or what it looks like in real code, the curated examples below should help.



Twenty code examples of the normalize function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
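For context before the examples: in the Pupil codebase, normalize maps a pixel coordinate into the unit square relative to a given size, optionally flipping the y-axis, and denormalize is its inverse. The sketch below is a minimal reconstruction inferred from how the examples call these functions, not the verbatim methods.py source:

    def normalize(pos, size, flip_y=False):
        # Map a pixel coordinate to (0..1, 0..1) relative to `size`.
        x = pos[0] / float(size[0])
        y = pos[1] / float(size[1])
        if flip_y:
            y = 1 - y  # image origin is top-left; flip to a bottom-left convention
        return x, y

    def denormalize(pos, size, flip_y=False):
        # Inverse of normalize: map a (0..1, 0..1) coordinate back to pixels.
        x = pos[0] * size[0]
        y = pos[1] * size[1]
        if flip_y:
            y = size[1] - y
        return x, y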

Example 1: update

    def update(self,frame,recent_pupil_positions,events):
        img = frame.img
        self.img_shape = frame.img.shape

        if self.robust_detection.value:
            self.markers = detect_markers_robust(img,
                                                grid_size = 5,
                                                prev_markers=self.markers,
                                                min_marker_perimeter=self.min_marker_perimeter,
                                                aperture=self.aperture.value,
                                                visualize=0,
                                                true_detect_every_frame=3)
        else:
            self.markers = detect_markers_simple(img,
                                                grid_size = 5,
                                                min_marker_perimeter=self.min_marker_perimeter,
                                                aperture=self.aperture.value,
                                                visualize=0)

        # locate surfaces
        for s in self.surfaces:
            s.locate(self.markers)
            if s.detected:
                events.append({'type':'marker_ref_surface','name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp})

        if self.draw_markers.value:
            draw_markers(img,self.markers)

        # edit surfaces by user
        if self.surface_edit_mode:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos,glfwGetWindowSize(window))
            pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels

            for s,v_idx in self.edit_surfaces:
                if s.detected:
                    pos = normalize(pos,(self.img_shape[1],self.img_shape[0]),flip_y=True)
                    new_pos =  s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx,new_pos)

        #map recent gaze onto detected surfaces used for pupil server
        for s in self.surfaces:
            if s.detected:
                s.gaze_on_srf = []
                for p in recent_pupil_positions:
                    if p['norm_pupil'] is not None:
                        gp_on_s = tuple(s.img_to_ref_surface(np.array(p['norm_gaze'])))
                        p['realtime gaze on '+s.name] = gp_on_s
                        s.gaze_on_srf.append(gp_on_s)


        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()
Developer: Azique, Project: pupil, Lines: 58, Source: marker_detector.py
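The normalize/denormalize pairing in Example 1 is a recurring idiom across these snippets: cursor coordinates are normalized against the window size, then denormalized against the image size, converting between the two coordinate spaces. A small illustration with made-up numbers, assuming the sketch above:

    pos = (640, 360)                      # cursor position in a 1280x720 window
    pos = normalize(pos, (1280, 720))     # -> (0.5, 0.5), window-relative
    pos = denormalize(pos, (1920, 1080))  # -> (960.0, 540.0), image pixels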


Example 2: _checkFrame

    def _checkFrame(self, img):
        img_size = img.shape[1], img.shape[0]

        # Check whole frame
        if not self._flag_check_roi:
            b0, b1, b2, b3 = 0, img_size[0], 0, img_size[1]

        # Check roi
        else:
            previous_fingertip_center = self._previous_finger_dict["screen_pos"]
            # Set up the boundary of the roi
            temp = img_size[0] / 16
            if self._predict_motion is not None:
                predict_center = (
                    previous_fingertip_center[0] + self._predict_motion[0],
                    previous_fingertip_center[1] + self._predict_motion[1],
                )
                b0 = predict_center[0] - temp * 0.5 - abs(self._predict_motion[0]) * 2
                b1 = predict_center[0] + temp * 0.5 + abs(self._predict_motion[0]) * 2
                b2 = predict_center[1] - temp * 0.8 - abs(self._predict_motion[1]) * 2
                b3 = predict_center[1] + temp * 2.0 + abs(self._predict_motion[1]) * 2
            else:
                predict_center = previous_fingertip_center
                b0 = predict_center[0] - temp * 0.5
                b1 = predict_center[0] + temp * 0.5
                b2 = predict_center[1] - temp * 0.8
                b3 = predict_center[1] + temp * 2.0

            b0 = 0 if b0 < 0 else int(b0)
            b1 = img_size[0] - 1 if b1 > img_size[0] - 1 else int(b1)
            b2 = 0 if b2 < 0 else int(b2)
            b3 = img_size[1] - 1 if b3 > img_size[1] - 1 else int(b3)
            col_slice = b0, b1
            row_slice = b2, b3
            img = img[slice(*row_slice), slice(*col_slice)]

        handmask = self.method.generateMask(img)
        handmask_smooth = self._smoothmask(handmask)
        f_dict = self._findFingertip(handmask_smooth, img_size, b0, b2)

        if f_dict is not None:
            norm_pos = normalize(f_dict["fingertip_center"], img_size, flip_y=True)
            norm_rect_points = [
                normalize(p, img_size, flip_y=True) for p in f_dict["rect_points"]
            ]
            return {
                "screen_pos": f_dict["fingertip_center"],
                "norm_pos": norm_pos,
                "norm_rect_points": norm_rect_points,
            }
        else:
            return None
Developer: pupil-labs, Project: pupil, Lines: 52, Source: fingertip_detector.py
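Example 2 passes flip_y=True, which (under the sketch above) flips the vertical axis so y grows upward rather than downward as in image arrays. For instance:

    normalize((100, 100), (1000, 500))               # -> (0.1, 0.2)
    normalize((100, 100), (1000, 500), flip_y=True)  # -> (0.1, 0.8)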


Example 3: on_button

 def on_button(button, pressed):
     if not atb.TwEventMouseButtonGLFW(button,pressed):
         if pressed:
             pos = glfwGetMousePos()
             pos = normalize(pos,glfwGetWindowSize())
             pos = denormalize(pos,(img.shape[1],img.shape[0]) ) #pos in img pixels
             ref.detector.new_ref(pos)
Developer: pangshu2007, Project: CodeForRead, Lines: 7, Source: world.py


Example 4: _map_monocular

    def _map_monocular(self,p):
        if '3d' not in p['method']:
            return None

        gaze_point =  np.array(p['circle_3d']['normal'] ) * self.gaze_distance  + np.array( p['sphere']['center'] )

        image_point, _  =  cv2.projectPoints( np.array([gaze_point]) , self.rotation_vector, self.translation_vector , self.camera_matrix , self.dist_coefs )
        image_point = image_point.reshape(-1,2)
        image_point = normalize( image_point[0], self.world_frame_size , flip_y = True)

        eye_center = self.toWorld(p['sphere']['center'])
        gaze_3d = self.toWorld(gaze_point)
        normal_3d = np.dot( self.rotation_matrix, np.array( p['circle_3d']['normal'] ) )

        g = {   'norm_pos':image_point,
                'eye_center_3d':eye_center.tolist(),
                'gaze_normal_3d':normal_3d.tolist(),
                'gaze_point_3d':gaze_3d.tolist(),
                'confidence':p['confidence'],
                'timestamp':p['timestamp'],
                'base_data':[p]}

        if self.visualizer.window:
            self.gaze_pts_debug.append( gaze_3d )
            self.sphere['center'] = eye_center #eye camera coordinates
            self.sphere['radius'] = p['sphere']['radius']
        return g
Developer: prinkkala, Project: pupil, Lines: 27, Source: gaze_mappers.py


Example 5: on_button

 def on_button(window,button, action, mods):
     if not atb.TwEventMouseButtonGLFW(button,action):
         pos = glfwGetCursorPos(window)
         pos = normalize(pos,glfwGetWindowSize(world_window))
         pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
         for p in g_pool.plugins:
             p.on_click(pos,button,action)
Developer: foretama, Project: pupil, Lines: 7, Source: world.py


Example 6: on_button

 def on_button(window,button, action, mods):
     g_pool.gui.update_button(button,action,mods)
     pos = glfwGetCursorPos(window)
     pos = normalize(pos,glfwGetWindowSize(window))
     pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
     for p in g_pool.plugins:
         p.on_click(pos,button,action)
Developer: PolaviejaLab, Project: pupil, Lines: 7, Source: main.py


Example 7: update

    def update(self,frame,recent_pupil_positions,events):
        if self.active:
            img = frame.img
            if self.first_img is None:
                self.first_img = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)

            self.detected = False

            if self.count:
                gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
                # in cv2.3, nextPts is wrongly required as an argument.
                nextPts_dummy = self.point.copy()
                nextPts,status, err = cv2.calcOpticalFlowPyrLK(self.first_img,gray,self.point,nextPts_dummy,winSize=(100,100))
                if status[0]:
                    self.detected = True
                    self.point = nextPts
                    self.first_img = gray
                    nextPts = nextPts[0]
                    self.pos = normalize(nextPts,(img.shape[1],img.shape[0]),flip_y=True)
                    self.count -=1

                    ref = {}
                    ref["norm_pos"] = self.pos
                    ref["timestamp"] = frame.timestamp
                    self.ref_list.append(ref)

            #always save pupil positions
            for p_pt in recent_pupil_positions:
                if p_pt['norm_pupil'] is not None:
                    self.pupil_list.append(p_pt)
Developer: WillemVlakveld, Project: pupil, Lines: 30, Source: natural_features_calibration.py


Example 8: update

    def update(self,frame,events):

        gaze_pts = []
        for p in events['pupil_positions']:
            if p['method'] == '3d c++' and p['confidence'] > self.g_pool.pupil_confidence_threshold:

                gaze_point =  np.array(p['circle_3d']['normal'] ) * self.gaze_distance  + np.array( p['sphere']['center'] )

                image_point, _  =  cv2.projectPoints( np.array([gaze_point]) , self.rotation_vector, self.translation_vector , self.camera_matrix , self.dist_coefs )
                image_point = image_point.reshape(-1,2)
                image_point = normalize( image_point[0], (frame.width, frame.height) , flip_y = True)

                eye_center = self.toWorld(p['sphere']['center'])
                gaze_3d = self.toWorld(gaze_point)
                normal_3d = np.dot( self.rotation_matrix, np.array( p['circle_3d']['normal'] ) )

                gaze_pts.append({   'norm_pos':image_point,
                                    'eye_center_3d':eye_center.tolist(),
                                    'gaze_normal_3d':normal_3d.tolist(),
                                    'gaze_point_3d':gaze_3d.tolist(),
                                    'confidence':p['confidence'],
                                    'timestamp':p['timestamp'],
                                    'base':[p]})

                if self.visualizer.window:
                    self.gaze_pts_debug.append( gaze_3d )
                    self.sphere['center'] = eye_center #eye camera coordinates
                    self.sphere['radius'] = p['sphere']['radius']

        events['gaze_positions'] = gaze_pts
Developer: JihadOsakaUniversity, Project: pupil, Lines: 30, Source: gaze_mappers.py


Example 9: recent_events

    def recent_events(self, events):
        frame = events.get("frame")
        if not frame:
            return
        if self.drag_offset is not None:
            pos = glfwGetCursorPos(glfwGetCurrentContext())
            pos = normalize(pos, glfwGetWindowSize(glfwGetCurrentContext()))
            pos = denormalize(
                pos, (frame.img.shape[1], frame.img.shape[0])
            )  # Position in img pixels
            self.pos[0] = pos[0] + self.drag_offset[0]
            self.pos[1] = pos[1] + self.drag_offset[1]

        if self.watermark is not None:
            # keep in image bounds, do this even when not dragging because the image sizes could change.
            self.pos[1] = max(
                0,
                min(frame.img.shape[0] - self.watermark.shape[0], max(self.pos[1], 0)),
            )
            self.pos[0] = max(
                0,
                min(frame.img.shape[1] - self.watermark.shape[1], max(self.pos[0], 0)),
            )
            pos = int(self.pos[0]), int(self.pos[1])
            img = frame.img
            roi = (
                slice(pos[1], pos[1] + self.watermark.shape[0]),
                slice(pos[0], pos[0] + self.watermark.shape[1]),
            )
            w_roi = slice(0, img.shape[0] - pos[1]), slice(0, img.shape[1] - pos[0])
            img[roi] = self.watermark[w_roi] * self.alpha_mask[w_roi] + img[roi] * (
                1 - self.alpha_mask[w_roi]
            )
Developer: pupil-labs, Project: pupil, Lines: 33, Source: vis_watermark.py


Example 10: on_resize

 def on_resize(window,w, h):
     active_window = glfwGetCurrentContext()
     glfwMakeContextCurrent(window)
     norm_size = normalize((w,h),glfwGetWindowSize(window))
     fb_size = denormalize(norm_size,glfwGetFramebufferSize(window))
     atb.TwWindowSize(*map(int,fb_size))
     glfwMakeContextCurrent(active_window)
Developer: Esperadoce, Project: pupil, Lines: 7, Source: main.py
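Example 10 repurposes the pair as a generic rescaling helper: normalizing against the window size and denormalizing against the framebuffer size converts window coordinates to framebuffer pixels, which differ on HiDPI displays. With assumed numbers for a 2x display:

    norm_size = normalize((800, 600), (800, 600))   # -> (1.0, 1.0)
    fb_size = denormalize(norm_size, (1600, 1200))  # -> (1600.0, 1200.0)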


Example 11: get_markers_data

def get_markers_data(detection, img_size, timestamp):
    return {
        "id": detection.tag_id,
        "verts": detection.corners[::-1].tolist(),
        "centroid": normalize(detection.center, img_size, flip_y=True),
        "timestamp": timestamp,
    }
Developer: pupil-labs, Project: pupil, Lines: 7, Source: detection_worker.py


Example 12: uroi_on_mouse_button

 def uroi_on_mouse_button(button, action, mods):
     if g_pool.display_mode == "roi":
         if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
             g_pool.u_r.active_edit_pt = False
             # if the ROI interacts, we don't want
             # the GUI to interact as well
             return
         elif action == glfw.GLFW_PRESS:
             x, y = glfw.glfwGetCursorPos(main_window)
             # pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
             x *= hdpi_factor
             y *= hdpi_factor
             pos = normalize((x, y), camera_render_size)
             if g_pool.flip:
                 pos = 1 - pos[0], 1 - pos[1]
             pos = denormalize(
                 pos, g_pool.capture.frame_size
             )  # Position in img pixels
             if g_pool.u_r.mouse_over_edit_pt(
                 pos, g_pool.u_r.handle_size, g_pool.u_r.handle_size
             ):
                 # if the ROI interacts, we don't want
                 # the GUI to interact as well
                 return
Developer: pupil-labs, Project: pupil, Lines: 25, Source: eye.py


Example 13: update

    def update(self,frame,events):

        pupil_pts_0 = []
        pupil_pts_1 = []
        for p in events['pupil_positions']:
            if p['confidence'] > self.g_pool.pupil_confidence_threshold:
                if p['id'] == 0:
                    pupil_pts_0.append(p)
                else:
                    pupil_pts_1.append(p)

        # try binocular mapping (needs at least 1 pupil position in each list)
        gaze_pts = []
        if len(pupil_pts_0) > 0 and len(pupil_pts_1) > 0:
            gaze_pts = self.map_binocular_intersect(pupil_pts_0, pupil_pts_1 ,frame )
        # fallback to monocular if something went wrong
        else:
            for p in pupil_pts_0:

                gaze_point =  np.array(p['circle_3d']['normal'] ) * self.last_gaze_distance  + np.array( p['sphere']['center'] )

                image_point, _  =  cv2.projectPoints( np.array([gaze_point]) , self.rotation_vector0, self.translation_vector0 , self.camera_matrix , self.dist_coefs )
                image_point = image_point.reshape(-1,2)
                image_point = normalize( image_point[0], (frame.width, frame.height) , flip_y = True)
                gaze_pts.append({'norm_pos':image_point,'confidence':p['confidence'],'timestamp':p['timestamp'],'base':[p]})

                if self.visualizer.window:
                    self.gaze_pts_debug0.append( self.eye0_to_World(gaze_point) )
                    self.sphere0['center'] = self.eye0_to_World(p['sphere']['center']) #eye camera coordinates
                    self.sphere0['radius'] = p['sphere']['radius']


            for p in pupil_pts_1:

                gaze_point =  np.array(p['circle_3d']['normal'] ) * self.last_gaze_distance  + np.array( p['sphere']['center'] )

                image_point, _  =  cv2.projectPoints( np.array([gaze_point]) , self.rotation_vector1, self.translation_vector1 , self.camera_matrix , self.dist_coefs )
                image_point = image_point.reshape(-1,2)
                image_point = normalize( image_point[0], (frame.width, frame.height) , flip_y = True)
                gaze_pts.append({'norm_pos':image_point,'confidence':p['confidence'],'timestamp':p['timestamp'],'base':[p]})

                if self.visualizer.window:
                    self.gaze_pts_debug1.append( self.eye1_to_World(gaze_point) )
                    self.sphere1['center'] = self.eye1_to_World(p['sphere']['center']) #eye camera coordinates
                    self.sphere1['radius'] = p['sphere']['radius']

        events['gaze_positions'] = gaze_pts
Developer: AlienorV, Project: pupil, Lines: 47, Source: gaze_mappers.py


Example 14: update

    def update(self,frame,events):
        self.img_shape = frame.height,frame.width,3

        if self.running:
            gray = frame.gray

            if self.robust_detection:
                self.markers = detect_markers_robust(gray,
                                                    grid_size = 5,
                                                    prev_markers=self.markers,
                                                    min_marker_perimeter=self.min_marker_perimeter,
                                                    aperture=self.aperture,
                                                    visualize=0,
                                                    true_detect_every_frame=3,
                                                    invert_image=self.invert_image)
            else:
                self.markers = detect_markers(gray,
                                                grid_size = 5,
                                                min_marker_perimeter=self.min_marker_perimeter,
                                                aperture=self.aperture,
                                                visualize=0,
                                                invert_image=self.invert_image)


            if self.mode == "Show marker IDs":
                draw_markers(frame.gray,self.markers)

        events['surface'] = []

        # locate surfaces
        for s in self.surfaces:
            s.locate(self.markers,self.camera_calibration,self.min_marker_perimeter, self.locate_3d)
            if s.detected:
                events['surface'].append({'name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen.tolist(),'m_from_screen':s.m_from_screen.tolist(), 'timestamp':frame.timestamp,'camera_pose_3d':s.camera_pose_3d.tolist()})

        if self.running:
            self.button.status_text = '%s/%s'%(len([s for s in self.surfaces if s.detected]),len(self.surfaces))
        else:
            self.button.status_text = 'tracking paused'

        if self.mode == 'Show Markers and Surfaces':
            # edit surfaces by user
            if self.edit_surf_verts:
                window = glfwGetCurrentContext()
                pos = glfwGetCursorPos(window)
                pos = normalize(pos,glfwGetWindowSize(window),flip_y=True)
                for s,v_idx in self.edit_surf_verts:
                    if s.detected:
                        new_pos = s.img_to_ref_surface(np.array(pos))
                        s.move_vertex(v_idx,new_pos)

        #map recent gaze onto detected surfaces used for pupil server
        for s in self.surfaces:
            if s.detected:
                s.gaze_on_srf = []
                for p in events.get('gaze_positions',[]):
                    gp_on_s = tuple(s.img_to_ref_surface(np.array(p['norm_pos'])))
                    p['realtime gaze on ' + s.name] = gp_on_s
                    s.gaze_on_srf.append(gp_on_s)
Developer: Ventrella, Project: pupil, Lines: 59, Source: surface_tracker.py


Example 15: update

    def update(self,frame,events):
        if self.active:
            recent_pupil_positions = events['pupil_positions']
            gray_img = frame.gray

            if self.clicks_to_close <=0:
                self.stop()
                return

            #detect the marker
            self.candidate_ellipses = get_candidate_ellipses(gray_img,
                                                            area_threshold=self.area_threshold,
                                                            dist_threshold=self.dist_threshold,
                                                            min_ring_count=5,
                                                            visual_debug=False)

            if len(self.candidate_ellipses) > 0:
                self.detected= True
                marker_pos = self.candidate_ellipses[0][0]
                self.pos = normalize(marker_pos,(frame.width,frame.height),flip_y=True)

            else:
                self.detected = False
                self.pos = None #indicate that no reference is detected


            # only save a valid ref position if within the sample window of the calibration routine
            on_position = self.lead_in < self.screen_marker_state < (self.lead_in+self.sample_duration)

            if on_position and self.detected:
                ref = {}
                ref["norm_pos"] = self.pos
                ref["screen_pos"] = marker_pos
                ref["timestamp"] = frame.timestamp
                self.ref_list.append(ref)

            #always save pupil positions
            for p_pt in recent_pupil_positions:
                if p_pt['confidence'] > self.g_pool.pupil_confidence_threshold:
                    self.pupil_list.append(p_pt)

            # Animate the screen marker
            if self.screen_marker_state < self.sample_duration+self.lead_in+self.lead_out:
                if self.detected or not on_position:
                    self.screen_marker_state += 1
            else:
                self.screen_marker_state = 0
                if not self.sites:
                    self.stop()
                    return
                self.active_site = self.sites.pop(0)
                logger.debug("Moving screen marker to site at %s %s"%tuple(self.active_site))



            #use np.arrays for per element wise math
            self.display_pos = np.array(self.active_site)
            self.on_position = on_position
            self.button.status_text = '%s / %s'%(self.active_site,9)
Developer: AlienorV, Project: pupil, Lines: 59, Source: screen_marker_calibration.py


Example 16: update

    def update(self, frame, events):
        img = frame.img
        img_shape = img.shape[:-1][::-1]  # width,height

        succeeding_frame = frame.index - self.prev_frame_idx == 1
        same_frame = frame.index == self.prev_frame_idx
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # vars for calcOpticalFlowPyrLK
        lk_params = dict(
            winSize=(90, 90), maxLevel=3, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.03)
        )

        updated_past_gaze = []

        # lets update past gaze using optical flow: this is like sticking the gaze points onto the pixels of the img.
        if self.past_gaze_positions and succeeding_frame:
            past_screen_gaze = np.array(
                [denormalize(ng["norm_pos"], img_shape, flip_y=True) for ng in self.past_gaze_positions],
                dtype=np.float32,
            )
            new_pts, status, err = cv2.calcOpticalFlowPyrLK(
                self.prev_gray, gray_img, past_screen_gaze, minEigThreshold=0.005, **lk_params
            )
            for gaze, new_gaze_pt, s, e in zip(self.past_gaze_positions, new_pts, status, err):
                if s:
                    # print "norm,updated",gaze['norm_gaze'], normalize(new_gaze_pt,img_shape[:-1],flip_y=True)
                    gaze["norm_pos"] = normalize(new_gaze_pt, img_shape, flip_y=True)
                    updated_past_gaze.append(gaze)
                    # logger.debug("updated gaze")

                else:
                    # logger.debug("dropping gaze")
                    # Since we will replace self.past_gaze_positions later,
                    # not appending to updated_past_gaze is like deleting this data point.
                    pass
        else:
            # we must be seeking, do not try to do optical flow, or pausing: see below.
            pass

        if same_frame:
            # paused
            # re-use last result
            events["gaze_positions"][:] = self.past_gaze_positions[:]
        else:
            # trim gaze that is too old
            if events["gaze_positions"]:
                now = events["gaze_positions"][0]["timestamp"]
                cutoff = now - self.timeframe
                updated_past_gaze = [g for g in updated_past_gaze if g["timestamp"] > cutoff]

            # inject the scan path gaze points into recent_gaze_positions
            events["gaze_positions"][:] = updated_past_gaze + events["gaze_positions"]
            events["gaze_positions"].sort(key=lambda x: x["timestamp"])  # this may be redundant...

        # update info for next frame.
        self.prev_gray = gray_img
        self.prev_frame_idx = frame.index
        self.past_gaze_positions = events["gaze_positions"]
Developer: masaii1224, Project: pupil, Lines: 59, Source: scan_path.py


Example 17: update

    def update(self,frame,recent_pupil_positions,events):
        self.img = frame.img
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        self.markers = self.cache[frame.index]
        if self.markers == False:
            self.markers = []
            self.seek_marker_cacher(frame.index) # tell the precacher that it had better have everything from here on analyzed

        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers)
            if s.detected:
                events.append({'type':'marker_ref_surface','name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp,'gaze_on_srf':s.gaze_on_srf})

        if self.mode.value == 4:
            draw_markers(frame.img,self.markers)

        # edit surfaces by user
        if self.mode.value == 1:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos,glfwGetWindowSize(window))
            pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels

            for s,v_idx in self.edit_surfaces:
                if s.detected:
                    pos = normalize(pos,(self.img_shape[1],self.img_shape[0]),flip_y=True)
                    new_pos =  s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx,new_pos)
                    s.cache = None
                    self.heatmap = None
        else:
            # update surfaces with no or invalid cache:
            for s in self.surfaces:
                if s.cache == None:
                    s.init_cache(self.cache)


        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()
Developer: Bored-Bohr, Project: pupil, Lines: 46, Source: offline_marker_detector.py


Example 18: gaze_mapper

 def gaze_mapper(self, pupil_positions):
     gaze_pts = []
     for p in pupil_positions:
         if p['confidence'] > self.g_pool.pupil_confidence_threshold:
             # p['norm_pos'] is already a normalized position (see normalize above);
             # map_fn maps it into gaze space.
             gaze_point = self.map_fn(p['norm_pos'])
             gaze_pts.append({'norm_pos':gaze_point,'confidence':p['confidence'],'timestamp':p['timestamp'],'base':[p]})
Developer: mothman1, Project: pupil, Lines: 8, Source: split_into_frames.py


Example 19: on_button

 def on_button(window, button, action, mods):
     g_pool.gui.update_button(button, action, mods)
     pos = glfw.glfwGetCursorPos(window)
     pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
     # Position in img pixels
     pos = denormalize(pos, g_pool.capture.frame_size)
     for p in g_pool.plugins:
         p.on_click(pos, button, action)
Developer: samtuhka, Project: pupil, Lines: 8, Source: world.py


Example 20: on_button

 def on_button(button, pressed):
     if not atb.TwEventMouseButtonGLFW(button,pressed):
         if pressed:
             pos = glfwGetMousePos()
             pos = normalize(pos,glfwGetWindowSize())
             pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
             for p in g.plugins:
                 p.on_click(pos)
Developer: Flavsditz, Project: projects, Lines: 8, Source: world.py



Note: the methods.normalize examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Consult the corresponding project's license before redistributing or using the code. Do not reproduce without permission.

