• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

Python video_capture.autoCreateCapture函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中video_capture.autoCreateCapture函数的典型用法代码示例。如果您正苦于以下问题:Python autoCreateCapture函数的具体用法?Python autoCreateCapture怎么用?Python autoCreateCapture使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了autoCreateCapture函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: __init__

    def __init__(self,g_pool,alpha=0.6,mirror=True):
        """Overlay the recorded eye video on top of the player window.

        Parameters:
            g_pool -- shared global container for this process
            alpha  -- opacity of the overlaid eye image (0..1)
            mirror -- horizontally mirror the eye image when drawing
        """
        super(Eye_Video_Overlay, self).__init__(g_pool)
        self.order = .6
        self.menu = None

        # user controls
        self.alpha = alpha
        self.mirror = mirror

        # Locate eye video and timestamps. Recordings made before v0.4 used a
        # single 'eye.avi'/'eye_timestamps.npy'; newer recordings store one
        # mkv/npy pair per eye. Only eye0 is loaded here (monocular for now),
        # so the unused eye1 path computations were removed.
        if g_pool.rec_version < VersionFormat('0.4'):
            eye0_video_path = os.path.join(g_pool.rec_dir,'eye.avi')
            eye0_timestamps_path = os.path.join(g_pool.rec_dir,'eye_timestamps.npy')
        else:
            eye0_video_path = os.path.join(g_pool.rec_dir,'eye0.mkv')
            eye0_timestamps_path = os.path.join(g_pool.rec_dir,'eye0_timestamps.npy')

        # Initialize capture -- for now we just try with monocular
        try:
            self.cap = autoCreateCapture(eye0_video_path,timestamps=eye0_timestamps_path)
        except FileCaptureError:
            # no usable eye video: deactivate this plugin instance
            logger.error("Could not load eye video.")
            self.alive = False
            return

        self._frame = self.cap.get_frame()
        self.width, self.height = self.cap.frame_size

        # map eye frames onto matching world frames via their timestamps
        eye0_timestamps = list(np.load(eye0_timestamps_path))
        self.eye0_world_frame_map = correlate_eye_world(eye0_timestamps,g_pool.timestamps)
开发者ID:anjith2006,项目名称:pupil,代码行数:34,代码来源:eye_video_overlay.py


示例2: __init__

    def __init__(self,g_pool,alpha=0.6,eye_scale_factor=.5,move_around=0,mirror=None, flip=None,pos=None):
        """Overlay one or both recorded eye videos on top of the world video.

        Parameters:
            g_pool           -- shared global container for this process
            alpha            -- opacity of the overlaid eye images (0..1)
            eye_scale_factor -- scale applied to the eye video when drawn
            move_around      -- whether the user may drag the overlays around
            mirror           -- per-eye horizontal flip, dict keyed '0'/'1'
            flip             -- per-eye vertical flip, dict keyed '0'/'1'
            pos              -- list of two (x,y) screen positions, one per eye
        """
        super(Eye_Video_Overlay, self).__init__(g_pool)
        self.order = .6
        self.menu = None

        # Create mutable defaults per instance: module-level mutable default
        # arguments are shared across all instances and these dicts are
        # mutated later (e.g. GUI toggles writing self.mirror/self.flip).
        if mirror is None:
            mirror = {'0':False,'1':False}
        if flip is None:
            flip = {'0':False,'1':False}
        if pos is None:
            pos = [(640,10),(10,10)]

        # user controls
        self.alpha = alpha #opacity level of eyes
        self.eye_scale_factor = eye_scale_factor #scale
        self.showeyes = 0,1 #modes: a tuple containing both indices means both eyes are shown; (0,) if only one eye was recorded
        self.move_around = move_around #boolean whether allow to move clip around screen or not
        self.video_size = [0,0] #video_size of recording (bc scaling)

        #variables specific to each eye
        self.eye_frames = []
        self.eye_world_frame_map = []
        self.eye_cap = []
        self.mirror = mirror #do we horiz flip first eye
        self.flip = flip #do we vert flip first eye
        self.pos = [list(pos[0]),list(pos[1])] #positions of 2 eyes
        self.drag_offset = [None,None]

        # load eye videos and eye timestamps
        # pre-v0.4 recordings were monocular: (path, None); newer ones store
        # one mkv/npy pair per eye.
        if g_pool.rec_version < VersionFormat('0.4'):
            eye_video_path = os.path.join(g_pool.rec_dir,'eye.avi'),None
            eye_timestamps_path = os.path.join(g_pool.rec_dir,'eye_timestamps.npy'),None
        else:
            eye_video_path = os.path.join(g_pool.rec_dir,'eye0.mkv'),os.path.join(g_pool.rec_dir,'eye1.mkv')
            eye_timestamps_path = os.path.join(g_pool.rec_dir,'eye0_timestamps.npy'),os.path.join(g_pool.rec_dir,'eye1_timestamps.npy')

        #try to load eye video and ts for each eye.
        for video,ts in zip(eye_video_path,eye_timestamps_path):
            try:
                self.eye_cap.append(autoCreateCapture(video,timestamps=ts))
            except FileCaptureError:
                pass
            else:
                self.eye_frames.append(self.eye_cap[-1].get_frame())
            try:
                eye_timestamps = list(np.load(ts))
            except Exception:
                # was a bare `except:` -- narrowed so SystemExit /
                # KeyboardInterrupt are no longer swallowed; a missing or
                # unreadable timestamp file is still treated as best-effort.
                pass
            else:
                self.eye_world_frame_map.append(correlate_eye_world(eye_timestamps,g_pool.timestamps))

        if len(self.eye_cap) == 2:
            logger.debug("Loaded binocular eye video data.")
        elif len(self.eye_cap) == 1:
            logger.debug("Loaded monocular eye video data")
            self.showeyes = (0,)
        else:
            # neither eye could be opened: deactivate this plugin instance
            logger.error("Could not load eye video.")
            self.alive = False
            return
开发者ID:Reidzhang,项目名称:pupil,代码行数:53,代码来源:eye_video_overlay.py


示例3: test

def test():
    """Smoke test: grab 300 frames from camera 0 and mux them into an mp4."""

    import os
    import cv2
    from video_capture import autoCreateCapture
    logging.basicConfig(level=logging.DEBUG)

    out_path = os.path.expanduser("~/Desktop/av_writer_out.mp4")
    writer = AV_Writer(out_path)
    cap = autoCreateCapture(0,(1280,720))
    # pull one frame up-front so the capture is warmed up before the loop
    frame = cap.get_frame()

    for _ in xrange(300):
        frame = cap.get_frame()
        writer.write_video_frame(frame)

    cap.close()
    writer.close()
开发者ID:Mazzafish,项目名称:pupil,代码行数:22,代码来源:av_writer.py


示例4: eye

def eye(timebase, is_alive_flag, ipc_pub_url, ipc_sub_url,ipc_push_url, user_dir, version, eye_id, cap_src):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """


    # We defer the imports because of multiprocessing.
    # Otherwise each spawned process would also load the other processes' imports.
    import zmq
    import zmq_tools
    zmq_ctx = zmq.Context()
    # push socket for notifications/log records, pub socket for pupil data,
    # sub socket restricted to the 'notify' topic
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx,ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx,ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,ipc_sub_url,topics=("notify",))

    # Is_Alive_Manager keeps is_alive_flag in sync with this process' lifetime
    with Is_Alive_Manager(is_alive_flag,ipc_socket,eye_id):

        # logging setup: forward all records over zmq to the main process
        import logging
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        logger = logging.getLogger()
        logger.handlers = []
        logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx,ipc_push_url))
        # create logger for the context of this function
        logger = logging.getLogger(__name__)

        #general imports
        import numpy as np
        import cv2

        #display
        import glfw
        from pyglui import ui,graph,cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline, Named_Texture, Sphere
        import OpenGL.GL as gl
        from gl_utils import basic_gl_setup,adjust_gl_view, clear_gl_screen ,make_coord_system_pixel_based,make_coord_system_norm_based, make_coord_system_eye_camera_based
        from ui_roi import UIRoi
        #monitoring
        import psutil
        import math


        # helpers/utils
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, Roi, timer
        from video_capture import autoCreateCapture, FileCaptureError, EndofVideoFileError, CameraCaptureError
        from av_writer import JPEG_Writer,AV_Writer

        # Pupil detectors, keyed by class name so a name from saved session
        # settings can be resolved back to a detector class
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {Detector_2D.__name__:Detector_2D,Detector_3D.__name__:Detector_3D}



        # UI platform tweaks: scroll speed and default window placement per OS;
        # windows are stacked vertically per eye_id
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600,300*eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 1.0
            window_position_default = (600,31+300*eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600,300*eye_id)


        # g_pool holds variables shared within this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.timebase = timebase


        # Callback functions
        def on_resize(window,w, h):
            if not g_pool.iconified:
                active_window = glfw.glfwGetCurrentContext()
#......... remainder of this function omitted in this excerpt .........
开发者ID:filmonelias,项目名称:pupil,代码行数:101,代码来源:eye.py


示例5: world

def world(timebase,eyes_are_alive,ipc_pub_url,ipc_sub_url,ipc_push_url,user_dir,version,cap_src):
    """Reads world video and runs plugins.

    Creates a window, gl context.
    Grabs images from a capture.
    Maps pupil to gaze data
    Can run various plug-ins.

    Reacts to notifications:
        ``set_detection_mapping_mode``
        ``eye_process.started``
        ``start_plugin``

    Emits notifications:
        ``eye_process.should_start``
        ``eye_process.should_stop``
        ``set_detection_mapping_mode``
        ``world_process.started``
        ``world_process.stopped``
        ``recording.should_stop``: Emits on camera failure
        ``launcher_process.should_stop``

    Emits data:
        ``gaze``: Gaze data from current gaze mapping plugin.``
        ``*``: any other plugin generated data in the events that it not [dt,pupil,gaze].
    """

    # We defer the imports because of multiprocessing.
    # Otherwise each spawned process would also load the other processes' imports.
    # This is not harmful but unnecessary.

    #general imports
    from time import time,sleep
    import numpy as np
    import logging
    import zmq
    import zmq_tools
    #zmq ipc setup: push socket for notifications/logs, pub socket for gaze,
    #sub sockets restricted to the 'pupil' and 'notify' topics
    zmq_ctx = zmq.Context()
    ipc_pub = zmq_tools.Msg_Dispatcher(zmq_ctx,ipc_push_url)
    gaze_pub = zmq_tools.Msg_Streamer(zmq_ctx,ipc_pub_url)
    pupil_sub = zmq_tools.Msg_Receiver(zmq_ctx,ipc_sub_url,topics=('pupil',))
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,ipc_sub_url,topics=('notify',))

    #log setup: forward all records over zmq to the launcher process
    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx,ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    #display
    import glfw
    from pyglui import ui,graph,cygl
    from pyglui.cygl.utils import Named_Texture
    from gl_utils import basic_gl_setup,adjust_gl_view, clear_gl_screen,make_coord_system_pixel_based,make_coord_system_norm_based,glFlush

    #check versions for our own dependencies as they are fast-changing
    from pyglui import __version__ as pyglui_version
    # NOTE(review): this is a lexicographic string comparison, not a semantic
    # version compare -- e.g. '0.10' < '0.8' would wrongly fail. Confirm intended.
    assert pyglui_version >= '0.8'

    #monitoring
    import psutil

    # helpers/utils
    from file_methods import Persistent_Dict
    from methods import normalize, denormalize, delta_t, get_system_info
    from video_capture import autoCreateCapture, FileCaptureError, EndofVideoFileError, CameraCaptureError
    from version_utils import VersionFormat
    import audio
    from uvc import get_time_monotonic


    #trigger pupil detector cpp build (import compiles the extension if needed):
    import pupil_detectors
    del pupil_detectors

    # Plug-ins
    from plugin import Plugin,Plugin_List,import_runtime_plugins
    from calibration_routines import calibration_plugins, gaze_mapping_plugins
    from recorder import Recorder
    from show_calibration import Show_Calibration
    from display_recent_gaze import Display_Recent_Gaze
    from pupil_sync import Pupil_Sync
    from pupil_remote import Pupil_Remote
    from surface_tracker import Surface_Tracker
    from log_display import Log_Display
    from annotations import Annotation_Capture
    from log_history import Log_History

    logger.info('Application Version: %s'%version)
    logger.info('System Info: %s'%get_system_info())

    #UI Platform tweaks
    if platform.system() == 'Linux':
        scroll_factor = 10.0
        window_position_default = (0,0)
    elif platform.system() == 'Windows':
        scroll_factor = 1.0
#......... remainder of this function omitted in this excerpt .........
开发者ID:Ventrella,项目名称:pupil,代码行数:101,代码来源:world.py


示例6: world

def world(g_pool,cap_src,cap_size):
    """world
    Creates a window, gl context.
    Grabs images from a capture.
    Receives Pupil coordinates from eye process[es]
    Can run various plug-ins.
    """

    # Callback functions (registered with glfw further below).
    # NOTE(review): some closures reference names defined later in this
    # function (e.g. `main_window`, `frame`, `scroll_factor`) -- they are only
    # valid once those names exist. Confirm registration order downstream.
    def on_resize(window,w, h):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(window)
        # scale logical window size to framebuffer pixels on high-dpi displays
        hdpi_factor = glfwGetFramebufferSize(window)[0]/glfwGetWindowSize(window)[0]
        w,h = w*hdpi_factor, h*hdpi_factor
        g_pool.gui.update_window(w,h)
        g_pool.gui.collect_menus()
        graph.adjust_size(w,h)
        adjust_gl_view(w,h)
        for p in g_pool.plugins:
            p.on_window_resize(window,w,h)

        glfwMakeContextCurrent(active_window)


    def on_iconify(window,iconfied):
        pass

    def on_key(window, key, scancode, action, mods):
        g_pool.gui.update_key(key,scancode,action,mods)

    def on_char(window,char):
        g_pool.gui.update_char(char)


    def on_button(window,button, action, mods):
        g_pool.gui.update_button(button,action,mods)
        pos = glfwGetCursorPos(window)
        pos = normalize(pos,glfwGetWindowSize(main_window))
        pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
        for p in g_pool.plugins:
            p.on_click(pos,button,action)

    def on_pos(window,x, y):
        # convert cursor position into framebuffer pixel coordinates
        hdpi_factor = float(glfwGetFramebufferSize(window)[0]/glfwGetWindowSize(window)[0])
        x,y = x*hdpi_factor,y*hdpi_factor
        g_pool.gui.update_mouse(x,y)

    def on_scroll(window,x,y):
        g_pool.gui.update_scroll(x,y*scroll_factor)


    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')



    # load session persistent settings
    session_settings = Persistent_Dict(os.path.join(g_pool.user_dir,'user_settings_world'))

    # Initialize capture (24 fps requested)
    cap = autoCreateCapture(cap_src, cap_size, 24, timebase=g_pool.timebase)

    # Test capture: fail fast if the camera yields no frame
    try:
        frame = cap.get_frame()
    except CameraCaptureError:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return


    # any object we attach to the g_pool object *from now on* will only be visible to this process!
    # vars should be declared here to make them visible to the code reader.
    g_pool.update_textures = session_settings.get("update_textures",2)


    g_pool.capture = cap
    g_pool.pupil_confidence_threshold = session_settings.get('pupil_confidence_threshold',.6)
    g_pool.active_calibration_plugin = None


    #UI callback functions
    def reset_timebase():
        #the last frame from worldcam will be t0
        g_pool.timebase.value = g_pool.capture.get_now()
        logger.info("New timebase set to %s all timestamps will count from here now."%g_pool.timebase.value)

    def set_calibration_plugin(new_calibration):
        g_pool.active_calibration_plugin = new_calibration
        new_plugin = new_calibration(g_pool)
        g_pool.plugins.add(new_plugin)

    def open_plugin(plugin):
        # "Select to load" is the dropdown placeholder entry -- ignore it
        if plugin ==  "Select to load":
            return
        logger.debug('Open Plugin: %s'%plugin)
        new_plugin = plugin(g_pool)
        g_pool.plugins.add(new_plugin)

#......... remainder of this function omitted in this excerpt .........
开发者ID:Tyex,项目名称:pupil,代码行数:101,代码来源:world.py


示例7: get_pupil_list_from_csv

    def get_pupil_list_from_csv(self, data_path):

        ''' Detect the pupil after cropping the image based on the crowd located pupil center '''
        # NOTE(review): the data_path argument is immediately overwritten with
        # g_pool.user_dir -- the parameter is effectively unused. Confirm intended.
        data_path = self.g_pool.user_dir
        settings_path = os.path.join(self.g_pool.user_settings_path,'user_settings_eye0')
        # Pupil detectors, keyed by class name for lookup from saved settings
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {Detector_2D.__name__:Detector_2D,Detector_3D.__name__:Detector_3D}
        # get latest settings
        session_settings = Persistent_Dict(settings_path)
        pupil_detector_settings = session_settings.get('pupil_detector_settings',None)
        last_pupil_detector = pupil_detectors[session_settings.get('last_pupil_detector',Detector_2D.__name__)]
        pupil_detector = last_pupil_detector(self.g_pool,pupil_detector_settings)

        # Detect pupil: pick the first eye0 video file with a known extension
        video_path = [f for f in glob(os.path.join(data_path,"eye0.*")) if f[-3:] in ('mp4','mkv','avi')][0]
        # video_capture = cv2.VideoCapture(e_video_path)
        # pos_frame = video_capture.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)

        from player_settings.plugins.offline_crowd_process.ui_roi import UIRoi
        self.g_pool.display_mode = session_settings.get('display_mode','camera_image')
        self.g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
                                'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
                                'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."}

        # Get 75px before and after the crowd located pupil center -> the cropped image is 150X150 pixels
        # crop_width = 300
        # crop_height = 225
        self.g_pool.u_r = UIRoi((480,640,3))
        # self.g_pool.u_r.set(session_settings.get('roi',self.g_pool.u_r.get()))
        ''' End '''

        # Read the crowd-sourced pupil positions from csv:
        # row[0] = timestamp, row[1] = normalized (x,y) position as a tuple string
        pupil_list = []
        with open(os.path.join(data_path, 'crowdpos/eye.csv'), 'rU') as csvfile:
            all = csv.reader(csvfile, delimiter=',')
            for row in all:
                norm_center = make_tuple(row[1])
                # scale normalized coords to the 640x480 eye image
                center = (norm_center[0] * 640, norm_center[1] * 480)
                prow = {'timestamp':float(row[0]), 'confidence':1, 'center':(center[0],center[1]), 'norm_pos': (norm_center[0],norm_center[1]), 'id':0, 'method': '2d c++'}
                pupil_list.append(prow)

        timebase = Value(c_double,0)
        capture_eye = autoCreateCapture(os.path.join(data_path, 'eye0.mp4'), timebase=timebase)
        default_settings = {'frame_size':(640,480),'frame_rate':30}
        capture_eye.settings = default_settings
        # import cv2
        # video_capture = cv2.VideoCapture('/Developments/NCLUni/pupil_crowd4Jul16/recordings/2016_07_06/003/world.mp4')
        # pos_frame = video_capture.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
        save_crowd_detected = os.path.join(data_path, 'crowd_detected_pupil')
        if not os.path.exists(save_crowd_detected):
            os.makedirs(save_crowd_detected)
        idx = 0
        count_detected = 0
        pupil_list_ret = []
        # walk the video frame by frame, matching each frame to its csv row
        while len(pupil_list) > idx:
            try:
                # get frame by frame
                frame = capture_eye.get_frame()
                # pos_frame = video_capture.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
                r_idx, related_csv_line = self.search(frame.timestamp, pupil_list)
                if related_csv_line:
                    norm_pos = related_csv_line["norm_pos"]
                    # flip y: image coordinates have origin top-left
                    center = (norm_pos[0] * 640, (1 - norm_pos[1]) * 480)
                    crowd_center = (center[0], center[1])

                    ''' auto detect the center and ellipse after cropping the image based on the crowd located pupil '''
                    # 150x150 ROI centered on the crowd-located pupil
                    lx,ly,ux,uy = center[0] - 75,center[1]-75,center[0]+75,center[1]+75
                    ellipse = None
                    # only run the detector when the ROI lies fully inside the image
                    if lx >= 1 and ly >= 1 and ux < 640 and uy < 480:
                        self.g_pool.u_r.set((lx,ly,ux,uy, (480,640,3)))
                        # orig_img = frame.img
                        # orig_gray = frame.gray
                        # print frame.img.flags
                        # print frame.img.shape
                        # print '---'
                        # print frame.gray.flags
                        # print frame.gray.shape
                        # print '---'
                        # cropped_img_nparr = frame.img[ly:uy,lx:ux].copy(order='C')
                        # frame._img = cropped_img_nparr
                        # cropped_img = Image.fromarray(cropped_img_nparr)
                        # frame._gray = frame.gray[ly:uy,lx:ux].copy(order='C')
                        # print frame.img.flags
                        # print frame.img.shape
                        # print '---'
                        # print frame.gray.flags
                        # print frame.gray.shape
                        # frame.width = crop_width
                        # frame.height = crop_height
                        # print frame.width, 'x', frame.height
                        result = pupil_detector.detect(frame, self.g_pool.u_r, self.g_pool.display_mode == 'algorithm')
                        result['id'] = 0
                        # cropped_center = (result["norm_pos"][0]*crop_width,result["norm_pos"][0]*crop_height)
                        # crowd_auto_center = (result["norm_pos"][0]*640,result["norm_pos"][0]*480)
                        # self.save_image_from_array(frame.img, os.path.join(save_crowd_detected, '%s_cropped.jpg'%repr(frame.timestamp)), center=cropped_center)
                        # self.save_image_from_array(orig_img, os.path.join(save_crowd_detected, '%s_crowd_auto.jpg'%repr(frame.timestamp)), center=crowd_auto_center)
                        if result['ellipse'] is not None and result['confidence'] > 0:
                            # prefer the automatically detected ellipse over the crowd estimate
                            ellipse =result['ellipse']
                            center = (ellipse['center'][0],ellipse['center'][1])
                            norm_pos = (result['norm_pos'][0], result['norm_pos'][1])
#......... remainder of this function omitted in this excerpt .........
开发者ID:mothman1,项目名称:pupil,代码行数:101,代码来源:offline_eyetracking.py


示例8: world

def world(g_pool,cap_src,cap_size):
    """world
    Creates a window, gl context.
    Grabs images from a capture.
    Receives Pupil coordinates from eye process[es]
    Can run various plug-ins.
    """

    #manage plugins: combine built-in plugin groups with user runtime plugins
    runtime_plugins = import_runtime_plugins(os.path.join(g_pool.user_dir,'plugins'))
    user_launchable_plugins = [Show_Calibration,Pupil_Server,Pupil_Sync,Marker_Detector]+runtime_plugins
    system_plugins  = [Log_Display,Display_Recent_Gaze,Recorder]
    plugin_by_index =  system_plugins+user_launchable_plugins+calibration_plugins+gaze_mapping_plugins
    name_by_index = [p.__name__ for p in plugin_by_index]
    plugin_by_name = dict(zip(name_by_index,plugin_by_index))
    default_plugins = [('Log_Display',{}),('Dummy_Gaze_Mapper',{}),('Display_Recent_Gaze',{}), ('Screen_Marker_Calibration',{}),('Recorder',{})]



    # Callback functions (registered with glfw further below).
    # NOTE(review): `main_window`, `frame` and `scroll_factor` are defined
    # later in this function; the closures are only valid after that point.
    def on_resize(window,w, h):
        if not g_pool.iconified:
            g_pool.gui.update_window(w,h)
            g_pool.gui.collect_menus()
            graph.adjust_size(w,h)
            adjust_gl_view(w,h)
            for p in g_pool.plugins:
                p.on_window_resize(window,w,h)

    def on_iconify(window,iconified):
        g_pool.iconified = iconified

    def on_key(window, key, scancode, action, mods):
        g_pool.gui.update_key(key,scancode,action,mods)

    def on_char(window,char):
        g_pool.gui.update_char(char)

    def on_button(window,button, action, mods):
        g_pool.gui.update_button(button,action,mods)
        pos = glfwGetCursorPos(window)
        pos = normalize(pos,glfwGetWindowSize(main_window))
        pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
        for p in g_pool.plugins:
            p.on_click(pos,button,action)

    def on_pos(window,x, y):
        # convert cursor position into framebuffer pixels on high-dpi displays
        hdpi_factor = float(glfwGetFramebufferSize(window)[0]/glfwGetWindowSize(window)[0])
        x,y = x*hdpi_factor,y*hdpi_factor
        g_pool.gui.update_mouse(x,y)

    def on_scroll(window,x,y):
        g_pool.gui.update_scroll(x,y*scroll_factor)


    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')


    # frame-delta generator used by the main loop timing
    tick = delta_t()
    def get_dt():
        return next(tick)

    # load session persistent settings; discard them if written by an older app version
    session_settings = Persistent_Dict(os.path.join(g_pool.user_dir,'user_settings_world'))
    if session_settings.get("version",VersionFormat('0.0')) < g_pool.version:
        logger.info("Session setting are from older version of this app. I will not use those.")
        session_settings.clear()

    # Initialize capture; reuse previous capture settings only if they belong
    # to the same capture device
    cap = autoCreateCapture(cap_src, timebase=g_pool.timebase)
    default_settings = {'frame_size':cap_size,'frame_rate':24}
    previous_settings = session_settings.get('capture_settings',None)
    if previous_settings and previous_settings['name'] == cap.name:
        cap.settings = previous_settings
    else:
        cap.settings = default_settings

    # Test capture: fail fast if the camera yields no frame
    try:
        frame = cap.get_frame()
    except CameraCaptureError:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return


    # any object we attach to the g_pool object *from now on* will only be visible to this process!
    # vars should be declared here to make them visible to the code reader.
    g_pool.update_textures = session_settings.get("update_textures",2)
    g_pool.iconified = False

    g_pool.capture = cap
    g_pool.pupil_confidence_threshold = session_settings.get('pupil_confidence_threshold',.6)
    g_pool.active_calibration_plugin = None


    def open_plugin(plugin):
        # "Select to load" is the dropdown placeholder entry -- ignore it
        if plugin ==  "Select to load":
#......... remainder of this function omitted in this excerpt .........
开发者ID:zuxfoucault,项目名称:pupil,代码行数:101,代码来源:world.py


示例9: world

def world(pupil_queue, timebase, lauchner_pipe, eye_pipes, eyes_are_alive, user_dir, version, cap_src):
    """world
    Creates a window, gl context.
    Grabs images from a capture.
    Receives Pupil coordinates from eye process[es]
    Can run various plug-ins.
    """

    import logging

    # Set up root logger for this process before doing imports of logged modules.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(user_dir, "world.log"), mode="w")
    fh.setLevel(logger.level)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logger.level + 10)
    # create formatter and add it to the handlers
    formatter = logging.Formatter("World Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s")
    fh.setFormatter(formatter)
    formatter = logging.Formatter("WORLD Process [%(levelname)s] %(name)s : %(message)s")
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    # silence noisy modules
    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logging.getLogger("libav").setLevel(logging.ERROR)
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    # We defer the imports because of multiprocessing.
    # Otherwise each spawned process would also load the other processes' imports.
    # This is not harmful but unnecessary.

    # general imports
    from time import time
    import numpy as np

    # display
    import glfw
    from pyglui import ui, graph, cygl
    from pyglui.cygl.utils import Named_Texture
    from gl_utils import (
        basic_gl_setup,
        adjust_gl_view,
        clear_gl_screen,
        make_coord_system_pixel_based,
        make_coord_system_norm_based,
    )

    # check versions for our own dependencies as they are fast-changing
    from pyglui import __version__ as pyglui_version

    # NOTE(review): lexicographic string comparison, not a semantic version
    # compare -- e.g. '0.10' < '0.7' would wrongly fail. Confirm intended.
    assert pyglui_version >= "0.7"

    # monitoring
    import psutil

    # helpers/utils
    from file_methods import Persistent_Dict
    from methods import normalize, denormalize, delta_t
    from video_capture import autoCreateCapture, FileCaptureError, EndofVideoFileError, CameraCaptureError
    from version_utils import VersionFormat

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from calibration_routines import calibration_plugins, gaze_mapping_plugins
    from recorder import Recorder
    from show_calibration import Show_Calibration
    from display_recent_gaze import Display_Recent_Gaze
    from pupil_server import Pupil_Server
    from pupil_sync import Pupil_Sync
    from marker_detector import Marker_Detector
    from log_display import Log_Display
    from annotations import Annotation_Capture

    # create logger for the context of this function

    # UI Platform tweaks: scroll speed and default window placement per OS
    if platform.system() == "Linux":
        scroll_factor = 10.0
        window_position_default = (0, 0)
    elif platform.system() == "Windows":
        scroll_factor = 1.0
        window_position_default = (8, 31)
    else:
        scroll_factor = 1.0
        window_position_default = (0, 0)

    # g_pool holds variables for this process
    g_pool = Global_Container()

    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.version = version
    g_pool.app = "capture"
    g_pool.pupil_queue = pupil_queue
#......... remainder of this function omitted in this excerpt .........
开发者ID:peerumporn,项目名称:pupil,代码行数:101,代码来源:world.py


示例10: main


#.........这里部分代码省略.........
    patch_meta_info(rec_dir)

    #parse info.csv file
    meta_info_path = rec_dir + "info.csv"
    with open(meta_info_path) as info:
        meta_info = dict( ((line.strip().split('\t')) for line in info.readlines() ) )


    rec_version = read_rec_version(meta_info)
    if rec_version < VersionFormat('0.4'):
        video_path = rec_dir + "world.avi"
        timestamps_path = rec_dir + "timestamps.npy"
    else:
        video_path = rec_dir + "world.mkv"
        timestamps_path = rec_dir + "world_timestamps.npy"


    gaze_positions_path = rec_dir + "gaze_positions.npy"
    pupil_positions_path = rec_dir + "pupil_positions.npy"
    #load gaze information
    gaze_list = np.load(gaze_positions_path)
    timestamps = np.load(timestamps_path)

    #correlate data
    if rec_version < VersionFormat('0.4'):
        gaze_positions_by_frame = correlate_gaze_legacy(gaze_list,timestamps)
        pupil_positions_by_frame = [[]for x in range(len(timestamps))]
    else:
        pupil_list = np.load(pupil_positions_path)
        gaze_positions_by_frame = correlate_gaze(gaze_list,timestamps)
        pupil_positions_by_frame = correlate_pupil_data(pupil_list,timestamps)

    # Initialize capture
    cap = autoCreateCapture(video_path,timestamps=timestamps_path)

    if isinstance(cap,FakeCapture):
        logger.error("could not start capture.")
        return

    # load session persistent settings
    session_settings = Persistent_Dict(os.path.join(user_dir,"user_settings"))
    print session_settings.get("version",VersionFormat('0.0'))
    if session_settings.get("version",VersionFormat('0.0')) < get_version(version_file):
        logger.info("Session setting are from older version of this app. I will not use those.")
        session_settings.clear()


    width,height = session_settings.get('window_size',cap.frame_size)
    window_pos = session_settings.get('window_position',(0,0)) # not yet using this one.


    # Initialize glfw
    glfwInit()
    main_window = glfwCreateWindow(width, height, "Pupil Player: "+meta_info["Recording Name"]+" - "+ rec_dir.split(os.path.sep)[-1], None, None)
    glfwSetWindowPos(main_window,window_pos[0],window_pos[1])
    glfwMakeContextCurrent(main_window)

    cygl.utils.init()


    # Register callbacks main_window
    glfwSetWindowSizeCallback(main_window,on_resize)
    glfwSetWindowCloseCallback(main_window,on_close)
    glfwSetKeyCallback(main_window,on_key)
    glfwSetCharCallback(main_window,on_char)
    glfwSetMouseButtonCallback(main_window,on_button)
开发者ID:anjith2006,项目名称:pupil,代码行数:67,代码来源:main.py


示例11: split_recordings

    def split_recordings(self):
        data_path = '/Developments/NCLUni/pupil_crowd4Jul16/recordings/2016_07_06/003'#self.rec_path
        # Set user_dir to data_path so all related plugins save to the same folder as the recordings
        self.g_pool.user_dir = data_path
        # Manage plugins
        plugin_by_index =  calibration_plugins+gaze_mapping_plugins
        name_by_index = [p.__name__ for p in plugin_by_index]
        plugin_by_name = dict(zip(name_by_index,plugin_by_index))

        settings_path = os.path.join(data_path[:data_path.index('recordings')] + 'capture_settings')

        # Step 1: when possible detect all pupil positions
        pupil_list = self.get_pupil_list(data_path, os.path.join(settings_path,'user_settings_eye0'))

        if pupil_list:
            # create events variable that should sent to plugins
            events = {'pupil_positions':pupil_list,'gaze_positions':[]}
            # get world settings
            settings_path = os.path.join(settings_path,'user_settings_world')
            session_world_settings = Persistent_Dict(settings_path)
            default_plugins = [('Dummy_Gaze_Mapper',{})]
            manual_calibration_plugin = [('Manual_Marker_Calibration',{})]
            self.g_pool.plugins = Plugin_List(self.g_pool,plugin_by_name,session_world_settings.get('loaded_plugins',default_plugins)+manual_calibration_plugin)
            self.g_pool.pupil_confidence_threshold = session_world_settings.get('pupil_confidence_threshold',.6)
            self.g_pool.detection_mapping_mode = session_world_settings.get('detection_mapping_mode','2d')

            ''' Step 2: before calculating gaze positions we shall process calibration data
            For calibration we need pupil_list (in events variable) and ref_list - ref_list contains all frames of detected marker
            Using manual_marker_calibration plugin use plugin.update to pass pupil_list and world frames for marker detection
            However, pupil_list is by this point fully detected. Thus, we shall do the following:
            First iteration: send events with all pupil_list with first world frame to manual_marker_calibration plugin.update
            Following iterations: send empty [] pupil_list with next world frame to manual_marker_calibration plugin.update
            '''
            # start calibration - It will always be manual calibration
            cal_plugin = None
            for p in self.g_pool.plugins:
                if 'Manual_Marker_Calibration' in p.class_name:
                    cal_plugin = p
                    break
            cal_plugin.on_notify({'subject':'should_start_calibration'})
            self.active_cal = True

            # read world frames
            w_video_path = [f for f in glob(os.path.join(data_path,"world.*")) if f[-3:] in ('mp4','mkv','avi')][0]
            timestamps_path = os.path.join(data_path, "world_timestamps.npy")
            timestamps = np.load(timestamps_path)

            from ctypes import c_double
            from multiprocessing import Value
            timebase = Value(c_double,0)
            capture_world = autoCreateCapture(w_video_path, timebase=timebase)
            default_settings = {'frame_size':(1280,720),'frame_rate':30}
            capture_world.settings = session_world_settings.get('capture_settings',default_settings)
            # previous_settings = session_world_settings.get('capture_settings',None)
            # if previous_settings and previous_settings['name'] == cap.name:
            #     cap.settings = previous_settings
            # else:
            #     cap.settings = default_settings

            # Test capture
            frame = None
            try:
                frame = capture_world.get_frame()
            except CameraCaptureError:
                logger.error("Could not retrieve image from world.mp4")
                capture_world.close()
                return
            # send first world frame to calibration class via update WITH all pupil_list
            cal_plugin.update(frame,events)

            save_undetected = os.path.join(data_path,'undetected_cal')
            save_detected = os.path.join(data_path,'detected_cal')
            if not os.path.exists(save_detected):
                print "creating %s and save images to"%save_detected
                os.makedirs(save_detected)
            if not os.path.exists(save_undetected):
                print "creating %s and save images to"%save_undetected
                os.makedirs(save_undetected)
            # Send all world frames to calibration class via update WITHOUT pupil_list
            idx = 0
            while cal_plugin.active:
                try:
                    frame = capture_world.get_frame()
                    cal_plugin.update(frame, {'pupil_positions':[],'gaze_positions':[]})
                    detected, pos = self.detect_marker(frame)
                    if pos:
                        self.save_image(frame.img, os.path.join(save_detected,'%s.jpg'%repr(timestamps[idx])), center=pos)
                    else:
                        self.save_image(frame.img, os.path.join(save_undetected,'%s.jpg'%repr(timestamps[idx])))
                    idx += 1
                except EndofVideoFileError:
                    logger.warning("World video file is done. Stopping")
                    break

            ''' Step 3: calculate gaze positions
            passe events to gaze mapper plugin without the world frame
            '''
            for p in self.g_pool.plugins:
                if 'Simple_Gaze_Mapper' in p.class_name:
                    p.update(None,events)
#.........这里部分代码省略.........
开发者ID:mothman1,项目名称:pupil,代码行数:101,代码来源:split_into_frames.py


示例12: get_pupil_list

    def get_pupil_list(self, data_path, settings_path):
        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {Detector_2D.__name__:Detector_2D,Detector_3D.__name__:Detector_3D}
        # get latest settings
        session_settings = Persistent_Dict(settings_path)
        pupil_detector_settings = session_settings.get('pupil_detector_settings',None)
        last_pupil_detector = pupil_detectors[session_settings.get('last_pupil_detector',Detector_2D.__name__)]
        pupil_detector = last_pupil_detector(self.g_pool,pupil_detector_settings)

        # Detect pupil
        video_path = [f for f in glob(os.path.join(data_path,"eye0.*")) if f[-3:] in ('mp4','mkv','avi')][0]
        # video_capture = cv2.VideoCapture(e_video_path)
        # pos_frame = video_capture.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)

        from ctypes import c_double
        from multiprocessing import Value
        timebase = Value(c_double,0)
        video_capture = autoCreateCapture(video_path, timebase=timebase)
        default_settings = {'frame_size':(640,480),'frame_rate':30}
        video_capture.settings = session_settings.get('capture_settings',default_settings)
        # previous_settings = session_settings.get('capture_settings',default_settings)
        # if previous_settings and previous_settings['name'] == cap.name:
        #     video_capture.settings = previous_settings
        # else:
        #     video_capture.settings = default_settings

        frame = None
        # Test capture
        sequence = 0
        try:
            frame = video_capture.get_frame()
            sequence += 1
        except CameraCaptureError:
            logger.error("Could not retrieve image from world.mp4")
            video_capture.close()
            return

        from player_settings.plugins.offline_crowd_process.ui_roi import UIRoi
        self.g_pool.display_mode = session_settings.get('display_mode','camera_image')
        self.g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
                                'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
                                'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."}
        self.g_pool.u_r = UIRoi(frame.img.shape)
        self.g_pool.u_r.set(session_settings.get('roi',self.g_pool.u_r.get()))
        save_undetected = os.path.join(data_path,'undetected')
        save_detected = os.path.join(data_path,'detected')
        if not os.path.exists(save_detected):
            print "creating %s and save images to"%save_detected
            os.makedirs(save_detected)
        if not os.path.exists(save_undetected):
            print "creating %s and save images to"%save_undetected
            os.makedirs(save_undetected)
        pupil_list = []
        while True:
            try:
                # get frame by frame
                frame = video_capture.get_frame()
                # pupil ellipse detection
                result = pupil_detector.detect(frame, self.g_pool.u_r, self.g_pool.display_mode == 'algorithm')
                result['id'] = 0
                sequence +=1
                # Use sequence to sort the frames on server and when back from the server
                result['seq'] = sequence
                pupil_list += [result]
                if result['confidence'] >0:
                    if result.has_key('ellipse'):
                        self.save_image(frame.img, os.path.join(save_detected,'%s_%s.jpg'%(repr(result['timestamp']),
                                                                       repr(result['confidence']))), result['ellipse'])
                else:
                    self.save_image(frame.img, os.path.join(save_undetected,'%s_%s.jpg'%(repr(result['timestamp']),
                                                                       repr(result['confidence']))))
            except EndofVideoFileError:
                logger.warning("Eye video file is done. Stopping")
                break
        return pupil_list
开发者ID:mothman1,项目名称:pupil,代码行数:76,代码来源:split_into_frames.py


示例13: calibrate_from_csv

    def calibrate_from_csv(self, pupil_list, data_path):
        import csv
        ref_list = []
        all_ref_list = []
        with open(os.path.join(data_path, 'crowdpos/cal.csv'), 'rU') as csvfile:
            all = csv.reader(csvfile, delimiter=',')
            smooth_pos1 = 0.,0.
            smooth_vel1 = 0
            sample_site1 = (-2,-2)
            counter1 = 0
            counter_max = 30
            count_all_detected = 0
            for row in all:
                norm_center = make_tuple(row[1])
                center = (norm_center[0] * 1280, (1 - norm_center[1]) * 720)
                center = (int(round(center[0])),int(round(center[1])))

                # calculate smoothed manhattan velocity
                smoother = 0.3
                smooth_pos = np.array(smooth_pos1)
                pos = np.array(norm_center)
                new_smooth_pos = smooth_pos + smoother*(pos-smooth_pos)
                smooth_vel_vec = new_smooth_pos - smooth_pos
                smooth_pos = new_smooth_pos
                smooth_pos1 = list(smooth_pos)
                #manhattan distance for velocity
                new_vel = abs(smooth_vel_vec[0])+abs(smooth_vel_vec[1])
                smooth_vel1 = smooth_vel1 + smoother*(new_vel-smooth_vel1)

                #distance to last sampled site
                sample_ref_dist = smooth_pos-np.array(sample_site1)
                sample_ref_dist = abs(sample_ref_dist[0])+abs(sample_ref_dist[1])

                # start counter if ref is resting in place and not at last sample site
                if not counter1:

                    if smooth_vel1 < 0.01 and sample_ref_dist > 0.1:
                        sample_site1 = smooth_pos1
                        logger.debug("Steady marker found. Starting to sample %s datapoints" %counter_max)
                        # self.notify_all({'subject':'calibration marker found','timestamp':self.g_pool.capture.get_timestamp(),'record':True,'network_propagate':True})
                        counter1 = counter_max

                if counter1:
                    if smooth_vel1 > 0.01:
                        logger.warning("Marker moved too quickly: Aborted sample. Sampled %s datapoints. Looking for steady marker again."%(counter_max-counter1))
                        # self.notify_all({'subject':'calibration marker moved too quickly','timestamp':self.g_pool.capture.get_timestamp(),'record':True,'network_propagate':True})
                        counter1 = 0
                    else:
                        count_all_detected += 1
                        counter1 -= 1
                        ref = {}
                        ref["norm_pos"] = norm_center
                        ref["screen_pos"] = center
                        ref["timestamp"] = float(row[0])
                        ref_list.append(ref)
                        if counter1 == 0:
                            #last sample before counter done and moving on
                            logger.debug("Sampled %s datapoints. Stopping to sample. Looking for steady marker again."%counter_max)
                            # self.notify_all({'subject':'calibration marker sample completed','timestamp':self.g_pool.capture.get_timestamp(),'record':True,'network_propagate':True})
                # save all ref to look at pos on the images
                ref = {}
                ref["norm_pos"] = norm_center
                ref["screen_pos"] = center
                ref["timestamp"] = float(row[0])
                all_ref_list.append(ref)

        ref_list.sort(key=lambda d: d['timestamp'])
        all_ref_list.sort(key=lambda d: d['timestamp'])
        timebase = Value(c_double,0)
        capture_world = autoCreateCapture(os.path.join(data_path, 'world.mp4') 

鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python video_models.Video类代码示例发布时间:2022-05-26
下一篇:
Python video.Video类代码示例发布时间:2022-05-26
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4 © 2001-2023 极客世界.|Sitemap