Python gui.fileOpenDlg Function Code Examples


This article collects typical usage examples of the psychopy.gui.fileOpenDlg function in Python. If you are trying to work out how fileOpenDlg is used in practice — what it does, how to call it, and what real code that uses it looks like — the selected examples below should help.



The following shows 14 code examples of the fileOpenDlg function, drawn from open-source projects and ordered roughly by popularity.
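
Before the individual examples, here is a minimal sketch of the call pattern they share. It is not taken from any of the quoted projects; the prompt text and the file filter are placeholders. fileOpenDlg accepts optional tryFilePath, tryFileName, prompt and allowed arguments and returns a list of full paths (the dialog allows multiple selection), or None if the user cancels. Note that the sketch uses Python 3 syntax, while several of the quoted examples below are older Python 2 code (print statements without parentheses, xrange).

from psychopy import gui, core

# Open a file dialog starting in the current directory; 'allowed' filters the
# visible files (the filter string here is only an illustration).
fileList = gui.fileOpenDlg(tryFilePath='.',
                           prompt='Select one or more data files',
                           allowed="CSV files (*.csv)|*.csv")

if not fileList:  # None (or an empty list) means the user cancelled
    core.quit()

for fileName in fileList:  # multiple selection returns a list of paths
    print(fileName)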

Example 1: load

 def load(self, fileName=''):
     """read and return header + row x col data from a pickle file
     """
     if not fileName:
         fileName = self.defaultFileName
     if not os.path.isfile(fileName):
         _base = os.path.basename(fileName)
         fullPathList = gui.fileOpenDlg(tryFileName=_base,
                                        allowed="All files (*.*)|*.*")
         if fullPathList:
             fileName = fullPathList[0]  # wx.MULTIPLE -> list
     if os.path.isfile(fileName) and fileName.endswith('.pkl'):
         f = open(fileName, 'rb')
         # Converting newline characters.
         if PY3:
             # 'b' is necessary in Python3 because byte object is 
             # returned when file is opened in binary mode.
             buffer = f.read().replace(b'\r\n',b'\n').replace(b'\r',b'\n')
         else:
             buffer = f.read().replace('\r\n','\n').replace('\r','\n')
         contents = pickle.loads(buffer)
         f.close()
         if self.parent:
             self.parent.conditionsFile = fileName
         return contents
     elif not os.path.isfile(fileName):
         print('file %s not found' % fileName)
     else:
         print('only .pkl supported at the moment')
Author: flipphillips | Project: psychopy | Lines of code: 29 | Source file: dlgsConditions.py


Example 2: csvFromPsydat

    def csvFromPsydat(self, evt=None):
        from psychopy import gui
        from psychopy.tools.filetools import fromFile

        names = gui.fileOpenDlg(allowed='*.psydat',
                    prompt=_translate("Select .psydat file(s) to extract"))
        for name in names or []:
            filePsydat = os.path.abspath(name)
            print("psydat: {0}".format(filePsydat))

            exp = fromFile(filePsydat)
            if filePsydat.endswith('.psydat'):
                fileCsv = filePsydat[:-7]
            else:
                fileCsv = filePsydat
            fileCsv += '.csv'
            exp.saveAsWideText(fileCsv)
            print('   -->: {0}'.format(os.path.abspath(fileCsv)))
Author: harmadillo | Project: psychopy | Lines of code: 18 | Source file: _psychopyApp.py


Example 3: load

 def load(self, fileName=''):
     """read and return header + row x col data from a pickle file
     """
     if not fileName:
         fileName = self.defaultFileName
     if not os.path.isfile(fileName):
         fullPathList = gui.fileOpenDlg(tryFileName=os.path.basename(fileName),
                         allowed="All files (*.*)|*.*")
         if fullPathList:
             fileName = fullPathList[0] # wx.MULTIPLE -> list
     if os.path.isfile(fileName) and fileName.endswith('.pkl'):
         f = open(fileName)
         contents = cPickle.load(f)
         f.close()
         if self.parent:
             self.parent.conditionsFile = fileName
         return contents
     elif not os.path.isfile(fileName):
         print('file %s not found' % fileName)
     else:
         print('only .pkl supported at the moment')
Author: papr | Project: psychopy | Lines of code: 21 | Source file: dlgsConditions.py


Example 4

from os import path
from psychopy import misc, gui
import pandas as pd
import scipy
from scipy import stats

#choose some data files to analyse
filenames = gui.fileOpenDlg(allowed="*.csv")

#loop through the files
for thisFilename in filenames:
    print thisFilename
    thisDat = pd.read_csv(thisFilename)
    
    #filter out bad data
    filtered = thisDat[ thisDat['rt']<=1.0 ]
    filtered = filtered[ filtered['corr']==1 ]
    
    #separate conflicting from congruent reaction times
    conflict = filtered[filtered.description == 'conflict']
    congruent = filtered[filtered.description != 'conflict']
    #get mean/std.dev
    meanConfl = scipy.mean(conflict.rt)
    semConfl = scipy.std(conflict.rt, ddof=1)
    meanCongr = scipy.mean(congruent.rt)
    semCongr = scipy.std(congruent.rt, ddof=1)
    print "Conflict = %.3f (std=%.3f)" %(meanConfl, semConfl)
    print "Congruent = %.3f (std=%.3f)" %(meanCongr, semCongr)
    
    #run a t-test
    t, p = stats.ttest_ind(conflict.rt, congruent.rt)
Author: MWiechmann | Project: posner | Lines of code: 31 | Source file: analyse.py


Example 5: isinstance

#!/usr/bin/env python

#This analysis script takes one or more staircase datafiles as input from a GUI
#It then plots the staircases on top of each other on the left 
#and a combined psychometric function from the same data
#on the right
#

from psychopy import data, gui, core
from psychopy.tools.filetools import fromFile
import pylab, scipy

files = gui.fileOpenDlg('.')
if not files:
    core.quit()

#get the data from all the files
allIntensities, allResponses = [],[]
for thisFileName in files:
    thisDat = fromFile(thisFileName)
    assert isinstance(thisDat, data.StairHandler)
    allIntensities.append( thisDat.intensities )
    allResponses.append( thisDat.data )
    
#plot each staircase
pylab.subplot(121)
colors = 'brgkcmbrgkcm'
lines, names = [],[]
for fileN, thisStair in enumerate(allIntensities):
    #lines.extend(pylab.plot(thisStair))
    #names = files[fileN]
Author: GentBinaku | Project: psychopy | Lines of code: 31 | Source file: JND_staircase_analysis.py


Example 6: main

def main(file_name=None):
    """ Run the analysis on data in a file"""

    # Define these two within the scope of main:
    def func(pars):
        a,b,c = pars
        return a*(x**2) + b*x + c 

    def errfunc(pars):
        return y-func(pars)

    if file_name is None: 
        #path_to_files = '/Volumes/Plata1/Shared/Ariel/texture_data/'
        file_name =  fileOpenDlg()[0]
    
    p,l,data_rec = utils.get_data(file_name)
    neutral = data_rec[np.where(data_rec['neutral'])]
    peripheral = data_rec[np.where(data_rec['neutral']==0)]
    cond_str = ['Neutral', 'Cued']
    colors = ['b','r']
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)

    print("SOA used was: %s msec"%(1000*p[' texture_dur']))
    print("% correct: ")

    for cond_idx,cond_rec in enumerate([neutral,peripheral]):
        correct = cond_rec['correct']
        ecc = cond_rec['target_ecc']

        #Bin the eccentricities: 
        a = np.floor(ecc)
        eccs_used = np.unique(a)

        #Initialize counters: 
        b = np.zeros(len(eccs_used))
        c = np.zeros(len(eccs_used))

        #Loop over the trials and add the correct to one counter and the number of
        #trials to the other: 
        for i in xrange(len(correct)):
                idx = np.where(eccs_used==np.floor(ecc[i]))
                b[idx]+=correct[i]
                c[idx]+=1.0

        p_correct = b/c
        print("%s: %s "%(cond_str[cond_idx], np.mean(p_correct)*100))
        
        for i,p in enumerate(p_correct):
                ax.plot(eccs_used[i],p,'o',color=colors[cond_idx],markersize=c[i])

        x = []
        y = []

        for i,this_ecc in enumerate(eccs_used):
            x = np.hstack([x,c[i]*[this_ecc]])    
            y = np.hstack([y,c[i]*[p_correct[i]]])

        guess = 1,1,1
        fit, mesg = leastsq(errfunc,guess)
        x = np.arange(0,np.max(x),0.01)        
        ax.plot(x,func(fit),'--',color=colors[cond_idx],
                label=cond_str[cond_idx])
        
    ax.legend()
    ax.set_xlim([-1,13])
    ax.set_ylim([0,1.1])
    ax.set_xlabel('Eccentricity (degrees)')
    ax.set_ylabel('Proportion correct responses')

    fig_name = 'figures/' + file_name.split('.')[0].split('/')[-1] + '.png'
    fig.savefig(fig_name)
    os.system('open %s'%fig_name)
Author: BaladityaY | Project: voluntary_texture_discrimination | Lines of code: 73 | Source file: analyze_run.py


Example 7: Workbook

        'set6_y':'G',
        'set1_n':'H',
        'set2_n':'I',
        'set3_n':'J',
        'set4_n':'K',
        'set5_n':'L',
        'set6_n':'M',
        }

#libs for handling excel files:
from openpyxl.workbook import Workbook
from openpyxl.writer.excel import ExcelWriter
from openpyxl.cell import get_column_letter

#use a file open dialog to choose the files to include
files = gui.fileOpenDlg(tryFilePath=".", allowed='*.psydat')
if not files:#user pressed cancel
    core.quit()
    
xlBook = Workbook()
xlsxWriter = ExcelWriter(workbook = xlBook)
xlSheet = xlBook.worksheets[0]#use the first worksheet (index 0)
xlSheet.title = groupName
#make a header row
for condition in outCols.keys():
    xlSheet.cell(outCols[condition]+'1').value = condition
    
outRow = 2#starting row for data
#do the actual analysis, file-by-file
condNames = []
for fileName in files:
Author: JimGrange | Project: lab-book | Lines of code: 31 | Source file: sternbergBatchAnalysis3.py


Example 8: isinstance

#!/usr/bin/env python

# This analysis script takes one or more staircase datafiles as input from a GUI
# It then plots the staircases on top of each other on the left
# and a combined psychometric function from the same data
# on the right
#

from psychopy import data, gui, misc, core, compatibility
import pylab, scipy
import numpy as np

files = gui.fileOpenDlg(".")
if not files:
    core.quit()

# get the data from all the files
allIntensities, allResponses = [], []
for thisFileName in files:
    thisDat = compatibility.fromFile(thisFileName)
    assert isinstance(thisDat, data.StairHandler)
    allIntensities.append(thisDat.intensities)
    allResponses.append(thisDat.data)

# plot each staircase
pylab.subplot(121)
colors = "brgkcmbrgkcm"
lines, names = [], []
for fileN, thisStair in enumerate(allIntensities):
    # lines.extend(pylab.plot(thisStair))
    # names = files[fileN]
Author: RSharman | Project: Experiments | Lines of code: 31 | Source file: JND_staircase_analysis_Compat.py


Example 9: files

        'subject':'1', 
        'session': 1, 
        'skipPrompts':False, 
        'paramsFile':['DEFAULT','Load...']}
# overwrite params struct if you just saved a new parameter set
if saveParams:
    expInfo['paramsFile'] = [newParamsFilename,'Load...']

#present a dialogue to change select params
dlg = gui.DlgFromDict(expInfo, title=scriptName, order=['subject','session','skipPrompts','paramsFile'])
if not dlg.OK:
    core.quit() # the user hit cancel, so exit

# find parameter file
if expInfo['paramsFile'] == 'Load...':
    dlgResult = gui.fileOpenDlg(prompt='Select parameters file',tryFilePath=os.getcwd(),
        allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
    expInfo['paramsFile'] = dlgResult[0]
# load parameter file
if expInfo['paramsFile'] not in ['DEFAULT', None]: # otherwise, just use defaults.
    # load params file
    params = fromFile(expInfo['paramsFile'])


# transfer skipPrompts from expInfo (gui input) to params (logged parameters)
params['skipPrompts'] = expInfo['skipPrompts']

# print params to Output
print 'params = {'
for key in sorted(params.keys()):
    print "   '%s': %s"%(key,params[key]) # print each value as-is (no quotes)
print '}'
Author: djangraw | Project: PsychoPyParadigms | Lines of code: 32 | Source file: SampleExperiment_d1.py


Example 10: main

def main(file_name=None):
    """ Run the analysis on data in a file"""

    # Define these two within the scope of main:
    def func(pars):
        a, b, c = pars
        return a * (x ** 2) + b * x + c

    def errfunc(pars):
        return y - func(pars)

    if file_name is None:
        # path_to_files = '/Volumes/Plata1/Shared/Ariel/texture_data/'
        file_name = fileOpenDlg()[0]

    p, l, data_rec = utils.get_data(file_name)

    # For backwards compatibility, check if this variable exists:
    if "eye_moved" in l:
        data_rec = data_rec[np.where(data_rec["eye_moved"] == 0)]

    neutral = data_rec[np.where(data_rec["neutral"])]
    peripheral = data_rec[np.where(data_rec["neutral"] == 0)]
    cond_str = ["Neutral", "Cued"]
    colors = ["b", "r"]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)

    print("SOA used was: %s msec" % (1000 * p[" texture_dur"]))
    print("% correct: ")

    for cond_idx, cond_rec in enumerate([neutral, peripheral]):
        correct = cond_rec["correct"]
        ecc = cond_rec["target_ecc"]

        # Bin the eccentricities:
        a = np.floor(ecc)
        eccs_used = np.unique(a)

        # Initialize counters:
        b = np.zeros(len(eccs_used))
        c = np.zeros(len(eccs_used))

        # Loop over the trials and add the correct to one counter and the number of
        # trials to the other:
        for i in xrange(len(correct)):
            idx = np.where(eccs_used == np.floor(ecc[i]))
            b[idx] += correct[i]
            c[idx] += 1.0

        p_correct = b / c
        print("%s: %s " % (cond_str[cond_idx], np.mean(p_correct) * 100))

        for i, p in enumerate(p_correct):
            ax.plot(eccs_used[i], p, "o", color=colors[cond_idx], markersize=c[i])

        x = []
        y = []

        for i, this_ecc in enumerate(eccs_used):
            x = np.hstack([x, c[i] * [this_ecc]])
            y = np.hstack([y, c[i] * [p_correct[i]]])

        guess = 1, 1, 1
        fit, mesg = leastsq(errfunc, guess)
        x = np.arange(0, np.max(x), 0.01)
        ax.plot(x, func(fit), "--", color=colors[cond_idx], label=cond_str[cond_idx])

    ax.legend()
    ax.set_xlim([-1, 13])
    ax.set_ylim([0, 1.1])
    ax.set_xlabel("Eccentricity (degrees)")
    ax.set_ylabel("Proportion correct responses")

    fig_name = "figures/" + file_name.split(".")[0].split("/")[-1] + ".png"
    fig.savefig(fig_name)
    os.system("open %s" % fig_name)
Author: arokem | Project: texture_voluntary | Lines of code: 77 | Source file: analyze_run.py


Example 11: open

from __future__ import print_function
from builtins import next
import csv, codecs
from psychopy import gui

filename = gui.fileOpenDlg('.', allowed='*.csv')[0]
        
#use csv from python (not from numpy) due to handling newlines within quote char
with open(filename, 'rU') as csvFile:
    spamreader = csv.reader(csvFile, delimiter=',', quotechar='"', dialect=csv.excel)
    headers = next(spamreader)
    print('headers:', type(headers), headers)
    entries=[]
    for thisRow in spamreader:
        print(thisRow)
        thisEntry = {}
        for fieldN, thisFieldName in enumerate(headers):
            thisEntry[thisFieldName] = thisRow[fieldN]
        entries.append(thisEntry)

companHead="Your Company or Institution"
nameHead='Your name (or anon, but a name is nicer)'
testimHead='Your thoughts on PsychoPy'
posnHead = 'Your position'


with open('testimonialsText.html', 'wb') as outFile:
    for thisEntry in entries:
        outFile.write('    <hr>%s <p>\n' %(thisEntry[testimHead].replace('\n', '<br>')))
        nameStr = '    - <em>%s' %thisEntry[nameHead]
        if thisEntry[posnHead]:
Author: ChenTzuYin | Project: psychopy | Lines of code: 31 | Source file: compileTestimonials.py


Example 12

"""This is not a particularly useful example of saving out
csv files from a set of psydat files
"""
from os import path
from psychopy import misc, gui

#select some files to use
filenames = gui.fileOpenDlg(allowed="*.psydat")

#loop through the files
for thisFilename in filenames:
    #get a new name
    fileNoExt, fileExt = path.splitext(thisFilename)
    newName = fileNoExt+"NEW.csv"
    #load and save
    dat = misc.fromFile(thisFilename)
    dat.saveAsWideText(newName)
    print 'saved', newName
Author: Complex501 | Project: posner | Lines of code: 18 | Source file: resaveData.py


Example 13: enumerate

import analysis_utils as ana

contrast = []
correct = []

if __name__=="__main__":

    bootstrap_n = 1000

    #Weibull params set in analysis_utils: The guessing rate is 0.25 for 4afc
    guess = 0.5
    flake = 0.01
    slope = 3.5

    file_names = gui.fileOpenDlg(tryFilePath='./data')

    for file_idx, file_name in enumerate(file_names):
        print file_idx
        if file_idx == 0:
            file_stem =  file_name.split('/')[-1].split('.')[0]
        else:
            file_stem = file_stem + file_name[-8]
        p, l, data_rec = ana.get_data(str(file_name))
        trials_per_condition = float(p[' trials_per_block'])*(float(p[' num_blocks'])/2.0)
        print trials_per_condition
        contrast = np.ones([len(file_names)*trials_per_condition,1])
        correct = np.ones([len(file_names)*trials_per_condition,1])
        data_rec = csv2rec(file_name)
        contrast_this_run = data_rec['annulus_target_contrast']
        correct_this_run = data_rec['correct']
Author: arokem | Project: surround_suppression | Lines of code: 30 | Source file: analyze_run.py


Example 14: files

# 2D Fast Fourier Transform on the lum, lm and s components of natural scenes - March 2011

#IF YOU DO NOT CHANGE THE DESTINATION FILE NAME, IT WILL OVER WRITE ANY
#EXISTING FILE WITH THE SAME NAME

from psychopy import misc, core, visual, event, data, gui
from scipy import fftpack
import scipy, Image, copy, pylab, glob
import numpy as np
import radialProfile, radial_data
from openpyxl.workbook import Workbook
from openpyxl.writer.excel import ExcelWriter
#
#files = glob.glob('C:\Documents and Settings\lpxrs\My Documents\Colour Data\Image Statistics\McGill Image Library/Textures/*.tif')
#files = gui.fileOpenDlg('.', allowed = "JPEG files (*.jpg) | *.jpg")
files = gui.fileOpenDlg('.', allowed = "TIFF files (*.tif) | *.tif")
if not files:
    core.quit()

counter = 2

#Name of file where data will be saved
#fName = "Textures2.xlsx"

#Create Excel File
wb = Workbook()
ew = ExcelWriter(workbook = wb)

ws = wb.get_active_sheet()
ws.title = "Raw Data"
Author: RSharman | Project: Experiments | Lines of code: 30 | Source file: 2dfft.py



Note: The psychopy.gui.fileOpenDlg examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not reproduce this article without permission.

