Python config.enable_debug_mode Function Code Examples


This article collects typical usage examples of the Python function nipype.config.enable_debug_mode. If you have been wondering how enable_debug_mode is actually used, what a call to it looks like, or where to find real-world examples, then the curated code samples below should help.



A total of 20 code examples of the enable_debug_mode function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
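
Before looking at the individual examples, here is a minimal usage sketch (written for this article, not taken from any of the projects below) of the pattern that nearly every example repeats: call config.enable_debug_mode() and then logging.update_logging(config) so the new settings reach nipype's loggers. As Example 9 (test_debug_mode) confirms, enabling debug mode sets stop_on_first_crash and keep_inputs to true, sets remove_unnecessary_outputs to false, and raises the workflow, interface, and utils log levels to DEBUG.

from nipype import config, logging

config.enable_debug_mode()      # stop_on_first_crash/keep_inputs -> true, log levels -> DEBUG
logging.update_logging(config)  # propagate the updated logging levels to nipype's loggers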

Example 1: check_mask_coverage

def check_mask_coverage(epi,brainmask):
    from os.path import abspath
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nilearn import plotting
    from nipype.interfaces.nipy.preprocess import Trim

    trim = Trim()
    trim.inputs.in_file = epi
    trim.inputs.end_index = 1
    trim.inputs.out_file = 'epi_vol1.nii.gz'
    trim.run()
    epi_vol = abspath('epi_vol1.nii.gz')

    maskcheck_filename='maskcheck.png'
    display = plotting.plot_anat(epi_vol, display_mode='ortho',
                                 draw_cross=False,
                                 title = 'brainmask coverage')
    display.add_contours(brainmask,levels=[.5], colors='r')
    display.savefig(maskcheck_filename)
    display.close()
    maskcheck_file = abspath(maskcheck_filename)

    return(maskcheck_file)
Developer: catcamacho, Project: infant_rest, Lines: 25, Source: preprocessing_classic.py


Example 2: sort_pes

def sort_pes(pes):
    from nipype import config, logging
    from nipype.interfaces.fsl import Merge
    from os.path import abspath
    config.enable_debug_mode()
    logging.update_logging(config)

    print(pes)
    pe1s = []
    pe0s = []
    for file in pes:
        if 'pe0' in file:
            pe0s.append(file)
        elif 'pe1' in file:
            pe1s.append(file)

    pe1s = sorted(pe1s)
    pe0s = sorted(pe0s)

    me = Merge()
    merged_pes = []

    for i in range(0,len(pe1s)):
        num=pe1s[i][-12:-11]
        me.inputs.in_files = [pe1s[i],pe0s[i]]
        me.inputs.dimension='t'
        me.inputs.merged_file = 'merged_pes%s.nii.gz' % num
        me.run()
        file = abspath('merged_pes%s.nii.gz' % num)
        merged_pes.append(file)

    return(merged_pes)
Developer: catcamacho, Project: infant_rest, Lines: 32, Source: preprocessing_classic.py


Example 3: run

def run(args):
    """Get and process specific information"""
    project = gather_project_info()
    exp = gather_experiment_info(args.experiment, args.model)

    # Subject is always highest level of parameterization
    subject_list = determine_subjects(args.subjects)
    subj_source = make_subject_source(subject_list)

    # Get the full correct name for the experiment
    if args.experiment is None:
        exp_name = project["default_exp"]
    else:
        exp_name = args.experiment

    exp['exp_name'] = exp_name
    exp['model_name'] = args.model if args.model else ''

    # Set roots of output storage
    project['analysis_dir'] = op.join(project["analysis_dir"], exp_name)
    project['working_dir'] = op.join(project["working_dir"], exp_name,
                                     exp['model_name'])

    config.set("execution", "crashdump_dir", project["crash_dir"])
    if args.verbose > 0:
        config.set("logging", "filemanip_level", 'DEBUG')
        config.enable_debug_mode()
        logging.update_logging(config)

    if not op.exists(project['analysis_dir']):
        os.makedirs(project['analysis_dir'])

    workflows_dir = os.path.join(os.environ['FITZ_DIR'], exp['pipeline'],
                                 'workflows')
    if not op.isdir(workflows_dir):
        missing_pipe = 'raise'
        if missing_pipe == 'install':
            install(args)
        else:
            raise IOError("Run `fitz install` to set up your pipeline of "
                          "workflows, %s does not exist." % workflows_dir)
    sys.path.insert(0, workflows_dir)
    for wf_name in args.workflows:
        try:
            mod = imp.find_module(wf_name)
            wf_module = imp.load_module("wf", *mod)
        except (IOError, ImportError):
            print "Could not find any workflows matching %s" % wf_name
            raise

        params = update_params(wf_module, exp)
        workflow = wf_module.workflow_manager(
            project, params, args, subj_source)

        # Run the pipeline
        plugin, plugin_args = determine_engine(args)
        workflow.write_graph(str(workflow)+'.dot', format='svg')
        if not args.dontrun:
            workflow.run(plugin, plugin_args)
Developer: kastman, Project: fitz, Lines: 59, Source: frontend.py


Example 4: setup_environment

def setup_environment(argv):
    print("Configuring environment...")
    import os
    import os.path
    from BAW.utilities.configFileParser import resolveDataSinkOption, parseFile
    from BAW.utilities.pathHandling import validatePath
    from BAW.utilities import misc
    from collections import OrderedDict  # Need OrderedDict internally to ensure consistent ordering
    environment, experiment, pipeline, cluster = parseFile(
        argv["--ExperimentConfig"], argv["--pe"], argv["--workphase"])
    pipeline['ds_overwrite'] = resolveDataSinkOption(argv, pipeline)
    if cluster is None:
        print("Running on local")
        # raise NotImplementedError("Running local has old code and has not been tested!")
        # assert argv["--wfrun"] in argvWFRUN, \
        #    "wfrun  options for clusters can only be given when the configuration file's CLUSTER option == True"
        # os.environ['NSLOTS'] = str(misc.get_cpus(argv["--wf_template_runner"]))
    else:
        load_modules(cluster['modules'])  # Load modules if not already done  ## MODS PATH
        # print os.environ['LOADEDMODULES']
        # if environment['virtualenv_dir'] is not None:  # MODS PATH
        # activate_this = validatePath(
        #    os.path.join(environment['virtualenv_dir'], 'bin', 'activate_this.py'), False, False)
        # if os.path.exists( activate_this ) :
        #    exec(open(activate_this).read(), OrderedDict(__file__=activate_this))
    utilities_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'utilities')
    configure_env = validatePath(os.path.join(utilities_path, 'configure_env.py'), False, False)
    # Add the AutoWorkup directory to the PYTHONPATH every time - REQUIRED FOR CLUSTER DISPATCHING
    environment['env']['PYTHONPATH'] = environment['env']['PYTHONPATH'] + ":" + os.path.dirname(__file__)

    exec(open(configure_env).read(), OrderedDict(__file__=__file__,
                                          append_os_path=environment['env']['PATH'],
                                          append_sys_path=environment['env']['PYTHONPATH'])
         )  # MODS PATH

    print(("@" * 80))
    print((environment['env']['PYTHONPATH']))
    print(("@" * 80))
    print((environment['env']['PATH']))
    print(("@" * 80))

    from nipype import config
    config.enable_debug_mode()
    # config.enable_provenance()

    from BAW.utilities.package_check import verify_packages
    verify_packages()
    if 'FREESURFER' in experiment['components']:  # FREESURFER MODS
        configure_FS = validatePath(os.path.join(utilities_path, 'utilities', 'configure_FS.py'), False, False)
        exec(open(configure_FS).read(), OrderedDict(FS_VARS=misc.FS_VARS, env=environment['env']))
        print("FREESURFER needs to check for sane environment here!")  # TODO: raise warning, write method, what???
    for key, value in list(environment['env'].items()):
        if key in ['PATH', 'PYTHONPATH'] + misc.FS_VARS:
            pass
        else:
            os.environ[key] = value  # Do not use os.putenv (see Python documentation)
    return environment, experiment, pipeline, cluster
Developer: NIRALUser, Project: BRAINSTools, Lines: 57, Source: AutoWorkup.py


Example 5: combine_masks

def combine_masks(mask1,mask2):
    from nipype.interfaces.fsl.utils import Merge
    from os.path import abspath
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    vols = []
    vols.append(mask1)
    vols.append(mask2)

    return(vols)
Developer: catcamacho, Project: infant_rest, Lines: 12, Source: preprocessing_classic.py


Example 6: run_examples

def run_examples(example, pipelines, data_path, plugin=None, rm_base_dir=True):
    from nipype import config
    from nipype.interfaces.base import CommandLine

    if plugin is None:
        plugin = 'MultiProc'

    print('running example: %s with plugin: %s' % (example, plugin))
    config.enable_debug_mode()
    config.enable_provenance()
    CommandLine.set_default_terminal_output("stream")

    plugin_args = {}
    if plugin == 'MultiProc':
        plugin_args['n_procs'] = int(
            os.getenv('NIPYPE_NUMBER_OF_CPUS', cpu_count()))

    __import__(example)
    for pipeline in pipelines:
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)

        results_dir = os.path.join(wf.base_dir, wf.name)
        if rm_base_dir and os.path.exists(results_dir):
            rmtree(results_dir)

        # Handle a logging directory
        log_dir = os.path.join(os.getcwd(), 'logs', example)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        wf.config = {
            'execution': {
                'hash_method': 'timestamp',
                'stop_on_first_rerun': 'true',
                'write_provenance': 'true',
                'poll_sleep_duration': 2
            },
            'logging': {
                'log_directory': log_dir,
                'log_to_file': True
            }
        }
        try:
            wf.inputs.inputnode.in_data = os.path.abspath(data_path)
        except AttributeError:
            pass  # the workflow does not have inputnode.in_data

        wf.run(plugin=plugin, plugin_args=plugin_args)
        # run twice to check if nothing is rerunning
        wf.run(plugin=plugin)
Developer: TheChymera, Project: nipype, Lines: 50, Source: run_examples.py


Example 7: convertafni

def convertafni(in_file):
    from nipype.interfaces.afni.utils import AFNItoNIFTI
    from os import path
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    cvt = AFNItoNIFTI()
    cvt.inputs.in_file = in_file
    cvt.inputs.out_file = 'func_filtered.nii.gz'
    cvt.run()

    out_file = path.abspath('func_filtered.nii.gz')
    return(out_file)
Developer: catcamacho, Project: infant_rest, Lines: 14, Source: preprocessing_classic.py


Example 8: main

def main(argv=None):
    import os
    import sys

    from nipype import config
    config.enable_debug_mode()

    #-------------------------------- argument parser
    import argparse
    argParser = argparse.ArgumentParser( description="""****************************
        10-cross validation analysis
        """)
    # workup arguments
    argWfGrp = argParser.add_argument_group( 'argWfGrp', """****************************
        auto workflow arguments for cross validation
        """)
    argWfGrp.add_argument('--experimentalConfigurationFile',
                          help="""experimentalConfigurationFile
        Configuration file name with FULL PATH""",
                          dest='experimentalConfigurationFile', required=True)
    argWfGrp.add_argument( '--expDir',    help="""expDir
        """,
                           dest='expDir', required=False, default=".")
    argWfGrp.add_argument( '--baseDir',    help="""baseDir
        """,
                           dest='baseDir', required=False, default=".")
    argWfGrp.add_argument( '--runOption',    help="""runOption [local/cluster]
        """,
                           dest='runOption', required=False, default="local")
    argWfGrp.add_argument( '--PythonBinDir',    help="""PythonBinDir [local/cluster]
        """,
                           dest='PythonBinDir', required=False, default="NA")
    argWfGrp.add_argument( '--BRAINSToolsSrcDir',    help="""BRAINSToolsSrcDir [local/cluster]
        """,
                           dest='BRAINSToolsSrcDir', required=False, default="NA")
    argWfGrp.add_argument( '--BRAINSToolsBuildDir',    help="""BRAINSToolsBuildDir [local/cluster]
        """,
                           dest='BRAINSToolsBuildDir', required=False, default="NA")

    args = argParser.parse_args()
    similarityComputeWorkflow(args.expDir,
                              args.baseDir,
                              args.experimentalConfigurationFile,
                              args.runOption,
                              args.PythonBinDir,
                              args.BRAINSToolsSrcDir,
                              args.BRAINSToolsBuildDir)
Developer: aghayoor, Project: BRAINSTools, Lines: 47, Source: analysis.py


Example 9: test_debug_mode

def test_debug_mode():
    from ... import logging

    sofc_config = config.get('execution', 'stop_on_first_crash')
    ruo_config = config.get('execution', 'remove_unnecessary_outputs')
    ki_config = config.get('execution', 'keep_inputs')
    wf_config = config.get('logging', 'workflow_level')
    if_config = config.get('logging', 'interface_level')
    ut_config = config.get('logging', 'utils_level')

    wf_level = logging.getLogger('nipype.workflow').level
    if_level = logging.getLogger('nipype.interface').level
    ut_level = logging.getLogger('nipype.utils').level

    config.enable_debug_mode()

    # Check config is updated and logging levels, too
    assert config.get('execution', 'stop_on_first_crash') == 'true'
    assert config.get('execution', 'remove_unnecessary_outputs') == 'false'
    assert config.get('execution', 'keep_inputs') == 'true'
    assert config.get('logging', 'workflow_level') == 'DEBUG'
    assert config.get('logging', 'interface_level') == 'DEBUG'
    assert config.get('logging', 'utils_level') == 'DEBUG'

    assert logging.getLogger('nipype.workflow').level == 10
    assert logging.getLogger('nipype.interface').level == 10
    assert logging.getLogger('nipype.utils').level == 10

    # Restore config and levels
    config.set('execution', 'stop_on_first_crash', sofc_config)
    config.set('execution', 'remove_unnecessary_outputs', ruo_config)
    config.set('execution', 'keep_inputs', ki_config)
    config.set('logging', 'workflow_level', wf_config)
    config.set('logging', 'interface_level', if_config)
    config.set('logging', 'utils_level', ut_config)
    logging.update_logging(config)

    assert config.get('execution', 'stop_on_first_crash') == sofc_config
    assert config.get('execution', 'remove_unnecessary_outputs') == ruo_config
    assert config.get('execution', 'keep_inputs') == ki_config
    assert config.get('logging', 'workflow_level') == wf_config
    assert config.get('logging', 'interface_level') == if_config
    assert config.get('logging', 'utils_level') == ut_config

    assert logging.getLogger('nipype.workflow').level == wf_level
    assert logging.getLogger('nipype.interface').level == if_level
    assert logging.getLogger('nipype.utils').level == ut_level
Developer: TheChymera, Project: nipype, Lines: 47, Source: test_config.py


Example 10: combine_par

def combine_par(par_list):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from os.path import abspath
    from numpy import vstack, savetxt, genfromtxt

    motion = genfromtxt(par_list[0], dtype=float)
    if len(par_list)>1:
        for file in par_list[1:]:
            temp = genfromtxt(file, dtype=float)  # read each subsequent .par file before stacking
            motion=vstack((motion,temp))

    filename = 'motion.par'
    savetxt(filename, motion, delimiter=' ')
    combined_par = abspath(filename)
    return(combined_par)
Developer: catcamacho, Project: infant_rest, Lines: 17, Source: preprocessing_classic.py


Example 11: create_coreg_plot

def create_coreg_plot(epi,anat):
    import os
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from nilearn import plotting

    coreg_filename='coregistration.png'
    display = plotting.plot_anat(epi, display_mode='ortho',
                                 draw_cross=False,
                                 title = 'coregistration to anatomy')
    display.add_edges(anat)
    display.savefig(coreg_filename)
    display.close()
    coreg_file = os.path.abspath(coreg_filename)

    return(coreg_file)
Developer: catcamacho, Project: infant_rest, Lines: 17, Source: preprocessing_classic.py


Example 12: run_examples

def run_examples(example, pipelines, plugin):
    print('running example: %s with plugin: %s' % (example, plugin))
    from nipype import config
    config.enable_debug_mode()
    from nipype.interfaces.base import CommandLine
    CommandLine.set_default_terminal_output("stream")

    __import__(example)
    for pipeline in pipelines:
        wf = getattr(sys.modules[example], pipeline)
        wf.base_dir = os.path.join(os.getcwd(), 'output', example, plugin)
        if os.path.exists(wf.base_dir):
            rmtree(wf.base_dir)
        wf.config = {'execution': {'hash_method': 'timestamp', 'stop_on_first_rerun': 'true'}}
        wf.run(plugin=plugin, plugin_args={'n_procs': 4})
        # run twice to check if nothing is rerunning
        wf.run(plugin=plugin)
Developer: Garyfallidis, Project: nipype, Lines: 17, Source: run_examples.py


Example 13: brightthresh

def brightthresh(func):
    import nibabel as nib
    from numpy import median, where

    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)

    func_nifti1 = nib.load(func)
    func_data = func_nifti1.get_data()
    func_data = func_data.astype(float)

    brain_values = func_data[where(func_data > 0)]  # intensities of in-brain (nonzero) voxels
    median_thresh = median(brain_values)
    bright_thresh = 0.75 * median_thresh

    return(bright_thresh)
Developer: catcamacho, Project: infant_rest, Lines: 17, Source: preprocessing_classic.py


Example 14: combine_fd

def combine_fd(fd_list):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from os.path import abspath
    from numpy import asarray, savetxt

    motion = open(fd_list[0]).read().splitlines()

    if len(fd_list)>1:
        for file in fd_list[1:]:
            temp = open(file).read().splitlines()
            motion = motion+temp

    motion = asarray(motion).astype(float)
    filename = 'FD_full.txt'
    savetxt(filename,motion)
    combined_fd = abspath(filename)
    return(combined_fd)
Developer: catcamacho, Project: infant_rest, Lines: 19, Source: preprocessing_classic.py


Example 15: main

def main(argv=None):
    import os
    import sys

    from nipype import config
    config.enable_debug_mode()
    #-------------------------------- argument parser
    import argparse
    argParser = argparse.ArgumentParser( description="""****************************
        similarity computation between two labels 
        """)
    # workup arguments
    argParser.add_argument('--labelMapFilename1',
                          help="""a filename that will be compared to. """,
                          dest='labelMapFilename1', required=False)

    argParser.add_argument('--labelMapFilename2',
                          help="""a filename that will be compared to. """,
                          dest='labelMapFilename2', required=False)

    argParser.add_argument('--outputCSVFilename',
                          help="""a filename that will store comparative results to. """,
                          dest='outputCSVFilename', required=False)

    argParser.add_argument('--doUnitTest', action='store_true',
                          help="""Do unit test if given""",
                          dest='doUnitTest', required=False)
    args = argParser.parse_args()

    action=False
    if args.doUnitTest :
        unitTest()
        action=True
    if args.labelMapFilename1 or args.labelMapFilename2:
        print os.path.abspath( args.labelMapFilename1 )
        print os.path.abspath( args.labelMapFilename2 )
        print os.path.abspath( args.outputCSVFilename )
        computeSimilarity( os.path.abspath( args.labelMapFilename1 ), 
                           os.path.abspath( args.labelMapFilename2 ),
                           os.path.abspath( args.outputCSVFilename ) )
        action=True
    if not action:
        print """        ***
Developer: reginakim, Project: MultiAtlasLabelFusionValidation, Lines: 43, Source: computeSimilarity.py


Example 16: setup

def setup(argv):
    print "Configuring environment..."
    import os, os.path
    from utilities.configFileParser import resolveDataSinkOption, parseFile
    from utilities.pathHandling import validatePath
    from utilities import misc
    environment, experiment, pipeline, cluster = parseFile(argv["--ExperimentConfig"], argv["--pe"])
    pipeline['ds_overwrite'] = resolveDataSinkOption(argv, pipeline)

    if cluster is None:
        assert argv["--wf_template_runner"] in misc.WFRUN, \
          "wf_template_runner options for clusters can only be given when the configuration file's CLUSTER option == True"
        os.environ['NSLOTS'] = str(misc.get_cpus(argv["--wf_template_runner"]))
    else:
        load_modules(cluster['modules'])  # Load modules if not already done  ## MODS PATH
        # print os.environ['LOADEDMODULES']
    if environment['virtualenv_dir']:  ## MODS PATH
        activate_this = validatePath(os.path.join(environment['virtualenv_dir'], 'bin', 'activate_this.py'), False, False)
        execfile(activate_this, dict(__file__=activate_this))
    utilities_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'utilities')
    configure_env = validatePath(os.path.join(utilities_path, 'configure_env.py'), False, False)
    # Add the AutoWorkup directory to the PYTHONPATH every time - REQUIRED FOR CLUSTER DISPATCHING
    environment['env']['PYTHONPATH'] =  environment['env']['PYTHONPATH'] + ":" + os.path.dirname(__file__)
    execfile(configure_env, dict(__file__=__file__,
                                 append_os_path=environment['env']['PATH'],
                                 append_sys_path=environment['env']['PYTHONPATH'])
        )  ## MODS PATH
    from nipype import config
    config.enable_debug_mode()
    from utilities.package_check import verify_packages
    verify_packages()
    if 'FREESURFER' in experiment['components']:  ## FREESURFER MODS
        configure_FS = validatePath(os.path.join(utilities_path, 'utilities', 'configure_FS.py'), False, False)
        execfile(configure_FS, dict(FS_VARS=misc.FS_VARS, env=environment['env']))
        print "FREESURFER needs to check for sane environment here!"  # TODO: raise warning, write method, what???
    for key, value in environment['env'].items():
        if key in ['PATH', 'PYTHONPATH'] + misc.FS_VARS:
            pass
        else:
            os.environ[key] = value  # Do not use os.putenv (see Python documentation)
    return environment, experiment, pipeline, cluster
Developer: gang-liu, Project: BRAINSTools, Lines: 41, Source: AutoWorkup.py


Example 17: summarize_motion

def summarize_motion(motion_df_file, motion_file, vols_to_censor, TR):
    from nipype import config, logging
    config.enable_debug_mode()
    logging.update_logging(config)
    from os.path import dirname, basename
    from numpy import asarray, mean, insert, zeros, sort
    from pandas import DataFrame, Series, read_csv

    motion_df = read_csv(motion_df_file, index_col=0)

    motion = asarray(open(motion_file).read().splitlines()).astype(float)
    censvols = asarray(open(vols_to_censor).read().splitlines()).astype(int)
    sec_not_censored = (len(motion)-len(censvols))*TR

    if censvols[0]>0:
        periods_not_censored = insert(censvols,0,0)
    else:
        periods_not_censored = censvols

    if periods_not_censored[-1]<len(motion):
        periods_not_censored = insert(periods_not_censored,len(periods_not_censored),len(motion))

    lengths = zeros(len(periods_not_censored)-1)
    for a in range(0,len(lengths)):
        lengths[a] = periods_not_censored[a+1] - periods_not_censored[a] - 1

    lengths = lengths*TR

    # sort lengths in descending order
    lengths = sort(lengths)[::-1]

    fp = dirname(motion_file)
    subject = basename(fp)

    motion_df.loc[subject] = [mean(motion),max(motion),len(censvols),len(motion),sec_not_censored,lengths]
    motion_df.to_csv(motion_df_file)

    return()
Developer: catcamacho, Project: infant_rest, Lines: 38, Source: preprocessing_classic.py


Example 18: unitWorkUp

def unitWorkUp ( configurationFilename, 
                 doApply = False,
                 baseDir = "."):
    import os
    import sys
    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function
    import ConfigurationParser
    import crossValidationUnit as this
    
    from nipype import config
    config.enable_debug_mode()
    
    workflow = pe.Workflow( name = 'balancedTraning' )
    workflow.base_dir = baseDir
    
    configurationMap = ConfigurationParser.ConfigurationSectionMap( configurationFilename) 
    Options          = configurationMap[ 'Options' ]
    roiDict          = Options[ 'roiBooleanCreator'.lower() ]

    #
    #-------------------------------- filenameGeneratorND is dummy node
    # to create proper probability file location for nipype
    #

    filenameGeneratorND = pe.Node( name      = "filenameGeneratorND",
                                   interface = Function( 
                                      input_names  = ['roiList',
                                                      'gaussianSigma'],
                                      output_names = ['probabilityMapFilename'],
                                      function     = this.getProbabilityMapFilename )
                                 )
    filenameGeneratorND.inputs.roiList = roiDict.keys()

    #
    #--------------------------------  start from generate probability
    #
    probabilityMapGeneratorND = pe.Node( name = "probabilityMapGeneratorND",
                                         interface = Function( 
                                             input_names = ['configurationFilename',
                                                            'probabilityMapDict',
                                                            'gaussianSigma',
                                                            'outputXmlFilename'],
                                             output_names = [ 'probabilityMapDict',
                                                              'outputXmlFilename',
                                                              'outputConfigurationFilename'],
                                             function     = ConfigurationParser.BRAINSCutGenerateProbabilityMap )
                                       )
    
    probabilityMapGeneratorND.inputs.outputXmlFilename = 'netConfiguration.xml'
    probabilityMapGeneratorND.inputs.configurationFilename = configurationFilename 
    probabilityMapGeneratorND.inputs.gaussianSigma = Options[ 'gaussianSigma'.lower() ]
    
    workflow.connect( filenameGeneratorND, 'probabilityMapFilename',
                      probabilityMapGeneratorND, 'probabilityMapDict' )
    
    #
    #--------------------------------  create vectors for each ROI
    #
    configFileND = pe.Node( name = "configFileND",
                            interface = Function(
                                input_names = ['originalFilename',
                                               'editedFilenamePrefix' ],
                                output_names = ['editiedFilenames'],
                                function     = ConfigurationParser.ConfigurationFileEditor ) 
                          )
    
    configFileND.inputs.originalFilename = configurationFilename  
    configFileND.inputs.editedFilenamePrefix = 'ROI'
    workflow.add_nodes( [ configFileND ] )
    
    vectorCreatorND = pe.MapNode( name = "vectorCreatorND", 
                                  interface = Function(
                                      input_names = ['configurationFilename',
                                                     'probabilityMapDict',
                                                     'normalization',
                                                     'outputXmlFilename',
                                                     'outputVectorFilename'],
                                      output_names = ['outputVectorFilename',
                                                      'outputVectorHdrFilename',
                                                      'outputNormalization',
                                                      'outputXmlFilename'],
                                      function     = ConfigurationParser.BRAINSCutCreateVector ),
                                  iterfield = [ 'configurationFilename']
                                )
    vectorCreatorND.inputs.outputVectorFilename = 'oneROIVectorFile.txt'
    vectorCreatorND.inputs.outputXmlFilename = 'oneROICreateVectorNetConfiguration.xml'
    import ast
    normalizationOption = Options[ 'normalization'.lower()]  
    #normalizationOption = ast.literal_eval( Options[ 'normalization'.lower()]  )
    print( """Normalization Option: {str}
           """.format( str=normalizationOption ) )
    vectorCreatorND.iterables = ( 'normalization', normalizationOption )
    #
    #--------------------------------  workflow connections
    #
    workflow.connect( configFileND, 'editiedFilenames',
                      vectorCreatorND, 'configurationFilename' )
    workflow.connect( probabilityMapGeneratorND, 'probabilityMapDict',
                      vectorCreatorND, 'probabilityMapDict' )
#......... (part of the code omitted here) .........
Developer: reginakim, Project: BRAINSStandAlone, Lines: 101, Source: crossValidationUnit.py


Example 19: main

def main(argv=None):
    import argparse
    import ConfigParser
    import csv
    import string

    if argv == None:
        argv = sys.argv

    # Create and parse input arguments
    parser = argparse.ArgumentParser(description='Runs a mini version of BuildTemplateParallel')
    group = parser.add_argument_group('Required')
    group.add_argument('-pe', action="store", dest='processingEnvironment', required=True,
                       help='The name of the processing environment to use from the config file')
    group.add_argument('-wfrun', action="store", dest='wfrun', required=True,
                       help='The name of the workflow running plugin to use')
    group.add_argument('-ExperimentConfig', action="store", dest='ExperimentConfig', required=True,
                       help='The path to the file that describes the entire experiment')
    input_arguments = parser.parse_args()


    expConfig = ConfigParser.ConfigParser()
    expConfig.read(input_arguments.ExperimentConfig)

    # Experiment specific information
    session_db=expConfig.get('EXPERIMENT_DATA','SESSION_DB')
    ExperimentName=expConfig.get('EXPERIMENT_DATA','EXPERIMENTNAME')

    # Platform specific information
    #     Prepend the python search paths
    PYTHON_AUX_PATHS=expConfig.get(input_arguments.processingEnvironment,'PYTHON_AUX_PATHS')
    PYTHON_AUX_PATHS=PYTHON_AUX_PATHS.split(':')
    PYTHON_AUX_PATHS.extend(sys.path)
    sys.path=PYTHON_AUX_PATHS
    #     Prepend the shell environment search paths
    PROGRAM_PATHS=expConfig.get(input_arguments.processingEnvironment,'PROGRAM_PATHS')
    PROGRAM_PATHS=PROGRAM_PATHS.split(':')
    PROGRAM_PATHS.extend(os.environ['PATH'].split(':'))
    os.environ['PATH']=':'.join(PROGRAM_PATHS)
    #    Define platform specific output write paths
    BASEOUTPUTDIR=expConfig.get(input_arguments.processingEnvironment,'BASEOUTPUTDIR')
    ExperimentBaseDirectoryPrefix=os.path.realpath(os.path.join(BASEOUTPUTDIR,ExperimentName))
    ExperimentBaseDirectoryCache=ExperimentBaseDirectoryPrefix+"_CACHE"
    ExperimentBaseDirectoryResults=ExperimentBaseDirectoryPrefix +"_Results"
    if not os.path.exists(ExperimentBaseDirectoryCache):
        os.makedirs(ExperimentBaseDirectoryCache)
    if not os.path.exists(ExperimentBaseDirectoryResults):
        os.makedirs(ExperimentBaseDirectoryResults)

    print os.environ
    #sys.exit(-1)

    CLUSTER_QUEUE=expConfig.get(input_arguments.processingEnvironment,'CLUSTER_QUEUE')

    ## Setup environment for CPU load balancing of ITK based programs.
    import multiprocessing
    total_CPUS=multiprocessing.cpu_count()
    if input_arguments.wfrun == 'helium_all.q':
        pass
    elif input_arguments.wfrun == 'ipl_OSX':
        pass
    elif input_arguments.wfrun == 'local_4':
        os.environ['NSLOTS']="{0}".format(total_CPUS/4)
    elif input_arguments.wfrun == 'local_3':
        os.environ['NSLOTS']="{0}".format(total_CPUS/3)
    elif input_arguments.wfrun == 'local_12':
        os.environ['NSLOTS']="{0}".format(total_CPUS/12)
    elif input_arguments.wfrun == 'local':
        os.environ['NSLOTS']="{0}".format(total_CPUS/1)
    else:
        print "You must specify the run environment type. [helium_all.q,ipl_OSX,local_3,local_4,local_12,local]"
        print input_arguments.wfrun
        sys.exit(-1)

    print "Configuring Pipeline"
    from nipype import config  ## NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    config.enable_debug_mode() ## NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    import buildTemplateParallelDriver ## NOTE:  This needs to occur AFTER the PYTHON_AUX_PATHS has been modified
    btp=buildTemplateParallelDriver.BuildTemplateParallelWorkFlow(
      ExperimentBaseDirectoryCache,
      ExperimentBaseDirectoryResults,
      session_db)
    print "Start Processing"

    ## Create the shell wrapper script for ensuring that all jobs running on remote hosts from SGE
    #  have the same environment as the job submission host.
    JOB_SCRIPT=get_global_sge_script(sys.path,PROGRAM_PATHS)
    print JOB_SCRIPT

    SGEFlavor='SGE'
    if input_arguments.wfrun == 'helium_all.q':
        btp.run(plugin=SGEFlavor,
            plugin_args=dict(template=JOB_SCRIPT,qsub_args="-S /bin/bash -pe smp1 1-4 -l mem_free=4000M -o /dev/null -e /dev/null "+CLUSTER_QUEUE))
    if input_arguments.wfrun == 'helium_all.q_graph':
        SGEFlavor='SGEGraph' #Use the SGEGraph processing
        btp.run(plugin=SGEFlavor,
            plugin_args=dict(template=JOB_SCRIPT,qsub_args="-S /bin/bash -pe smp1 1-4 -l mem_free=4000M -o /dev/null -e /dev/null "+CLUSTER_QUEUE))
    elif input_arguments.wfrun == 'ipl_OSX':
        btp.write_graph()
        print "Running On ipl_OSX"
#......... (part of the code omitted here) .........
Developer: csmendoza, Project: BRAINSStandAlone, Lines: 101, Source: btp_exp.py


Example 20: MasterProcessingController

def MasterProcessingController(argv=None):
    import argparse
    import ConfigParser
    import csv
    import string

    if argv == None:
        argv = sys.argv

    # Create and parse input arguments
    parser = argparse.ArgumentParser(description='Runs a mini version of BRAINSAutoWorkup')
    group = parser.add_argument_group('Required')
    group.add_argument('-pe', action="store", dest='processingEnvironment', required=True,
                       help='The name of the processing environment to use from the config file')
    group.add_argument('-wfrun', action="store", dest='wfrun', required=True,
                       help='The name of the workflow running plugin to use')
    group.add_argument('-subject', action="store", dest='subject', required=True,
                       help='The name of the subject to process')
    group.add_argument('-ExperimentConfig', action="store", dest='ExperimentConfig', required=True,
                       help='The path to the file that describes the entire experiment')
    parser.add_argument('-rewrite_datasinks', action='store_true', default=False,
                        help='Use if the datasinks should be forced rerun.\nDefault: value in configuration file')
    parser.add_argument('--version', action='version', version='%(prog)s 1.0')
    input_arguments = parser.parse_args()

    expConfig = ConfigParser.ConfigParser()
    expConfig.read(input_arguments.ExperimentConfig)

    # Pipeline-specific information
    GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG = expConfig.getboolean('PIPELINE', 'GLOBAL_DATA_SINK_REWRITE')
    GLOBAL_DATA_SINK_REWRITE = setDataSinkRewriteValue(input_arguments.rewrite_datasinks, GLOBAL_DATA_SINK_REWRITE_FROM_CONFIG)

    # Experiment specific information
    subject_data_file = expConfig.get('EXPERIMENT_DATA', 'SESSION_DB')
    ExperimentName = expConfig.get('EXPERIMENT_DATA', 'EXPERIMENTNAME')
    if expConfig.has_option('EXPERIMENT_DATA', 'PREVIOUSEXPERIMENTNAME'):
        PreviousExperimentName = expConfig.get('EXPERIMENT_DATA', 'PREVIOUSEXPERIMENTNAME')
    else:
         PreviousExperimentName = None

    # Platform specific information
    #     Prepend the python search paths
    PYTHON_AUX_PATHS = expConfig.get(input_arguments.processingEnvironment, 'PYTHON_AUX_PATHS')
    PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(':')
    PYTHON_AUX_PATHS.extend(sys.path)
    sys.path = PYTHON_AUX_PATHS
    #####################################################################################
    #     Prepend the shell environment search paths
    PROGRAM_PATHS = expConfig.get(input_arguments.processingEnvironment, 'PROGRAM_PATHS')
    PROGRAM_PATHS = PROGRAM_PATHS.split(':')
    PROGRAM_PATHS.extend(os.environ['PATH'].split(':'))
    PROGRAM_PATHS = [os.path.dirname(__file__)] + PROGRAM_PATHS
    print "Adding directory {0} to PATH...".format(os.path.dirname(__file__))
    os.environ['PATH'] = ':'.join(PROGRAM_PATHS)
    ############################################################### 
#......... (part of the code omitted here) .........