Python config.CONFIG Class Code Examples


This article collects typical usage examples of the CONFIG class from taca.utils.config in Python. If you are wondering what the CONFIG class does, how to use it, or where to find working examples of it, the curated code samples below should help.



Twenty code examples of the CONFIG class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
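
All of these examples import CONFIG as a shared, module-level configuration object and read nested sections out of it with .get(). As a quick orientation before the examples, here is a minimal sketch of the two access patterns they rely on (the 'statusdb', 'mail' and 'storage' keys are taken from the examples below, not from any documented schema):

from taca.utils.config import CONFIG  # populated when TACA loads its configuration file

# Top-level access: returns None when the 'statusdb' section is missing.
couch_info = CONFIG.get('statusdb')

# Safe chaining: the {} default keeps the second .get() from failing
# when the 'mail' section is absent.
mail_recipients = CONFIG.get('mail', {}).get('recipients')

# Fragile chaining, also common in the examples: this raises
# AttributeError when 'storage' is missing, because None has no .get().
data_dirs = CONFIG.get('storage').get('data_dirs')

The form with an explicit default is preferable whenever a section may legitimately be absent from the configuration file.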

Example 1: cleanup_nas

def cleanup_nas(seconds):
    """Will move the finished runs in NASes to nosync directory.

    :param int seconds: Days/hours converted to seconds to consider a run to be old
    """
    couch_info = CONFIG.get('statusdb')
    mail_recipients = CONFIG.get('mail', {}).get('recipients')
    check_demux = CONFIG.get('storage', {}).get('check_demux', False)
    host_name = os.getenv('HOSTNAME', os.uname()[1]).split('.', 1)[0]
    for data_dir in CONFIG.get('storage').get('data_dirs'):
        logger.info('Moving old runs in {}'.format(data_dir))
        with filesystem.chdir(data_dir):
            for run in [r for r in os.listdir(data_dir) if re.match(filesystem.RUN_RE, r)]:
                rta_file = os.path.join(run, finished_run_indicator)
                if os.path.exists(rta_file):
                    if check_demux:
                        if misc.run_is_demuxed(run, couch_info):
                            logger.info('Moving run {} to nosync directory'.format(os.path.basename(run)))
                            shutil.move(run, 'nosync')
                        elif os.stat(rta_file).st_mtime < time.time() - seconds:
                            logger.warn('Run {} is older than given time, but it is not demultiplexed yet'
                                        .format(run))
                            sbt = "Run not demultiplexed - {}".format(run)
                            msg = ("Run '{}' in '{}' is older then given threshold, but seems like it is not "
                                  "yet demultiplexed".format(os.path.join(data_dir, run), host_name))
                            misc.send_mail(sbt, msg, mail_recipients)
                    else:
                        if os.stat(rta_file).st_mtime < time.time() - seconds:
                            logger.info('Moving run {} to nosync directory'.format(os.path.basename(run)))
                            shutil.move(run, 'nosync')
                        else:
                            logger.info('{} file exists but is not older than given time, skipping run {}'
                                        .format(finished_run_indicator, run))
Author: vezzi, Project: TACA, Lines: 33, Source: cleanup.py


Example 2: cleanup_processing

def cleanup_processing(seconds):
    """Cleanup runs in processing server.

    :param int seconds: Days/hours converted to seconds to consider a run to be old
    """
    try:
        #Remove old runs from archiving dirs
        for archive_dir in CONFIG.get('storage').get('archive_dirs').values():
            logger.info('Removing old runs in {}'.format(archive_dir))
            with filesystem.chdir(archive_dir):
                for run in [r for r in os.listdir(archive_dir) if re.match(filesystem.RUN_RE, r)]:
                    rta_file = os.path.join(run, finished_run_indicator)
                    if os.path.exists(rta_file):
                        if os.stat(rta_file).st_mtime < time.time() - seconds:
                            logger.info('Removing run {}'.format(os.path.basename(run)))
                            shutil.rmtree(run)
                        else:
                            logger.info('{} file exists but is not older than given time, skipping run {}'.format(
                                        finished_run_indicator, run))
    except IOError:
        sbj = "Cannot archive old runs in processing server"
        msg = ("Could not find transfer.tsv file, so I cannot decide if I should "
               "archive any run or not.")
        cnt = CONFIG.get('contact', None)
        if not cnt:
            cnt = "{}@localhost".format(getpass.getuser())
        logger.error(msg)
        misc.send_mail(sbj, msg, cnt)
Author: SciLifeLab, Project: TACA, Lines: 28, Source: cleanup.py


Example 3: cleanup_uppmax

def cleanup_uppmax(site, days, dry_run=False):
    """Remove project/run that have been closed more than 'days'
    from the given 'site' on uppmax

    :param str site: site where the cleanup should be performed
    :param int days: number of days to check for closed projects
    """
    days = check_days(site, days, config)
    if not days:
        return
    root_dir = CONFIG.get("cleanup").get(site).get("root")
    deleted_log = CONFIG.get("cleanup").get("deleted_log")
    assert os.path.exists(os.path.join(root_dir, deleted_log)), "Log directory {} doesn't exist in {}".format(
        deleted_log, root_dir
    )
    log_file = os.path.join(root_dir, "{fl}/{fl}.log".format(fl=deleted_log))

    # make a connection for project db #
    pcon = statusdb.ProjectSummaryConnection()
    assert pcon, "Could not connect to project database in StatusDB"

    if site != "archive":
        ## work flow for cleaning up illumina/analysis ##
        projects = [p for p in os.listdir(root_dir) if re.match(filesystem.PROJECT_RE, p)]
        list_to_delete = get_closed_projects(projects, pcon, days)
    else:
        ##work flow for cleaning archive ##
        list_to_delete = []
        archived_in_swestore = filesystem.list_runs_in_swestore(
            path=CONFIG.get("cleanup").get("swestore").get("root"), no_ext=True
        )
        runs = [r for r in os.listdir(root_dir) if re.match(filesystem.RUN_RE, r)]
        with filesystem.chdir(root_dir):
            for run in runs:
                fc_date = run.split("_")[0]
                if misc.days_old(fc_date) > days:
                    if run in archived_in_swestore:
                        list_to_delete.append(run)
                    else:
                        logger.warn(
                            "Run {} is older than {} days but not in " "swestore, so SKIPPING".format(run, days)
                        )

    ## delete and log
    for item in list_to_delete:
        if dry_run:
            logger.info("Will remove {} from {}".format(item, root_dir))
            continue
        try:
            shutil.rmtree(os.path.join(root_dir, item))
            logger.info("Removed project {} from {}".format(item, root_dir))
            with open(log_file, "a") as to_log:
                to_log.write("{}\t{}\n".format(item, datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M")))
        except OSError:
            logger.warn("Could not remove path {} from {}".format(item, root_dir))
            continue
Author: guillermo-carrasco, Project: TACA, Lines: 56, Source: storage.py


Example 4: _archive_run

def _archive_run((run, days, force, compress_only)):  # Python 2 tuple-parameter unpacking: receives a single 4-tuple (see Example 9's pool.map_async)
    """ Archive a specific run to swestore

    :param str run: Run directory
    :param int days: Days to consider a run old
    :param bool force: Force the archiving even if the run is not complete
    :param bool compress_only: Only compress the run without sending it to swestore
    """

    def _send_to_swestore(f, dest, remove=True):
        """ Send file to swestore checking adler32 on destination and eventually
        removing the file from disk

        :param str f: File to send
        :param str dest: Destination directory in Swestore
        :param bool remove: If True, remove original file from source
        """
        if not filesystem.is_in_swestore(f):
            logger.info("Sending {} to swestore".format(f))
            misc.call_external_command("iput -K -P {file} {dest}".format(file=f, dest=dest), with_log_files=True)
            logger.info("Run {} sent correctly and checksum was okay.".format(f))
            if remove:
                logger.info("Removing run".format(f))
                os.remove(f)
        else:
            logger.warn("Run {} is already in Swestore, not sending it again nor removing from the disk".format(f))

    # Create state file to say that the run is being archived
    open("{}.archiving".format(run.split(".")[0]), "w").close()
    if run.endswith("bz2"):
        if os.stat(run).st_mtime < time.time() - (86400 * days):
            _send_to_swestore(run, CONFIG.get("storage").get("irods").get("irodsHome"))
        else:
            logger.info("Run {} is not {} days old yet. Not archiving".format(run, str(days)))
    else:
        rta_file = os.path.join(run, "RTAComplete.txt")
        if not os.path.exists(rta_file) and not force:
            logger.warn(
                (
                    "Run {} doesn't seem to be completed and --force option was "
                    "not enabled, not archiving the run".format(run)
                )
            )
        if force or (os.path.exists(rta_file) and os.stat(rta_file).st_mtime < time.time() - (86400 * days)):
            logger.info("Compressing run {}".format(run))
            # Compress with pbzip2
            misc.call_external_command("tar --use-compress-program=pbzip2 -cf {run}.tar.bz2 {run}".format(run=run))
            logger.info("Run {} successfully compressed! Removing from disk...".format(run))
            shutil.rmtree(run)
            if not compress_only:
                _send_to_swestore("{}.tar.bz2".format(run), CONFIG.get("storage").get("irods").get("irodsHome"))
        else:
            logger.info("Run {} is not completed or is not {} days old yet. Not archiving".format(run, str(days)))
    os.remove("{}.archiving".format(run.split(".")[0]))
Author: guillermo-carrasco, Project: TACA, Lines: 54, Source: storage.py


Example 5: fetch_config_info

 def fetch_config_info(self):
     """Try to fecth required info from the config file. Log and exit if any neccesary info is missing"""
     try:
         self.data_dirs = CONFIG['backup']['data_dirs']
         self.archive_dirs = CONFIG['backup']['archive_dirs']
         self.keys_path = CONFIG['backup']['keys_path']
         self.gpg_receiver = CONFIG['backup']['gpg_receiver']
         self.mail_recipients = CONFIG['mail']['recipients']
         self.check_demux = CONFIG.get('backup', {}).get('check_demux', False)
         self.couch_info = CONFIG.get('statusdb')
     except KeyError as e:
         logger.error("Config file is missing the key {}, make sure it have all required information".format(str(e)))
         raise SystemExit
Author: vezzi, Project: TACA, Lines: 13, Source: backup.py
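
Read together, the keys accessed in fetch_config_info above (plus the statusdb fields used in Example 8 below) imply a configuration shaped roughly like the following. This is a hypothetical minimal sketch inferred from the examples, not the project's documented schema; all paths and addresses are placeholders:

# Hypothetical minimal CONFIG contents implied by fetch_config_info;
# every value here is a placeholder, and the real types may differ.
CONFIG = {
    'backup': {
        'data_dirs': ['/path/to/nas'],
        'archive_dirs': ['/path/to/archive'],
        'keys_path': '/path/to/keys',
        'gpg_receiver': 'ops@example.org',
        'check_demux': True,          # optional: read with .get() and a default
    },
    'mail': {'recipients': 'ops@example.org'},
    'statusdb': {'url': 'statusdb.example.org', 'username': 'user',
                 'password': 'pass', 'port': 5984},
}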


Example 6: _archive_run

def _archive_run((run, seconds, force, compress_only)):  # Python 2 tuple-parameter unpacking: receives a single 4-tuple from pool.map_async
    """ Archive a specific run to swestore

    :param str run: Run directory
    :param int seconds: Days/hours converted to seconds to check
    :param bool force: Force the archiving even if the run is not complete
    :param bool compress_only: Only compress the run without sending it to swestore
    """

    def _send_to_swestore(f, dest, remove=True):
        """ Send file to swestore checking adler32 on destination and eventually
        removing the file from disk

        :param str f: File to send
        :param str dest: Destination directory in Swestore
        :param bool remove: If True, remove original file from source
        """
        if not filesystem.is_in_swestore(f):
            logger.info("Sending {} to swestore".format(f))
            misc.call_external_command('iput -R swestoreArchCacheResc -P {file} {dest}'.format(file=f, dest=dest),
                    with_log_files=True, prefix=f.replace('.tar.bz2',''), log_dir="swestore_logs")
            logger.info('Run {} sent to swestore.'.format(f))
            if remove:
                logger.info('Removing run {}'.format(f))
                os.remove(f)
        else:
            logger.warn('Run {} is already in Swestore, not sending it again nor removing from the disk'.format(f))

    # Create state file to say that the run is being archived
    open("{}.archiving".format(run.split('.')[0]), 'w').close()
    if run.endswith('bz2'):
        if os.stat(run).st_mtime < time.time() - seconds:
            _send_to_swestore(run, CONFIG.get('storage').get('irods').get('irodsHome'))
        else:
            logger.info("Run {} is not older than given time yet. Not archiving".format(run))
    else:
        rta_file = os.path.join(run, finished_run_indicator)
        if not os.path.exists(rta_file) and not force:
            logger.warn(("Run {} doesn't seem to be completed and --force option was "
                      "not enabled, not archiving the run".format(run)))
        if force or (os.path.exists(rta_file) and os.stat(rta_file).st_mtime < time.time() - seconds):
            logger.info("Compressing run {}".format(run))
            # Compress with pbzip2
            misc.call_external_command('tar --use-compress-program=pbzip2 -cf {run}.tar.bz2 {run}'.format(run=run))
            logger.info('Run {} successfully compressed! Removing from disk...'.format(run))
            shutil.rmtree(run)
            if not compress_only:
                _send_to_swestore('{}.tar.bz2'.format(run), CONFIG.get('storage').get('irods').get('irodsHome'))
        else:
            logger.info("Run {} is not completed or is not older than given time yet. Not archiving".format(run))
    os.remove("{}.archiving".format(run.split('.')[0]))
Author: SciLifeLab, Project: TACA, Lines: 51, Source: cleanup.py


Example 7: transfer_run

def transfer_run(run_dir, analysis):
    """ Interface for click to force a transfer a run to uppmax
        :param: string run_dir: the run to tranfer
        :param bool analysis: if trigger or not the analysis
    """
    runObj = get_runObj(run_dir)
    mail_recipients = CONFIG.get('mail', {}).get('recipients')
    if runObj is None:
        # Maybe throw an exception if possible?
        logger.error("Trying to force a transfer of run {} but the sequencer was not recognized.".format(run_dir))
    else:
        runObj.transfer_run(os.path.join("nosync",CONFIG['analysis']['status_dir'], 'transfer.tsv'),
                            analysis, mail_recipients) # do not start analsysis automatically if I force the transfer
Author: SciLifeLab, Project: TACA, Lines: 14, Source: analysis.py


Example 8: fail_run

def fail_run(runid, project):
    """Updates status of specified run or project-run to Failed"""
    username = CONFIG.get('statusdb', {}).get('username')
    password = CONFIG.get('statusdb', {}).get('password')
    url = CONFIG.get('statusdb', {}).get('url')
    port = CONFIG.get('statusdb', {}).get('port')
    status_db_url = "http://{username}:{password}@{url}:{port}".format(username=username, password=password, url=url, port=port)
    logger.info('Connecting to status db: {}:{}'.format(url, port))
    try:
        status_db = couchdb.Server(status_db_url)
    except Exception, e:
        logger.error("Can't connect to status_db: {}".format(status_db_url))
        logger.error(e)
        raise e
Author: SciLifeLab, Project: TACA, Lines: 14, Source: bioinfo_tab.py


Example 9: archive_to_swestore

def archive_to_swestore(days, run=None, max_runs=None, force=False, compress_only=False):
    """Send runs (as archives) in NAS nosync to swestore for backup

    :param int days: number of days to check threshold
    :param str run: specific run to send to swestore
    :param int max_runs: number of runs to be processed simultaneously
    :param bool force: Force the archiving even if the run is not complete
    :param bool compress_only: Compress the run without sending it to swestore
    """
    # If the run is specified in the command line, check that it exists and archive it
    if run:
        base_dir = os.path.dirname(run)
        run = os.path.basename(run)
        if re.match(filesystem.RUN_RE, run):
            # If the parameter is not an absolute path, find the run in the archive_dirs
            if not base_dir:
                for archive_dir in CONFIG.get("storage").get("archive_dirs"):
                    if os.path.exists(os.path.join(archive_dir, run)):
                        base_dir = archive_dir
            if not os.path.exists(os.path.join(base_dir, run)):
                logger.error(
                    (
                        "Run {} not found. Please make sure to specify "
                        "the absolute path or relative path being in "
                        "the correct directory.".format(run)
                    )
                )
            else:
                with filesystem.chdir(base_dir):
                    _archive_run((run, days, force, compress_only))
        else:
            logger.error("The name {} doesn't look like an Illumina run".format(os.path.basename(run)))
    # Otherwise find all runs in every data dir on the nosync partition
    else:
        logger.info("Archiving old runs to SWESTORE")
        for to_send_dir in CONFIG.get("storage").get("archive_dirs"):
            logger.info("Checking {} directory".format(to_send_dir))
            with filesystem.chdir(to_send_dir):
                to_be_archived = [
                    r
                    for r in os.listdir(to_send_dir)
                    if re.match(filesystem.RUN_RE, r) and not os.path.exists("{}.archiving".format(r.split(".")[0]))
                ]
                if to_be_archived:
                    pool = Pool(processes=len(to_be_archived) if not max_runs else max_runs)
                    pool.map_async(_archive_run, ((run, days, force, compress_only) for run in to_be_archived))
                    pool.close()
                    pool.join()
                else:
                    logger.info("No old runs to be archived")
Author: guillermo-carrasco, Project: TACA, Lines: 50, Source: storage.py


Example 10: cleanup_processing

def cleanup_processing(days):
    """Cleanup runs in processing server.

    :param int days: Number of days to consider a run to be old
    """
    transfer_file = os.path.join(CONFIG.get("preprocessing", {}).get("status_dir"), "transfer.tsv")
    if not days:
        days = CONFIG.get("cleanup", {}).get("processing-server", {}).get("days", 10)
    try:
        # Move finished runs to nosync
        for data_dir in CONFIG.get("storage").get("data_dirs"):
            logger.info("Moving old runs in {}".format(data_dir))
            with filesystem.chdir(data_dir):
                for run in [r for r in os.listdir(data_dir) if re.match(filesystem.RUN_RE, r)]:
                    if filesystem.is_in_file(transfer_file, run):
                        logger.info("Moving run {} to nosync directory".format(os.path.basename(run)))
                        shutil.move(run, "nosync")
                    else:
                        logger.info(
                            ("Run {} has not been transferred to the analysis " "server yet, not archiving".format(run))
                        )
        # Remove old runs from archiving dirs
        for archive_dir in CONFIG.get("storage").get("archive_dirs").values():
            logger.info("Removing old runs in {}".format(archive_dir))
            with filesystem.chdir(archive_dir):
                for run in [r for r in os.listdir(archive_dir) if re.match(filesystem.RUN_RE, r)]:
                    rta_file = os.path.join(run, "RTAComplete.txt")
                    if os.path.exists(rta_file):
                        # 1 day == 60*60*24 seconds --> 86400
                        if os.stat(rta_file).st_mtime < time.time() - (86400 * days) and filesystem.is_in_swestore(
                            "{}.tar.bz2".format(run)
                        ):
                            logger.info("Removing run {} to nosync directory".format(os.path.basename(run)))
                            shutil.rmtree(run)
                        else:
                            logger.info(
                                "RTAComplete.txt file exists but is not older than {} day(s), skipping run {}".format(
                                    str(days), run
                                )
                            )

    except IOError:
        sbj = "Cannot archive old runs in processing server"
        msg = "Could not find transfer.tsv file, so I cannot decide if I should " "archive any run or not."
        cnt = CONFIG.get("contact", None)
        if not cnt:
            cnt = "{}@localhost".format(getpass.getuser())
        logger.error(msg)
        misc.send_mail(sbj, msg, cnt)
Author: guillermo-carrasco, Project: TACA, Lines: 49, Source: storage.py


Example 11: trigger_analysis

def trigger_analysis(run_id):
    """ Trigger the analysis of the flowcell in the analysis sever.

    :param str run_id: run/flowcell id
    """
    if not CONFIG.get('analysis', {}).get('analysis_server', {}):
        logger.warn(("No configuration found for remote analysis server. "
                     "Not triggering analysis of {}"
                     .format(os.path.basename(run_id))))
    else:
        url = ("http://{host}:{port}/flowcell_analysis/{dir}"
               .format(host=CONFIG['analysis']['analysis_server']['host'],
                       port=CONFIG['analysis']['analysis_server']['port'],
                       dir=os.path.basename(run_id)))
        params = {'path': CONFIG['analysis']['analysis_server']['sync']['data_archive']}
        try:
            r = requests.get(url, params=params)
            if r.status_code != requests.status_codes.codes.OK:
                logger.warn(("Something went wrong when triggering the "
                             "analysis of {}. Please check the logfile "
                             "and make sure to start the analysis!"
                             .format(os.path.basename(run_id))))
            else:
                logger.info('Analysis of flowcell {} triggered in {}'
                            .format(os.path.basename(run_id),
                                    CONFIG['analysis']['analysis_server']['host']))
                a_file = os.path.join(CONFIG['analysis']['status_dir'], 'analysis.tsv')
                with open(a_file, 'a') as analysis_file:
                    tsv_writer = csv.writer(analysis_file, delimiter='\t')
                    tsv_writer.writerow([os.path.basename(run_id), str(datetime.now())])
        except requests.exceptions.ConnectionError:
            logger.warn(("Something went wrong when triggering the analysis "
                         "of {}. Please check the logfile and make sure to "
                         "start the analysis!".format(os.path.basename(run_id))))
Author: Galithil, Project: TACA, Lines: 34, Source: analysis.py


Example 12: update_cronjob_db

def update_cronjob_db():
    server = platform.node().split(".")[0]
    timestamp = datetime.datetime.now()
    # parse results
    result = _parse_crontab()
    # connect to db
    url = "http://{username}:{password}@{url}:{port}".format(
        url=CONFIG.get("statusdb", {}).get("url"),
        username=CONFIG.get("statusdb", {}).get("username"),
        password=CONFIG.get("statusdb", {}).get("password"),
        port=CONFIG.get("statusdb", {}).get("port"),
    )
    logging.info("Connecting to database: {}".format(CONFIG.get("statusdb", {}).get("url")))
    try:
        couch = couchdb.Server(url)
    except Exception, e:
        logging.error(e.message)
Author: vezzi, Project: TACA, Lines: 17, Source: cronjobs.py


Example 13: __init__

 def __init__(self, projectid=None, sampleid=None, pi_email=None, sensitive=True, hard_stage_only=False, **kwargs):
     super(GrusProjectDeliverer, self).__init__(
         projectid,
         sampleid,
         **kwargs
     )
     self.stagingpathhard = getattr(self, 'stagingpathhard', None)
     if self.stagingpathhard is None:
         raise AttributeError("stagingpathhard is required when delivering to GRUS")
     self.config_snic = CONFIG.get('snic',None)
     if self.config_snic is None:
         raise AttributeError("snic confoguration is needed  delivering to GRUS (snic_api_url, snic_api_user, snic_api_password")
     self.config_statusdb = CONFIG.get('statusdb',None)
     if self.config_statusdb is None:
         raise AttributeError("statusdb configuration is needed  delivering to GRUS (url, username, password, port")
     self.orderportal = CONFIG.get('order_portal',None) # do not need to raise exception here, I have already checked for this and monitoring does not need it
     self.pi_email = pi_email
     self.sensitive = sensitive
     self.hard_stage_only = hard_stage_only
Author: sylvinite, Project: taca-ngi-pipeline, Lines: 19, Source: deliver_grus.py


Example 14: cleanup_swestore

def cleanup_swestore(seconds, dry_run=False):
    """Remove archived runs from swestore

    :param int seconds: Days/hours converted as seconds to check
    """
    seconds = check_default('swestore', seconds, CONFIG)  # 'site' was undefined in this scope; 'swestore' matches the analogous call in Example 15
    if not seconds:
        return
    runs = filesystem.list_runs_in_swestore(path=CONFIG.get('cleanup').get('swestore').get('root'))
    for run in runs:
        date = run.split('_')[0]
        if misc.to_seconds(misc.days_old(date)) > seconds:
            if dry_run:
                logger.info('Will remove file {} from swestore'.format(run))
                continue
            misc.call_external_command('irm -f {}'.format(run))
            logger.info('Removed file {} from swestore'.format(run))
Author: SciLifeLab, Project: TACA, Lines: 17, Source: cleanup.py


Example 15: cleanup_swestore

def cleanup_swestore(days, dry_run=False):
    """Remove archived runs from swestore

    :param int days: Threshold days to check and remove
    """
    days = check_days('swestore', days, config)
    if not days:
        return
    runs = filesystem.list_runs_in_swestore(path=CONFIG.get('cleanup').get('swestore').get('root'))
    for run in runs:
        date = run.split('_')[0]
        if misc.days_old(date) > days:
            if dry_run:
                logger.info('Will remove file {} from swestore'.format(run))
                continue
            misc.call_external_command('irm -f {}'.format(run))
            logger.info('Removed file {} from swestore'.format(run))
Author: eriksjolund, Project: TACA, Lines: 17, Source: storage.py


Example 16: __init__

 def __init__(self, projectid, sampleid, **kwargs):
     """
         :param string projectid: id of project to deliver
         :param string sampleid: id of sample to deliver
         :param bool no_checksum: if True, skip the checksum computation
         :param string hash_algorithm: algorithm to use for calculating 
             file checksums, defaults to sha1
     """
     # override configuration options with options given on the command line
     self.config = CONFIG.get('deliver', {})
     self.config.update(kwargs)
     # set items in the configuration as attributes
     for k, v in self.config.items():
         setattr(self, k, v)
     self.projectid = projectid
     self.sampleid = sampleid
     self.hash_algorithm = getattr(self, 'hash_algorithm', 'sha1')
     self.no_checksum = getattr(self, 'no_checksum', False)
     self.files_to_deliver = getattr(self, 'files_to_deliver', None)
     self.deliverystatuspath = getattr(self, 'deliverystatuspath', None)
     self.stagingpath = getattr(self, 'stagingpath', None)
     self.deliverypath = getattr(self, 'deliverypath', None)
     self.logpath = getattr(self, 'logpath', None)
     self.reportpath = getattr(self, 'reportpath', None)
     self.force = getattr(self, 'force', False)
     self.stage_only = getattr(self, 'stage_only', False)
     self.ignore_analysis_status = getattr(self, 'ignore_analysis_status', False)
     #Fetches a project name, should always be available; but is not a requirement
     try:
         self.projectname = db.project_entry(db.dbcon(), projectid)['name']
     except KeyError:
         pass
     # only set an attribute for uppnexid if it's actually given or in the db
     try:
         getattr(self, 'uppnexid')
     except AttributeError:
         try:
             self.uppnexid = db.project_entry(db.dbcon(), projectid)['uppnex_id']
         except KeyError:
             pass
     # set a custom signal handler to intercept interruptions
     signal.signal(signal.SIGINT, _signal_handler)
     signal.signal(signal.SIGTERM, _signal_handler)
Author: sylvinite, Project: taca-ngi-pipeline, Lines: 43, Source: deliver.py


Example 17: cleanup_nas

def cleanup_nas(days):
    """Will move the finished runs in NASes to nosync directory.

    :param int days: Number of days to consider a run to be old
    """
    for data_dir in CONFIG.get('storage').get('data_dirs'):
        logger.info('Moving old runs in {}'.format(data_dir))
        with filesystem.chdir(data_dir):
            for run in [r for r in os.listdir(data_dir) if re.match(filesystem.RUN_RE, r)]:
                rta_file = os.path.join(run, finished_run_indicator)
                if os.path.exists(rta_file):
                    # 1 day == 60*60*24 seconds --> 86400
                    if os.stat(rta_file).st_mtime < time.time() - (86400 * days):
                        logger.info('Moving run {} to nosync directory'
                                    .format(os.path.basename(run)))
                        shutil.move(run, 'nosync')
                    else:
                        logger.info('{} file exists but is not older than {} day(s), skipping run {}'.format(
                                    finished_run_indicator, str(days), run))
Author: eriksjolund, Project: TACA, Lines: 19, Source: storage.py


Example 18: update_status_db

def update_status_db(data, server_type=None):
    """ Pushed the data to status db,
        data can be from nases or from uppmax
        server_type should be either 'uppmax' or 'nas'
    """
    db_config = CONFIG.get('statusdb')
    if db_config is None:
        logging.error("'statusdb' must be present in the config file!")
        raise RuntimeError("'statusdb' must be present in the config file!")

    server = "http://{username}:{password}@{url}:{port}".format(
        url=db_config['url'],
        username=db_config['username'],
        password=db_config['password'],
        port=db_config['port'])
    try:
        couch = couchdb.Server(server)
    except Exception, e:
        logging.error(e.message)
        raise
Author: SciLifeLab, Project: TACA, Lines: 20, Source: server_status.py


Example 19: create

def create(projects, ngi_config_file, fastq_1, fastq_2):
    #connect to statusdb
    couch_info = CONFIG.get('statusdb')
    if couch_info is None:
        logger.error("No statusdb field in taca configuration file")
        return 1
    if "dev" not in couch_info["url"]:
        logger.error("url for status db is {}, but dev must be specified in this case".format(couch_info["url"]))
    couch=setupServer(couch_info)
    # connect to db and to view
    projectsDB = couch["projects"]
    project_summary = projectsDB.view("project/summary")
    projects_closed_more_than_three_months = {}
    projects_closed_more_than_one_month_less_than_three = {}
    projects_closed_less_than_one_month    = {}
    projects_opened = {}
    current_date =  datetime.datetime.today()
    date_limit_one_year = current_date - relativedelta(months=6) # yes, yes, I know... but this way I am sure all data is in the x_flowcell db
    date_limit_one_month = current_date - relativedelta(months=1)
    date_limit_three_month = current_date - relativedelta(months=3)
    for row in project_summary:
        project_id = row["key"][1]
        project_status = row["key"][0]
        if "application" not in row["value"]:
            continue
        if row["value"]["no_samples"] > 50:
            continue #skip large projects
        application = row["value"]["application"]
        if project_status == "closed":
            if "close_date" in row["value"]:
                close_date = datetime.datetime.strptime(row["value"]["close_date"], '%Y-%m-%d')
                if close_date > date_limit_one_year: #if the project has been closed after the date limit
                    if close_date >= date_limit_one_month:
                        projects_closed_less_than_one_month[project_id] = {"project_name": row["value"]["project_name"],
                                                                            "application": application, "no_samples": row["value"]["no_samples"]}
                    elif close_date < date_limit_one_month and close_date >= date_limit_three_month:
                        projects_closed_more_than_one_month_less_than_three[project_id] = {"project_name": row["value"]["project_name"],
                                                                            "application": application, "no_samples": row["value"]["no_samples"]}
                    elif close_date < date_limit_three_month:
                        projects_closed_more_than_three_months[project_id] = {"project_name": row["value"]["project_name"],
                                                                            "application": application, "no_samples": row["value"]["no_samples"]}
        elif project_status == "open":
            if "lanes_sequenced" in row["value"] and row["value"]["lanes_sequenced"] > 0:
                projects_opened[project_id] =  {"project_name": row["value"]["project_name"],
                                            "application": application, "no_samples": row["value"]["no_samples"]}
        else:
            print "status {}".format(project_status)
    ##now I can parse the x_flowcell db to check what I can and cannot use
    ##we have been using the flowcell_db for less than one year, so old projects might not be present
    whole_genome_projects = int(2*projects/3)
    projects_to_reproduce = []
    select_random_projects(projects_closed_more_than_three_months, whole_genome_projects/4+1, "WG re-seq", projects_to_reproduce, "WGreseq_tot_closed")
    select_random_projects(projects_closed_more_than_one_month_less_than_three, whole_genome_projects/4+1, "WG re-seq", projects_to_reproduce, "WGreseq_closed_clean_no_del")
    select_random_projects(projects_closed_less_than_one_month,whole_genome_projects/4+1, "WG re-seq", projects_to_reproduce, "WGreseq_closed_no_clean")
    select_random_projects(projects_opened, whole_genome_projects/4+1, "WG re-seq", projects_to_reproduce, "WGreseq_open")

    other_projects = int(projects/3)
    select_random_projects(projects_closed_more_than_three_months, other_projects/4+1, "other", projects_to_reproduce, "noWGreseq_tot_closed")
    select_random_projects(projects_closed_more_than_one_month_less_than_three, other_projects/4+1, "other", projects_to_reproduce, "noWGreseq_closed_clean_no_del")
    select_random_projects(projects_closed_less_than_one_month, other_projects/4+1, "other", projects_to_reproduce, "noWGreseq_closed_no_clean")
    select_random_projects(projects_opened, other_projects/4+1, "other", projects_to_reproduce, "noWGreseq_open")

    ### create ngi_pipeline environment
    print "#NGI_CONFIG variable is {}. This variable needs to be in the .bashrc file".format(ngi_config_file)
    print "NGI_CONFIG={}".format(ngi_config_file)
    try:
        ngi_config = conf.load_config(ngi_config_file)
    except IOError as e:
        print "ERROR: {}".format(e.message)
    #now create uppmax env
    paths = create_uppmax_env(ngi_config)


    print "#going to reproduce {} projects (if this number is different from the one you specified.... trust me... do not worry".format(len(projects_to_reproduce))
    ### At this point I scan over x_flowcell and reproduce FCs
    flowcellDB = couch["x_flowcells"]
    reproduced_projects = {}
    for fc_doc in flowcellDB:
        try:
            samplesheet_csv = flowcellDB[fc_doc]["samplesheet_csv"]
        except KeyError:
            continue #parse only FC that have a samplesheet
        #now check if this FC contains one of the projects I need to replicate.
        projects_in_FC = set()
        if "SampleName" in samplesheet_csv[0]:
            projects_in_FC = set([line["SampleName"].split("_")[0] for line in samplesheet_csv])
        else:
            projects_in_FC = set([line["Sample_Name"].split("_")[0] for line in samplesheet_csv])
        found = False
        for project_pair in projects_to_reproduce:
            project = project_pair[0]
            if project in projects_in_FC:
                #this FC needs to be created
                if not found:
                    #I create the FC only the first time I see a project belonging to it
                    create_FC(paths["flowcell_inbox"] , flowcellDB[fc_doc]["RunInfo"]["Id"], samplesheet_csv, fastq_1, fastq_2)
                    found = True
                #but I keep track of all projects-run I need to organise
                if project not in reproduced_projects:
                    reproduced_projects[project] = []
#......... (remaining code omitted) .........
Author: SciLifeLab, Project: TACA, Lines: 101, Source: create_uppmax_like_env.py


Example 20: run_preprocessing

