
Python generaloption.simple_option function code examples


This article collects typical usage examples of the Python function vsc.utils.generaloption.simple_option. If you are wondering how exactly to call simple_option, or are looking for concrete examples of it in use, the curated code examples below may help.



Below are 20 code examples of the simple_option function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python examples.
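
Before the examples, here is a minimal sketch of the pattern they all share. It is not taken from any of the repositories below, and the option names in it are made up for illustration; only the tuple layout and the simple_option / opts.options usage mirror what the examples show. Each entry in the options dictionary maps an option name to a tuple of (help text, type, action, default, optional short flag), and simple_option returns an object whose parsed values live under .options (dashes in option names become underscores).

# Minimal sketch, assuming the option-tuple layout shown in the examples below;
# the option names here are hypothetical.
from vsc.utils.generaloption import simple_option

options = {
    # (help text, type, action, default, optional short flag)
    'jobid': ('identifier of the job to query', None, 'store', None, 'j'),
    'hosts': ('hosts/clusters to check', 'strlist', 'store', []),
    'dry-run': ('do not make any updates whatsoever', None, 'store_true', False),
}

go = simple_option(options, descr="Sketch of a script using simple_option")

# Parsed values are available under go.options; dashes become underscores.
if not go.options.dry_run:
    print "querying job %s on hosts %s" % (go.options.jobid, go.options.hosts)
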

Example 1: main

def main():
    """
    Main script.
    """

    options = {
        "jobid": ("The PBS_JOBID of the job for which we want information", None, "store", None),
        "information": (
            "Comma-separated list of the job info to print. " "Entries of the format input_key:output_key",
            None,
            "store",
            None,
        ),
    }
    opts = simple_option(options)

    if not opts.options.jobid:
        logger.error("jobid is a required option. Bailing.")
        sys.exit(1)

    pquery = PBSQuery()
    current_job = pquery.getjob(opts.options.jobid)

    s = transform_info(current_job, opts.options.information)

    print "\n".join(s)
Developer: wpoely86, project: vsc-jobs, lines of code: 26, source file: qstat_wrapper.py


Example 2: main

def main():
    opts = {
        'github-account': ("GitHub account where repository is located", None, 'store', 'hpcugent', 'a'),
        'github-user': ("GitHub user to use (for authenticated access)", None, 'store', 'boegel', 'u'),
        'repository': ("Repository to use", None, 'store', 'easybuild-easyconfigs', 'r'),
    }
    go = simple_option(go_dict=opts, descr="Script to print overview of pull requests for a GitHub repository")

    pickle_file = None
    if go.args:
        pickle_file = go.args[0]

    prs = fetch_pr_data(pickle_file, go.options.github_user, go.options.github_account, go.options.repository)

    html_file = HTML_FILE % go.options.repository
    print("Generating %s..." % html_file)
    handle = open(html_file, 'w')
    handle.write(HTML_HEADER)
    handle.write(gen_table_header())
    pr_cnt, table_rows, merged_today, last_update = gen_table_rows(prs)
    handle.write(table_rows)
    handle.write(HTML_FOOTER % {
        'merged_today': merged_today,
        'pr_cnt': pr_cnt,
        'repo': '%s/%s' % (go.options.github_account, go.options.repository),
        'timestamp': last_update, #datetime.now().strftime(format='%d %B %Y %H:%M:%S'),
    })
    handle.close()
Developer: Caylo, project: eb-scripts, lines of code: 28, source file: gen_pr_overview_page.py


Example 3: main

def main():

    options = {
        'storage': ('the VSC filesystems that are checked by this script', 'strlist', 'store', []),
        'threshold': ('allowed the time difference between the cached quota and the time of running', None, 'store',
                      DEFAULT_ALLOWED_TIME_THRESHOLD),
        'fileset_prefixes': ('the filesets that we allow for showing QuotaUser', 'strlist', 'store', []),
        'vo': ('provide storage details for the VO you belong to', None, 'store_true', False)
    }
    opts = simple_option(options, config_files=['/etc/quota_information.conf'])

    storage = VscStorage()
    vsc = VSC(False)
    user_name = getpwuid(os.getuid())[0]

    vos = [g.gr_name for g in grp.getgrall()
                     if user_name in g.gr_mem
                     and g.gr_name.startswith('gvo')
                     and g.gr_name != vsc.default_vo]  # default VO has no quota associated with it

    opts.options.vo = opts.options.vo and vos

    now = time.time()

    print_user_quota(opts, storage, user_name, now)

    if opts.options.vo:
        print_vo_quota(opts, storage, vos, now)
Developer: boegel, project: vsc-filesystems, lines of code: 28, source file: show_quota.py


Example 4: main

def main():
    """ Builds a zookeeper tree with ACLS on from a config file"""
    options = {
        'servers':('list of zk servers', 'strlist', 'store', None)
    }
    go = simple_option(options)

    rpasswd, rpath = get_rootinfo(go.configfile_remainder)
    znodes, users = parse_zkconfig(go.configfile_remainder)

    logger.debug("znodes: %s" % znodes)
    logger.debug("users: %s" % users)

    # Connect to zookeeper
    # initial authentication credentials and acl for admin on root level
    acreds = [('digest', 'root:' + rpasswd)]
    root_acl = make_digest_acl('root', rpasswd, all=True)

    # Create kazoo/zookeeper connection with root credentials
    servers = go.options.servers
    zkclient = VscKazooClient(servers, auth_data=acreds)

    # Iterate paths
    for path, attrs in znodes.iteritems():
        logger.debug("path %s attribs %s" % (path, attrs))
        acls = dict((arg, attrs[arg]) for arg in attrs if arg not in ('value', 'ephemeral', 'sequence', 'makepath'))
        acl_list = parse_acls(acls, users, root_acl)
        kwargs = dict((arg, attrs[arg]) for arg in attrs if arg in ('ephemeral', 'sequence', 'makepath'))
        if not zkclient.exists_znode(path):
            zkclient.make_znode(path, value=attrs.get('value', ''), acl=acl_list, **kwargs)
        else:
            logger.warning('node %s already exists' % path)
            zkclient.znode_acls(path, acl_list)

    zkclient.exit()
Developer: kwaegema, project: vsc-zk, lines of code: 35, source file: zkinitree.py


Example 5: main

def main():

    options = {
        'storage': ('the VSC filesystems that are checked by this script', None, 'extend', []),
        'threshold': ('allowed the time difference between the cached quota and the time of running', None, 'store',
                      DEFAULT_ALLOWED_TIME_THRESHOLD),
    }
    opts = simple_option(options, config_files='/etc/quota_information.conf')

    storage = VscStorage()
    user_name = getpwuid(os.getuid())[0]
    now = time.time()

    for storage_name in opts.options.storage:

        mount_point = storage[storage_name].login_mount_point
        path_template = storage.path_templates[storage_name]['user']
        path = os.path.join(mount_point, path_template[0], path_template[1](user_name))

        cache = FileCache(path)
        (timestamp, quota) = cache.load('quota')

        if now - timestamp > opts.options.threshold:
            print "%s: WARNING: no recent quota information (age of data is %d minutes)" % (storage_name,
                                                                                             (now - timestamp) / 60)
        else:
            for (fileset, qi) in quota.quota_map.items():
                # the attribute names on the quota entry (qi) are assumed; this part of the excerpt was garbled
                print "%s: used %d MiB (%d%%) quota %d MiB in fileset %s" % (storage_name,
                                                                             qi.used,
                                                                             qi.used * 100 / qi.soft,
                                                                             qi.soft,
                                                                             fileset)


if __name__ == '__main__':
    main()
Developer: stdweird, project: vsc-filesystems, lines of code: 34, source file: show_quota.py


Example 6: main

def main():
    """ Start a new rsync client (destination or source) in a specified session """
    options = {
        # Zookeeper connection options:
        'servers'     : ('list of zk servers', 'strlist', 'store', None),
        'user'        : ('user with creation rights on zookeeper', None, 'store', 'root', 'u'),
        'passwd'      : ('password for user with creation rights', None, 'store', 'admin', 'p'),
        # Role options, define exactly one of these:
        'source'      : ('rsync source', None, 'store_true', False, 'S'),
        'destination' : ('rsync destination', None, 'store_true', False, 'D'),
        'pathsonly'   : ('Only do a test run of the pathlist building', None, 'store_true', False),
        'state'       : ('Only do the state', None, 'store_true', False),
        # Session options; should be the same on all clients of the session!
        'session'     : ('session name', None, 'store', 'default', 'N'),
        'netcat'      : ('run netcat test instead of rsync', None, 'store_true', False),
        'dryrun'      : ('run rsync in dry run mode', None, 'store_true', False, 'n'),
        'rsyncpath'   : ('rsync basepath', None, 'store', None, 'r'),  # May differ between sources and dests
        # Pathbuilding (Source clients and pathsonly ) specific options:
        'excludere'   : ('Exclude from pathbuilding', None, 'regex', re.compile('/\.snapshots(/.*|$)')),
        'depth'       : ('queue depth', "int", 'store', 3),
        # Source clients options; should be the same on all clients of the session!:
        'delete'      : ('run rsync with --delete', None, 'store_true', False),
        # Individual client options
        'daemon'      : ('daemonize client', None, 'store_true', False),
        'domain'      : ('substitute domain', None, 'store', None),
        'logfile'     : ('Output to logfile', None, 'store', '/tmp/zkrsync/%(session)s-%(rstype)s-%(pid)s.log'),
        'pidfile'     : ('Pidfile template', None, 'store', '/tmp/zkrsync/%(session)s-%(rstype)s-%(pid)s.pid'),
        # Individual Destination client specific options
        'rsyncport'   : ('force port on which rsyncd binds', "int", 'store', None),
        'startport'   : ('offset to look for rsyncd ports', "int", 'store', 4444)
    }

    go = simple_option(options)
    acreds, admin_acl, rstype = zkrsync_parse(go.options)
    if go.options.logfile:
        init_logging(go.options.logfile, go.options.session, rstype)

    kwargs = {
        'session'     : go.options.session,
        'default_acl' : [admin_acl],
        'auth_data'   : acreds,
        'rsyncpath'   : go.options.rsyncpath,
        'netcat'      : go.options.netcat,
        }

    if go.options.daemon:
        pidfile = init_pidfile(go.options.pidfile, go.options.session, rstype)
        zkrsdaemon = ZkrsDaemon(pidfile, rstype, go.options, kwargs)
        zkrsdaemon.start()
    else:
        start_zkrs(rstype, go.options, kwargs)
Developer: kwaegema, project: vsc-zk, lines of code: 51, source file: zkrsync.py


Example 7: main

def main(args):
    """Main script."""

    options = {
        'nagios': ('print out nagios information', None, 'store_true', False, 'n'),
        'nagios_check_filename': ('filename of where the nagios check data is stored', str, 'store', NAGIOS_CHECK_FILENAME),
        'nagios_check_interval_threshold': ('threshold of nagios checks timing out', None, 'store', NAGIOS_CHECK_INTERVAL_THRESHOLD),
        'mail-report': ('mail a report to the hpc-admin list with job list for gracing or inactive users',
                        None, 'store_true', False),
        'ha': ('high-availability master IP address', None, 'store', None),
        'dry-run': ('do not make any updates whatsoever', None, 'store_true', False),
    }
    opts = simple_option(options)

    nagios_reporter = NagiosReporter(NAGIOS_HEADER, NAGIOS_CHECK_FILENAME, NAGIOS_CHECK_INTERVAL_THRESHOLD)

    if opts.options.nagios:
        nagios_reporter.report_and_exit()
        sys.exit(0)  # not reached

    if not proceed_on_ha_service(opts.options.ha):
        logger.warning("Not running on the target host in the HA setup. Stopping.")
        nagios_reporter(NAGIOS_EXIT_WARNING,
                        NagiosResult("Not running on the HA master."))
        sys.exit(NAGIOS_EXIT_WARNING)

    try:
        vsc_config = VscConfiguration()
        LdapQuery(vsc_config)

        grace_users = get_user_with_status('grace')
        inactive_users = get_user_with_status('inactive')

        pbs_query = PBSQuery()

        t = time.ctime()
        jobs = pbs_query.getjobs()  # we just get them all

        removed_queued = remove_queued_jobs(jobs, grace_users, inactive_users, opts.options.dry_run)
        removed_running = remove_running_jobs(jobs, inactive_users, opts.options.dry_run)

        if opts.options.mail_report and not opts.options.dry_run:
            if len(removed_queued) > 0 or len(removed_running) > 0:
                mail_report(t, removed_queued, removed_running)
    except Exception, err:
        logger.exception("Something went wrong: {err}".format(err=err))
        nagios_reporter.cache(NAGIOS_EXIT_CRITICAL,
                              NagiosResult("Script failed, check log file ({logfile})".format(logfile=PBS_CHECK_LOG_FILE)))
        sys.exit(NAGIOS_EXIT_CRITICAL)
Developer: hpcugent, project: master-scripts, lines of code: 49, source file: pbs_check_inactive_user_jobs.py


Example 8: main

def main():
    """Yeah, so, erm. The main function and such."""

    options = {
        "summary": ("Give the summary", None, "store_true", False, "s"),
        "detail": ("Detailed information", None, "store_true", False),
        "virtualorganisation": ("Give VO details if available", None, "store_true", False, "v"),
        "running": ("Display running job information", None, "store_true", False, "r"),
        "idle": ("Display idle job information", None, "store_true", False, "i"),
        "blocked": ("Dispay blocked job information", None, "store_true", False, "b"),
        "hosts": ("Hosts/clusters to check", None, "extend", []),
        "location_environment": (
            "the location for storing the pickle file depending on the cluster",
            str,
            "store",
            "VSC_HOME",
        ),
    }

    opts = simple_option(options, config_files=["/etc/myshowq.conf"])

    if not (opts.options.running or opts.options.idle or opts.options.blocked):
        opts.options.running = True
        opts.options.idle = True
        opts.options.blocked = True

    my_uid = os.geteuid()
    my_name = pwd.getpwuid(my_uid)[0]

    (res, user_map) = readbuffer(
        my_name,
        opts.options.virtualorganisation,
        opts.options.running,
        opts.options.idle,
        opts.options.blocked,
        opts.options.location_environment,
    )

    if not res or len(res) == 0:
        print "no data"
        sys.exit(0)

    if opts.options.summary:
        showsummary(opts.options.hosts, res, user_map, my_name, opts.options.virtualorganisation)
    if opts.options.detail:
        showdetail(opts.options.hosts, res, user_map, my_name, opts.options.virtualorganisation)
Developer: piojo, project: vsc-jobs, lines of code: 46, source file: myshowq.py


Example 9: main

def main():

    options = {
        'jobid': ('Fully qualified identification of the job', None, 'store', None),
        'location_environment': ('the location for storing the pickle file depending on the cluster', str, 'store', 'VSC_SCRATCH_DELCATTY'),
    }
    opts = simple_option(options, config_files=['/etc/mycheckjob.conf'])

    storage = VscStorage()
    user_name = getpwuid(os.getuid())[0]

    mount_point = storage[opts.options.location_environment].login_mount_point
    path_template = storage.path_templates[opts.options.location_environment]['user']
    path = os.path.join(mount_point, path_template[0], path_template[1](user_name), ".checkjob.json.gz")

    checkjob_info = read_cache(path)

    print checkjob_info.display(opts.options.jobid)
Developer: hpcugent, project: vsc-jobs, lines of code: 18, source file: mycheckjob.py


Example 10: main

def main():

    opts = {
        'github-account': ("GitHub account where repository is located", None, 'store', 'hpcugent', 'a'),
        'github-user': ("GitHub user to use (for authenticated access)", None, 'store', 'boegel', 'u'),
        'repository': ("Repository to use", None, 'store', 'easybuild-easyconfigs', 'r'),
    }
    go = simple_option(go_dict=opts, descr="Script to print overview of pull requests for a GitHub repository")

    github_token = fetch_github_token(go.options.github_user)
    github = RestClient(GITHUB_API_URL, username=go.options.github_user, token=github_token, user_agent='eb-pr-overview')

    downloading_msg = "Downloading PR data for %s/%s repo..." % (go.options.github_account, go.options.repository)
    print(downloading_msg)

    prs_data = fetch_prs_data(github, go.options.github_account, go.options.repository, downloading_msg)
    gh_repo = github.repos[go.options.github_account][go.options.repository]
    create_pr_overview(prs_data, gh_repo)
Developer: Caylo, project: eb-scripts, lines of code: 18, source file: pr_overview.py


Example 11: main

def main():
    """Yeah, so, erm. The main function and such."""

    options = {
        "summary": ("Give the summary", None, "store_true", False, 's'),
        "detail": ("Detailed information", None, "store_true", False,),
        "virtualorganisation": ("Give VO details if available", None, "store_true", False, 'v'),
        "running": ("Display running job information", None, "store_true", False, 'r'),
        "idle": ("Display idle job information", None, "store_true", False, 'i'),
        "blocked": ("Dispay blocked job information", None, "store_true", False, 'b'),
        'hosts': ("Hosts/clusters to check", None, 'extend', []),
        'location_environment': ('the location for storing the pickle file depending on the cluster', str, 'store', 'VSC_SCRATCH_DELCATTY'),
    }

    opts = simple_option(options, config_files=['/etc/myshowq.conf'])

    if not (opts.options.running or opts.options.idle or opts.options.blocked):
        opts.options.running = True
        opts.options.idle = True
        opts.options.blocked = True

    storage = VscStorage()
    user_name = getpwuid(os.getuid())[0]
    now = time.time()

    mount_point = storage[opts.options.location_environment].login_mount_point
    path_template = storage.path_templates[opts.options.location_environment]['user']
    path = os.path.join(mount_point, path_template[0], path_template[1](user_name), ".showq.json.gz")

    (res, user_map) = read_cache(user_name,
                                 opts.options.virtualorganisation,
                                 opts.options.running,
                                 opts.options.idle,
                                 opts.options.blocked,
                                 path)

    if not res or len(res) == 0:
        print "no data"
        sys.exit(0)

    if opts.options.summary:
        showsummary(opts.options.hosts, res, user_map, user_name, opts.options.virtualorganisation)
    if opts.options.detail:
        showdetail(opts.options.hosts, res, user_map, user_name, opts.options.virtualorganisation)
Developer: wpoely86, project: vsc-jobs, lines of code: 44, source file: myshowq.py


Example 12: main

def main():
    opts = {
        "github-account": ("GitHub account where repository is located", None, "store", "hpcugent", "a"),
        "github-user": ("GitHub user to use (for authenticated access)", None, "store", "boegel", "u"),
        "repository": ("Repository to use", None, "store", "easybuild-easyconfigs", "r"),
    }
    go = simple_option(go_dict=opts, descr="Script to print overview of pull requests for a GitHub repository")

    pickle_file = None
    if go.args:
        pickle_file = go.args[0]

    prs = fetch_pr_data(pickle_file, go.options.github_user, go.options.github_account, go.options.repository)

    created_ats = [datetime_parser(pr["created_at"].split("T")[0]) for pr in prs]
    closed_ats = [datetime_parser((pr["closed_at"] or "T").split("T")[0] or "ENDNEXTMONTH") for pr in prs]

    print("Plotting...")
    plot_historic_PR_ages(created_ats, closed_ats, go.options.repository)
    plot_open_closed_PRs(created_ats, closed_ats, go.options.repository)
Developer: Caylo, project: eb-scripts, lines of code: 20, source file: plot_pr_stats.py


Example 13: main

def main():

    options = {
        'jobid': ('Fully qualified identification of the job', None, 'store', None),
        'location_environment': ('the location for storing the pickle file depending on the cluster', str, 'store', 'VSC_HOME'),
    }

    opts = simple_option(options, config_files=['/etc/mycheckjob.conf'])

    my_uid = os.geteuid()
    my_name = pwd.getpwuid(my_uid)[0]

    path = checkjob_data_location(my_name, opts.options.location_environment)
    (timeinfo, checkjob) = read_checkjob_data(path)

    age = time.time() - timeinfo

    if age > MAXIMAL_AGE:
        print "Job information is older than %d minutes (%f hours). Information may not be relevant any longer" % (age / 60, age / 60.0 / 60.0)

    print checkjob.display(opts.options.jobid)
Developer: piojo, project: vsc-jobs, lines of code: 21, source file: mycheckjob.py


Example 14: main

def main():
    """Main function"""
    options = {
        'nagios_check_filename': ('filename of where the nagios check data is stored', str, 'store', NAGIOS_CHECK_FILENAME),
        'nagios_check_interval_threshold': ('threshold of nagios checks timing out', None, 'store', NAGIOS_CHECK_INTERVAL_THRESHOLD),
        'hosts': ('the hosts/clusters that should be contacted for job information', None, 'extend', []),
        'location': ('the location for storing the pickle file: gengar, muk', str, 'store', 'gengar'),
        'ha': ('high-availability master IP address', None, 'store', None),
        'dry-run': ('do not make any updates whatsoever', None, 'store_true', False),
    }

    opts = simple_option(options)

    nag = SimpleNagios(_cache=NAGIOS_CHECK_FILENAME)

    if opts.options.ha and not proceed_on_ha_service(opts.options.ha):
        _log.info("Not running on the target host in the HA setup. Stopping.")
        nag.ok("Not running on the HA master.")
    else:
        # parse config file
        clusters = {}
        for host in opts.options.hosts:
            master = opts.configfile_parser.get(host, "master")
            showq_path = opts.configfile_parser.get(host, "showq_path")
            mjobctl_path = opts.configfile_parser.get(host, "mjobctl_path")
            clusters[host] = {
                'master': master,
                'spath': showq_path,
                'mpath': mjobctl_path,
            }

        # process the new and previous data
        released_jobids, stats = process_hold(clusters, dry_run=opts.options.dry_run)

        # nagios state
        stats.update(RELEASEJOB_LIMITS)
        stats['message'] = "released %s jobs in hold" % len(released_jobids)
        nag._eval_and_exit(**stats)

    _log.info("Cached nagios state: %s %s" % (nag._final_state[0][1], nag._final_state[1]))
Developer: hpcugent, project: master-scripts, lines of code: 40, source file: release_jobholds.py


Example 15: main

def main():

    options = {
        "jobid": ("Fully qualified identification of the job", None, "store", None),
        "location_environment": (
            "the location for storing the pickle file depending on the cluster",
            str,
            "store",
            "VSC_SCRATCH_DELCATTY",
        ),
    }
    opts = simple_option(options, config_files=["/etc/mycheckjob.conf"])

    storage = VscStorage()
    user_name = getpwuid(os.getuid())[0]

    mount_point = storage[opts.options.location_environment].login_mount_point
    path_template = storage.path_templates[opts.options.location_environment]["user"]
    path = os.path.join(mount_point, path_template[0], path_template[1](user_name), ".checkjob.json.gz")

    checkjob_info = read_cache(path)

    print checkjob_info.display(opts.options.jobid)
Developer: wpoely86, project: vsc-jobs, lines of code: 23, source file: mycheckjob.py


Example 16: main

def main():

    opts = {
        'dry-run': ("Dry run, don't actually post/push/merge anything", None, 'store_true', False, 'x'),
        'force': ("Use force to execute the specified action", None, 'store_true', False, 'f'),
        'github-account': ("GitHub account where repository is located", None, 'store', 'hpcugent', 'a'),
        'github-user': ("GitHub user to use (for authenticated access)", None, 'store', 'boegel', 'u'),
        'repository': ("Repository to use", None, 'store', 'easybuild-easyconfigs', 'r'),
        # actions
        'comment': ("Post a comment in the pull request", None, 'store', None, 'C'),
        'merge': ("Merge the pull request", None, 'store_true', False, 'M'),
        'review': ("Review the pull request", None, 'store_true', False, 'R'),
        'test': ("Submit job to upload test report", None, 'store_or_None', None, 'T'),
    }

    actions = ['comment', 'merge', 'review', 'test']

    go = simple_option(go_dict=opts, descr="Script to print overview of pull requests for a GitHub repository")

    # determine which action should be taken
    selected_action = None
    for action in sorted(actions):
        action_value = getattr(go.options, action)
        if isinstance(action_value, bool):
            if action_value:
                selected_action = (action, action_value)
                break
        elif action_value is not None:
            selected_action = (action, action_value)
            break  # FIXME: support multiple actions, loop over them (e.g. -C :jok,lgtm -T)

    if selected_action is None:
        avail_actions = ', '.join(["%s (-%s)" % (a, a[0].upper()) for a in sorted(actions)])
        error("No action specified, pick one: %s" % avail_actions)
    else:
        info("Selected action: %s" % selected_action[0])

    # prepare using GitHub API
    global DRY_RUN
    DRY_RUN = go.options.dry_run
    force = go.options.force
    github_account = go.options.github_account
    github_user = go.options.github_user
    repository = go.options.repository

    github_token = fetch_github_token(github_user)
    github = RestClient(GITHUB_API_URL, username=github_user, token=github_token, user_agent='eb-pr-check')

    if len(go.args) == 1:
        pr = go.args[0]
    else:
        usage()

    print "Fetching PR information ",
    print "(using GitHub token for user '%s': %s)... " % (github_user, ('no', 'yes')[bool(github_token)]),
    sys.stdout.flush()
    pr_data = fetch_pr_data(github, github_account, repository, pr)
    print ''

    #print_raw_pr_info(pr_data)

    print_pr_summary(pr_data)

    if selected_action[0] == 'comment':
        comment(github, github_user, repository, pr_data, selected_action[1])
    elif selected_action[0] == 'merge':
        merge(github, github_user, github_account, repository, pr_data, force=force)
    elif selected_action[0] == 'review':
        review(pr_data)
    elif selected_action[0] == 'test':
        test(pr_data, selected_action[1])
    else:
        error("Handling action '%s' not implemented yet" % selected_action[0])
Developer: Caylo, project: eb-scripts, lines of code: 73, source file: pr_check.py


Example 17: main

def main():
    """Main script"""

    options = {
        'nagios': ('print out nagios information', None, 'store_true', False, 'n'),
        'nagios-check-filename': ('filename of where the nagios check data is stored', str, 'store', NAGIOS_CHECK_FILENAME),
        'nagios-check-interval-threshold': ('threshold of nagios checks timing out', None, 'store', NAGIOS_CHECK_INTERVAL_THRESHOLD),
        'storage': ('the VSC filesystems that are checked by this script', None, 'extend', []),
        'dry-run': ('do not make any updates whatsoever', None, 'store_true', False),
    }
    opts = simple_option(options)

    logger.info('started GPFS quota check run.')

    nagios_reporter = NagiosReporter(NAGIOS_HEADER,
                                     opts.options.nagios_check_filename,
                                     opts.options.nagios_check_interval_threshold)

    if opts.options.nagios:
        nagios_reporter.report_and_exit()
        sys.exit(0)  # not reached

    lockfile = TimestampedPidLockfile(QUOTA_CHECK_LOCK_FILE)
    lock_or_bork(lockfile, nagios_reporter)

    try:
        user_id_map = map_uids_to_names() # is this really necessary?
        LdapQuery(VscConfiguration())
        gpfs = GpfsOperations()
        filesystems = gpfs.list_filesystems().keys()
        logger.debug("Found the following GPFS filesystems: %s" % (filesystems))

        filesets = gpfs.list_filesets()
        logger.debug("Found the following GPFS filesets: %s" % (filesets))

        quota = gpfs.list_quota()

        for storage in opts.options.storage:

            logger.info("Processing quota for storage %s" % (storage))
            filesystem = opts.configfile_parser.get(storage, 'filesystem')

            if filesystem not in filesystems:
                logger.error("Non-existant filesystem %s" % (filesystem))
                continue

            if filesystem not in quota.keys():
                logger.error("No quota defined for storage %s [%s]" % (storage, filesystem))
                continue

            quota_storage_map = get_mmrepquota_maps(quota[filesystem], storage,filesystem, filesets)

            exceeding_filesets = process_fileset_quota(gpfs, storage, filesystem, quota_storage_map['FILESET'])
            exceeding_users = process_user_quota(gpfs, storage, filesystem, quota_storage_map['USR'], user_id_map)

            logger.warning("storage %s found %d filesets that are exceeding their quota: %s" % (storage,
                                                                                                len(exceeding_filesets),
                                                                                                exceeding_filesets))
            logger.warning("storage %s found %d users who are exceeding their quota: %s" % (storage,
                                                                                            len(exceeding_users),
                                                                                            exceeding_users))

            notify_exceeding_filesets(gpfs=gpfs,
                                      storage=storage,
                                      filesystem=filesystem,
                                      exceeding_items=exceeding_filesets,
                                      dry_run=opts.options.dry_run)
            notify_exceeding_users(gpfs=gpfs,
                                   storage=storage,
                                   filesystem=filesystem,
                                   exceeding_items=exceeding_users,
                                   dry_run=opts.options.dry_run)

        sys.exit(1)

    except Exception, err:
        logger.exception("critical exception caught: %s" % (err))
        if not opts.options.dry_run:
            nagios_reporter.cache(NAGIOS_EXIT_CRITICAL, NagiosResult("CRITICAL script failed - %s" % (err.message)))
        if not opts.options.dry_run:
            lockfile.release()
        sys.exit(1)
Developer: itkovian, project: master-scripts, lines of code: 82, source file: dquota.py


Example 18: main

def main():
    """ Start a new rsync client (destination or source) in a specified session """
    options = {
        # Zookeeper connection options:
        'servers'     : ('list of zk servers', 'strlist', 'store', None),
        'user'        : ('user with creation rights on zookeeper', None, 'store', 'root', 'u'),
        'passwd'      : ('password for user with creation rights', None, 'store', 'admin', 'p'),
        # Role options, define exactly one of these:
        'source'      : ('rsync source', None, 'store_true', False, 'S'),
        'destination' : ('rsync destination', None, 'store_true', False, 'D'),
        'pathsonly'   : ('Only do a test run of the pathlist building', None, 'store_true', False),
        'state'       : ('Only do the state', None, 'store_true', False),
        # Session options; should be the same on all clients of the session!
        'session'     : ('session name', None, 'store', 'default', 'N'),
        'netcat'      : ('run netcat test instead of rsync', None, 'store_true', False),
        'dryrun'      : ('run rsync in dry run mode', None, 'store_true', False, 'n'),
        'rsyncpath'   : ('rsync basepath', None, 'store', None, 'r'),  # May differ between sources and dests
        # Pathbuilding (Source clients and pathsonly ) specific options:
        'excludere'   : ('Exclude from pathbuilding', None, 'regex', re.compile('/\.snapshots(/.*|$)')),
        'depth'       : ('queue depth', "int", 'store', 4),
        # Source clients options; should be the same on all clients of the session!:
        'delete'      : ('run rsync with --delete', None, 'store_true', False),
        # Individual client options
        'domain'      : ('substitute domain', None, 'store', None),
        'logfile'     : ('Output to logfile', None, 'store', '/tmp/zkrsync/%(session)s-%(rstype)s-%(pid)s.log'),
        # Individual Destination client specific options
        'rsyncport'   : ('force port on which rsyncd binds', "int", 'store', None),
        'startport'   : ('offset to look for rsyncd ports', "int", 'store', 4444)
    }

    go = simple_option(options)
    acreds, admin_acl, rstype = zkrsync_parse(go.options)

    if go.options.logfile:
        logfile = go.options.logfile % {
            'session': go.options.session,
            'rstype': rstype,
            'pid': str(os.getpid())
        }
        logdir = os.path.dirname(logfile)
        if logdir:
            if not os.path.exists(logdir):
                os.makedirs(logdir)
            os.chmod(logdir, stat.S_IRWXU)

        fancylogger.logToFile(logfile)
        logger.debug('Logging to file %s:' % logfile)

    kwargs = {
        'session'     : go.options.session,
        'default_acl' : [admin_acl],
        'auth_data'   : acreds,
        'rsyncpath'   : go.options.rsyncpath,
        'netcat'      : go.options.netcat,
        }
    if go.options.state:
        rsyncP = RsyncSource(go.options.servers, **kwargs)
        logger.info('Progress: %s of %s paths remaining' % (rsyncP.len_paths(), rsyncP.paths_total))
        rsyncP.exit()
        sys.exit(0)

    elif go.options.pathsonly:
        kwargs['rsyncdepth'] = go.options.depth
        kwargs['excludere'] = go.options.excludere
        rsyncP = RsyncSource(go.options.servers, **kwargs)
        locked = rsyncP.acq_lock()
        if locked:
            starttime = time.time()
            rsyncP.build_pathqueue()
            endtime = time.time()
            timing = endtime - starttime
            pathqueue = rsyncP.path_queue
            logger.info('Building with depth %i took %f seconds walltime. there are %i paths in the Queue'
                         % (go.options.depth, timing, len(pathqueue)))
            rsyncP.delete(pathqueue.path, recursive=True)
            rsyncP.release_lock()
        else:
            logger.error('There is already a lock on the pathtree of this session')

        rsyncP.exit()
        sys.exit(0)

    elif rstype == CL_DEST:
        # Start zookeeper connection and rsync daemon
        kwargs['rsyncport'] = go.options.rsyncport
        kwargs['startport'] = go.options.startport
        kwargs['domain'] = go.options.domain
        rsyncD = RsyncDestination(go.options.servers, **kwargs)
        rsyncD.run()

        logger.debug('%s Ready' % rsyncD.get_whoami())
        rsyncD.exit()
        sys.exit(0)

    elif rstype == CL_SOURCE:
        # Start zookeeper connections
        kwargs['rsyncdepth'] = go.options.depth
        kwargs['dryrun'] = go.options.dryrun
        kwargs['delete'] = go.options.delete
        kwargs['excludere'] = go.options.excludere
#.........part of the code is omitted here.........
Developer: stdweird, project: vsc-zk, lines of code: 101, source file: zkrsync.py


Example 19: main

def main():
    # Collect all info

    # Note: debug option is provided by generaloption
    # Note: other settings, e.g., for each cluster, will be obtained from the configuration file
    options = {
        "nagios": ("print out nagios information", None, "store_true", False, "n"),
        "nagios_check_filename": (
            "filename of where the nagios check data is stored",
            str,
            "store",
            NAGIOS_CHECK_FILENAME,
        ),
        "nagios_check_interval_threshold": (
            "threshold of nagios checks timing out",
            None,
            "store",
            NAGIOS_CHECK_INTERVAL_THRESHOLD,
        ),
        "hosts": ("the hosts/clusters that should be contacted for job information", None, "extend", []),
        "location": ("the location for storing the pickle file: home, scratch", str, "store", "home"),
        "ha": ("high-availability master IP address", None, "store", None),
        "dry-run": ("do not make any updates whatsoever", None, "store_true", False),
    }

    opts = simple_option(options)

    if opts.options.debug:
        fancylogger.setLogLevelDebug()

    nagios_reporter = NagiosReporter(
        NAGIOS_HEADER, opts.options.nagios_check_filename, opts.options.nagios_check_interval_threshold
    )
    if opts.options.nagios:
        logger.debug("Producing Nagios report and exiting.")
        nagios_reporter.report_and_exit()
        sys.exit(0)  # not reached

    if not proceed_on_ha_service(opts.options.ha):
        logger.warning("Not running on the target host in the HA setup. Stopping.")
        nagios_reporter.cache(NAGIOS_EXIT_WARNING, NagiosResult("Not running on the HA master."))
        sys.exit(NAGIOS_EXIT_WARNING)

    lockfile = TimestampedPidLockfile(DCHECKJOB_LOCK_FILE)
    lock_or_bork(lockfile, nagios_reporter)

    logger.info("Starting dcheckjob")

    LdapQuery(VscConfiguration())

    clusters = {}
    for host in opts.options.hosts:
        master = opts.configfile_parser.get(host, "master")
        checkjob_path = opts.configfile_parser.get(host, "checkjob_path")
        clusters[host] = {"master": master, "path": checkjob_path}

    checkjob = Checkjob(clusters, cache_pickle=True, dry_run=True)

    (job_information, reported_hosts, failed_hosts) = checkjob.get_moab_command_information()
    timeinfo = time.time()

    active_users = job_information.keys()

    logger.debug("Active users: %s" % (active_users))
    logger.debug("Checkjob information: %s" % (job_information))

    nagios_user_count = 0
    nagios_no_store = 0

    for user in active_users:
        if not opts.options.dry_run:
            try:
                (path, store) = get_pickle_path(opts.options.location, user)
                user_queue_information = CheckjobInfo({user: job_information[user]})
                store(user, path, (timeinfo, user_queue_information))
                nagios_user_count += 1
            except (UserStorageError, FileStoreError, FileMoveError), _:
                logger.error("Could not store pickle file for user %s" % (user))
                nagios_no_store += 1
        else:
            logger.info(
                "Dry run, not actually storing data for user %s at path %s"
                % (user, get_pickle_path(opts.options.location, user)[0])
            )
            logger.debug("Dry run, queue information for user %s is %s" %  
