
Python utils.whataremyips Function Code Examples


This article collects typical usage examples of the swift.common.utils.whataremyips function in Python. If you are wondering what whataremyips does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following presents 20 code examples of the whataremyips function, ordered by popularity by default.
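Before the examples, a minimal sketch of the pattern nearly all of them share may be useful: call whataremyips() to enumerate this host's addresses, then match those addresses against the devices in a ring. The sketch is illustrative only; the ring directory, the port value, and the bind-IP behaviour noted in the comments are assumptions that must be adapted to your own Swift installation.

from swift.common.ring import Ring
from swift.common.utils import whataremyips

# All addresses bound to this host's interfaces. Several examples below pass
# a configured bind IP instead (e.g. whataremyips(self.bind_ip)); in recent
# Swift releases a concrete bind IP is returned as-is without enumerating
# interfaces (assumed behaviour, check your Swift version).
my_ips = whataremyips()

# Load the object ring and keep only the devices served by this host.
# 6200 is an assumed object-server port; use your configured bind_port.
object_ring = Ring('/etc/swift', ring_name='object')
local_devs = [dev for dev in object_ring.devs
              if dev and dev['ip'] in my_ips and dev['port'] == 6200]
for dev in local_devs:
    print(dev['device'], dev['ip'], dev['port'])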

Example 1: collect_jobs

 def collect_jobs(self):
     """
     Returns a sorted list of jobs (dictionaries) that specify the
     partitions, nodes, etc to be rsynced.
     """
     jobs = []
     ips = whataremyips()
     for local_dev in [dev for dev in self.object_ring.devs
             if dev and dev['ip'] in ips and dev['port'] == self.port]:
         dev_path = join(self.devices_dir, local_dev['device'])
         obj_path = join(dev_path, 'objects')
         tmp_path = join(dev_path, 'tmp')
         if self.mount_check and not os.path.ismount(dev_path):
             self.logger.warn(_('%s is not mounted'), local_dev['device'])
             continue
         unlink_older_than(tmp_path, time.time() - self.reclaim_age)
         if not os.path.exists(obj_path):
             continue
         for partition in os.listdir(obj_path):
             try:
                 nodes = [node for node in
                     self.object_ring.get_part_nodes(int(partition))
                          if node['id'] != local_dev['id']]
                 jobs.append(dict(path=join(obj_path, partition),
                     nodes=nodes,
                     delete=len(nodes) > self.object_ring.replica_count - 1,
                     partition=partition))
             except ValueError:
                 continue
     random.shuffle(jobs)
     # Partitions that need to be deleted take priority
     jobs.sort(key=lambda job: not job['delete'])
     self.job_count = len(jobs)
     return jobs
Author: Willtech | Project: swift | Lines: 34 | Source: replicator.py


Example 2: run_once

 def run_once(self, *args, **kwargs):
     """Run a replication pass once."""
     self._zero_stats()
     dirs = []
     ips = whataremyips()
     if not ips:
         self.logger.error(_('ERROR Failed to get my own IPs?'))
         return
     for node in self.ring.devs:
         if node and node['ip'] in ips and node['port'] == self.port:
             if self.mount_check and not os.path.ismount(
                     os.path.join(self.root, node['device'])):
                 self.logger.warn(
                     _('Skipping %(device)s as it is not mounted') % node)
                 continue
             unlink_older_than(
                 os.path.join(self.root, node['device'], 'tmp'),
                 time.time() - self.reclaim_age)
             datadir = os.path.join(self.root, node['device'], self.datadir)
             if os.path.isdir(datadir):
                 dirs.append((datadir, node['id']))
     self.logger.info(_('Beginning replication run'))
     for part, object_file, node_id in self.roundrobin_datadirs(dirs):
         self.cpool.spawn_n(
             self._replicate_object, part, object_file, node_id)
     self.cpool.waitall()
     self.logger.info(_('Replication run OVER'))
     self._report_stats()
Author: VictorLowther | Project: swift | Lines: 28 | Source: db_replicator.py


Example 3: __init__

 def __init__(self, conf, logger=None):
     self.conf = conf
     self.logger = logger or get_logger(conf, log_route='account-reaper')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.interval = int(conf.get('interval', 3600))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.account_ring = None
     self.container_ring = None
     self.object_ring = None
     self.node_timeout = float(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0'))
     self.bind_port = int(conf.get('bind_port', 6202))
     self.concurrency = int(conf.get('concurrency', 25))
     self.container_concurrency = self.object_concurrency = \
         sqrt(self.concurrency)
     self.container_pool = GreenPool(size=self.container_concurrency)
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
     self.delay_reaping = int(conf.get('delay_reaping') or 0)
     reap_warn_after = float(conf.get('reap_warn_after') or 86400 * 30)
     self.reap_not_done_after = reap_warn_after + self.delay_reaping
     self.start_time = time()
     self.reset_stats()
Author: jgmerritt | Project: swift | Lines: 25 | Source: reaper.py


Example 4: collect_jobs

    def collect_jobs(self, override_devices=None, override_partitions=None,
                     override_policies=None):
        """
        Returns a sorted list of jobs (dictionaries) that specify the
        partitions, nodes, etc to be rsynced.

        :param override_devices: if set, only jobs on these devices
            will be returned
        :param override_partitions: if set, only jobs on these partitions
            will be returned
        :param override_policies: if set, only jobs in these storage
            policies will be returned
        """
        jobs = []
        ips = whataremyips(self.bind_ip)
        for policy in POLICIES:
            if policy.policy_type == REPL_POLICY:
                if (override_policies is not None and
                        str(policy.idx) not in override_policies):
                    continue
                # ensure rings are loaded for policy
                self.load_object_ring(policy)
                jobs += self.build_replication_jobs(
                    policy, ips, override_devices=override_devices,
                    override_partitions=override_partitions)
        random.shuffle(jobs)
        if self.handoffs_first:
            # Move the handoff parts to the front of the list
            jobs.sort(key=lambda job: not job['delete'])
        self.job_count = len(jobs)
        return jobs
Author: heemanshu | Project: swift_liberty | Lines: 31 | Source: replicator.py


Example 5: collect_jobs

    def collect_jobs(self, override_devices=None, override_partitions=None,
                     override_policies=None):
        """
        Returns a sorted list of jobs (dictionaries) that specify the
        partitions, nodes, etc to be rsynced.

        :param override_devices: if set, only jobs on these devices
            will be returned
        :param override_partitions: if set, only jobs on these partitions
            will be returned
        :param override_policies: if set, only jobs in these storage
            policies will be returned
        """
        jobs = []
        ips = whataremyips()
        for policy in POLICIES:
            if (override_policies is not None
                    and str(policy.idx) not in override_policies):
                continue
            # may need to branch here for future policy types
            jobs += self.process_repl(policy, ips,
                                      override_devices=override_devices,
                                      override_partitions=override_partitions)
        random.shuffle(jobs)
        if self.handoffs_first:
            # Move the handoff parts to the front of the list
            jobs.sort(key=lambda job: not job['delete'])
        self.job_count = len(jobs)
        return jobs
Author: gayana06 | Project: Thesis | Lines: 29 | Source: replicator.py


Example 6: __init__

 def __init__(self, conf, container_ring=None, object_ring=None):
     #: The dict of configuration values from the [container-sync] section
     #: of the container-server.conf.
     self.conf = conf
     #: Logger to use for container-sync log lines.
     self.logger = get_logger(conf, log_route='container-sync')
     #: Path to the local device mount points.
     self.devices = conf.get('devices', '/srv/node')
     #: Indicates whether mount points should be verified as actual mount
     #: points (normally true, false for tests and SAIO).
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     #: Minimum time between full scans. This is to keep the daemon from
     #: running wild on near empty systems.
     self.interval = int(conf.get('interval', 300))
     #: Maximum amount of time to spend syncing a container before moving on
     #: to the next one. If a container sync hasn't finished in this time,
     #: it'll just be resumed next scan.
     self.container_time = int(conf.get('container_time', 60))
     #: ContainerSyncCluster instance for validating sync-to values.
     self.realms_conf = ContainerSyncRealms(
         os.path.join(
             conf.get('swift_dir', '/etc/swift'),
             'container-sync-realms.conf'),
         self.logger)
     #: The list of hosts we're allowed to send syncs to. This can be
     #: overridden by data in self.realms_conf
     self.allowed_sync_hosts = [
         h.strip()
         for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
         if h.strip()]
     self.http_proxies = [
         a.strip()
         for a in conf.get('sync_proxy', '').split(',')
         if a.strip()]
     #: Number of containers with sync turned on that were successfully
     #: synced.
     self.container_syncs = 0
     #: Number of successful DELETEs triggered.
     self.container_deletes = 0
     #: Number of successful PUTs triggered.
     self.container_puts = 0
     #: Number of containers that didn't have sync turned on.
     self.container_skips = 0
     #: Number of containers that had a failure of some type.
     self.container_failures = 0
     #: Time of last stats report.
     self.reported = time()
     swift_dir = conf.get('swift_dir', '/etc/swift')
     #: swift.common.ring.Ring for locating containers.
     self.container_ring = container_ring or Ring(swift_dir,
                                                  ring_name='container')
     #: swift.common.ring.Ring for locating objects.
     self.object_ring = object_ring or Ring(swift_dir, ring_name='object')
     self._myips = whataremyips()
     self._myport = int(conf.get('bind_port', 6001))
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
Author: 10389030 | Project: swift | Lines: 57 | Source: sync.py


Example 7: _get_my_replication_ips

 def _get_my_replication_ips(self):
     my_replication_ips = set()
     ips = whataremyips()
     for policy in POLICIES:
         self.load_object_ring(policy)
         for local_dev in [dev for dev in policy.object_ring.devs
                           if dev and dev['replication_ip'] in ips and
                           dev['replication_port'] == self.port]:
             my_replication_ips.add(local_dev['replication_ip'])
     return list(my_replication_ips)
Author: chenzhongtao | Project: swift | Lines: 10 | Source: replicator.py
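Example 7 above matches replication_ip and replication_port by hand. The same check can be written with swift.common.utils.is_local_device, which Examples 8 and 17 below use; a small hedged sketch of that variant follows, where the port constant is a placeholder for the daemon's configured port.

from swift.common.ring import Ring
from swift.common.utils import whataremyips, is_local_device

ips = whataremyips()
replication_port = 6200  # placeholder; use the replicator's configured port
object_ring = Ring('/etc/swift', ring_name='object')

# Collect the replication IPs of every ring device that lives on this host.
my_replication_ips = sorted({
    dev['replication_ip'] for dev in object_ring.devs
    if dev and is_local_device(ips, replication_port,
                               dev['replication_ip'],
                               dev['replication_port'])})
print(my_replication_ips)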


Example 8: collect_parts

    def collect_parts(self, override_devices=None, override_partitions=None):
        """
        Helper for yielding partitions in the top level reconstructor
        """
        override_devices = override_devices or []
        override_partitions = override_partitions or []
        ips = whataremyips(self.bind_ip)
        for policy in POLICIES:
            if policy.policy_type != EC_POLICY:
                continue
            self._diskfile_mgr = self._df_router[policy]
            self.load_object_ring(policy)
            data_dir = get_data_dir(policy)
            local_devices = itertools.ifilter(
                lambda dev: dev and is_local_device(ips, self.port, dev["replication_ip"], dev["replication_port"]),
                policy.object_ring.devs,
            )

            for local_dev in local_devices:
                if override_devices and (local_dev["device"] not in override_devices):
                    continue
                dev_path = self._df_router[policy].get_dev_path(local_dev["device"])
                if not dev_path:
                    self.logger.warn(_("%s is not mounted"), local_dev["device"])
                    continue
                obj_path = join(dev_path, data_dir)
                tmp_path = join(dev_path, get_tmp_dir(int(policy)))
                unlink_older_than(tmp_path, time.time() - self.reclaim_age)
                if not os.path.exists(obj_path):
                    try:
                        mkdirs(obj_path)
                    except Exception:
                        self.logger.exception("Unable to create %s" % obj_path)
                    continue
                try:
                    partitions = os.listdir(obj_path)
                except OSError:
                    self.logger.exception("Unable to list partitions in %r" % obj_path)
                    continue
                for partition in partitions:
                    part_path = join(obj_path, partition)
                    if not (partition.isdigit() and os.path.isdir(part_path)):
                        self.logger.warning("Unexpected entity in data dir: %r" % part_path)
                        remove_file(part_path)
                        continue
                    partition = int(partition)
                    if override_partitions and (partition not in override_partitions):
                        continue
                    part_info = {
                        "local_dev": local_dev,
                        "policy": policy,
                        "partition": partition,
                        "part_path": part_path,
                    }
                    yield part_info
Author: helen5haha | Project: swift | Lines: 55 | Source: reconstructor.py


Example 9: collect_jobs

    def collect_jobs(self):
        """
        Returns a sorted list of jobs (dictionaries) that specify the
        partitions, nodes, etc to be synced.
        """
        jobs = []
        ips = whataremyips()
        for local_dev in [dev for dev in self.object_ring.devs
                          if dev and dev['replication_ip'] in ips and
                          dev['replication_port'] == self.port]:
            dev_path = join(self.devices_dir, local_dev['device'])
            obj_path = join(dev_path, 'objects')
            tmp_path = join(dev_path, 'tmp')
            if self.mount_check and not ismount(dev_path):
                self.logger.warn(_('%s is not mounted'), local_dev['device'])
                continue
            unlink_older_than(tmp_path, time.time() - self.reclaim_age)
            if not os.path.exists(obj_path):
                try:
                    mkdirs(obj_path)
                except Exception:
                    self.logger.exception('ERROR creating %s' % obj_path)
                continue
            for partition in os.listdir(obj_path):
                try:
                    job_path = join(obj_path, partition)
                    if isfile(job_path):
                        # Clean up any (probably zero-byte) files where a
                        # partition should be.
                        self.logger.warning('Removing partition directory '
                                            'which was a file: %s', job_path)
                        os.remove(job_path)
                        continue
                    part_nodes = \
                        self.object_ring.get_part_nodes(int(partition))
                    # MODIFIED LightSync: walk past this device's position in
                    # the ring so the remaining nodes keep their ring order
                    for mypos in range(len(part_nodes)):
                        if part_nodes[mypos]['id'] == local_dev['id']:
                            break
                    nodes = part_nodes[mypos + 1:] + part_nodes[:mypos]
                    # end MODIFIED LightSync
                    jobs.append(
                        dict(path=job_path,
                             device=local_dev['device'],
                             nodes=nodes,
                             delete=len(nodes) > len(part_nodes) - 1,
                             partition=partition))
                except (ValueError, OSError):
                    continue
        random.shuffle(jobs)
        if self.handoffs_first:
            # Move the handoff parts to the front of the list
            jobs.sort(key=lambda job: not job['delete'])
        self.job_count = len(jobs)
        return jobs
Author: LightSync | Project: patch-openstack-swift | Lines: 55 | Source: replicator.py


Example 10: collect_jobs

 def collect_jobs(self):
     """
     Returns a sorted list of jobs (dictionaries) that specify the
     partitions, nodes, etc to be synced.
     """
     jobs = []
     ips = whataremyips()
     for local_dev in [
         dev
         for dev in self.object_ring.devs
         if dev and dev["replication_ip"] in ips and dev["replication_port"] == self.port
     ]:
         dev_path = join(self.devices_dir, local_dev["device"])
         obj_path = join(dev_path, "objects")
         tmp_path = join(dev_path, "tmp")
         if self.mount_check and not os.path.ismount(dev_path):
             self.logger.warn(_("%s is not mounted"), local_dev["device"])
             continue
         unlink_older_than(tmp_path, time.time() - self.reclaim_age)
         if not os.path.exists(obj_path):
             try:
                 mkdirs(obj_path)
             except Exception:
                 self.logger.exception("ERROR creating %s" % obj_path)
             continue
         for partition in os.listdir(obj_path):
             try:
                 job_path = join(obj_path, partition)
                 if isfile(job_path):
                     # Clean up any (probably zero-byte) files where a
                     # partition should be.
                     self.logger.warning("Removing partition directory " "which was a file: %s", job_path)
                     os.remove(job_path)
                     continue
                 part_nodes = self.object_ring.get_part_nodes(int(partition))
                 nodes = [node for node in part_nodes if node["id"] != local_dev["id"]]
                 jobs.append(
                     dict(
                         path=job_path,
                         device=local_dev["device"],
                         nodes=nodes,
                         delete=len(nodes) > len(part_nodes) - 1,
                         partition=partition,
                     )
                 )
             except (ValueError, OSError):
                 continue
     random.shuffle(jobs)
     if self.handoffs_first:
         # Move the handoff parts to the front of the list
         jobs.sort(key=lambda job: not job["delete"])
     self.job_count = len(jobs)
     return jobs
Author: 674009287 | Project: swift | Lines: 53 | Source: replicator.py


Example 11: __call__

    def __call__(self, env, start_response):
        req = Request(env)
        lxc_host = env.get("HTTP_X_OBJECT_META_LXC_HOST")
        addresses = whataremyips()
        if lxc_host in addresses:
            #path_hash = hash_path(account, container, obj)
            ring = Ring(self.object_ring_path)
            raw_path = env.get("RAW_PATH_INFO").split("/")
            path_hash = hash_path(raw_path[3],raw_path[4],raw_path[5])
            f_location = storage_directory("objects", raw_path[2], path_hash)
            path = "%s/%s/%s" % (self.root, raw_path[1], f_location)
            #Check if container exists and is running
            self.check_container(path, raw_path[5])

        return self.app(env, start_response)
Author: zfeldstein | Project: swift-lxc | Lines: 15 | Source: swift_lxc_manage.py


Example 12: collect_jobs

 def collect_jobs(self):
     """
     Returns a sorted list of jobs (dictionaries) that specify the
     partitions, nodes, etc to be rsynced.
     """
     jobs = []
     ips = whataremyips()
     for local_dev in [dev for dev in self.object_ring.devs
                       if dev and dev['ip'] in ips and
                       dev['port'] == self.port]:
         dev_path = join(self.devices_dir, local_dev['device'])
         obj_path = join(dev_path, 'objects')
         tmp_path = join(dev_path, 'tmp')
         if self.mount_check and not os.path.ismount(dev_path):
             self.logger.warn(_('%s is not mounted'), local_dev['device'])
             continue
         unlink_older_than(tmp_path, time.time() - self.reclaim_age)
         if not os.path.exists(obj_path):
             try:
                 mkdirs(obj_path)
             except Exception:
                 self.logger.exception('ERROR creating %s' % obj_path)
             continue
         for partition in os.listdir(obj_path):
             try:
                 job_path = join(obj_path, partition)
                 if isfile(job_path):
                     # Clean up any (probably zero-byte) files where a
                     # partition should be.
                     self.logger.warning('Removing partition directory '
                                         'which was a file: %s', job_path)
                     os.remove(job_path)
                     continue
                 part_nodes = \
                     self.object_ring.get_part_nodes(int(partition))
                 nodes = [node for node in part_nodes
                          if node['id'] != local_dev['id']]
                 jobs.append(
                     dict(path=job_path,
                          device=local_dev['device'],
                          nodes=nodes,
                          delete=len(nodes) > len(part_nodes) - 1,
                          partition=partition))
             except (ValueError, OSError):
                 continue
     random.shuffle(jobs)
     self.job_count = len(jobs)
     return jobs
Author: CiscoAS | Project: swift | Lines: 48 | Source: replicator.py


Example 13: collect_jobs

 def collect_jobs(self):
     """
     Returns a sorted list of jobs (dictionaries) that specify the
     partitions, nodes, etc to be rsynced.
     """
     jobs = []
     ips = whataremyips()
     for policy in POLICIES:
         # may need to branch here for future policy types
         self.process_repl(policy, jobs, ips)
     random.shuffle(jobs)
     if self.handoffs_first:
         # Move the handoff parts to the front of the list
         jobs.sort(key=lambda job: not job['delete'])
     self.job_count = len(jobs)
     return jobs
Author: 701 | Project: swift | Lines: 16 | Source: replicator.py


Example 14: __init__

 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='account-reaper')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = conf.get('mount_check', 'true').lower() in \
                           ('true', 't', '1', 'on', 'yes', 'y')
     self.interval = int(conf.get('interval', 3600))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.account_ring = None
     self.container_ring = None
     self.object_ring = None
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.myips = whataremyips()
     self.concurrency = int(conf.get('concurrency', 25))
     self.container_concurrency = self.object_concurrency = \
         sqrt(self.concurrency)
     self.container_pool = GreenPool(size=self.container_concurrency)
Author: double-z | Project: swift | Lines: 18 | Source: reaper.py


Example 15: collect_jobs

    def collect_jobs(self, override_devices=None, override_partitions=None,
                     override_policies=None):
        """
        Returns a sorted list of jobs (dictionaries) that specify the
        partitions, nodes, etc to be rsynced.

        :param override_devices: if set, only jobs on these devices
            will be returned
        :param override_partitions: if set, only jobs on these partitions
            will be returned
        :param override_policies: if set, only jobs in these storage
            policies will be returned
        """
        jobs = []
        ips = whataremyips(self.bind_ip)
        for policy in POLICIES:
            # Skip replication if next_part_power is set. In this case
            # every object is hard-linked twice, but the replicator can't
            # detect them and would create a second copy of the file if not
            # yet existing - and this might double the actual transferred
            # and stored data
            next_part_power = getattr(
                policy.object_ring, 'next_part_power', None)
            if next_part_power is not None:
                self.logger.warning(
                    _("next_part_power set in policy '%s'. Skipping"),
                    policy.name)
                continue

            if policy.policy_type == REPL_POLICY:
                if (override_policies is not None and
                        str(policy.idx) not in override_policies):
                    continue
                # ensure rings are loaded for policy
                self.load_object_ring(policy)
                jobs += self.build_replication_jobs(
                    policy, ips, override_devices=override_devices,
                    override_partitions=override_partitions)
        random.shuffle(jobs)
        if self.handoffs_first:
            # Move the handoff parts to the front of the list
            jobs.sort(key=lambda job: not job['delete'])
        self.job_count = len(jobs)
        return jobs
Author: chenzhongtao | Project: swift | Lines: 44 | Source: replicator.py


Example 16: __init__

 def __init__(self, conf, container_ring=None, object_ring=None):
     #: The dict of configuration values from the [container-sync] section
     #: of the container-server.conf.
     self.conf = conf
     #: Logger to use for container-sync log lines.
     self.logger = get_logger(conf, log_route="container-sync")
     #: Path to the local device mount points.
     self.devices = conf.get("devices", "/srv/node")
     #: Indicates whether mount points should be verified as actual mount
     #: points (normally true, false for tests and SAIO).
     self.mount_check = config_true_value(conf.get("mount_check", "true"))
     #: Minimum time between full scans. This is to keep the daemon from
     #: running wild on near empty systems.
     self.interval = int(conf.get("interval", 300))
     #: Maximum amount of time to spend syncing a container before moving on
     #: to the next one. If a container sync hasn't finished in this time,
     #: it'll just be resumed next scan.
     self.container_time = int(conf.get("container_time", 60))
     #: The list of hosts we're allowed to send syncs to.
     self.allowed_sync_hosts = [
         h.strip() for h in conf.get("allowed_sync_hosts", "127.0.0.1").split(",") if h.strip()
     ]
     self.proxy = conf.get("sync_proxy")
     #: Number of containers with sync turned on that were successfully
     #: synced.
     self.container_syncs = 0
     #: Number of successful DELETEs triggered.
     self.container_deletes = 0
     #: Number of successful PUTs triggered.
     self.container_puts = 0
     #: Number of containers that didn't have sync turned on.
     self.container_skips = 0
     #: Number of containers that had a failure of some type.
     self.container_failures = 0
     #: Time of last stats report.
     self.reported = time()
     swift_dir = conf.get("swift_dir", "/etc/swift")
     #: swift.common.ring.Ring for locating containers.
     self.container_ring = container_ring or Ring(swift_dir, ring_name="container")
     #: swift.common.ring.Ring for locating objects.
     self.object_ring = object_ring or Ring(swift_dir, ring_name="object")
     self._myips = whataremyips()
     self._myport = int(conf.get("bind_port", 6001))
     swift.common.db.DB_PREALLOCATION = config_true_value(conf.get("db_preallocation", "f"))
Author: jettang | Project: icehouse | Lines: 44 | Source: sync.py


Example 17: run_once

 def run_once(self, *args, **kwargs):
     """Run a replication pass once."""
     self._zero_stats()
     dirs = []
     ips = whataremyips(self.bind_ip)
     if not ips:
         self.logger.error(_('ERROR Failed to get my own IPs?'))
         return
     self._local_device_ids = set()
     found_local = False
     for node in self.ring.devs:
         if node and is_local_device(ips, self.port,
                                     node['replication_ip'],
                                     node['replication_port']):
             found_local = True
             if self.mount_check and not ismount(
                     os.path.join(self.root, node['device'])):
                 self._add_failure_stats(
                     [(failure_dev['replication_ip'],
                       failure_dev['device'])
                      for failure_dev in self.ring.devs if failure_dev])
                 self.logger.warning(
                     _('Skipping %(device)s as it is not mounted') % node)
                 continue
             unlink_older_than(
                 os.path.join(self.root, node['device'], 'tmp'),
                 time.time() - self.reclaim_age)
             datadir = os.path.join(self.root, node['device'], self.datadir)
             if os.path.isdir(datadir):
                 self._local_device_ids.add(node['id'])
                 dirs.append((datadir, node['id']))
     if not found_local:
         self.logger.error("Can't find itself %s with port %s in ring "
                           "file, not replicating",
                           ", ".join(ips), self.port)
     self.logger.info(_('Beginning replication run'))
     for part, object_file, node_id in roundrobin_datadirs(dirs):
         self.cpool.spawn_n(
             self._replicate_object, part, object_file, node_id)
     self.cpool.waitall()
     self.logger.info(_('Replication run OVER'))
     self._report_stats()
Author: clayg | Project: swift | Lines: 42 | Source: db_replicator.py


Example 18: __call__

    def __call__(self, env, start_response):
        f_arg = start_response
        if self.location.lower() == 'proxy':
            req = Request(env)
            dt = datetime.now()
            ts = mktime(dt.timetuple()) + (dt.microsecond / 1000000.)
            week_day = date.today().strftime("%a")
            server_ip = whataremyips()
            txd = req.environ['swift.trans_id']
            start_time = time.time()

            # URL format is http:[host]/container/object
            version, account, container, obj = split_path(req.path, 1, 4, True)

            if container is None:
                container = ''
            if obj is None:
                obj = ''

            str_env = str(env)
            str_env = str_env.replace('"', '\'')
            user_agent = env['HTTP_USER_AGENT'] if 'HTTP_USER_AGENT' in env else ''
            msg = self.log_fm % (str_env, ts, dt.year, dt.month, dt.day, week_day,
                                 dt.hour, dt.minute, dt.second, dt.microsecond,
                                 req.method, req.path, account, container, obj,
                                 req.content_length, req.params, server_ip[0],
                                 req.remote_addr, user_agent, txd)

            def response_logging(status, response_headers, exc_info=None):
                elapse = time.time() - start_time
                full_msg = '%s,%s,%.8f' % (msg, status.split(' ', 1)[0], elapse)
                self.logger.info(full_msg)
                return start_response(status, response_headers, exc_info)

            f_arg = response_logging

        try:
            resp = self.app(env, f_arg)
        except Exception:
            self.client.captureException()
            raise
        return resp
Author: KoreaCloudObjectStorage | Project: swift-logging | Lines: 42 | Source: middleware.py


Example 19: __init__

 def __init__(self, conf):
     self.conf = conf
     self.logger = get_logger(conf, log_route='account-reaper')
     self.devices = conf.get('devices', '/srv/node')
     self.mount_check = config_true_value(conf.get('mount_check', 'true'))
     self.interval = int(conf.get('interval', 3600))
     self.swift_dir = conf.get('swift_dir', '/etc/swift')
     self.account_ring = None
     self.container_ring = None
     self.object_ring = None
     self.node_timeout = int(conf.get('node_timeout', 10))
     self.conn_timeout = float(conf.get('conn_timeout', 0.5))
     self.myips = whataremyips()
     self.concurrency = int(conf.get('concurrency', 25))
     self.container_concurrency = self.object_concurrency = \
         sqrt(self.concurrency)
     self.container_pool = GreenPool(size=self.container_concurrency)
     swift.common.db.DB_PREALLOCATION = \
         config_true_value(conf.get('db_preallocation', 'f'))
     self.delay_reaping = int(conf.get('delay_reaping') or 0)
Author: ChicoTeam | Project: swift | Lines: 20 | Source: reaper.py


Example 20: __init__

 def __init__(self, conf, ring, datadir, default_port, logger):
     self.logger = logger
     self.datadir = datadir
     self.conf = conf
     self.devices = conf.get('devices', '/srv/node/')
     port = int(conf.get('bind_port', default_port))
     my_ips = whataremyips()
     # device is a tuple (<device name>, <device mirror_copies>)
     self.device = None
     self.device_mirror_copies = 1
     for dev in ring.devs:
         if dev['ip'] in my_ips and dev['port'] == port:
             self.device_mirror_copies = int(dev.get('mirror_copies', 1))
             self.device = dev['device']
             break
     if not self.device:
         raise SwiftConfigurationError(
             _("Can\'t find device for this daemon"))
     self.faulted_devices = set()
     self.degraded_devices = set()
     self.unavailable_devices = set()
Author: Nexenta | Project: lfs | Lines: 21 | Source: __init__.py



Note: The swift.common.utils.whataremyips examples above were compiled by 纯净天空 from GitHub, MSDocs, and similar source and documentation platforms. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with those authors, and any distribution or reuse should follow each project's license. Please do not republish without permission.

