This article collects typical usage examples of the Python class mpp.lib.gprecoverseg.GpRecover. If you are unsure what GpRecover is for or how to use it, the curated class examples below should help.
Twenty code examples of the GpRecover class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code samples.
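Before the individual examples, here is a minimal sketch of the GpRecover calls that recur throughout this page (incremental(), full(), and wait_till_insync_transition()). It assumes a Greenplum TINC test environment where mpp.lib.gprecoverseg is importable; the helper function itself and its error handling are illustrative, not part of the library.

from mpp.lib.gprecoverseg import GpRecover

def bring_segments_back_insync(full_recovery=False):
    """Illustrative helper (not part of the GpRecover API): recover failed
    segments and block until the cluster reports the in-sync state."""
    gprecover = GpRecover()
    if full_recovery:
        gprecover.full()          # full recovery (typically gprecoverseg -F)
    else:
        gprecover.incremental()   # incremental recovery (plain gprecoverseg)
    # Poll until all segments transition back to sync; returns a boolean.
    if not gprecover.wait_till_insync_transition():
        raise RuntimeError('Segments did not transition to the in-sync state')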
Example 1: test_option_port_offset
def test_option_port_offset(self):
    """
    primary port + offset = mirror database port
    primary port + (2 * offset) = mirror replication port
    primary port + (3 * offset) = primary replication port
    """
    gprecover = GpRecover()
    port_offset = 500
    self._setup_gpaddmirrors(port_offset = port_offset)
    self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
    res = {'rc': 0, 'stdout' : '', 'stderr': ''}
    run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrors with non default port_offset', res)
    self.assertEqual(0, res['rc'])
    query_ports = 'SELECT port, replication_port FROM gp_segment_configuration WHERE content = 0 ORDER BY preferred_role DESC;'
    result = PSQL.run_sql_command(query_ports, flags='-q -t', dbname='template1')
    ports = result.strip().split('\n')
    primary_ports = ports[0]
    mirror_ports = ports[1]
    primary_ports = primary_ports.split('|')
    primary_ports = [port.strip() for port in primary_ports]
    primary_db_port = int(primary_ports[0])
    primary_replic_port = int(primary_ports[1])
    mirror_ports = mirror_ports.split('|')
    mirror_ports = [port.strip() for port in mirror_ports]
    mirror_db_port = int(mirror_ports[0])
    mirror_replic_port = int(mirror_ports[1])
    self.assertEqual(primary_db_port + port_offset, mirror_db_port)
    self.assertEqual(primary_db_port + 2*port_offset, mirror_replic_port)
    self.assertEqual(primary_db_port + 3*port_offset, primary_replic_port)
    gprecover.wait_till_insync_transition()
    self.verify_config_file_with_gp_config()
    self.check_mirror_seg()
Contributor: 50wu, Project: gpdb, Lines: 33, Source: test_gpaddmirrors.py
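To make the port arithmetic in the docstring above concrete, here is a small illustration. The base primary port of 40000 is hypothetical; only the offset relationships are what the test actually asserts.

# Hypothetical primary database port; only the offset relationships are asserted by the test.
primary_db_port = 40000
port_offset = 500

mirror_db_port      = primary_db_port + port_offset      # 40500
mirror_replic_port  = primary_db_port + 2 * port_offset  # 41000
primary_replic_port = primary_db_port + 3 * port_offset  # 41500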
Example 2: test_with_concurrent_workload
def test_with_concurrent_workload(self):
    """
    Add new mirrors while a concurrent workload is in progress, check that the mirrors
    are added and that the running workload is not affected, and finally run checkmirrorseg.
    Note: adding mirrors while a workload is running hits a known checkmirrorseg issue (MPP-24311).
    """
    gprecover = GpRecover()
    self._setup_gpaddmirrors()
    self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
    sql_setup_file = local_path('sql/ao_heap_table_setup.sql')
    sql_file = local_path('sql/ao_heap_table.sql')
    pg_stat_activity = 'SELECT * FROM pg_stat_activity;'
    PSQL.run_sql_file(sql_setup_file)
    subprocess.Popen(["psql", "-f", sql_file])
    time.sleep(15)
    subprocess.Popen(["gpaddmirrors", "-ai", self.mirror_config_file, "-d", self.mdd])
    time.sleep(15)
    result = PSQL.run_sql_command(pg_stat_activity, flags='-q -t', dbname='template1')
    result = result.strip()
    rows = result.split('\n')
    self.assertTrue(len(rows) > 1)
    while len(rows) > 1:
        result = PSQL.run_sql_command(pg_stat_activity, flags='-q -t', dbname='template1')
        result = result.strip()
        rows = result.split('\n')
        time.sleep(3)
    gprecover.wait_till_insync_transition()
    self.verify_config_file_with_gp_config()
Contributor: PengJi, Project: gpdb-comments, Lines: 28, Source: test_gpaddmirrors.py
Example 3: check_insync_transition
def check_insync_transition(self, dbname='template1'):
    """
    Confirm that the cluster is in sync before running gpcheckmirrorseg.
    The resync poll interval grows by 10 seconds with each new query, sleeping at most 75 seconds in total; this can be tuned.
    """
    recoverseg = GpRecover()
    is_synchronized = recoverseg.wait_till_insync_transition()
    if not is_synchronized:
        self.fail('Segments are not in sync')
Contributor: 50wu, Project: gpdb, Lines: 9, Source: __init__.py
Example 4: BaseClass
class BaseClass(MPPTestCase):
    '''
    Base class for Storage test suites
    '''
    def __init__(self, methodName):
        self.filereputil = Filerepe2e_Util()
        self.gprecover = GpRecover()
        super(BaseClass, self).__init__(methodName)

    def inject_fault(self, fault_name, type, role='mirror', port=None, occurence=None, sleeptime=None, seg_id=None):
        ''' Reset the fault and then issue the fault with the given type '''
        self.filereputil.inject_fault(f=fault_name, y='reset', r=role, p=port, o=occurence, sleeptime=sleeptime, seg_id=seg_id)
        self.filereputil.inject_fault(f=fault_name, y=type, r=role, p=port, o=occurence, sleeptime=sleeptime, seg_id=seg_id)
        tinctest.logger.info('Successfully injected fault_name : %s fault_type : %s occurence : %s ' % (fault_name, type, occurence))

    def reset_fault(self, fault_name, role='mirror', port=None, occurence=None, sleeptime=None, seg_id=None):
        ''' Reset the fault '''
        self.filereputil.inject_fault(f=fault_name, y='reset', r=role, p=port, o=occurence, sleeptime=sleeptime, seg_id=seg_id)
        tinctest.logger.info('Successfully reset fault_name : %s occurence : %s ' % (fault_name, occurence))

    def check_fault_status(self, fault_name, seg_id=None, role=None):
        status = self.filereputil.check_fault_status(fault_name=fault_name, status='triggered', max_cycle=20, role=role, seg_id=seg_id)
        self.assertTrue(status, 'The fault is not triggered in the time expected')

    def incremental_recoverseg(self):
        self.gprecover.incremental()

    def wait_till_change_tracking(self):
        self.filereputil.wait_till_change_tracking_transition()

    def run_sql_in_background(self, sql_cmd):
        PSQL.run_sql_command(sql_cmd, background=True)

    def wait_till_insync(self):
        self.gprecover.wait_till_insync_transition()

    def set_gpconfig(self, param, value):
        ''' Set the configuration parameter using gpconfig '''
        command = "gpconfig -c %s -v \"\'%s\'\" --skipvalidation" % (param, value)
        rc = run_shell_command(command)
        if not rc:
            raise Exception('Unable to set the configuration parameter %s ' % param)
        gpstop = GpStop()
        gpstop.run_gpstop_cmd(restart=True)

    def reset_gpconfig(self, param):
        ''' Reset the configuration parameter '''
        command = "gpconfig -r %s " % (param)
        rc = run_shell_command(command)
        if not rc:
            raise Exception('Unable to reset the configuration parameter %s ' % param)
        gpstop = GpStop()
        gpstop.run_gpstop_cmd(restart=True)
Contributor: 50wu, Project: gpdb, Lines: 55, Source: base.py
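As an illustration of how a test suite might consume the base class above, the sketch below shows a hypothetical subclass that injects a mirror fault, waits for the cluster to fall into change tracking, and then recovers incrementally. The fault name 'filerep_consumer' follows the pattern of Example 7 further down this page; the class and test names are made up and are not part of the original suite.

class ExampleFaultRecoveryTest(BaseClass):
    """Hypothetical subclass sketch; not part of the original test suite."""

    def test_mirror_fault_then_recover(self):
        # Put the mirrors in fault so the primaries move to change tracking.
        self.inject_fault('filerep_consumer', 'fault', role='mirror')
        self.check_fault_status('filerep_consumer', role='mirror')
        self.wait_till_change_tracking()
        # Recover incrementally and wait until the cluster is back in sync.
        self.incremental_recoverseg()
        self.wait_till_insync()
        # Clean up the injected fault.
        self.reset_fault('filerep_consumer', role='mirror')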
Example 5: test_option_d
def test_option_d(self):
    """
    check the -d option of gpaddmirrors
    """
    gprecover = GpRecover()
    self._setup_gpaddmirrors()
    self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
    del os.environ['MASTER_DATA_DIRECTORY']
    Command('run gpaddmirrors -i -d', 'gpaddmirrors -a -i %s -d %s' % (self.mirror_config_file, self.mdd)).run(validateAfter=True)
    os.environ['MASTER_DATA_DIRECTORY'] = self.mdd
    gprecover.wait_till_insync_transition()
    self.verify_config_file_with_gp_config()
    self.check_mirror_seg()
Contributor: 50wu, Project: gpdb, Lines: 13, Source: test_gpaddmirrors.py
Example 6: test_interview
def test_interview(self):
    gprecover = GpRecover()
    child = pexpect.spawn('gpaddmirrors')
    #child.logfile = sys.stdout
    for i in range(0, self.number_of_segments_per_host):
        child.expect('Enter mirror segment data directory location.*.\r\n')
        child.sendline(self.mirror_data_dir)
    child.expect('Continue with add mirrors procedure Yy|Nn (default=N):')
    child.sendline('Y')
    child.expect(pexpect.EOF)
    # wait until the cluster is fully synced, then run gpcheckmirrorseg
    gprecover.wait_till_insync_transition()
    self.check_mirror_seg()
    self._do_gpdeletesystem()
    self._do_gpinitsystem()
Contributor: PengJi, Project: gpdb-comments, Lines: 15, Source: test_gpaddmirrors.py
Example 7: test_with_fault_injection
def test_with_fault_injection(self):
    """
    Add new mirrors and run a workload to verify that the cluster functions correctly,
    then inject a fault into the mirrors to bring the cluster into change tracking, and finally run recoverseg.
    """
    filerepUtil = Filerepe2e_Util()
    gprecover = GpRecover()
    self._setup_gpaddmirrors()
    self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
    res = {'rc': 0, 'stdout' : '', 'stderr': ''}
    run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrors with fault injection', res)
    gprecover.wait_till_insync_transition()
    self.assertEqual(0, res['rc'])
    self.run_simple_ddl_dml()
    # after adding new mirrors, check the integrity between primary and mirror
    self.check_mirror_seg()
    out_file = local_path('inject_fault_into_ct')
    filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='mirror', H='ALL', outfile=out_file)
    # trigger the transition to change tracking
    PSQL.run_sql_command('drop table if exists foo;', dbname='template1')
    filerepUtil.wait_till_change_tracking_transition()
    gprecover.incremental()
    gprecover.wait_till_insync_transition()
    out_file = local_path('reset_fault')
    filerepUtil.inject_fault(f='filerep_consumer', m='async', y='reset', r='mirror', H='ALL', outfile=out_file)
Contributor: PengJi, Project: gpdb-comments, Lines: 27, Source: test_gpaddmirrors.py
Example 8: __init__
def __init__(self, methodName):
    self.pgport = os.environ.get('PGPORT')
    self.fileutil = Filerepe2e_Util()
    self.gpconfig = GPDBConfig()
    self.gprecover = GpRecover(self.gpconfig)
    self.gpstate = Gpstate()
    self.gpprimarymirror = Gpprimarymirror()
    self.base = GPDBStorageBaseTestCase(self.gpconfig)
    super(FtsTransitions, self).__init__(methodName)
Contributor: LJoNe, Project: gpdb, Lines: 9, Source: __init__.py
Example 9: test_gpaddmirrors_with_workload
def test_gpaddmirrors_with_workload(self):
    """
    Add new mirrors after creating some workload, check that the mirrors are added
    and that checkmirrorseg passes.
    """
    gprecover = GpRecover()
    self._setup_gpaddmirrors()
    self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
    sql_setup_file = local_path('sql/ao_heap_table_setup.sql')
    sql_file = local_path('sql/ao_heap_table.sql')
    pg_stat_activity = 'SELECT * FROM pg_stat_activity;'
    PSQL.run_sql_file(sql_setup_file)
    PSQL.run_sql_file(sql_file)
    res = {'rc': 0, 'stdout' : '', 'stderr': ''}
    run_shell_command("gpaddmirrors -a -i %s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrors with workload', res)
    self.assertEqual(0, res['rc'])
    gprecover.wait_till_insync_transition()
    self.verify_config_file_with_gp_config()
    self.check_mirror_seg()
Contributor: PengJi, Project: gpdb-comments, Lines: 19, Source: test_gpaddmirrors.py
Example 10: __init__
def __init__(self, methodName):
    self.pgport = os.environ.get('PGPORT')
    self.util = Filerepe2e_Util()
    self.gpconfig = GpConfig()
    self.config = GPDBConfig()
    self.gpr = GpRecover(self.config)
    self.dbstate = DbStateClass('run_validation', self.config)
    self.gpstart = GpStart()
    self.gpstop = GpStop()
    super(FilerepTestCase, self).__init__(methodName)
Contributor: 50wu, Project: gpdb, Lines: 10, Source: __init__.py
Example 11: __init__
def __init__(self, methodName):
    self.filereputil = Filerepe2e_Util()
    self.config = GPDBConfig()
    self.gprecover = GpRecover(self.config)
    self.gpstop = GpStop()
    self.gpstart = GpStart()
    self.gpfile = Gpfilespace(self.config)
    self.gpverify = GpdbVerify(config=self.config)
    self.dbstate = DbStateClass('run_validation', self.config)
    self.port = os.getenv('PGPORT')
    super(PgtwoPhaseClass, self).__init__(methodName)
Contributor: shwu, Project: gpdb, Lines: 11, Source: __init__.py
Example 12: __init__
def __init__(self, methodName):
    self.fileutil = Filerepe2e_Util()
    self.config = GPDBConfig()
    self.gprecover = GpRecover(self.config)
    self.gpstart = GpStart()
    self.gpstop = GpStop()
    self.gpfile = Gpfilespace(self.config)
    self.dbstate = DbStateClass('run_validation', self.config)
    self.port = os.getenv('PGPORT')
    self.base = GPDBStorageBaseTestCase()
    super(SuspendCheckpointCrashRecovery, self).__init__(methodName)
Contributor: PengJi, Project: gpdb-comments, Lines: 11, Source: __init__.py
Example 13: run_gprecoverseg
def run_gprecoverseg(self, recover_option):
    '''
    @summary: Run gprecoverseg full or incremental to bring the cluster back in sync
    '''
    self.gpr = GpRecover()
    tinctest.logger.info("[STLRTest] Running run_gprecoverseg")
    if recover_option == 'full':
        self.gpr.full()
    else:
        self.gpr.incremental()
    self.gpr.wait_till_insync_transition()
Contributor: 50wu, Project: gpdb, Lines: 14, Source: __init__.py
Example 14: test_mirror_spread
def test_mirror_spread(self):
    """
    Mirror spreading places each mirror on a different host within the Greenplum Database array.
    """
    gprecover = GpRecover()
    if self.number_of_segments_per_host > len(self.hosts):
        self.skipTest('skipping test since the number of hosts is less than the number of segments per host')
    self._setup_gpaddmirrors()
    self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
    res = {'rc': 0, 'stdout' : '', 'stderr': ''}
    run_shell_command("gpaddmirrors -a -i %s -s -d %s --verbose" % (self.mirror_config_file, self.mdd), 'run gpaddmirrors with mirror spreading', res)
    self.assertEqual(0, res['rc'])
    check_mirror_spreading = '''SELECT A.hostname, B.hostname
                                FROM gp_segment_configuration A, gp_segment_configuration B
                                WHERE A.preferred_role = \'p\' AND B.preferred_role = \'m\' AND A.content = B.content AND A.hostname <> B.hostname;'''
    result = PSQL.run_sql_command(check_mirror_spreading, flags='-q -t', dbname='template1')
    result = result.strip()
    self.assertNotEqual(0, len(result))
    rows = result.split('\n')
    self.assertEqual(self.number_of_segments, len(rows))
    gprecover.wait_till_insync_transition()
    self.verify_config_file_with_gp_config()
    self.check_mirror_seg()
Contributor: PengJi, Project: gpdb-comments, Lines: 23, Source: test_gpaddmirrors.py
Example 15: test_batch_size_4
def test_batch_size_4(self):
    """
    Check the batch size option -B of gpaddmirrors: the number of parallel workers matches
    the requested batch size. Without -B, gpaddmirrors starts up to 10 workers, depending on
    how many mirror segments need to be set up.
    """
    gprecover = GpRecover()
    self._setup_gpaddmirrors()
    self._cleanup_segment_data_dir(self.host_file, self.mirror_data_dir)
    workers = Set()
    batch_size = 4
    res = {'rc': 0, 'stdout' : '', 'stderr': ''}
    run_shell_command("gpaddmirrors -a -i %s -B %s -d %s --verbose" % (self.mirror_config_file, batch_size, self.mdd), 'run gpaddmirrors batch size %s' % batch_size, res)
    self.assertEqual(0, res['rc'])
    lines = res['stdout'].split('\n')
    for line in lines:
        if 'worker' in line and 'haltWork' in line:
            elems = line.split(' ')[1]
            worker = elems.split('-')[-1]
            workers.add(worker)
    self.assertEquals(len(workers), batch_size)
    gprecover.wait_till_insync_transition()
    self.verify_config_file_with_gp_config()
    self.check_mirror_seg()
Contributor: PengJi, Project: gpdb-comments, Lines: 23, Source: test_gpaddmirrors.py
Example 16: __init__
def __init__(self, methodName):
    self.filereputil = Filerepe2e_Util()
    self.gprecover = GpRecover()
    super(BaseClass, self).__init__(methodName)
Contributor: 50wu, Project: gpdb, Lines: 4, Source: base.py
Example 17: wait_till_insync_transition
def wait_till_insync_transition(self):
    self.gpr = GpRecover()
    self.gpr.wait_till_insync_transition()
Contributor: 50wu, Project: gpdb, Lines: 3, Source: test_gprecoverseg.py
Example 18: GprecoversegTest
class GprecoversegTest(ScenarioTestCase):
    """
    @description This test suite contains the automation for 'gprecoverseg' tests
    @created 2009-01-27 14:00:00
    @modified 2013-09-12 17:10:15
    @tags storage schema_topology
    @product_version gpdb:4.2.x,gpdb:main
    """

    def __init__(self, methodName, should_fail=False):
        super(GprecoversegTest, self).__init__(methodName)

    def get_version(self):
        cmdStr = 'gpssh --version'
        cmd = Command('get product version', cmdStr=cmdStr)
        cmd.run(validateAfter=True)
        return cmd.get_results().stdout.strip().split()[2]

    def recover_segments(self, option, max_rtrycnt):
        """
        @summary: Recovers the segments and returns the status of the recovery process.
        @param option: represents different gprecoverseg command options
        @param max_rtrycnt: the maximum number of times the state of the cluster should be checked
        @return: Boolean value representing the status of the recovery process
        """
        config = GPDBConfig()
        recoverseg = GpRecoverseg()
        tinctest.logger.info("Running gprecoverseg with '%s' option..." % option)
        recoverseg.run(option)
        rtrycnt = 0
        while ((config.is_not_insync_segments()) == False and rtrycnt <= max_rtrycnt):
            tinctest.logger.info("Waiting [%s] for DB to recover" % rtrycnt)
            sleep(10)
            rtrycnt = rtrycnt + 1
        if rtrycnt > max_rtrycnt:
            return False
        else:
            return True

    def wait_till_insync_transition(self):
        self.gpr = GpRecover()
        self.gpr.wait_till_insync_transition()

    def check_segment_roles(self):
        """
        @summary: Checks whether the segments are in their preferred roles.
                  If not, rebalances the cluster.
        @return: None
        """
        newfault = Fault()
        # If the segments are not in preferred roles, go for rebalancing the cluster
        if newfault.check_if_not_in_preferred_role():
            tinctest.logger.warn("***** Segments not in their preferred roles : rebalancing the segments...")
            # If rebalancing downs the segments, go for incremental recovery - this is observed sometimes
            if not self.recover_segments('-r', 10):
                tinctest.logger.warn("***** Segments down after rebalance : Tests cannot proceed further!!")
            # If rebalancing passes, proceed with the tests
            else:
                tinctest.logger.info("***** Segments successfully rebalanced : Proceeding with the tests")
        # If segments are in preferred roles, proceed with the tests
        else:
            tinctest.logger.info("***** Segments in preferred roles : Proceeding with the tests")

    def check_cluster_health(self, doFullRecovery=False):
        """
        @summary: Checks the cluster health, tries to recover and rebalance the cluster,
                  and fails the test if it is not able to do so
        @param doFullRecovery: Boolean value which decides whether to go for full
                               recovery or not
        @return: None
        """
        tinctest.logger.info("***** Checking the cluster health before starting tests")
        config = GPDBConfig()
        # If the segments are not up, go for recovery
        if not config.is_not_insync_segments():
            tinctest.logger.info("***** Starting the recovery process")
            # if incremental recovery didn't work, go for full recovery
            if not self.recover_segments(' ', 10):
                tinctest.logger.warn("***** Segments not recovered after incremental recovery")
                if doFullRecovery:
                    # if full recovery also fails, the tests cannot proceed, so fail it
                    if not self.recover_segments('-F', 20):
                        tinctest.logger.error("***** Segments not recovered even after full recovery - Tests cannot proceed further!!")
                        self.fail("Segments are down - Tests cannot proceed further!!")
                    # if full recovery passes, check for rebalancing the cluster
                    else:
                        tinctest.logger.info("***** Segments up after full recovery : validating their roles...")
                        self.check_segment_roles()
                else:
                    self.fail("Segments are down - Tests cannot proceed!!")
            # if incremental recovery passes, check for rebalancing the cluster
            else:
                tinctest.logger.info("***** Segments up after incremental recovery : validating their roles...")
                self.check_segment_roles()
    # ... (the rest of the code is omitted here) ...
Contributor: 50wu, Project: gpdb, Lines: 101, Source: test_gprecoverseg.py
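The recover_segments helper above pairs a gprecoverseg invocation with a polling loop over GPDBConfig.is_not_insync_segments(). As a standalone restatement of just that polling step, the sketch below assumes the same tinctest environment and 10-second interval; the function name and the retry default are made up for illustration.

import tinctest
from time import sleep

def wait_for_cluster_insync(config, max_retries=10, interval=10):
    """Illustrative polling helper: return True once GPDBConfig reports no
    out-of-sync segments, or False when the retry budget is exhausted."""
    attempts = 0
    while not config.is_not_insync_segments() and attempts <= max_retries:
        tinctest.logger.info("Waiting [%s] for DB to recover" % attempts)
        sleep(interval)
        attempts += 1
    return attempts <= max_retries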
Example 19: test_recovery_full
def test_recovery_full(self):
    gprecover = GpRecover()
    gprecover.full()
    gprecover.wait_till_insync_transition()
Contributor: PengJi, Project: gpdb-comments, Lines: 4, Source: fault.py
Example 20: FilerepResync
class FilerepResync(ScenarioTestCase):
    """
    @description test cases for MPP-11167
    @created 2013-03-15 10:10:10
    @modified 2013-05-07 17:10:15
    @tags persistent tables schedule_filerep
    @product_version gpdb:
    """

    @classmethod
    def setUpClass(cls):
        super(FilerepResync, cls).setUpClass()
        tinctest.logger.info('Setting up the filerep resync test.')

    def wait_till_insync_transition(self):
        self.gpr = GpRecover()
        self.gpr.wait_till_insync_transition()

    def test_filerep_resysnc(self):
        # Step 1: Create an append-only table
        test_case_list1 = []
        test_case_list1.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.schema.SchemaTest.AOTable")
        self.test_case_scenario.append(test_case_list1)

        # Step 2.1: Begin a transaction and insert values into the created table
        test_case_list2 = []
        test_case_list2.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.runsql.TransactionTest.Transaction")
        # Step 2.2: Start a concurrent process to kill all the mirror processes.
        #           It should start only after the begin & insert are performed
        test_case_list2.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.fault.FaultTest.ProcessKill")
        self.test_case_scenario.append(test_case_list2)

        # Step 3: Check the persistent table for duplicate entries
        test_case_list3 = []
        test_case_list3.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.schema.SchemaTest.DuplicateEntries.test_duplicate_entries_after_hitting_fault")
        self.test_case_scenario.append(test_case_list3)

        # Step 4: Perform incremental recovery
        test_case_list4 = []
        test_case_list4.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.fault.FaultTest.Recovery")
        self.test_case_scenario.append(test_case_list4)

        # Step 5: Check whether the mirror segments are up
        test_case_list5 = []
        test_case_list5.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.fault.FaultTest.Health")
        self.test_case_scenario.append(test_case_list5)

        # Step 6: Re-check the persistent table for duplicate entries
        test_case_list6 = []
        test_case_list6.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.schema.SchemaTest.DuplicateEntries.test_duplicate_entries_after_recovery")
        self.test_case_scenario.append(test_case_list6)

        # Step 7: Check the state of the DB and the cluster
        test_case_list7 = []
        test_case_list7.append("mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_catalog")
        self.test_case_scenario.append(test_case_list7)

        test_case_list8 = []
        test_case_list8.append("mpp.gpdb.tests.storage.filerep.Filerep_Resync.test_filerep_resync.FilerepResync.wait_till_insync_transition")
        self.test_case_scenario.append(test_case_list8)

        test_case_list9 = []
        test_case_list9.append("mpp.gpdb.tests.storage.lib.dbstate.DbStateClass.check_mirrorintegrity")
        self.test_case_scenario.append(test_case_list9)
Contributor: PengJi, Project: gpdb-comments, Lines: 66, Source: test_filerep_resync.py
Note: The mpp.lib.gprecoverseg.GpRecover class examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not repost without permission.