本文整理汇总了Python中toil.job.Job类的典型用法代码示例。如果您正苦于以下问题:Python Job类的具体用法?Python Job怎么用?Python Job使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Job类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: makeWorkflow
def makeWorkflow():
    """Build and return a root Job hosting three serialized test services.

    The three service start-value promises are handed to a child function
    job along with the (module-level) outFile.
    """
    root = Job()
    # Collect the start-value promise of each service in order.
    promises = [root.addService(TestServiceSerialization(name))
                for name in ("woot1", "woot2", "woot3")]
    root.addChildFn(fnTest, promises, outFile)
    return root
开发者ID:Duke-GCB,项目名称:toil,代码行数:7,代码来源:jobServiceTest.py
示例2: __init__
def __init__(self, tree, event, sleepTime, startTime, cpu):
    """Job that remembers its scheduling parameters for a dependencies test.

    All constructor arguments are stored verbatim on the instance; cpu is
    also forwarded to the base Job as its resource requirement.
    """
    Job.__init__(self, cpu=cpu)
    # Stash every argument for later inspection by the test.
    for attr, value in (("tree", tree), ("event", event),
                        ("sleepTime", sleepTime), ("startTime", startTime),
                        ("cpu", cpu)):
        setattr(self, attr, value)
开发者ID:BD2KGenomics,项目名称:toil-old,代码行数:7,代码来源:dependenciesTest.py
示例3: main
def main():
    """Restarts a toil workflow.
    """
    # --- argument parsing ---------------------------------------------
    parser = getBasicOptionParser()
    parser.add_argument("--version", action='version', version=version)
    parser.add_argument("jobStore", type=str,
                        help=("Store in which to place job management files and the global "
                              "accessed temporary files(If this is a file path this needs to be "
                              "globally accessible by all machines running jobs).\n"
                              "If the store already exists and restart is false an "
                              "ExistingJobStoreException exception will be thrown."))
    options = parseBasicOptions(parser)
    # --- run the toil construction/leader -----------------------------
    setLoggingFromOptions(options)
    # Force restart mode: reuse the existing job store rather than create one.
    options.restart = True
    with setupToil(options) as (config, batchSystem, jobStore):
        jobStore.clean(Job._loadRootJob(jobStore))
        mainLoop(config, batchSystem, jobStore, Job._loadRootJob(jobStore))
开发者ID:kellrott,项目名称:toil,代码行数:31,代码来源:toilRestart.py
示例4: testEncapsulation
def testEncapsulation(self):
    """
    Tests the Job.encapsulation method, which uses the EncapsulationJob
    class.
    """
    # Output file collects one letter per job; expected final content "ABCDE".
    outFile = getTempFile(rootDir=self._createTempDir())
    try:
        # Encapsulate a job graph, then hang a child and a follow-on off it.
        encapsulated = T.wrapJobFn(encapsulatedJobFn, "A", outFile).encapsulate()
        childJob = T.wrapFn(f, encapsulated.rv(), outFile)
        followOnJob = T.wrapFn(f, childJob.rv(), outFile)
        encapsulated.addChild(childJob)
        encapsulated.addFollowOn(followOnJob)
        # Configure and run the workflow.
        options = T.Runner.getDefaultOptions(self._getTestJobStorePath())
        options.logLevel = "INFO"
        T.Runner.startToil(encapsulated, options)
        # Jobs must have run in encapsulation-respecting order.
        self.assertEquals(open(outFile, 'r').readline(), "ABCDE")
    finally:
        os.remove(outFile)
开发者ID:chapmanb,项目名称:toil,代码行数:25,代码来源:jobEncapsulationTest.py
示例5: testJobConcurrency
def testJobConcurrency(self):
    """
    Tests that the batch system is allocating core resources properly for concurrent tasks.

    For each configured per-job core count, launches cpu_count jobs and
    checks (via a shared counter file) that the peak observed concurrency
    equals cpu_count divided by the cores each job demands.
    """
    for cores_per_job in self.allocated_cores:
        temp_dir = self._createTempDir('testFiles')
        options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
        options.workDir = temp_dir
        options.maxCores = self.cpu_count
        options.batchSystem = self.batchSystemName
        counter_path = os.path.join(temp_dir, 'counter')
        resetCounters(counter_path)
        value, max_value = getCounters(counter_path)
        assert (value, max_value) == (0, 0)
        root = Job()
        for _ in range(self.cpu_count):
            root.addFollowOn(Job.wrapFn(measureConcurrency, counter_path, self.sleep_time,
                                        cores=cores_per_job, memory='1M', disk='1Mi'))
        Job.Runner.startToil(root, options)
        _, max_value = getCounters(counter_path)
        # Use floor division: on Python 3 ``/`` yields a float, which would
        # never compare equal to the integer concurrency count read back
        # from the counter file. ``//`` is identical on Python 2 for ints.
        self.assertEqual(max_value, self.cpu_count // cores_per_job)
开发者ID:broadinstitute,项目名称:toil,代码行数:25,代码来源:batchSystemTest.py
示例6: __init__
def __init__(self, tree, event, sleepTime, startTime, cores):
    """Job recording its dependency-test parameters.

    cores is both forwarded to the Job base class as a resource
    requirement and kept on the instance for inspection.
    """
    Job.__init__(self, cores=cores)
    # Keep every constructor argument accessible to the test harness.
    self.tree, self.event = tree, event
    self.sleepTime, self.startTime = sleepTime, startTime
    self.cores = cores
开发者ID:adamnovak,项目名称:toil,代码行数:7,代码来源:dependenciesTest.py
示例7: testEncapsulation
def testEncapsulation(self):
    """
    Tests the Job.encapsulation method, which uses the EncapsulationJob
    class.
    """
    outFile = getTempFile(rootDir=os.getcwd())
    # Build a small job graph: root -> child, then a follow-on of root.
    rootJob = T.wrapFn(f, "A", outFile)
    childJob = rootJob.addChildFn(f, rootJob.rv(), outFile)
    followOnJob = rootJob.addFollowOnFn(f, childJob.rv(), outFile)
    # Encapsulate the whole graph behind a single node.
    rootJob = rootJob.encapsulate()
    # Attach a child and a follow-on to the encapsulated graph.
    extraChild = T.wrapFn(f, followOnJob.rv(), outFile)
    extraFollowOn = T.wrapFn(f, extraChild.rv(), outFile)
    rootJob.addChild(extraChild)
    rootJob.addFollowOn(extraFollowOn)
    # Run the workflow; the return value is the number of failed jobs.
    options = T.Runner.getDefaultOptions()
    options.logLevel = "INFO"
    self.assertEquals(T.Runner.startToil(rootJob, options), 0)
    T.Runner.cleanup(options)  # removes the jobStore
    # Each job appended one letter in order.
    self.assertEquals(open(outFile, 'r').readline(), "ABCDE")
    os.remove(outFile)
开发者ID:BD2KGenomics,项目名称:toil-old,代码行数:28,代码来源:jobEncapsulationTest.py
示例8: testCacheEjection
def testCacheEjection(self):
    """
    Test cache always ejects least recently created file.

    Makes three jobs that each write one output file to the filestore. The
    combined size of any two files is always less than cacheSize but the
    combined size of all 3 is always more, so one file always has to be
    ejected. Ensures that A is always ejected regardless of size.
    """
    # Make a temp directory for the test
    test_dir = self._createTempDir()
    for test in xrange(10):
        options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
        options.logLevel = "DEBUG"
        options.cacheSize = 100000
        options.retryCount = 100
        options.badWorker = 0.5
        options.badWorkerFailInterval = 1.0
        # Create a temp file to write the test results
        handle, logfile = tempfile.mkstemp(dir=test_dir)
        os.close(handle)
        file_sizes = [50000, 40000, 30000]
        # Randomize to (potentially) test all combinations
        random.shuffle(file_sizes)
        # BUG FIX: each job must write a file of a *different* size;
        # previously all three used file_sizes[0], which defeated the
        # shuffle above and made the size invariants meaningless.
        A = Job.wrapJobFn(fileTestJob, file_sizes[0])
        B = Job.wrapJobFn(fileTestJob, file_sizes[1])
        C = Job.wrapJobFn(fileTestJob, file_sizes[2])
        # D prints test status to the temp file after A, B, C run the
        # cache operations.
        D = Job.wrapJobFn(fileTestCache, A.rv(), B.rv(), C.rv(), logfile)
        A.addChild(B)
        B.addChild(C)
        C.addChild(D)
        Job.Runner.startToil(A, options)
        # Assert jobs passed by reading test results from the temp file
        with open(logfile, 'r') as outfile:
            for test_status in outfile:
                assert test_status.strip() == 'True'
开发者ID:anukat2015,项目名称:toil,代码行数:35,代码来源:jobCacheEjectionTest.py
示例9: _deleteLocallyReadFilesFn
def _deleteLocallyReadFilesFn(self, readAsMutable):
    """Run a writer -> remover workflow to exercise deleting locally read files."""
    self.options.retryCount = 0
    writer = Job.wrapJobFn(self._writeFileToJobStore, isLocalFile=True, memory='10M')
    remover = Job.wrapJobFn(self._removeReadFileFn, writer.rv(),
                            readAsMutable=readAsMutable,
                            memory='20M')
    writer.addChild(remover)
    Job.Runner.startToil(writer, self.options)
开发者ID:joskid,项目名称:toil,代码行数:7,代码来源:jobCacheTest.py
示例10: runNewCheckpointIsLeafVertexTest
def runNewCheckpointIsLeafVertexTest(self, createWorkflowFn):
    """
    Test verification that a checkpoint job is a leaf vertex using both
    valid and invalid cases.
    :param createWorkflowFn: function to create and new workflow and return a tuple of:
                             0) the workflow root job
                             1) a checkpoint job to test within the workflow
    """

    def runCase(description, **testKwargs):
        # Log the scenario, build a fresh workflow, and verify it.
        logger.info(description)
        self.runCheckpointVertexTest(*createWorkflowFn(), **testKwargs)

    runCase('Test checkpoint job that is a leaf vertex',
            expectedException=None)
    runCase('Test checkpoint job that is not a leaf vertex due to the presence of a service',
            checkpointJobService=TrivialService("LeafTestService"),
            expectedException=JobGraphDeadlockException)
    runCase('Test checkpoint job that is not a leaf vertex due to the presence of a child job',
            checkpointJobChild=Job.wrapJobFn(simpleJobFn, "LeafTestChild"),
            expectedException=JobGraphDeadlockException)
    runCase('Test checkpoint job that is not a leaf vertex due to the presence of a follow-on job',
            checkpointJobFollowOn=Job.wrapJobFn(simpleJobFn, "LeafTestFollowOn"),
            expectedException=JobGraphDeadlockException)
开发者ID:brainstorm,项目名称:toil,代码行数:33,代码来源:jobTest.py
示例11: testPromiseRequirementRaceStatic
def testPromiseRequirementRaceStatic(self):
    """
    Checks for a race condition when using promised requirements and child job functions.
    """
    # Child's disk requirement is a promise derived from the parent's result.
    parent = Job.wrapJobFn(logDiskUsage, 'A', sleep=5, disk=PromisedRequirement(1024))
    child = Job.wrapJobFn(logDiskUsage, 'B',
                          disk=PromisedRequirement(lambda x: x + 1024, parent.rv()))
    parent.addChild(child)
    Job.Runner.startToil(parent, self.getOptions(self._createTempDir('testFiles')))
开发者ID:Duke-GCB,项目名称:toil,代码行数:8,代码来源:promisedRequirementTest.py
示例12: testReadCachHitFileFromJobStore
def testReadCachHitFileFromJobStore(self):
    """
    Read a file from the file store that has a corresponding cached copy. Ensure the number
    of links on the file are appropriate.
    """
    writer = Job.wrapJobFn(self._writeFileToJobStore, isLocalFile=True)
    # The reader expects a cache hit (isCachedFile=True), so cacheReadFile
    # is irrelevant and passed as None.
    reader = Job.wrapJobFn(self._readFromJobStore, isCachedFile=True,
                           cacheReadFile=None, fsID=writer.rv())
    writer.addChild(reader)
    Job.Runner.startToil(writer, self.options)
开发者ID:joskid,项目名称:toil,代码行数:10,代码来源:jobCacheTest.py
示例13: testAddChildEncapsulate
def testAddChildEncapsulate(self):
    """
    Make sure that the encapsulated child does not have two parents
    with unique roots.
    """
    parent = T.wrapFn(noOp)
    child = T.wrapFn(noOp)
    # Encapsulating the child must not introduce a second root.
    parent.addChild(child).encapsulate()
    self.assertEquals(len(parent.getRootJobs()), 1)
开发者ID:chapmanb,项目名称:toil,代码行数:10,代码来源:jobEncapsulationTest.py
示例14: _deleteLocallyReadFilesFn
def _deleteLocallyReadFilesFn(self, readAsMutable):
    """Run a writer -> remover workflow; if it fails, both jobs must have
    failed and the logged assertion must mention an explicit deletion."""
    self.options.retryCount = 0
    writer = Job.wrapJobFn(self._writeFileToJobStore, isLocalFile=True, memory='10M')
    remover = Job.wrapJobFn(self._removeReadFileFn, writer.rv(),
                            readAsMutable=readAsMutable, memory='20M')
    writer.addChild(remover)
    try:
        Job.Runner.startToil(writer, self.options)
    except FailedJobsException as err:
        # Both the failing job and its parent retry count as failures.
        self.assertEqual(err.numberOfFailedJobs, 2)
        errMsg = self._parseAssertionError(self.options.logFile)
        if 'explicitly' not in errMsg:
            self.fail('Shouldn\'t see this')
开发者ID:broadinstitute,项目名称:toil,代码行数:12,代码来源:jobCacheTest.py
示例15: testControlledFailedWorkerRetry
def testControlledFailedWorkerRetry(self):
    """
    Conduct a couple of job store operations. Then die. Ensure that the restarted job is
    tracking values in the cache state file appropriately.
    """
    workdir = self._createTempDir(purpose='nonLocalDir')
    self.options.retryCount = 1
    failingJob = Job.wrapJobFn(self._controlledFailTestFn,
                               jobDisk=2 * 1024 * 1024 * 1024,
                               testDir=workdir, disk='2G')
    probeJob = Job.wrapJobFn(self._probeJobReqs, sigmaJob=100, disk='100M')
    failingJob.addChild(probeJob)
    Job.Runner.startToil(failingJob, self.options)
开发者ID:broadinstitute,项目名称:toil,代码行数:12,代码来源:jobCacheTest.py
示例16: _testCacheMissFunction
def _testCacheMissFunction(self, cacheReadFile):
    """
    This is the function that actually does what the 2 cache miss functions want.
    :param cacheReadFile: Does the read file need to be cached(T) or not(F)
    """
    workdir = self._createTempDir(purpose='nonLocalDir')
    # Write a non-local file, then read it back as a cache miss.
    writer = Job.wrapJobFn(self._writeFileToJobStore, isLocalFile=False,
                           nonLocalDir=workdir)
    reader = Job.wrapJobFn(self._readFromJobStore, isCachedFile=False,
                           cacheReadFile=cacheReadFile, fsID=writer.rv())
    writer.addChild(reader)
    Job.Runner.startToil(writer, self.options)
开发者ID:joskid,项目名称:toil,代码行数:12,代码来源:jobCacheTest.py
示例17: testToilIsNotBroken
def testToilIsNotBroken(self):
    """
    Runs a simple diamond-shaped DAG to check that features other than
    caching were not broken.
    """
    top, left, right, bottom = (Job.wrapJobFn(self._uselessFunc) for _ in range(4))
    top.addChild(left)
    top.addChild(right)
    left.addChild(bottom)
    right.addChild(bottom)
    Job.Runner.startToil(top, self.options)
开发者ID:joskid,项目名称:toil,代码行数:13,代码来源:jobCacheTest.py
示例18: testServiceSerialization
def testServiceSerialization(self):
    """
    Tests that a service can receive a promise without producing a serialization
    error.
    """
    hostJob = Job()
    parentService = TestServiceSerialization("woot")
    # Add a first service to the job and capture its start-value promise.
    startValue = hostJob.addService(parentService)
    # A child service that consumes the parent's start-value promise; this
    # only works if serialization on services is working correctly.
    childService = TestServiceSerialization(startValue)
    hostJob.addService(childService, parentService=parentService)
    self.runToil(hostJob)
开发者ID:Duke-GCB,项目名称:toil,代码行数:14,代码来源:jobServiceTest.py
示例19: test_star
def test_star(self):
    """
    Exercise the STAR RNA alignment pipeline (align_rna) end to end.
    """
    univ_options = self._getTestUnivOptions()
    config_file = os.path.join(self._projectRootPath(), "src/protect/test/test_inputs/ci_parameters.yaml")
    test_src_folder = os.path.join(self._projectRootPath(), "src", "protect", "test")
    # Chain: fetch test files -> load all tools -> pick "star" -> run align_rna.
    starFiles = Job.wrapJobFn(self._get_test_star_files)
    allTools = Job.wrapJobFn(self._get_all_tools, config_file).encapsulate()
    starTool = Job.wrapJobFn(self._get_tool, allTools.rv(), "star")
    alignment = Job.wrapJobFn(align_rna, starFiles.rv(), univ_options,
                              starTool.rv()).encapsulate()
    starFiles.addChild(allTools)
    allTools.addChild(starTool)
    starTool.addChild(alignment)
    Job.Runner.startToil(starFiles, self.options)
开发者ID:BD2KGenomics,项目名称:protect,代码行数:15,代码来源:test_alignments.py
示例20: main
def main(args):
    """Filter the input GAM list, validate the paths, then run the
    call_variants workflow, raising if any jobs fail."""
    options = parse_args(args)
    RealTimeLogger.start_master()
    # Drop any input GAM whose path contains one of the (non-empty) skip words.
    skip_words = [word for word in options.skip.split(",") if len(word) > 0]
    options.in_gams = [gam for gam in options.in_gams
                       if not any(word in gam for word in skip_words)]
    # Every remaining path must look like .../<alg>/<reads>/<filename>.gam
    for gam in options.in_gams:
        if len(gam.split("/")) < 3 or os.path.splitext(gam)[1] != ".gam":
            raise RuntimeError("Input gam paths must be of the form "
                               ".../<alg>/<reads>/<filename>.gam")
    # Make a root job and run the workflow; startToil returns the number of
    # failed jobs.
    root_job = Job.wrapJobFn(call_variants, options,
                             cores=1, memory="2G", disk="2G")
    failed_jobs = Job.Runner.startToil(root_job, options)
    if failed_jobs > 0:
        raise Exception("{} jobs failed!".format(failed_jobs))
    RealTimeLogger.stop_master()
开发者ID:cmarkello,项目名称:hgvm-graph-bakeoff-evaluations,代码行数:33,代码来源:callVariants.py
注:本文中的toil.job.Job类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论