This page collects typical usage examples of the Python class taskbuffer.FileSpec.FileSpec. If you have been wondering what the FileSpec class is for, how to use it, or what real FileSpec code looks like, the hand-picked class examples below should help.
A total of 20 code examples of the FileSpec class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
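All of the examples share one core pattern: create a FileSpec, fill in its bookkeeping attributes (lfn, type, dataset, and the destination or production dataset blocks), then attach it to a JobSpec with addFile(). Here is a minimal sketch of that pattern, assuming only that the PanDA client's taskbuffer package is importable; the dataset and file names are placeholders, not values from any example below:

from taskbuffer.JobSpec import JobSpec
from taskbuffer.FileSpec import FileSpec

job = JobSpec()
job.destinationDBlock = 'user.example.outDS'      # placeholder output dataset

# an output file produced by the job
fileO = FileSpec()
fileO.lfn = 'example.AOD.pool.root'               # logical file name (placeholder)
fileO.type = 'output'                             # one of 'input', 'output', 'log'
fileO.dataset = job.destinationDBlock
fileO.destinationDBlock = job.destinationDBlock
job.addFile(fileO)

# an input file consumed by the job
fileI = FileSpec()
fileI.lfn = 'example.RDO.pool.root'               # placeholder input file
fileI.type = 'input'
fileI.dataset = 'example.inputDS'                 # placeholder input dataset
fileI.prodDBlock = fileI.dataset
job.addFile(fileI)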
Example 1: outFileSpec
def outFileSpec(of=None, log=False):
    """Local routine to create a FileSpec for a job output/log file.
    :arg str of: output file base name
    :return: FileSpec object for the output file."""
    # jobid, lfnhanger, pandajob and task come from the enclosing scope
    outfile = FileSpec()
    if log:
        outfile.lfn = "job.log_%d_%s.tgz" % (jobid, lfnhanger)
        outfile.type = 'log'
    else:
        outfile.lfn = '%s_%d_%s%s' % (os.path.splitext(of)[0], jobid, lfnhanger, os.path.splitext(of)[1])
        outfile.type = 'output'
    outfile.destinationDBlock = pandajob.destinationDBlock
    outfile.destinationSE = task['tm_asyncdest']
    outfile.dataset = pandajob.destinationDBlock
    return outfile
Developer: HassenRiahi, Project: CAFTaskWorker, Lines: 16, Source: PanDAInjection.py
Example 2: FileSpec
job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf-phosim332.sh'
job.destinationDBlock = datasetName
#job.destinationSE = destName
job.destinationSE = 'local'
job.currentPriority = 1000
#job.prodSourceLabel = 'ptest'   # earlier runs also tried 'test' and 'user' (2014-01-27)
job.prodSourceLabel = 'panda'
job.computingSite = site
job.jobParameters = ""
job.VO = "lsst"

fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)

s,o = Client.submitJobs([job], srvID=aSrvID)
print s
for x in o:
    print "PandaID=%s" % x[0]
Developer: PanDAWMS, Project: panda-server, Lines: 30, Source: lsstSubmitPhosim332.py
Example 3: DBProxyPool
import sys
import getpass
import commands
# imports below are assumed from the surrounding test script; the snippet only
# shows the module bodies, not its header
from taskbuffer.JobSpec import JobSpec
from taskbuffer.FileSpec import FileSpec
from taskbuffer.DBProxyPool import DBProxyPool

passwd = getpass.getpass()
pool = DBProxyPool('adbpro.usatlas.bnl.gov', passwd, 2)
proxy = pool.getProxy()

job1 = JobSpec()
job1.PandaID = 'NULL'
job1.jobStatus = 'unknown'
job1.computingSite = "aaa"
f11 = FileSpec()
f11.lfn = 'in1.pool.root'
f11.type = 'input'
job1.addFile(f11)
f12 = FileSpec()
f12.lfn = 'out1.pool.root'
f12.type = 'output'
job1.addFile(f12)

job2 = JobSpec()
job2.PandaID = 'NULL'
job2.jobStatus = 'unknown'
job2.computingSite = "bbb"
f21 = FileSpec()
f21.lfn = 'in2.pool.root'
f21.type = 'input'
Developer: EntityOfPlague, Project: panda-server, Lines: 30, Source: testDB.py
Example 4: range
for i in range(1):
    for lfn in files.keys():
        job = JobSpec()
        job.jobDefinitionID = int(time.time()) % 10000
        job.jobName = commands.getoutput('uuidgen')
        job.AtlasRelease = 'Atlas-13.0.35'
        job.homepackage = 'AtlasPoint1/13.0.35.1'
        job.transformation = 'csc_cosmics_trf.py'
        job.destinationDBlock = datasetName
        job.cloud = cloud
        job.computingSite = site
        job.prodDBlock = files[lfn]
        job.prodSourceLabel = 'test'
        job.currentPriority = 1001

        fileI = FileSpec()
        fileI.dataset = job.prodDBlock
        fileI.prodDBlock = job.prodDBlock
        fileI.lfn = lfn
        fileI.type = 'input'
        job.addFile(fileI)

        fileD = FileSpec()
        fileD.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v030101'
        fileD.prodDBlock = 'ddo.000001.Atlas.Ideal.DBRelease.v030101'
        fileD.lfn = 'DBRelease-3.1.1.tar.gz'
        fileD.type = 'input'
        # job.addFile(fileD)

        fileO1 = FileSpec()
Developer: EntityOfPlague, Project: panda-server, Lines: 31, Source: cl_testMXreco.py
Example 5: master_prepare
def master_prepare(self, app, appconfig):
    '''Prepare the master job'''
    from pandatools import Client
    from taskbuffer.JobSpec import JobSpec
    from taskbuffer.FileSpec import FileSpec

    job = app._getParent()
    logger.debug('ExecutablePandaRTHandler master_prepare called for %s', job.getFQID('.'))

    # set chirp variables
    if configPanda['chirpconfig'] or configPanda['chirpserver']:
        setChirpVariables()

    # pack inputsandbox
    inputsandbox = 'sources.%s.tar' % commands.getoutput('uuidgen 2> /dev/null')
    inpw = job.getInputWorkspace()
    # add user script to inputsandbox
    if hasattr(job.application.exe, "name"):
        if not job.application.exe in job.inputsandbox:
            job.inputsandbox.append(job.application.exe)

    for fname in [f.name for f in job.inputsandbox]:
        fname = fname.rstrip(os.sep)    # the original discarded the rstrip() result
        path = fname[:fname.rfind(os.sep)]
        f = fname[fname.rfind(os.sep)+1:]
        rc, output = commands.getstatusoutput('tar rf %s -C %s %s' % (inpw.getPath(inputsandbox), path, f))
        if rc:
            logger.error('Packing inputsandbox failed with status %d', rc)
            logger.error(output)
            raise ApplicationConfigurationError('Packing inputsandbox failed.')

    if len(job.inputsandbox) > 0:
        rc, output = commands.getstatusoutput('gzip %s' % (inpw.getPath(inputsandbox)))
        if rc:
            logger.error('Packing inputsandbox failed with status %d', rc)
            logger.error(output)
            raise ApplicationConfigurationError('Packing inputsandbox failed.')
        inputsandbox += ".gz"
    else:
        inputsandbox = None

    # upload inputsandbox
    if inputsandbox:
        logger.debug('Uploading source tarball ...')
        uploadSources(inpw.getPath(), os.path.basename(inputsandbox))
        self.inputsandbox = inputsandbox
    else:
        self.inputsandbox = None

    # input dataset
    if job.inputdata:
        if job.inputdata._name != 'DQ2Dataset':
            raise ApplicationConfigurationError('PANDA application supports only DQ2Datasets')

    # run brokerage here if not splitting
    if not job.splitter:
        from GangaPanda.Lib.Panda.Panda import runPandaBrokerage
        runPandaBrokerage(job)
    elif job.splitter._name not in ['DQ2JobSplitter', 'ArgSplitter', 'ArgSplitterTask']:
        raise ApplicationConfigurationError('Panda splitter must be DQ2JobSplitter or ArgSplitter')
    if job.backend.site == 'AUTO':
        raise ApplicationConfigurationError('site is still AUTO after brokerage!')

    # output dataset
    if job.outputdata:
        if job.outputdata._name != 'DQ2OutputDataset':
            raise ApplicationConfigurationError('Panda backend supports only DQ2OutputDataset')
    else:
        logger.info('Adding missing DQ2OutputDataset')
        job.outputdata = DQ2OutputDataset()

    job.outputdata.datasetname, outlfn = dq2outputdatasetname(job.outputdata.datasetname, job.id, job.outputdata.isGroupDS, job.outputdata.groupname)

    self.outDsLocation = Client.PandaSites[job.backend.site]['ddm']
    try:
        Client.addDataset(job.outputdata.datasetname, False, location=self.outDsLocation)
        logger.info('Output dataset %s registered at %s' % (job.outputdata.datasetname, self.outDsLocation))
        dq2_set_dataset_lifetime(job.outputdata.datasetname, location=self.outDsLocation)
    except exceptions.SystemExit:
        raise BackendError('Panda', 'Exception in Client.addDataset %s: %s %s' % (job.outputdata.datasetname, sys.exc_info()[0], sys.exc_info()[1]))

    # handle the libds
    if job.backend.libds:
        self.libDataset = job.backend.libds
        self.fileBO = getLibFileSpecFromLibDS(self.libDataset)
        self.library = self.fileBO.lfn
    elif job.backend.bexec:
        self.libDataset = job.outputdata.datasetname + '.lib'
        self.library = '%s.tgz' % self.libDataset
        try:
            Client.addDataset(self.libDataset, False, location=self.outDsLocation)
            dq2_set_dataset_lifetime(self.libDataset, location=self.outDsLocation)
            logger.info('Lib dataset %s registered at %s' % (self.libDataset, self.outDsLocation))
        except exceptions.SystemExit:
            raise BackendError('Panda', 'Exception in Client.addDataset %s: %s %s' % (self.libDataset, sys.exc_info()[0], sys.exc_info()[1]))

    # collect extOutFiles
    self.extOutFile = []
# ......... part of the code omitted here .........
Developer: Erni1619, Project: ganga, Lines: 101, Source: ExecutablePandaRTHandler.py
Example 6: prepare
def prepare(self, app, appsubconfig, appmasterconfig, jobmasterconfig):
    """Prepare the specific aspects of each subjob.
    Returns: subjobconfig list of objects understood by backends."""
    from pandatools import Client
    from pandatools import AthenaUtils
    from taskbuffer.JobSpec import JobSpec
    from taskbuffer.FileSpec import FileSpec
    from GangaAtlas.Lib.ATLASDataset.DQ2Dataset import dq2_set_dataset_lifetime
    from GangaPanda.Lib.Panda.Panda import refreshPandaSpecs

    # make sure we have the correct siteType
    refreshPandaSpecs()

    job = app._getParent()
    masterjob = job._getRoot()
    logger.debug('ProdTransPandaRTHandler prepare called for %s',
                 job.getFQID('.'))

    job.backend.actualCE = job.backend.site
    job.backend.requirements.cloud = Client.PandaSites[job.backend.site]['cloud']

    # check that the site is in a submittable status
    if not job.splitter or job.splitter._name != 'DQ2JobSplitter':
        allowed_sites = job.backend.list_ddm_sites()

    try:
        outDsLocation = Client.PandaSites[job.backend.site]['ddm']
        tmpDsExist = False
        if (configPanda['processingType'].startswith('gangarobot') or configPanda['processingType'].startswith('hammercloud')):
            #if Client.getDatasets(job.outputdata.datasetname):
            if getDatasets(job.outputdata.datasetname):
                tmpDsExist = True
                logger.info('Re-using output dataset %s' % job.outputdata.datasetname)
        if not configPanda['specialHandling'] == 'ddm:rucio' and not configPanda['processingType'].startswith('gangarobot') and not configPanda['processingType'].startswith('hammercloud') and not configPanda['processingType'].startswith('rucio_test'):
            Client.addDataset(job.outputdata.datasetname, False, location=outDsLocation, allowProdDisk=True, dsExist=tmpDsExist)
        logger.info('Output dataset %s registered at %s' % (job.outputdata.datasetname, outDsLocation))
        dq2_set_dataset_lifetime(job.outputdata.datasetname, outDsLocation)
    except exceptions.SystemExit:
        raise BackendError('Panda', 'Exception in adding dataset %s: %s %s' % (job.outputdata.datasetname, sys.exc_info()[0], sys.exc_info()[1]))

    # JobSpec
    jspec = JobSpec()
    jspec.currentPriority = app.priority
    jspec.jobDefinitionID = masterjob.id
    jspec.jobName = commands.getoutput('uuidgen 2> /dev/null')
    jspec.coreCount = app.core_count
    jspec.AtlasRelease = 'Atlas-%s' % app.atlas_release
    jspec.homepackage = app.home_package
    jspec.transformation = app.transformation
    jspec.destinationDBlock = job.outputdata.datasetname
    if job.outputdata.location:
        jspec.destinationSE = job.outputdata.location
    else:
        jspec.destinationSE = job.backend.site
    if job.inputdata:
        jspec.prodDBlock = job.inputdata.dataset[0]
    else:
        jspec.prodDBlock = 'NULL'
    if app.prod_source_label:
        jspec.prodSourceLabel = app.prod_source_label
    else:
        jspec.prodSourceLabel = configPanda['prodSourceLabelRun']
    jspec.processingType = configPanda['processingType']
    jspec.specialHandling = configPanda['specialHandling']
    jspec.computingSite = job.backend.site
    jspec.cloud = job.backend.requirements.cloud
    jspec.cmtConfig = app.atlas_cmtconfig

    if app.dbrelease == 'LATEST':
        try:
            latest_dbrelease = getLatestDBReleaseCaching()
        except:
            from pandatools import Client
            latest_dbrelease = Client.getLatestDBRelease()
        m = re.search('(.*):DBRelease-(.*)\.tar\.gz', latest_dbrelease)
        if m:
            self.dbrelease_dataset = m.group(1)
            self.dbrelease = m.group(2)
        else:
            raise ApplicationConfigurationError(None, "Error retrieving LATEST DBRelease. Try setting application.dbrelease manually.")
    else:
        self.dbrelease_dataset = app.dbrelease_dataset
        self.dbrelease = app.dbrelease

    jspec.jobParameters = app.job_parameters
    if self.dbrelease:
        if self.dbrelease == 'current':
            jspec.jobParameters += ' --DBRelease=current'
        else:
            if jspec.transformation.endswith("_tf.py") or jspec.transformation.endswith("_tf"):
                jspec.jobParameters += ' --DBRelease=DBRelease-%s.tar.gz' % (self.dbrelease,)
            else:
                jspec.jobParameters += ' DBRelease=DBRelease-%s.tar.gz' % (self.dbrelease,)
            dbspec = FileSpec()
            dbspec.lfn = 'DBRelease-%s.tar.gz' % self.dbrelease
            dbspec.dataset = self.dbrelease_dataset
            dbspec.prodDBlock = jspec.prodDBlock
            dbspec.type = 'input'
            jspec.addFile(dbspec)
# ......... part of the code omitted here .........
Developer: VladimirRomanovsky, Project: ganga, Lines: 101, Source: ProdTransPandaRTHandler.py
Example 7: prepare
def prepare(self, app, appconfig, appmasterconfig, jobmasterconfig):
    '''prepare the subjob specific configuration'''
    # PandaTools
    from pandatools import Client
    from pandatools import AthenaUtils
    from taskbuffer.JobSpec import JobSpec
    from taskbuffer.FileSpec import FileSpec

    job = app._getParent()
    logger.debug('AthenaMCPandaRTHandler prepare called for %s', job.getFQID('.'))

    try:
        assert self.outsite
    except:
        logger.error("outsite not set. Aborting")
        raise Exception()

    job.backend.site = self.outsite
    job.backend.actualCE = self.outsite
    cloud = job._getRoot().backend.requirements.cloud
    job.backend.requirements.cloud = cloud

    # now just filling the job from AthenaMC data
    jspec = JobSpec()
    jspec.jobDefinitionID = job._getRoot().id
    jspec.jobName = commands.getoutput('uuidgen 2> /dev/null')
    jspec.AtlasRelease = 'Atlas-%s' % app.atlas_rel

    if app.transform_archive:
        jspec.homepackage = 'AnalysisTransforms' + app.transform_archive
    elif app.prod_release:
        jspec.homepackage = 'AnalysisTransforms-AtlasProduction_' + str(app.prod_release)
    jspec.transformation = '%s/runAthena-00-00-11' % Client.baseURLSUB

    # ----> ???? prodDBlock and destinationDBlock when facing several input / output datasets?
    jspec.prodDBlock = 'NULL'
    if job.inputdata and len(app.inputfiles) > 0 and app.inputfiles[0] in app.dsetmap:
        jspec.prodDBlock = app.dsetmap[app.inputfiles[0]]

    # How to specify jspec.destinationDBlock when more than one type of output is
    # available? Panda prod jobs seem to specify only the last output dataset.
    outdset = ""
    for type in ["EVNT", "RDO", "HITS", "AOD", "ESD", "NTUP"]:
        if type in app.outputpaths.keys():
            outdset = string.replace(app.outputpaths[type], "/", ".")
            outdset = outdset[1:-1]
            break
    if not outdset:
        try:
            assert len(app.outputpaths.keys()) > 0
        except:
            logger.error("app.outputpaths is empty: check your output datasets")
            raise
        type = app.outputpaths.keys()[0]
        outdset = string.replace(app.outputpaths[type], "/", ".")
        outdset = outdset[1:-1]

    jspec.destinationDBlock = outdset
    jspec.destinationSE = self.outsite
    jspec.prodSourceLabel = 'user'
    jspec.assignedPriority = 1000
    jspec.cloud = cloud
    # memory
    if job.backend.requirements.memory != -1:
        jspec.minRamCount = job.backend.requirements.memory
    jspec.computingSite = self.outsite
    jspec.cmtConfig = AthenaUtils.getCmtConfig(athenaVer=app.atlas_rel)

    # library (source files)
    flib = FileSpec()
    flib.lfn = self.library
    # flib.GUID =
    flib.type = 'input'
    # flib.status =
    flib.dataset = self.libDataset
    flib.dispatchDBlock = self.libDataset
    jspec.addFile(flib)

    # input files FIXME: many more input types
    for lfn in app.inputfiles:
        useguid = app.turls[lfn].replace("guid:", "")
        finp = FileSpec()
        finp.lfn = lfn
        finp.GUID = useguid
        finp.dataset = app.dsetmap[lfn]
        finp.prodDBlock = app.dsetmap[lfn]
        finp.prodDBlockToken = 'local'
        finp.dispatchDBlock = app.dsetmap[lfn]
        finp.type = 'input'
        finp.status = 'ready'
        jspec.addFile(finp)

    # add dbfiles if any:
    for lfn in app.dbfiles:
        useguid = app.dbturls[lfn].replace("guid:", "")
        finp = FileSpec()
        finp.lfn = lfn
        finp.GUID = useguid
        finp.dataset = app.dsetmap[lfn]
# ......... part of the code omitted here .........
Developer: MannyMoo, Project: ganga, Lines: 101, Source: AthenaMCPandaRTHandler.py
Example 8: JobSpec
job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = commands.getoutput('uuidgen')
job.AtlasRelease = 'Atlas-11.0.41'
#job.AtlasRelease = 'Atlas-11.0.3'
job.homepackage = 'AnalysisTransforms'
job.transformation = 'https://gridui01.usatlas.bnl.gov:24443/dav/test/runAthena'
job.destinationDBlock = datasetName
job.destinationSE = destName
job.currentPriority = 100
job.prodSourceLabel = 'user'
job.computingSite = site
#job.prodDBlock = "pandatest.b1599dfa-cd36-4fc5-92f6-495781a94c66"
job.prodDBlock = "pandatest.f228b051-077b-4f81-90bf-496340644379"
fileI = FileSpec()
fileI.dataset = job.prodDBlock
fileI.prodDBlock = job.prodDBlock
fileI.lfn = "lib.f228b051-077b-4f81-90bf-496340644379.tgz"
fileI.type = 'input'
job.addFile(fileI)
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % commands.getoutput('uuidgen')
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)
fileOZ = FileSpec()
Developer: EntityOfPlague, Project: panda-server, Lines: 31, Source: execute.py
Example 9: FileSpec
# NOTE: this snippet starts inside a loop that parses 'arg=var' pairs from the
# original job parameters; the first (elided) branch handles output files.
        if arg in ['outputTypes']:
            continue
        prefix = var.split('.')[0]
        sumatch = re.search('(\.[^\.]+\.[^\.]+)(\.\d+)*$', var)
        suffix = sumatch.group(1)
        newName = '%s.%s%s' % (job.jobName, prefix, suffix)
        outMap[arg] = (var, newName)
    # DBRelease
    elif arg == 'DBRelease':
        dbrMap = (arg, var)
    # input
    elif arg.startswith('input') and arg.endswith('File'):
        inputMap = (arg, var)

fileI = FileSpec()
fileI.dataset = job.prodDBlock
fileI.prodDBlock = job.prodDBlock
fileI.lfn = lfn
fileI.type = 'input'
job.addFile(fileI)

fileD = FileSpec()
fileD.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v06020105'
fileD.prodDBlock = fileD.dataset
fileD.lfn = 'DBRelease-6.2.1.5.tar.gz'
fileD.type = 'input'
job.addFile(fileD)

newParams = origParams
newParams = newParams.replace(dbrMap[0]+'='+dbrMap[1], dbrMap[0]+'='+fileD.lfn)
Developer: EntityOfPlague, Project: panda-server, Lines: 31, Source: testRepro.py
Example 10: JobSpec
destName = 'ANALY_BNL_ATLAS_1'
job = JobSpec()
job.jobDefinitionID = 1
job.jobName = commands.getoutput('uuidgen')
job.AtlasRelease = 'Atlas-12.0.2'
job.homepackage = 'AnalysisTransforms'
job.transformation = 'https://gridui01.usatlas.bnl.gov:24443/dav/test/runAthena2'
job.destinationDBlock = datasetName
job.destinationSE = destName
job.currentPriority = 3000
job.prodSourceLabel = 'user'
job.computingSite = site
job.prodDBlock = 'testIdeal_06.005001.pythia_minbias.recon.AOD.v12000103'
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % commands.getoutput('uuidgen')
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)
fileOZ = FileSpec()
fileOZ.lfn = "AANT.%s.root" % commands.getoutput('uuidgen')
fileOZ.destinationDBlock = job.destinationDBlock
fileOZ.destinationSE = job.destinationSE
fileOZ.dataset = job.destinationDBlock
fileOZ.type = 'output'
job.addFile(fileOZ)
Developer: EntityOfPlague, Project: panda-server, Lines: 30, Source: analysis.py
Example 11: run
# ......... earlier part of the code omitted .........
retTopT = self.taskBuffer.updateDatasets([unmergedDs], withLock=True, withCriteria="status<>:crStatus",
                                         criteriaMap={':crStatus': unmergedDs.status})
if len(retTopT) > 0 and retTopT[0] == 1:
    _logger.debug('%s set %s to parent dataset : %s' % (self.pandaID, unmergedDs.status, unmergedDsName))
else:
    _logger.debug('%s failed to update parent dataset : %s' % (self.pandaID, unmergedDsName))
if self.pandaDDM and self.job.prodSourceLabel == 'managed':
    # instantiate SiteMapper
    if self.siteMapper == None:
        self.siteMapper = SiteMapper(self.taskBuffer)
    # get file list for PandaDDM
    retList = self.taskBuffer.queryFilesWithMap({'destinationDBlock': destinationDBlock})
    lfnsStr = ''
    guidStr = ''
    for tmpFile in retList:
        if tmpFile.type in ['log', 'output']:
            lfnsStr += '%s,' % tmpFile.lfn
            guidStr += '%s,' % tmpFile.GUID
    if lfnsStr != '':
        guidStr = guidStr[:-1]
        lfnsStr = lfnsStr[:-1]
        # create a DDM job
        ddmjob = JobSpec()
        ddmjob.jobDefinitionID = int(time.time()) % 10000
        ddmjob.jobName = "%s" % commands.getoutput('uuidgen')
        ddmjob.transformation = 'http://pandaserver.cern.ch:25080/trf/mover/run_dq2_cr'
        ddmjob.destinationDBlock = 'testpanda.%s' % ddmjob.jobName
        ddmjob.computingSite = "BNL_ATLAS_DDM"
        ddmjob.destinationSE = ddmjob.computingSite
        ddmjob.currentPriority = 200000
        ddmjob.prodSourceLabel = 'ddm'
        ddmjob.transferType = 'sub'
        # append log file
        fileOL = FileSpec()
        fileOL.lfn = "%s.job.log.tgz" % ddmjob.jobName
        fileOL.destinationDBlock = ddmjob.destinationDBlock
        fileOL.destinationSE = ddmjob.destinationSE
        fileOL.dataset = ddmjob.destinationDBlock
        fileOL.type = 'log'
        ddmjob.addFile(fileOL)
        # make arguments
        dstDQ2ID = 'BNLPANDA'
        srcDQ2ID = self.siteMapper.getSite(self.job.computingSite).ddm
        callBackURL = 'https://%s:%s/server/panda/datasetCompleted?vuid=%s&site=%s' % \
                      (panda_config.pserverhost, panda_config.pserverport,
                       dataset.vuid, dstDQ2ID)
        _logger.debug(callBackURL)
        # set src/dest
        ddmjob.sourceSite = srcDQ2ID
        ddmjob.destinationSite = dstDQ2ID
        # if src==dst, send callback without a DDM job
        if dstDQ2ID == srcDQ2ID:
            comout = commands.getoutput('curl -k %s' % callBackURL)
            _logger.debug(comout)
        else:
            # run dq2_cr
            callBackURL = urllib.quote(callBackURL)
            # get destination dir
            destDir = brokerage.broker_util._getDefaultStorage(self.siteMapper.getSite(self.job.computingSite).dq2url)
            argStr = "-s %s -r %s --guids %s --lfns %s --callBack %s -d %s/%s %s" % \
                     (srcDQ2ID, dstDQ2ID, guidStr, lfnsStr, callBackURL, destDir,
                      destinationDBlock, destinationDBlock)
            # set job parameters
            ddmjob.jobParameters = argStr
            _logger.debug('%s pdq2_cr %s' % (self.pandaID, ddmjob.jobParameters))
            ddmJobs.append(ddmjob)
Developer: EntityOfPlague, Project: panda-server, Lines: 67, Source: Closer.py
Example 12: FileSpec
job.jobName = "%s_%d" % (commands.getoutput('uuidgen'),index)
job.AtlasRelease = 'Atlas-14.1.0\nAtlas-14.1.0'
job.homepackage = 'AtlasProduction/14.1.0.3\nAtlasProduction/14.1.0.3'
job.transformation = 'csc_digi_trf.py\ncsc_reco_trf.py'
job.destinationDBlock = datasetName
job.computingSite = site
job.prodDBlock = 'valid1.005200.T1_McAtNlo_Jimmy.simul.HITS.e322_s429_tid022081'
job.prodSourceLabel = 'test'
job.currentPriority = 10000
job.cloud = 'US'
for lfn in ['HITS.022081._00001.pool.root','HITS.022081._00002.pool.root']:
fileI = FileSpec()
fileI.dataset = job.prodDBlock
fileI.prodDBlock = job.prodDBlock
fileI.lfn = lfn
fileI.type = 'input'
job.addFile(fileI)
fileD1 = FileSpec()
fileD1.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v050001'
fileD1.prodDBlock = fileD1.dataset
fileD1.lfn = 'DBRelease-5.0.1.tar.gz'
fileD1.type = 'input'
job.addFile(fileD1)
fileD2 = FileSpec()
fileD2.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v050101'
开发者ID:EntityOfPlague,项目名称:panda-server,代码行数:31,代码来源:testMultiTRF.py
示例13: FileSpec
job.prodSourceLabel = 'managed'
# priority
job.assignedPriority = priority
job.currentPriority = priority
# CPU, memory,disk ### FIXME
# attempt number ### FIXME
# input files
if iDataset != 'NULL':
# remove _tidXXX
pat = re.sub('_tid\d+$','',iDataset)
# search
m = re.search('('+pat+'\S+)',line)
if m != None:
file = FileSpec()
file.lfn = m.group(1)
file.type = 'input'
file.dataset = iDataset
file.prodDBlock = iDataset
job.addFile(file)
# DB release
for i,lpar in enumerate(lparams):
if lpar == 'DBRelease':
file = FileSpec()
file.lfn = "%s-%s.tgz" % (lpar,vparams[i])
file.type = 'input'
file.dataset = iDataset
file.prodDBlock = iDataset
job.addFile(file)
break
开发者ID:EntityOfPlague,项目名称:panda-server,代码行数:31,代码来源:directSubmit.py
示例14: send_job
def send_job(jobid, siteid):
_logger.debug('Jobid: ' + str(jobid))
site = sites_.get(siteid)
job = jobs_.get(int(jobid))
cont = job.container
files_catalog = cont.files
fscope = getScope(job.owner.username)
datasetName = '{}:{}'.format(fscope, cont.guid)
distributive = job.distr.name
release = job.distr.release
# Prepare runScript
parameters = job.distr.command
parameters = parameters.replace("$COMMAND$", job.params)
parameters = parameters.replace("$USERNAME$", job.owner.username)
parameters = parameters.replace("$WORKINGGROUP$", job.owner.working_group)
# Prepare metadata
metadata = dict(user=job.owner.username)
# Prepare PanDA Object
pandajob = JobSpec()
pandajob.jobDefinitionID = int(time.time()) % 10000
pandajob.jobName = cont.guid
pandajob.transformation = client_config.DEFAULT_TRF
pandajob.destinationDBlock = datasetName
pandajob.destinationSE = site.se
pandajob.currentPriority = 1000
pandajob.prodSourceLabel = 'user'
pandajob.computingSite = site.ce
pandajob.cloud = 'RU'
pandajob.VO = 'atlas'
pandajob.prodDBlock = "%s:%s" % (fscope, pandajob.jobName)
pandajob.coreCount = job.corecount
pandajob.metadata = json.dumps(metadata)
#pandajob.workingGroup = job.owner.working_group
if site.encode_commands:
# It requires script wrapper on cluster side
pandajob.jobParameters = '%s %s %s "%s"' % (cont.guid, release, distributive, parameters)
else:
pandajob.jobParameters = parameters
has_input = False
for fcc in files_catalog:
if fcc.type == 'input':
f = fcc.file
guid = f.guid
fileIT = FileSpec()
fileIT.lfn = f.lfn
fileIT.dataset = pandajob.prodDBlock
fileIT.prodDBlock = pandajob.prodDBlock
fileIT.type = 'input'
fileIT.scope = fscope
fileIT.status = 'ready'
fileIT.GUID = guid
pandajob.addFile(fileIT)
has_input = True
if fcc.type == 'output':
f = fcc.file
fileOT = FileSpec()
fileOT.lfn = f.lfn
fileOT.destinationDBlock = pandajob.prodDBlock
fileOT.destinationSE = pandajob.destinationSE
fileOT.dataset = pandajob.prodDBlock
fileOT.type = 'output'
fileOT.scope = fscope
fileOT.GUID = f.guid
pandajob.addFile(fileOT)
# Save replica meta
fc.new_replica(f, site)
if not has_input:
# Add fake input
fileIT = FileSpec()
fileIT.lfn = "fake.input"
fileIT.dataset = pandajob.prodDBlock
fileIT.prodDBlock = pandajob.prodDBlock
fileIT.type = 'input'
fileIT.scope = fscope
fileIT.status = 'ready'
fileIT.GUID = "fake.guid"
pandajob.addFile(fileIT)
# Prepare lof file
fileOL = FileSpec()
fileOL.lfn = "%s.log.tgz" % pandajob.jobName
fileOL.destinationDBlock = pandajob.destinationDBlock
fileOL.destinationSE = pandajob.destinationSE
fileOL.dataset = '{}:logs'.format(fscope)
fileOL.type = 'log'
fileOL.scope = 'panda'
pandajob.addFile(fileOL)
#.........这里部分代码省略.........
Developer: RRCKI, Project: panda-web-client, Lines: 101, Source: scripts.py
Example 15: JobSpec
from taskbuffer.JobSpec import JobSpec
from taskbuffer.FileSpec import FileSpec

job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = commands.getoutput('/usr/bin/uuidgen')
job.AtlasRelease = 'Atlas-9.0.4'
job.prodDBlock = 'pandatest.000003.dd.input'
job.destinationDBlock = 'panda.destDB.%s' % commands.getoutput('/usr/bin/uuidgen')
job.destinationSE = 'BNL_SE'

ids = {'pandatest.000003.dd.input._00028.junk':'6c19e1fc-ee8c-4bae-bd4c-c9e5c73aca27',
       'pandatest.000003.dd.input._00033.junk':'98f79ba1-1793-4253-aac7-bdf90a51d1ee',
       'pandatest.000003.dd.input._00039.junk':'33660dd5-7cef-422a-a7fc-6c24cb10deb1'}
for lfn in ids.keys():
    file = FileSpec()
    file.lfn = lfn
    file.GUID = ids[file.lfn]
    file.dataset = 'pandatest.000003.dd.input'
    file.type = 'input'
    job.addFile(file)

s,o = Client.submitJobs([job])
print "---------------------"
print s
print o
print "---------------------"
s,o = Client.getJobStatus([4934, 4766, 4767, 4768, 4769])
print s
if s == 0:
    for job in o:
Developer: EntityOfPlague, Project: panda-server, Lines: 31, Source: testUser.py
Example 16: convertToJobFileSpec
def convertToJobFileSpec(self, datasetSpec, setType=None, useEventService=False):
    # convert a JEDI file spec into a taskbuffer JobFileSpec
    jobFileSpec = JobFileSpec()
    jobFileSpec.fileID = self.fileID
    jobFileSpec.datasetID = datasetSpec.datasetID
    jobFileSpec.jediTaskID = datasetSpec.jediTaskID
    jobFileSpec.lfn = self.lfn
    jobFileSpec.GUID = self.GUID
    if setType == None:
        jobFileSpec.type = self.type
    else:
        jobFileSpec.type = setType
    jobFileSpec.scope = self.scope
    jobFileSpec.fsize = self.fsize
    jobFileSpec.checksum = self.checksum
    jobFileSpec.attemptNr = self.attemptNr
    # dataset attribute
    if datasetSpec != None:
        # dataset
        if not datasetSpec.containerName in [None, '']:
            jobFileSpec.dataset = datasetSpec.containerName
        else:
            jobFileSpec.dataset = datasetSpec.datasetName
        if self.type in datasetSpec.getInputTypes() or setType in datasetSpec.getInputTypes():
            # prodDBlock
            jobFileSpec.prodDBlock = datasetSpec.datasetName
            # storage token
            if not datasetSpec.storageToken in ['', None]:
                jobFileSpec.dispatchDBlockToken = datasetSpec.storageToken
        else:
            # destinationDBlock
            jobFileSpec.destinationDBlock = datasetSpec.datasetName
            # storage token
            if not datasetSpec.storageToken in ['', None]:
                jobFileSpec.destinationDBlockToken = datasetSpec.storageToken.split('/')[0]
            # destination
            if not datasetSpec.destination in ['', None]:
                jobFileSpec.destinationSE = datasetSpec.destination
            # set prodDBlockToken for Event Service
            if useEventService and datasetSpec.getObjectStore() != None:
                jobFileSpec.prodDBlockToken = 'objectstore^{0}'.format(datasetSpec.getObjectStore())
            # allow no output
            if datasetSpec.isAllowedNoOutput():
                jobFileSpec.allowNoOutput()
    # return
    return jobFileSpec
Developer: ruslan33, Project: panda-jedi, Lines: 45, Source: JediFileSpec.py
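Example 16 is the bridge between JEDI's internal file representation and the taskbuffer FileSpec used everywhere else on this page (JobFileSpec appears to be an alias for taskbuffer.FileSpec.FileSpec; that reading, and the objects below, are assumptions based only on the method body above). A hypothetical call site might look like:

# Hypothetical usage sketch; 'fileSpec', 'datasetSpec' and 'job' are assumed
# to come from JEDI bookkeeping and are not defined anywhere on this page.
jobFile = fileSpec.convertToJobFileSpec(datasetSpec, setType='input')
job.addFile(jobFile)    # attach the converted spec to a taskbuffer JobSpec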
Example 17: FileSpec
job.jobName = "%s_%d" % (commands.getoutput('uuidgen'),index)
job.AtlasRelease = 'Atlas-12.0.6'
job.homepackage = 'AtlasProduction/12.0.6.4'
job.transformation = 'csc_reco_trf.py'
job.destinationDBlock = datasetName
job.destinationSE = destName
job.computingSite = site
#job.prodDBlock = 'misal1_mc12.005200.T1_McAtNlo_Jimmy.digit.RDO.v12000601_tid007554'
job.prodDBlock = 'misal1_mc12.005802.JF17_pythia_jet_filter.digit.RDO.v12000601_tid008610'
job.cloud = 'US'
job.prodSourceLabel = 'test'
job.currentPriority = 10000
job.cmtConfig = 'i686-slc4-gcc34-opt'
fileI = FileSpec()
fileI.dataset = job.prodDBlock
fileI.prodDBlock = job.prodDBlock
fileI.lfn = lfn
fileI.type = 'input'
job.addFile(fileI)
fileD = FileSpec()
fileD.dataset = 'ddo.000001.Atlas.Ideal.DBRelease.v030101'
fileD.prodDBlock = 'ddo.000001.Atlas.Ideal.DBRelease.v030101'
fileD.lfn = 'DBRelease-3.1.1.tar.gz'
fileD.type = 'input'
job.addFile(fileD)
fileOE = FileSpec()
fileOE.lfn = "%s.ESD.pool.root" % job.jobName
开发者ID:EntityOfPlague,项目名称:panda-server,代码行数:31,代码来源:testReco.py
示例18: defineEvgen16Job
def defineEvgen16Job(self, i):
"""Define an Evgen16 job based on predefined values and randomly generated names
"""
job = JobSpec()
job.computingSite = self.__site
job.cloud = self.__cloud
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = "%s_%d" % (uuid.uuid1(), i)
job.AtlasRelease = 'Atlas-16.6.2'
job.homepackage = 'AtlasProduction/16.6.2.1'
job.transformation = 'Evgen_trf.py'
job.destinationDBlock = self.__datasetName
job.destinationSE = self.__destName
job.currentPriority = 10000
job.prodSourceLabel = 'test'
job.cmtConfig = 'i686-slc5-gcc43-opt'
#Output file
fileO = FileSpec()
fileO.lfn = "%s.evgen.pool.root" % job.jobName
fileO.destinationDBlock = job.destinationDBlock
fileO.destinationSE = job.destinationSE
fileO.dataset = job.destinationDBlock
fileO.destinationDBlockToken = 'ATLASDATADISK'
fileO.type = 'output'
job.addFile(fileO)
#Log file
fileL = FileSpec()
fileL.lfn = "%s.job.log.tgz" % job.jobName
fileL.destinationDBlock = job.destinationDBlock
fileL.destinationSE = job.destinationSE
fileL.dataset = job.destinationDBlock
fileL.destinationDBlockToken = 'ATLASDATADISK'
fileL.type = 'log'
job.addFile(fileL)
job.jobParameters = "2760 105048 19901 101 200 MC10.105048.PythiaB_ccmu3mu1X.py %s NONE NONE NONE MC10JobOpts-latest-test.tar.gz" % fileO.lfn
return job
Developer: EntityOfPlague, Project: panda-server, Lines: 41, Source: testJobFlowATLAS.py
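For completeness, here is a hedged sketch of how jobs produced by defineEvgen16Job might be submitted, following the Client.submitJobs pattern from Examples 2 and 15; the 'tester' instance and the Client import are assumptions, since only submitJobs itself appears in the examples above:

# Hypothetical driver; 'tester' stands for an instance of the test class that
# defines defineEvgen16Job, and Client is the PanDA client module from Example 15.
jobs = [tester.defineEvgen16Job(i) for i in range(2)]
s, o = Client.submitJobs(jobs)
print s
for x in o:
    print "PandaID=%s" % x[0]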
Example 19: range
for i in range(2):
    job = JobSpec()
    job.jobDefinitionID = jobDefinitionID
    job.jobName = "%s_%d" % (commands.getoutput('uuidgen'), i)
    job.AtlasRelease = 'Atlas-12.0.6'
    job.homepackage = 'AnalysisTransforms'
    job.transformation = 'https://gridui01.usatlas.bnl.gov:24443/dav/test/runAthenaXrd'
    job.destinationDBlock = datasetName
    job.destinationSE = destName
    job.currentPriority = 3000
    job.assignedPriority = 3000
    job.prodSourceLabel = 'user'
    job.computingSite = site

    file = FileSpec()
    file.lfn = "%s.AANT._%05d.root" % (job.jobName, i)
    file.dest