本文整理汇总了Python中userinterface.Client类的典型用法代码示例。如果您正苦于以下问题:Python Client类的具体用法?Python Client怎么用?Python Client使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Client类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: getPandaStatus
def getPandaStatus(self):
    """Poll the panda server for per-site job statistics and cloud status.

    For every country/group pair configured in ``self.config.sites``, fetch
    job statistics and copy them onto each queue served by that site.
    Sites unknown to panda get an all-zero status dict so they can still
    bootstrap.  Finally the cloud status is polled and cached on
    ``self.pandaCloudStatus``.

    Raises:
        PandaStatusFailure: if any panda client call returns a non-zero
            error code.
    """
    for country in self.config.sites.keys():
        for group in self.config.sites[country].keys():
            # country/group = None is equivalent to not specifying anything
            self.factoryMessages.info('Polling panda status for country=%s, group=%s' % (country, group,))
            error, self.config.sites[country][group]['siteStatus'] = Client.getJobStatisticsPerSite(countryGroup=country, workingGroup=group)
            if error != 0:
                # call-style raise works on both python 2 and python 3
                raise PandaStatusFailure('Client.getJobStatisticsPerSite(countryGroup=%s,workingGroup=%s) error: %s' % (country, group, error))
            for siteid, queues in self.config.sites[country][group].items():
                # 'siteStatus' is the statistics entry itself, not a site id
                if siteid == 'siteStatus':
                    continue
                if siteid in self.config.sites[country][group]['siteStatus']:
                    self.factoryMessages.debug('Panda status: %s (country=%s, group=%s) %s' % (siteid, country, group, self.config.sites[country][group]['siteStatus'][siteid]))
                    for queue in queues:
                        self.config.queues[queue]['pandaStatus'] = self.config.sites[country][group]['siteStatus'][siteid]
                else:
                    # If panda knows nothing, then we assume all zeros (site may be inactive)
                    self.factoryMessages.debug('Panda status for siteid %s (country=%s, group=%s) not found - setting zeros in status to allow bootstraping of site.' % (siteid, country, group))
                    for queue in queues:
                        self.config.queues[queue]['pandaStatus'] = {'transferring': 0, 'activated': 0, 'running': 0, 'assigned': 0, 'failed': 0, 'finished': 0}
    # Now poll site and cloud status to suppress pilots if a site is offline
    # Take site status out - better to use individual queue status from schedconfig
    #self.factoryMessages.info('Polling panda for site status')
    #error,self.pandaSiteStatus = Client.getSiteSpecs(siteType='all')
    #if error != 0:
    #    raise PandaStatusFailure('''Client.getSiteSpecs(siteType='all') error: %s''' % (error))
    self.factoryMessages.info('Polling panda for cloud status')
    error, self.pandaCloudStatus = Client.getCloudSpecs()
    if error != 0:
        raise PandaStatusFailure('Client.getCloudSpecs() error: %s' % (error))
开发者ID:hep-gc,项目名称:panda-autopyfactory,代码行数:32,代码来源:Factory.py
示例2: killJobs
def killJobs(self, ids, code=None, verbose=False):
    """Kill jobs. Normal users can kill only their own jobs.
    People with production VOMS role can kill any jobs.
    Running jobs are killed when next heartbeat comes from the pilot.
    Set code=9 if running jobs need to be killed immediately.
    args:
        ids: the list of PandaIDs
        code: specify why the jobs are killed
            2: expire
            3: aborted
            4: expire in waiting
            7: retry by server
            8: rebrokerage
            9: force kill
            50: kill by JEDI
            91: kill user jobs with prod role
        verbose: set True to see what's going on
    returns:
        status code
            0: communication succeeded to the panda server
            255: communication failure
        the panda server response
    """
    import userinterface.Client as Client
    s, o = Client.killJobs(ids, code=code, verbose=verbose)
    # The original dropped the result despite documenting a return value;
    # propagate the (status, output) pair to the caller.
    return s, o
开发者ID:tertychnyy,项目名称:panda-web-client,代码行数:26,代码来源:UserIF.py
示例3: eraseDispDatasets
def eraseDispDatasets(ids):
    """Erase the dispatch datasets referenced by the given PandaIDs.

    Only production ('managed') jobs outside the US cloud are considered;
    each distinct ``_dis<N>`` dispatch DBlock is erased via DQ2.
    Returns silently if the job lookup fails.
    """
    print("eraseDispDatasets")
    datasets = []
    # get jobs
    status, jobs = Client.getJobStatus(ids)
    if status != 0:
        return
    # gather dispatch DBlocks
    for job in jobs:
        # dispatchDS is not a DQ2 dataset in US
        if job.cloud == 'US':
            continue
        # erase disp datasets for production jobs only
        if job.prodSourceLabel != 'managed':
            continue
        for tmpFile in job.Files:
            if tmpFile.dispatchDBlock == 'NULL':
                continue
            # only real dispatch blocks (suffix _dis<number>), deduplicated
            if tmpFile.dispatchDBlock not in datasets and \
                    re.search(r'_dis\d+$', tmpFile.dispatchDBlock) is not None:
                datasets.append(tmpFile.dispatchDBlock)
    # erase
    for dataset in datasets:
        print('erase %s' % dataset)
        status, out = ddm.DQ2.main('eraseDataset', dataset)
        print(out)
开发者ID:EntityOfPlague,项目名称:panda-server,代码行数:26,代码来源:reassignSite.py
示例4: uploadLog
def uploadLog(self):
    """Upload the accumulated logger output for this task to panda.

    Returns an error string on failure, an HTML link when the server
    returns a URL, or the raw server response otherwise.
    """
    if self.jediTaskID is None:
        return 'cannot find jediTaskID'
    strMsg = self.logger.dumpToString()
    s, o = Client.uploadLog(strMsg, self.jediTaskID)
    if s != 0:
        return "failed to upload log with {0}.".format(s)
    if o.startswith('http'):
        # server returned a URL; wrap it in an HTML anchor for display
        return '<a href="{0}">log</a>'.format(o)
    return o
开发者ID:PanDAWMS,项目名称:panda-server,代码行数:10,代码来源:EventPicker.py
示例5: getJobStatus
def getJobStatus(self, ids):
    """Return a mapping of PandaID -> jobStatus for the given PandaIDs.

    Returns an empty dict when the panda server call fails.
    """
    import userinterface.Client as Client
    status, jobInfos = Client.getJobStatus(ids)
    if status != 0:
        _logger.error('Error response code: %s %s' % (str(status), str(jobInfos)))
        return {}
    return dict((info.PandaID, info.jobStatus) for info in jobInfos)
开发者ID:tertychnyy,项目名称:panda-web-client,代码行数:10,代码来源:UserIF.py
示例6: killJobs
def killJobs(jobList):
    """Kill the given panda jobs and return the server response."""
    print('Kill jobs')
    _logger.debug('Kill jobs')
    _logger.debug(str(jobList))
    s, o = Client.killJobs(jobList)  # Code 3 eqs. aborted status
    _logger.debug(o)
    _logger.debug(s)
    _logger.debug("---------------------")
    return o
开发者ID:RRCKI,项目名称:panda-web-client,代码行数:10,代码来源:scripts.py
示例7: submitJobs
def submitJobs(jobList):
    """Submit the given job specs to panda and return the server response.

    The response is a list of (PandaID, jobDefinitionID, ...) tuples.
    """
    print('Submit jobs')
    _logger.debug('Submit jobs')
    _logger.debug(str(jobList))
    s, o = Client.submitJobs(jobList)
    _logger.debug(o)
    _logger.debug(s)
    _logger.debug("---------------------")
    for x in o:
        _logger.debug("PandaID=%s" % x[0])
    return o
开发者ID:RRCKI,项目名称:panda-web-client,代码行数:12,代码来源:scripts.py
示例8: getStatus
def getStatus(self, expectedStates):
    """Fetch current panda status for all generated jobs and assert each
    is in one of *expectedStates*; returns the job info list."""
    idList = [job['jobID'] for job in self.__jobList]
    print(idList)
    status, jobInfoList = Client.getJobStatus(idList)
    print(jobInfoList)
    assert status == 0, "Retrieval of job state finished with status: %s" % status
    for job in jobInfoList:
        assert job.jobStatus in expectedStates, "Recently defined job was not in states %s (PandaID: %s jobStatus: %s)" % (expectedStates, job.PandaID, job.jobStatus)
    return jobInfoList
开发者ID:EntityOfPlague,项目名称:panda-server,代码行数:13,代码来源:testJobFlowATLAS.py
示例9: eraseDispDatasets
def eraseDispDatasets(ids):
    """Erase all dispatch datasets referenced by the given PandaIDs.

    Returns silently if the job lookup fails.
    """
    datasets = []
    # get jobs
    status, jobs = Client.getJobStatus(ids)
    if status != 0:
        return
    # gather dispatch DBlocks (deduplicated)
    for job in jobs:
        for tmpFile in job.Files:
            if tmpFile.dispatchDBlock not in datasets:
                datasets.append(tmpFile.dispatchDBlock)
    # erase each dataset individually
    for dataset in datasets:
        # BUG FIX: the original passed the whole 'datasets' list on every
        # iteration instead of the current 'dataset'
        ddm.DQ2.main(['eraseDataset', dataset])
开发者ID:EntityOfPlague,项目名称:panda-server,代码行数:14,代码来源:reassignDefJobs.py
示例10: generateJobs
def generateJobs(self):
    """Define the configured number of evgen jobs, submit them in one
    batch, and record the assigned PandaID on each local entry."""
    for index in range(self.__nJobs):
        spec = self.defineEvgen16Job(index)
        self.__jobList.append({'jobSpec': spec, 'jobID': None})
    specs = [entry['jobSpec'] for entry in self.__jobList]
    # Return from submitJobs: ret.append((job.PandaID,job.jobDefinitionID,{'jobsetID':job.jobsetID}))
    status, output = Client.submitJobs(specs)
    assert status == 0, "Submission of jobs finished with status: %s" % status
    assert len(self.__jobList) == len(output), "Not all jobs seem to have been submitted properly"
    for entry, ids in zip(self.__jobList, output):
        entry['jobID'] = ids[0]
        print("Generated job PandaID = %s" % ids[0])
    return
开发者ID:EntityOfPlague,项目名称:panda-server,代码行数:18,代码来源:testJobFlowATLAS.py
示例11: update_status
def update_status():
    # Method to sync PandaDB job status and local job status
    # show users jobs
    jobs = Job.query.filter(Job.pandaid.isnot(None))\
        .filter(~Job.status.in_(['finished', 'failed', 'cancelled']))\
        .all()
    ids = []
    localids = []
    for job in jobs:
        localids.append(job.id)
        ids.append(job.pandaid)
    # get status update
    if len(ids) > 0:
        _logger.debug('getJobStatus: ' + str(ids))
        s, o = Client.getJobStatus(ids)
        _logger.debug(o)
        _logger.debug(s)
        _logger.debug("---------------------")
        # index panda replies by PandaID once, instead of the original
        # O(n^2) nested scan over jobs x replies
        panda_by_id = {}
        for obj in o:
            panda_by_id[obj.PandaID] = obj
        for job in jobs:
            obj = panda_by_id.get(job.pandaid)
            if obj is None:
                # panda returned nothing for this job; leave it untouched
                continue
            # Update attemptNr if changed
            if job.attemptnr != obj.attemptNr:
                job.attemptnr = obj.attemptNr
                jobs_.save(job)
            # Update status if changed
            if job.status != obj.jobStatus:
                job.status = obj.jobStatus
                job.modification_time = datetime.utcnow()
                jobs_.save(job)
    # local ids of all jobs that were examined
    return localids
开发者ID:RRCKI,项目名称:panda-web-client,代码行数:36,代码来源:scripts.py
示例12: int
options, args = optP.parse_args()
aSrvID = None
codeV = None
useMailAsIDV = False
# map command-line switches to the panda kill code
if options.forceKill:
    codeV = 9
elif options.killUserJobs:
    codeV = 91
else:
    try:
        codeV = int(options.codeV)
    except Exception:
        pass
if options.killOwnProdJobs:
    useMailAsIDV = True
if len(args) == 1:
    # a single PandaID was given
    Client.killJobs([args[0]], code=codeV, useMailAsID=useMailAsIDV, keepUnmerged=options.keepUnmerged, jobSubStatus=options.jobSubStatus)
else:
    # an inclusive range of PandaIDs was given
    startID = int(args[0])
    endID = int(args[1])
    if startID > endID:
        print('%d is less than %d' % (endID, startID))
        sys.exit(1)
    Client.killJobs(range(startID, endID + 1), code=codeV, useMailAsID=useMailAsIDV, keepUnmerged=options.keepUnmerged, jobSubStatus=options.jobSubStatus)
开发者ID:PanDAWMS,项目名称:panda-server,代码行数:29,代码来源:killJob.py
示例13: FileSpec
job.prodDBlock = 'pandatest.000003.dd.input'
job.destinationDBlock = 'panda.destDB.%s' % commands.getoutput('/usr/bin/uuidgen')
job.destinationSE = 'BNL_SE'
# LFN -> GUID of the pre-registered test input files
ids = {'pandatest.000003.dd.input._00028.junk': '6c19e1fc-ee8c-4bae-bd4c-c9e5c73aca27',
       'pandatest.000003.dd.input._00033.junk': '98f79ba1-1793-4253-aac7-bdf90a51d1ee',
       'pandatest.000003.dd.input._00039.junk': '33660dd5-7cef-422a-a7fc-6c24cb10deb1'}
for lfn in ids:
    inFile = FileSpec()
    inFile.lfn = lfn
    inFile.GUID = ids[lfn]
    inFile.dataset = 'pandatest.000003.dd.input'
    inFile.type = 'input'
    job.addFile(inFile)
s, o = Client.submitJobs([job])
print("---------------------")
print(s)
print(o)
print("---------------------")
s, o = Client.getJobStatus([4934, 4766, 4767, 4768, 4769])
print(s)
if s == 0:
    for job in o:
        # panda returns None for unknown PandaIDs
        if job is None:
            continue
        print(job.PandaID)
        for jobFile in job.Files:
            print('%s %s' % (jobFile.lfn, jobFile.type))
        print("---------------------")
s, o = Client.queryPandaIDs([0])
开发者ID:EntityOfPlague,项目名称:panda-server,代码行数:31,代码来源:testUser.py
示例14: putFile
def putFile(req,file):
if not Protocol.isSecure(req):
return False
if '/CN=limited proxy' in req.subprocess_env['SSL_CLIENT_S_DN']:
return False
_logger.debug("putFile : start %s %s" % (req.subprocess_env['SSL_CLIENT_S_DN'],file.filename))
# size check
fullSizeLimit = 768*1024*1024
if not file.filename.startswith('sources.'):
noBuild = True
sizeLimit = 100*1024*1024
else:
noBuild = False
sizeLimit = fullSizeLimit
# get file size
contentLength = 0
try:
contentLength = long(req.headers_in["content-length"])
except:
if req.headers_in.has_key("content-length"):
_logger.error("cannot get CL : %s" % req.headers_in["content-length"])
else:
_logger.error("no CL")
_logger.debug("size %s" % contentLength)
if contentLength > sizeLimit:
errStr = "ERROR : Upload failure. Exceeded size limit %s>%s." % (contentLength,sizeLimit)
if noBuild:
errStr += " Please submit the job without --noBuild/--libDS since those options impose a tighter size limit"
else:
errStr += " Please remove redundant files from your workarea"
_logger.error(errStr)
_logger.debug("putFile : end")
return errStr
try:
fileFullPath = '%s/%s' % (panda_config.cache_dir,file.filename.split('/')[-1])
# avoid overwriting
if os.path.exists(fileFullPath):
# touch
os.utime(fileFullPath,None)
# send error message
errStr = "ERROR : Cannot overwrite file"
_logger.debug('putFile : cannot overwrite file %s' % file.filename)
_logger.debug("putFile : end")
return errStr
# write
fo = open(fileFullPath,'wb')
fileContent = file.file.read()
fo.write(fileContent)
fo.close()
except:
errStr = "ERROR : Cannot write file"
_logger.error(errStr)
_logger.debug("putFile : end")
return errStr
# checksum
try:
# decode Footer
footer = fileContent[-8:]
checkSum,isize = struct.unpack("II",footer)
_logger.debug("CRC from gzip Footer %s" % checkSum)
except:
# calculate on the fly
"""
import zlib
checkSum = zlib.adler32(fileContent) & 0xFFFFFFFF
"""
# use None to avoid delay for now
checkSum = None
_logger.debug("CRC calculated %s" % checkSum)
# file size
fileSize = len(fileContent)
# user name
username = cleanUserID(req.subprocess_env['SSL_CLIENT_S_DN'])
_logger.debug("putFile : written dn=%s file=%s size=%s crc=%s" % \
(username,file.filename,fileSize,checkSum))
# put file info to DB
statClient,outClient = Client.insertSandboxFileInfo(username,file.filename,
fileSize,checkSum)
if statClient != 0 or outClient.startswith("ERROR"):
_logger.error("putFile : failed to put sandbox to DB with %s %s" % (statClient,outClient))
#_logger.debug("putFile : end")
#return "ERROR : Cannot insert sandbox to DB"
else:
_logger.debug("putFile : inserted sandbox to DB with %s" % outClient)
# store to cassandra
if hasattr(panda_config,'cacheUseCassandra') and panda_config.cacheUseCassandra == True:
try:
# time-stamp
timeNow = datetime.datetime.utcnow()
creationTime = timeNow.strftime('%Y-%m-%d %H:%M:%S')
# user name
username = req.subprocess_env['SSL_CLIENT_S_DN']
username = username.replace('/CN=proxy','')
username = username.replace('/CN=limited proxy','')
# file size
fileSize = len(fileContent)
# key
fileKeyName = file.filename.split('/')[-1]
sizeCheckSum = '%s:%s' % (fileSize,checkSum)
# insert to cassandra
#.........这里部分代码省略.........
开发者ID:EntityOfPlague,项目名称:panda-server,代码行数:101,代码来源:Utils.py
示例15: FileSpec
job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf-phosim332.sh'
job.destinationDBlock = datasetName
#job.destinationSE = destName
job.destinationSE = 'local'
job.currentPriority = 1000
#job.prodSourceLabel = 'ptest'
#job.prodSourceLabel = 'panda'
#job.prodSourceLabel = 'ptest'
#job.prodSourceLabel = 'test'
#job.prodSourceLabel = 'ptest'
### 2014-01-27
#job.prodSourceLabel = 'user'
job.prodSourceLabel = 'panda'
job.computingSite = site
job.jobParameters = ""
job.VO = "lsst"
# attach the pilot log file spec to the job
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)
s, o = Client.submitJobs([job], srvID=aSrvID)
print(s)
for x in o:
    print("PandaID=%s" % x[0])
示例16: IN
files.append(tmpLFN)
print('')
print('found {0} lost files -> {1}'.format(len(files), ','.join(files)))
s, jediTaskID = taskBuffer.resetFileStatusInJEDI('', True, options.ds, files, [], options.dryRun)
if options.dryRun:
    sys.exit(0)
if s:
    if options.resurrectDS:
        # collect the task's output/log dataset names for resurrection
        sd, so = taskBuffer.querySQLS('SELECT datasetName FROM ATLAS_PANDA.JEDI_Datasets WHERE jediTaskID=:id AND type IN (:t1,:t2)',
                                      {':id': jediTaskID, ':t1': 'output', ':t2': 'log'})
        rc = RucioClient()
        for datasetName, in so:
            # retry the rucio lookup/resurrect sequence up to three times
            for i in range(3):
                try:
                    scope, name = rucioAPI.extract_scope(datasetName)
                    rc.get_did(scope, name)
                    break
                except DataIdentifierNotFound:
                    print('resurrect {0}'.format(datasetName))
                    rc.resurrect([{'scope': scope, 'name': name}])
                    try:
                        # clear the lifetime so the dataset is not re-deleted;
                        # best-effort, failures are deliberately ignored
                        rc.set_metadata(scope, name, 'lifetime', None)
                    except Exception:
                        pass
    print(Client.retryTask(jediTaskID, noChildRetry=options.noChildRetry)[-1][-1])
    print('done for jediTaskID={0}'.format(jediTaskID))
else:
    print('failed')
示例17: len
status, res = proxyS.querySQLS(sql, varMap)
if res is not None:
    # split the ids by which component owns them
    for (jobID, lockedby) in res:
        if lockedby == 'jedi':
            jediJobs.append(jobID)
        else:
            jobs.append(jobID)
# reassign non-JEDI jobs in chunks of 100
jobs.sort()
if len(jobs):
    nJob = 100
    iJob = 0
    while iJob < len(jobs):
        print('reassign %s' % str(jobs[iJob:iJob + nJob]))
        Client.reassignJobs(jobs[iJob:iJob + nJob])
        iJob += nJob
        time.sleep(10)
# JEDI-owned jobs are killed instead, in chunks of 100
if len(jediJobs) != 0:
    nJob = 100
    iJob = 0
    while iJob < len(jediJobs):
        print('kill JEDI jobs %s' % str(jediJobs[iJob:iJob + nJob]))
        Client.killJobs(jediJobs[iJob:iJob + nJob], codeV, keepUnmerged=options.keepUnmerged)
        iJob += nJob
print('')
print('reassigned {0} jobs'.format(len(jobs + jediJobs)))
开发者ID:PanDAWMS,项目名称:panda-server,代码行数:29,代码来源:reassignTask.py
示例18: len
jobs = []
varMap = {}
varMap[':prodSourceLabel'] = 'managed'
varMap[':taskID'] = args[0]
varMap[':pandaIDl'] = args[1]
varMap[':pandaIDu'] = args[2]
sql = "SELECT PandaID FROM %s WHERE prodSourceLabel=:prodSourceLabel AND taskID=:taskID AND PandaID BETWEEN :pandaIDl AND :pandaIDu ORDER BY PandaID"
# collect matching PandaIDs from all three job tables, deduplicated
for table in ['ATLAS_PANDA.jobsActive4', 'ATLAS_PANDA.jobsWaiting4', 'ATLAS_PANDA.jobsDefined4']:
    status, res = proxyS.querySQLS(sql % table, varMap)
    if res is not None:
        for pandaID, in res:
            if pandaID not in jobs:
                jobs.append(pandaID)
print('The number of jobs to be killed : %s' % len(jobs))
# kill in chunks of 100, pausing between chunks
if len(jobs):
    nJob = 100
    iJob = 0
    while iJob < len(jobs):
        print('kill %s' % str(jobs[iJob:iJob + nJob]))
        if options.forceKill:
            # code 9 = force kill
            Client.killJobs(jobs[iJob:iJob + nJob], 9, useMailAsID=useMailAsIDV)
        else:
            Client.killJobs(jobs[iJob:iJob + nJob], useMailAsID=useMailAsIDV)
        iJob += nJob
        time.sleep(1)
示例19: SiteMapper
from taskbuffer.TaskBuffer import taskBuffer
from brokerage.SiteMapper import SiteMapper
from config import panda_config
# instantiate TB
taskBuffer.init(panda_config.dbhost, panda_config.dbpasswd, nDBConnection=1)
# instantiate sitemapper
siteMapper = SiteMapper(taskBuffer)
import httplib
import commands
# NOTE(review): 'id' shadows the builtin but may be referenced by code
# below this chunk, so the name is kept
id = sys.argv[1]
s, o = Client.getJobStatus([id])
if s != 0:
    print("failed to get job with:%s" % s)
    sys.exit(0)
job = o[0]
if job is None:
    print("got None")
    sys.exit(0)
xml = """<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<!-- ATLAS file meta-data catalog -->
<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">
<POOLFILECATALOG>
开发者ID:EntityOfPlague,项目名称:panda-server,代码行数:30,代码来源:finishJob.py
示例20: FileSpec
#job.cloud = "UK"
job.taskID = i
# primary evgen output file spec
outFile = FileSpec()
outFile.lfn = "%s.evgen.pool.root" % job.jobName
outFile.destinationDBlock = job.destinationDBlock
outFile.destinationSE = job.destinationSE
outFile.dataset = job.destinationDBlock
#outFile.destinationDBlockToken = 'ATLASDATADISK'
outFile.type = 'output'
job.addFile(outFile)
# pilot log file spec
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)
job.jobParameters = "7087 0 500000 1 DC3.007087.singlepart_fwdgamma_etaplus_E500.py %s NONE NONE NONE" % outFile.lfn
jobList.append(job)
for i in range(1):
    #s,o = Client.submitJobs(jobList)
    s, outS = Client.runTaskAssignment(jobList)
    print("---------------------")
    print(s)
    for tmpOut in outS:
        print(tmpOut)
注:本文中的userinterface.Client类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论