This article collects and summarizes typical usage examples of the Python function tvb.core.entities.storage.dao.get_operation_by_id. If you have been wondering what get_operation_by_id is for and how to use it, the curated examples below should help.
20 code examples of the get_operation_by_id function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
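Before the individual examples, here is a minimal usage sketch (a hypothetical illustration, not taken from any of the projects below; the operation id is assumed, and depending on the TVB version a missing id may raise instead of returning None):

from tvb.core.entities.storage import dao
from tvb.core.entities import model

# Hypothetical id of an Operation row previously persisted by TVB.
operation_id = 42
# Re-read the full entity from the DB; lazy relationships become usable.
operation = dao.get_operation_by_id(operation_id)
if operation is not None and operation.status == model.STATUS_STARTED:
    print("Operation %d is still running" % operation.id)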
Example 1: test_launch_burst_invalid_portlet_analyzer_data
def test_launch_burst_invalid_portlet_analyzer_data(self):
    """
    Test that burst is marked as error if invalid data is passed to the first step.
    """
    algo_id = self.flow_service.get_algorithm_by_module_and_class('tvb.tests.framework.adapters.testadapter1',
                                                                  'TestAdapter1').id
    # Adapter tries to do an int(test1_val1) and int(test1_val2), so this should be valid
    burst_config = self.burst_service.new_burst_configuration(self.test_project.id)
    kwargs_replica = {'test1_val1': '1', 'test1_val2': '0'}
    burst_config.update_simulator_configuration(kwargs_replica)
    test_portlet = dao.get_portlet_by_identifier(self.PORTLET_ID)
    portlet_configuration = self.burst_service.new_portlet_configuration(test_portlet.id)
    # Portlet analyzer tries to do int(input), which should fail
    declared_overwrites = {ADAPTER_PREFIX_ROOT + '0test_non_dt_input': 'asa'}
    self.burst_service.update_portlet_configuration(portlet_configuration, declared_overwrites)
    burst_config.tabs[0].portlets[0] = portlet_configuration
    burst_id, _ = self.burst_service.launch_burst(burst_config, 0, algo_id, self.test_user.id)
    burst_config = dao.get_burst_by_id(burst_id)
    # Wait a maximum number of seconds for the burst to finish
    burst_config = self._wait_for_burst(burst_config, error_expected=True)
    burst_wf = dao.get_workflows_for_burst(burst_config.id)[0]
    wf_steps = dao.get_workflow_steps(burst_wf.id)
    self.assertTrue(len(wf_steps) == 2,
                    "Should have exactly 2 wf steps: one for the simulation, one for the portlet analyze operation.")
    simulator_op = dao.get_operation_by_id(wf_steps[0].fk_operation)
    self.assertEqual(model.STATUS_FINISHED, simulator_op.status,
                     "First operation should be the simulator, which should have 'finished' status.")
    portlet_analyze_op = dao.get_operation_by_id(wf_steps[1].fk_operation)
    self.assertEqual(portlet_analyze_op.status, model.STATUS_ERROR,
                     "Second operation should be the portlet analyze step, which should have 'error' status.")
Author: LauHoiYanGladys, Project: tvb-framework, Lines: 33, Source: burst_service_test.py
Example 2: test_stop_operations
def test_stop_operations(self):
    data = {"test1_val1": 5, 'test1_val2': 5}
    operations = self._launch_test_algo_on_cluster(**data)
    operation = dao.get_operation_by_id(operations[0].id)
    self.assertFalse(operation.has_finished)
    self.flow_c.stop_operation(operation.id, 0, False)
    operation = dao.get_operation_by_id(operation.id)
    self.assertEqual(operation.status, model.STATUS_CANCELED)
Author: sdiazpier, Project: tvb-framework, Lines: 8, Source: flow_controller_test.py
Example 3: prepare_next_step
def prepare_next_step(self, last_executed_op_id):
    """
    If the operation with id 'last_executed_op_id' resulted after
    the execution of a workflow step, then this method will launch
    the operation corresponding to the next step from the workflow.
    """
    try:
        current_step, next_workflow_step = self._get_data(last_executed_op_id)
        if next_workflow_step is not None:
            operation = dao.get_operation_by_id(next_workflow_step.fk_operation)
            dynamic_param_names = next_workflow_step.dynamic_workflow_param_names
            if len(dynamic_param_names) > 0:
                op_params = json.loads(operation.parameters)
                for param_name in dynamic_param_names:
                    dynamic_param = op_params[param_name]
                    former_step = dao.get_workflow_step_by_step_index(next_workflow_step.fk_workflow,
                                                                      dynamic_param[wf_cfg.STEP_INDEX_KEY])
                    if type(dynamic_param[wf_cfg.DATATYPE_INDEX_KEY]) is IntType:
                        datatypes = dao.get_results_for_operation(former_step.fk_operation)
                        op_params[param_name] = datatypes[dynamic_param[wf_cfg.DATATYPE_INDEX_KEY]].gid
                    else:
                        previous_operation = dao.get_operation_by_id(former_step.fk_operation)
                        op_params[param_name] = json.loads(previous_operation.parameters)[
                            dynamic_param[wf_cfg.DATATYPE_INDEX_KEY]]
                operation.parameters = json.dumps(op_params)
                operation = dao.store_entity(operation)
            return operation.id
        else:
            if current_step is not None:
                current_workflow = dao.get_workflow_by_id(current_step.fk_workflow)
                current_workflow.status = current_workflow.STATUS_FINISHED
                dao.store_entity(current_workflow)
                burst_entity = dao.get_burst_by_id(current_workflow.fk_burst)
                parallel_workflows = dao.get_workflows_for_burst(burst_entity.id)
                all_finished = True
                for workflow in parallel_workflows:
                    if workflow.status == workflow.STATUS_STARTED:
                        all_finished = False
                if all_finished:
                    self.mark_burst_finished(burst_entity, success=True)
                    disk_size = dao.get_burst_disk_size(burst_entity.id)  # Transform from kB to MB
                    if disk_size > 0:
                        user = dao.get_project_by_id(burst_entity.fk_project).administrator
                        user.used_disk_space = user.used_disk_space + disk_size
                        dao.store_entity(user)
            else:
                operation = dao.get_operation_by_id(last_executed_op_id)
                disk_size = dao.get_disk_size_for_operation(operation.id)  # Transform from kB to MB
                if disk_size > 0:
                    user = dao.get_user_by_id(operation.fk_launched_by)
                    user.used_disk_space = user.used_disk_space + disk_size
                    dao.store_entity(user)
            return None
    except Exception, excep:
        self.logger.error(excep)
        self.logger.exception(excep)
        raise WorkflowInterStepsException(excep)
Author: HuifangWang, Project: the-virtual-brain-website, Lines: 57, Source: workflow_service.py
Example 4: test_stop_operations_group
def test_stop_operations_group(self):
    data = {model.RANGE_PARAMETER_1: "test1_val1", "test1_val1": '5,6,7', 'test1_val2': 5}
    operations = self._launch_test_algo_on_cluster(**data)
    operation_group_id = 0
    for operation in operations:
        operation = dao.get_operation_by_id(operation.id)
        self.assertFalse(operation.has_finished)
        operation_group_id = operation.fk_operation_group
    self.flow_c.stop_operation(operation_group_id, 1, False)
    for operation in operations:
        operation = dao.get_operation_by_id(operation.id)
        self.assertEqual(operation.status, model.STATUS_CANCELED)
Author: sdiazpier, Project: tvb-framework, Lines: 12, Source: flow_controller_test.py
Example 5: _edit_data
def _edit_data(self, datatype, new_data, from_group=False):
    """
    Private method, used for editing a meta-data XML file and a DataType row
    for a given custom DataType entity, with a new dictionary of data from the UI.
    """
    if isinstance(datatype, MappedType) and not os.path.exists(datatype.get_storage_file_path()):
        if not datatype.invalid:
            datatype.invalid = True
            dao.store_entity(datatype)
        return
    # 1. First update Operation fields:
    #    Update the group field, if possible
    new_group_name = new_data[CommonDetails.CODE_OPERATION_TAG]
    empty_group_value = (new_group_name is None or new_group_name == "")
    if from_group:
        if empty_group_value:
            raise StructureException("Empty group is not allowed!")
        group = dao.get_generic_entity(model.OperationGroup, new_data[CommonDetails.CODE_OPERATION_GROUP_ID])
        if group and len(group) > 0 and new_group_name != group[0].name:
            group = group[0]
            exists_group = dao.get_generic_entity(model.OperationGroup, new_group_name, 'name')
            if exists_group:
                raise StructureException("Group '" + new_group_name + "' already exists.")
            group.name = new_group_name
            dao.store_entity(group)
    else:
        operation = dao.get_operation_by_id(datatype.fk_from_operation)
        operation.user_group = new_group_name
        dao.store_entity(operation)
    # 2. Update DataType fields:
    datatype.subject = new_data[DataTypeOverlayDetails.DATA_SUBJECT]
    datatype.state = new_data[DataTypeOverlayDetails.DATA_STATE]
    if DataTypeOverlayDetails.DATA_TAG_1 in new_data:
        datatype.user_tag_1 = new_data[DataTypeOverlayDetails.DATA_TAG_1]
    if DataTypeOverlayDetails.DATA_TAG_2 in new_data:
        datatype.user_tag_2 = new_data[DataTypeOverlayDetails.DATA_TAG_2]
    if DataTypeOverlayDetails.DATA_TAG_3 in new_data:
        datatype.user_tag_3 = new_data[DataTypeOverlayDetails.DATA_TAG_3]
    if DataTypeOverlayDetails.DATA_TAG_4 in new_data:
        datatype.user_tag_4 = new_data[DataTypeOverlayDetails.DATA_TAG_4]
    if DataTypeOverlayDetails.DATA_TAG_5 in new_data:
        datatype.user_tag_5 = new_data[DataTypeOverlayDetails.DATA_TAG_5]
    datatype = dao.store_entity(datatype)
    # 3. Update MetaData in H5 as well.
    datatype.persist_full_metadata()
    # 4. Update the group_name/user_group in the operation meta-data file.
    operation = dao.get_operation_by_id(datatype.fk_from_operation)
    self.structure_helper.update_operation_metadata(operation.project.name, new_group_name,
                                                    str(datatype.fk_from_operation), from_group)
Author: rajul, Project: tvb-framework, Lines: 52, Source: project_service.py
Example 6: initiate_prelaunch
def initiate_prelaunch(self, operation, adapter_instance, temp_files, **kwargs):
    """
    Public method.
    This should be the common entry point for calling an adapter method.
    """
    result_msg = ""
    try:
        unique_id = None
        if self.ATT_UID in kwargs:
            unique_id = kwargs[self.ATT_UID]
        filtered_kwargs = adapter_instance.prepare_ui_inputs(kwargs)
        self.logger.debug("Launching operation " + str(operation.id) + " with " + str(filtered_kwargs))
        operation = dao.get_operation_by_id(operation.id)  # Load lazy fields
        params = dict()
        for k, value_ in filtered_kwargs.items():
            params[str(k)] = value_
        disk_space_per_user = TvbProfile.current.MAX_DISK_SPACE
        pending_op_disk_space = dao.compute_disk_size_for_started_ops(operation.fk_launched_by)
        user_disk_space = dao.compute_user_generated_disk_size(operation.fk_launched_by)  # From kB to Bytes
        available_space = disk_space_per_user - pending_op_disk_space - user_disk_space
        result_msg, nr_datatypes = adapter_instance._prelaunch(operation, unique_id, available_space, **params)
        operation = dao.get_operation_by_id(operation.id)
        ## Update DB-stored kwargs for search purposes, to contain only valuable params (no unselected options)
        operation.parameters = json.dumps(kwargs)
        operation.mark_complete(model.STATUS_FINISHED)
        if nr_datatypes > 0:
            #### Write operation meta-XML only if some results are returned
            self.file_helper.write_operation_metadata(operation)
        dao.store_entity(operation)
        self._remove_files(temp_files)
    except zipfile.BadZipfile as excep:
        msg = "The uploaded file is not a valid ZIP!"
        self._handle_exception(excep, temp_files, msg, operation)
    except TVBException as excep:
        self._handle_exception(excep, temp_files, excep.message, operation)
    except MemoryError:
        msg = ("Could not execute operation because there is not enough free memory." +
               " Please adjust operation parameters and re-launch it.")
        self._handle_exception(Exception(msg), temp_files, msg, operation)
    except Exception as excep1:
        msg = "Could not launch Operation with the given input data!"
        self._handle_exception(excep1, temp_files, msg, operation)
    ### Try to find the next workflow step. It might throw WorkflowException.
    next_op_id = self.workflow_service.prepare_next_step(operation.id)
    self.launch_operation(next_op_id)
    return result_msg
Author: LauHoiYanGladys, Project: tvb-framework, Lines: 51, Source: operation_service.py
Example 7: _check_if_datatype_was_removed
def _check_if_datatype_was_removed(self, datatype):
    """
    Check if a certain datatype was removed.
    """
    try:
        dao.get_datatype_by_id(datatype.id)
        self.fail("The datatype was not deleted.")
    except Exception:
        pass
    try:
        dao.get_operation_by_id(datatype.fk_from_operation)
        self.fail("The operation was not deleted.")
    except Exception:
        pass
Author: LauHoiYanGladys, Project: tvb-framework, Lines: 14, Source: project_structure_test.py
Example 8: launch
def launch(self, data_file):
    """
    Execute import operations: unpack ZIP, build and store generic DataType objects.
    :param data_file: an archive (ZIP / HDF5) containing the `DataType`
    :raises LaunchException: when data_file is None, nonexistent, or invalid \
                (e.g. incomplete meta-data, not in ZIP / HDF5 format etc.)
    """
    if data_file is None:
        raise LaunchException("Please select a file which contains data to import")
    if os.path.exists(data_file):
        if zipfile.is_zipfile(data_file):
            current_op = dao.get_operation_by_id(self.operation_id)
            # Create a new TMP folder in which to extract the data
            tmp_folder = os.path.join(self.storage_path, "tmp_import")
            FilesHelper().unpack_zip(data_file, tmp_folder)
            operations = ImportService().import_project_operations(current_op.project, self.storage_path)
            shutil.rmtree(tmp_folder)
            self.nr_of_datatypes += len(operations)
        else:
            # Upgrade the file if necessary
            file_update_manager = FilesUpdateManager()
            file_update_manager.upgrade_file(data_file)
            folder, h5file = os.path.split(data_file)
            manager = HDF5StorageManager(folder, h5file)
            if manager.is_valid_hdf5_file():
                datatype = None
                try:
                    current_op = dao.get_operation_by_id(self.operation_id)
                    service = ImportService()
                    datatype = service.load_datatype_from_file(folder, h5file, current_op.id)
                    service.store_datatype(datatype)
                    self.nr_of_datatypes += 1
                except Exception, excep:
                    # If the import operation failed, delete the file from disk.
                    if datatype is not None and os.path.exists(datatype.get_storage_file_path()):
                        os.remove(datatype.get_storage_file_path())
                    self.log.exception(excep)
                    raise LaunchException("Invalid file received as input. Most probably incomplete "
                                          "meta-data ... " + str(excep))
            else:
                raise LaunchException("Uploaded file: %s is neither in ZIP nor HDF5 format" % data_file)
Author: HuifangWang, Project: the-virtual-brain-website, Lines: 48, Source: tvb_importer.py
Example 9: test_bct_all
def test_bct_all(self):
    """
    Iterate all BCT algorithms and execute them.
    """
    for i in xrange(len(self.bct_adapters)):
        for bct_identifier in self.bct_adapters[i].get_algorithms_dictionary():
            ### Prepare Operation and parameters
            algorithm = dao.get_algorithm_by_group(self.algo_groups[i].id, bct_identifier)
            operation = TestFactory.create_operation(algorithm=algorithm, test_user=self.test_user,
                                                     test_project=self.test_project,
                                                     operation_status=model.STATUS_STARTED)
            self.assertEqual(model.STATUS_STARTED, operation.status)
            ### Launch BCT algorithm
            submit_data = {self.algo_groups[i].algorithm_param_name: bct_identifier,
                           algorithm.parameter_name: self.connectivity.gid}
            try:
                OperationService().initiate_prelaunch(operation, self.bct_adapters[i], {}, **submit_data)
                if bct_identifier in BCTTest.EXPECTED_TO_FAIL_VALIDATION:
                    raise Exception("Algorithm %s was expected to throw input validation "
                                    "exception, but did not!" % (bct_identifier,))
                operation = dao.get_operation_by_id(operation.id)
                ### Check that operation status after execution is success.
                self.assertEqual(STATUS_FINISHED, operation.status)
                ### Make sure at least one result exists for each BCT algorithm
                results = dao.get_generic_entity(model.DataType, operation.id, 'fk_from_operation')
                self.assertTrue(len(results) > 0)
            except InvalidParameterException, excep:
                ## Some algorithms are expected to throw a validation exception.
                if bct_identifier not in BCTTest.EXPECTED_TO_FAIL_VALIDATION:
                    raise excep
Author: amitsaroj001, Project: tvb-framework, Lines: 32, Source: bct_test.py
Example 10: add_operation_additional_info
def add_operation_additional_info(self, message):
    """
    Adds additional info on the operation to be displayed in the UI. Usually a warning message.
    """
    current_op = dao.get_operation_by_id(self.operation_id)
    current_op.additional_info = message
    dao.store_entity(current_op)
Author: unimauro, Project: tvb-framework, Lines: 7, Source: abcuploader.py
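Example 10 above is the load-modify-store pattern in its smallest form. A hypothetical generalization of the same pattern (the helper below is illustrative, not part of TVB):

def update_operation_field(operation_id, field_name, value):
    """Hypothetical helper: load the entity, set one attribute, persist it back."""
    operation = dao.get_operation_by_id(operation_id)
    setattr(operation, field_name, value)
    return dao.store_entity(operation)

# e.g. update_operation_field(op.id, "additional_info", "Input file had extra columns.")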
Example 11: _run_cluster_job
def _run_cluster_job(operation_identifier, user_name_label, adapter_instance):
    """
    Threaded Popen.
    It is the function called by the ClusterSchedulerClient in a Thread.
    This function starts a new process.
    """
    # Load the operation so we can estimate the execution time
    operation = dao.get_operation_by_id(operation_identifier)
    kwargs = parse_json_parameters(operation.parameters)
    kwargs = adapter_instance.prepare_ui_inputs(kwargs)
    time_estimate = int(adapter_instance.get_execution_time_approximation(**kwargs))
    hours = int(time_estimate / 3600)
    minutes = (int(time_estimate) % 3600) / 60
    seconds = int(time_estimate) % 60
    # Anything lower than 5 hours just uses the default walltime
    if hours < 5:
        walltime = "05:00:00"
    else:
        if hours < 10:
            hours = "0%d" % hours
        else:
            hours = str(hours)
        walltime = "%s:%s:%s" % (hours, str(minutes), str(seconds))
    call_arg = TvbProfile.current.cluster.SCHEDULE_COMMAND % (operation_identifier, user_name_label, walltime)
    LOGGER.info(call_arg)
    process_ = Popen([call_arg], stdout=PIPE, shell=True)
    job_id = process_.stdout.read().replace('\n', '').split(TvbProfile.current.cluster.JOB_ID_STRING)[-1]
    LOGGER.debug("Got jobIdentifier = %s for CLUSTER operationID = %s" % (job_id, operation_identifier))
    operation_identifier = model.OperationProcessIdentifier(operation_identifier, job_id=job_id)
    dao.store_entity(operation_identifier)
Author: LauHoiYanGladys, Project: tvb-framework, Lines: 31, Source: backend_client.py
Example 12: test_stop_burst_operation
def test_stop_burst_operation(self):
    burst_config = self._long_burst_launch()
    operation = self._wait_for_burst_ops(burst_config)[0]
    self.assertFalse(operation.has_finished)
    self.flow_c.stop_burst_operation(operation.id, 0, False)
    operation = dao.get_operation_by_id(operation.id)
    self.assertEqual(operation.status, model.STATUS_CANCELED)
Author: sdiazpier, Project: tvb-framework, Lines: 7, Source: flow_controller_test.py
Example 13: _run_cluster_job
def _run_cluster_job(operation_identifier, user_name_label, adapter_instance):
    """
    Threaded Popen.
    It is the function called by the ClusterSchedulerClient in a Thread.
    This function starts a new process.
    """
    # Load the operation so we can estimate the execution time
    operation = dao.get_operation_by_id(operation_identifier)
    kwargs = parse_json_parameters(operation.parameters)
    time_estimate = int(adapter_instance.get_execution_time_approximation(**kwargs))
    hours = int(time_estimate / 3600)
    minutes = (int(time_estimate) % 3600) / 60
    seconds = int(time_estimate) % 60
    # Anything lower than 2 hours just uses the default walltime
    if hours < 2:
        walltime = "02:00:00"
    elif hours > 23:
        walltime = "23:59:59"
    else:
        walltime = datetime.time(hours, minutes, seconds)
        walltime = walltime.strftime("%H:%M:%S")
    call_arg = config.CLUSTER_SCHEDULE_COMMAND % (walltime, operation_identifier, user_name_label)
    LOGGER.info(call_arg)
    process_ = Popen([call_arg], stdout=PIPE, shell=True)
    job_id = process_.stdout.read().replace('\n', '').split('OAR_JOB_ID=')[-1]
    LOGGER.debug("Got jobIdentifier = %s for CLUSTER operationID = %s" % (job_id, operation_identifier))
    operation_identifier = model.OperationProcessIdentifier(operation_identifier, job_id=job_id)
    dao.store_entity(operation_identifier)
Author: HuifangWang, Project: the-virtual-brain-website, Lines: 29, Source: backend_client.py
Example 14: stop_operation
def stop_operation(operation_id):
    """
    Stop a thread for a given operation id.
    """
    operation = dao.get_operation_by_id(operation_id)
    if not operation or operation.status != model.STATUS_STARTED:
        LOGGER.warning("Operation already stopped or not found, when asked to stop job: %s" % operation_id)
        return True
    LOGGER.debug("Stopping operation: %s" % str(operation_id))
    ## Set the thread stop flag to true
    for thread in CURRENT_ACTIVE_THREADS:
        if int(thread.operation_id) == operation_id:
            thread.stop()
            LOGGER.debug("Found running thread for operation: %d" % operation_id)
    ## Kill the thread
    stopped = True
    operation_process = dao.get_operation_process_for_operation(operation_id)
    if operation_process is not None:
        ## Now try to kill the operation process, if it exists
        stopped = OperationExecutor.stop_pid(operation_process.pid)
        if not stopped:
            LOGGER.debug("Operation %d was probably killed from its specific thread." % operation_id)
        else:
            LOGGER.debug("Stopped OperationExecutor process for %d" % operation_id)
    ## Mark the operation as cancelled in the DB.
    operation.mark_cancelled()
    dao.store_entity(operation)
    return stopped
Author: HuifangWang, Project: the-virtual-brain-website, Lines: 32, Source: backend_client.py
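A hedged caller-side sketch for the function above (the surrounding names are assumed): the boolean result separates a direct process kill from the case where the worker thread was only signalled to stop:

# Hypothetical caller; op_id is assumed to reference a started operation.
stopped = stop_operation(op_id)
if not stopped:
    # stop_pid() reported failure, most likely because the operation's own
    # thread had already honoured its stop flag and exited.
    LOGGER.debug("Operation %d ended via its thread stop flag." % op_id)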
Example 15: test_bct_all
def test_bct_all(self):
    """
    Iterate all BCT algorithms and execute them.
    """
    for adapter_instance in self.bct_adapters:
        algorithm = adapter_instance.stored_adapter
        operation = TestFactory.create_operation(algorithm=algorithm, test_user=self.test_user,
                                                 test_project=self.test_project,
                                                 operation_status=model.STATUS_STARTED)
        assert model.STATUS_STARTED == operation.status
        ### Launch BCT algorithm
        submit_data = {algorithm.parameter_name: self.connectivity.gid}
        try:
            OperationService().initiate_prelaunch(operation, adapter_instance, {}, **submit_data)
            if algorithm.classname in TestBCT.EXPECTED_TO_FAIL_VALIDATION:
                raise Exception("Algorithm %s was expected to throw input validation "
                                "exception, but did not!" % (algorithm.classname,))
            operation = dao.get_operation_by_id(operation.id)
            ### Check that operation status after execution is success.
            assert STATUS_FINISHED == operation.status
            ### Make sure at least one result exists for each BCT algorithm
            results = dao.get_generic_entity(model.DataType, operation.id, 'fk_from_operation')
            assert len(results) > 0
        except InvalidParameterException as excep:
            ## Some algorithms are expected to throw a validation exception.
            if algorithm.classname not in TestBCT.EXPECTED_TO_FAIL_VALIDATION:
                raise excep
Author: maedoc, Project: tvb-framework, Lines: 29, Source: bct_test.py
Example 16: create_operation
def create_operation(algorithm=None, test_user=None, test_project=None,
                     operation_status=model.STATUS_FINISHED, parameters="test params"):
    """
    Create a persisted operation.
    :param algorithm: when None, a default test adapter (NDimensionArrayAdapter) is looked up.
    :return: Operation entity after persistence.
    """
    if algorithm is None:
        algorithm = dao.get_algorithm_by_module('tvb.tests.framework.adapters.ndimensionarrayadapter',
                                                'NDimensionArrayAdapter')
    if test_user is None:
        test_user = TestFactory.create_user()
    if test_project is None:
        test_project = TestFactory.create_project(test_user)
    meta = {DataTypeMetaData.KEY_SUBJECT: "John Doe",
            DataTypeMetaData.KEY_STATE: "RAW_DATA"}
    operation = model.Operation(test_user.id, test_project.id, algorithm.id, parameters, meta=json.dumps(meta),
                                status=operation_status)
    dao.store_entity(operation)
    ### Make sure lazy attributes are correctly loaded.
    return dao.get_operation_by_id(operation.id)
Author: maedoc, Project: tvb-framework, Lines: 25, Source: factory.py
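A hypothetical call of the factory above from a test body (the fixture names are assumed). The design choice worth noting is that the factory returns dao.get_operation_by_id(...) rather than the freshly stored object, so lazy relationships such as the user, project, and algorithm are loaded on the returned entity:

operation = TestFactory.create_operation(test_user=self.test_user, test_project=self.test_project)
# Safe to touch lazy fields here, because the entity was re-read from the DB.
assert operation.status == model.STATUS_FINISHED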
Example 17: load_burst
def load_burst(self, burst_id):
    """
    :param burst_id: the id of the burst that should be loaded
    Having this input, the method should:
        - load the entity from the DB
        - get all the workflow steps for the saved burst id
        - go through the visualization workflow steps to create the tab
          configuration of the burst, using the tab_index and index_in_tab
          fields saved on each workflow_step
    """
    burst = dao.get_burst_by_id(burst_id)
    burst.prepare_after_load()
    burst.reset_tabs()
    burst_workflows = dao.get_workflows_for_burst(burst.id)
    group_gid = None
    if len(burst_workflows) == 1:
        # A simple burst with no range parameters
        burst = self.__populate_tabs_from_workflow(burst, burst_workflows[0])
    elif len(burst_workflows) > 1:
        # A burst with a range of values created multiple workflows, and we need to
        # launch a parameter space exploration with the resulting group
        self.__populate_tabs_from_workflow(burst, burst_workflows[0])
        executed_steps = dao.get_workflow_steps(burst_workflows[0].id)
        operation = dao.get_operation_by_id(executed_steps[0].fk_operation)
        if operation.operation_group:
            workflow_group = dao.get_datatypegroup_by_op_group_id(operation.operation_group.id)
            group_gid = workflow_group.gid
    return burst, group_gid
Author: lcosters, Project: tvb-framework, Lines: 33, Source: burst_service.py
Example 18: launch_visualization
def launch_visualization(visualization, frame_width=None, frame_height=None,
                         method_name=ABCAdapter.LAUNCH_METHOD, is_preview=True):
    """
    :param visualization: a visualization workflow step
    """
    dynamic_params = visualization.dynamic_param
    static_params = visualization.static_param
    parameters_dict = static_params
    current_project_id = 0
    ## Current operation id needed for the export mechanism. So far just use
    ## the operation of the workflow_step from which the inputs are taken.
    for param in dynamic_params:
        step_index = dynamic_params[param][WorkflowStepConfiguration.STEP_INDEX_KEY]
        datatype_index = dynamic_params[param][WorkflowStepConfiguration.DATATYPE_INDEX_KEY]
        referred_workflow_step = dao.get_workflow_step_by_step_index(visualization.fk_workflow, step_index)
        referred_operation_id = referred_workflow_step.fk_operation
        referred_operation = dao.get_operation_by_id(referred_operation_id)
        current_project_id = referred_operation.fk_launched_in
        if type(datatype_index) is IntType:
            ## Entry is the output of a previous step
            datatypes = dao.get_results_for_operation(referred_operation_id)
            parameters_dict[param] = datatypes[datatype_index].gid
        else:
            ## Entry is the input of a previous step
            parameters_dict[param] = json.loads(referred_operation.parameters)[datatype_index]
    algorithm = dao.get_algorithm_by_id(visualization.fk_algorithm)
    adapter_instance = ABCAdapter.build_adapter(algorithm.algo_group)
    adapter_instance.current_project_id = current_project_id
    prepared_inputs = adapter_instance.prepare_ui_inputs(parameters_dict)
    if frame_width is not None:
        prepared_inputs[ABCDisplayer.PARAM_FIGURE_SIZE] = (frame_width, frame_height)
    if isinstance(adapter_instance, ABCMPLH5Displayer) and is_preview is True:
        prepared_inputs[ABCMPLH5Displayer.SHOW_FULL_TOOLBAR] = False
    result = eval("adapter_instance." + method_name + "(**prepared_inputs)")
    return result, parameters_dict
Author: sdiazpier, Project: tvb-framework, Lines: 35, Source: burst_service.py
Example 19: store_result_figure
def store_result_figure(self, project, user, img_type, export_data, image_name=None, operation_id=None):
    """
    Store a result image into a file and reference it in the DB.
    """
    store_path, file_name = self._image_path(project.name, img_type)
    if img_type == FigureService._TYPE_PNG:  # PNG file from canvas
        self._write_png(store_path, export_data)
    elif img_type == FigureService._TYPE_SVG:  # SVG file from the SVG viewer
        self._write_svg(store_path, export_data)
    if operation_id:
        operation = dao.get_operation_by_id(operation_id)
    else:
        operation = None
        operation_id = None
    image_name = self._generate_image_name(project, user, operation, image_name)
    # Store the entity in the DB
    entity = model.ResultFigure(operation_id, user.id, project.id, FigureService._DEFAULT_SESSION_NAME,
                                image_name, file_name, img_type)
    entity = dao.store_entity(entity)
    # Load the instance from the DB, to have lazy fields loaded
    figure = dao.load_figure(entity.id)
    # Write image meta-data to disk
    self.file_helper.write_image_metadata(figure)
    if operation:
        # Force writing the operation meta-data to disk.
        # This is important later, for operation import.
        self.file_helper.write_operation_metadata(operation)
Author: amitsaroj001, Project: tvb-framework, Lines: 33, Source: figure_service.py
Example 20: do_operation_launch
def do_operation_launch(operation_id):
    """
    Event attached to the local queue for executing an operation, when resources become available.
    """
    LOGGER = get_logger('tvb.core.operation_async_launcher')
    try:
        LOGGER.debug("Loading operation with id=%s" % operation_id)
        current_operation = dao.get_operation_by_id(operation_id)
        stored_adapter = current_operation.algorithm
        LOGGER.debug("Importing Algorithm: " + str(stored_adapter.classname) +
                     " for Operation:" + str(current_operation.id))
        PARAMS = parse_json_parameters(current_operation.parameters)
        adapter_instance = ABCAdapter.build_adapter(stored_adapter)
        ## Un-comment below for profiling an operation:
        ## import cherrypy.lib.profiler as profiler
        ## p = profiler.Profiler("/Users/lia.domide/TVB/profiler/")
        ## p.run(OperationService().initiate_prelaunch, current_operation, adapter_instance, {}, **PARAMS)
        OperationService().initiate_prelaunch(current_operation, adapter_instance, {}, **PARAMS)
        LOGGER.debug("Successfully finished operation " + str(operation_id))
    except Exception as excep:
        LOGGER.error("Could not execute operation " + str(operation_id))
        LOGGER.exception(excep)
        parent_burst = dao.get_burst_for_operation_id(operation_id)
        if parent_burst is not None:
            WorkflowService().mark_burst_finished(parent_burst, error_message=str(excep))
Author: LauHoiYanGladys, Project: tvb-framework, Lines: 29, Source: operation_async_launcher.py
Note: The tvb.core.entities.storage.dao.get_operation_by_id examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code belongs to the original authors. Please refer to each project's License before redistributing or reusing the code; do not reproduce this page without permission.