This article collects typical usage examples of the Python function tkp.db.general.insert_extracted_sources. If you are unsure what insert_extracted_sources does, how to call it, or what it looks like in real code, the curated examples below should help.
Below are 20 code examples of insert_extracted_sources, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
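Before the individual examples, here is a minimal sketch of the call pattern they all share. It is illustrative only: the helper-module paths (tkp.testutil.db_subs, tkp.db.associations) and the deRuiter_r / new_source_sigma_margin values are assumptions based on the imports and parameters these tests typically use, not something taken from this page verbatim.

import tkp.db
from tkp.db.general import insert_extracted_sources
from tkp.db.associations import associate_extracted_sources
import tkp.testutil.db_subs as db_subs  # assumed test-helper module used by the examples below

# Create a dataset and an image, insert one example source, then run association.
dataset = tkp.db.DataSet(data={'description': 'sketch dataset'},
                         database=tkp.db.Database())
image = tkp.db.Image(dataset=dataset, data=db_subs.example_dbimage_data_dict())

src = db_subs.example_extractedsource_tuple()        # namedtuple of source-fit values
insert_extracted_sources(image._id, [src], 'blind')  # extract type: 'blind', 'ff_nd' or 'ff_ms'
associate_extracted_sources(image._id, deRuiter_r=3.7, new_source_sigma_margin=3)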
Example 1: test_basic_same_field_case

def test_basic_same_field_case(self):
    """ Here we start with 1 source in image0.
    We then add image1 (same field as image0), with a double association
    for the source, and check assocskyrgn updates correctly.
    """
    n_images = 2
    im_params = db_subs.generate_timespaced_dbimages_data(n_images)

    idx = 0
    src_a = db_subs.example_extractedsource_tuple(
        ra=im_params[idx]['centre_ra'],
        dec=im_params[idx]['centre_decl'])
    src_b = src_a._replace(ra=src_a.ra + 1. / 60.)  # 1 arcminute offset

    imgs = []
    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))
    insert_extracted_sources(imgs[idx]._id, [src_a])
    associate_extracted_sources(imgs[idx]._id, deRuiter_r, new_source_sigma_margin)

    idx = 1
    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))
    insert_extracted_sources(imgs[idx]._id, [src_a, src_b])
    associate_extracted_sources(imgs[idx]._id, deRuiter_r, new_source_sigma_margin)
    imgs[idx].update()

    runcats = columns_from_table('runningcatalog',
                                 where={'dataset': self.dataset.id})
    self.assertEqual(len(runcats), 2)  # Just a sanity check.
    skyassocs = columns_from_table('assocskyrgn',
                                   where={'skyrgn': imgs[idx]._data['skyrgn']})
    self.assertEqual(len(skyassocs), 2)

Developer ID: ajstewart, Project: tkp, Lines of code: 30, Source file: test_skyregion.py
Example 2: test_infinite

def test_infinite(self):
    # Check that database insertion doesn't choke on infinite errors.
    dataset = DataSet(data={'description': 'example dataset'},
                      database=self.database)
    image = Image(dataset=dataset, data=db_subs.example_dbimage_data_dict())

    # Inserting a standard example extractedsource should be fine
    extracted_source = db_subs.example_extractedsource_tuple()
    insert_extracted_sources(image._id, [extracted_source])
    inserted = columns_from_table('extractedsource',
                                  where={'image': image.id})
    self.assertEqual(len(inserted), 1)

    # But if the source has infinite errors we drop it and log a warning
    extracted_source = db_subs.example_extractedsource_tuple(error_radius=float('inf'),
                                                             peak_err=float('inf'),
                                                             flux_err=float('inf'))

    # We will add a handler to the root logger which catches all log
    # output in a buffer.
    iostream = BytesIO()
    hdlr = logging.StreamHandler(iostream)
    logging.getLogger().addHandler(hdlr)

    insert_extracted_sources(image._id, [extracted_source])

    logging.getLogger().removeHandler(hdlr)
    # We want to be sure that the error has been appropriately logged.
    self.assertIn("Dropped source fit with infinite flux errors",
                  iostream.getvalue())
    inserted = columns_from_table('extractedsource',
                                  where={'image': image.id})
    self.assertEqual(len(inserted), 1)

Developer ID: ajstewart, Project: tkp, Lines of code: 35, Source file: test_orm.py
Example 3: test_probably_not_a_transient

def test_probably_not_a_transient(self):
    """
    No source at 250MHz, but we detect a source at 50MHz.
    Not necessarily a transient.
    Should trivially ignore 250MHz data when looking at a new 50MHz source.
    """
    img_params = self.img_params
    img0 = img_params[0]

    # This time around, we just manually exclude the steady src from
    # the first image detections.
    steady_low_freq_src = MockSource(
        example_extractedsource_tuple(ra=img_params[0]['centre_ra'],
                                      dec=img_params[0]['centre_decl']),
        lightcurve=defaultdict(lambda: self.always_detectable_flux)
    )

    # Insert first image, no sources.
    tkp.db.Image(data=img_params[0], dataset=self.dataset)
    # Now set up second image.
    img1 = tkp.db.Image(data=img_params[1], dataset=self.dataset)
    xtr = steady_low_freq_src.simulate_extraction(img1,
                                                  extraction_type='blind')
    insert_extracted_sources(img1._id, [xtr], 'blind')
    associate_extracted_sources(img1._id, deRuiter_r, self.new_source_sigma_margin)
    transients = get_newsources_for_dataset(self.dataset.id)
    # Should have no marked transients
    self.assertEqual(len(transients), 0)

Developer ID: ajstewart, Project: tkp, Lines of code: 31, Source file: test_transients.py
Example 4: insert_extracted_sources

def insert_extracted_sources(self, results, extract="blind"):
    """Insert a list of sources.

    Args:
        results (list): list of utility.containers.ExtractionResult
            objects (as returned from
            sourcefinder.image.ImageData().extract()), or a list of data
            tuples with the source information in the following order:
            (ra, dec,
             ra_fit_err, dec_fit_err,
             peak, peak_err,
             flux, flux_err,
             significance level,
             beam major width (arcsec), beam minor width (arcsec),
             beam parallactic angle,
             ew_sys_err, ns_sys_err,
             error_radius)
        extract (str): 'blind', 'ff_nd' or 'ff_ms'
            (see db.general.insert_extracted_sources)
    """
    # To do: Figure out a saner method of passing the results around
    # (Namedtuple, for starters?)
    insert_extracted_sources(self._id, results=results, extract_type=extract)

Developer ID: Error323, Project: tkp, Lines of code: 26, Source file: orm.py
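The docstring above lists the tuple fields in order, so a plain tuple can stand in for an ExtractionResult. A sketch of hand-building one such tuple and passing it through this wrapper; the numeric values are purely hypothetical, and in practice the tuples come out of sourcefinder's extract() rather than being written by hand:

# image is assumed to be an existing tkp.db.Image instance.
fake_fit = (123.456, 45.678,    # ra, dec (degrees)
            0.001, 0.001,       # ra_fit_err, dec_fit_err
            0.5, 0.05,          # peak, peak_err
            0.5, 0.05,          # flux, flux_err
            15.0,               # significance level
            6.0, 4.0, 30.0,     # beam major/minor width (arcsec), parallactic angle
            0.0, 0.0,           # ew_sys_err, ns_sys_err
            10.0)               # error_radius
image.insert_extracted_sources([fake_fit], extract='blind')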
Example 5: test_two_field_overlap_new_transient

def test_two_field_overlap_new_transient(self):
    """Now for something more interesting - two overlapping fields, 4 sources:
    one steady source only in lower field,
    one steady source in both fields,
    one steady source only in upper field,
    one transient source in both fields but only at 2nd timestep.
    """
    n_images = 2
    xtr_radius = 1.5
    im_params = db_subs.generate_timespaced_dbimages_data(n_images,
                                                          xtr_radius=xtr_radius)
    im_params[1]['centre_decl'] += xtr_radius * 1

    imgs = []
    lower_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] - 0.5 * xtr_radius)
    upper_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[1]['centre_ra'],
        dec=im_params[1]['centre_decl'] + 0.5 * xtr_radius)
    overlap_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] + 0.2 * xtr_radius)
    overlap_transient = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] + 0.8 * xtr_radius)

    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[0]))
    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[1]))

    insert_extracted_sources(imgs[0]._id, [lower_steady_src, overlap_steady_src])
    associate_extracted_sources(imgs[0]._id, deRuiter_r=0.1,
                                new_source_sigma_margin=new_source_sigma_margin)
    nd_posns = dbnd.get_nulldetections(imgs[0].id)
    self.assertEqual(len(nd_posns), 0)

    insert_extracted_sources(imgs[1]._id, [upper_steady_src, overlap_steady_src,
                                           overlap_transient])
    associate_extracted_sources(imgs[1]._id, deRuiter_r=0.1,
                                new_source_sigma_margin=new_source_sigma_margin)
    nd_posns = dbnd.get_nulldetections(imgs[1].id)
    self.assertEqual(len(nd_posns), 0)

    runcats = columns_from_table('runningcatalog',
                                 where={'dataset': self.dataset.id})
    self.assertEqual(len(runcats), 4)  # sanity check.

    newsources_qry = """\
    SELECT *
      FROM newsource tr
          ,runningcatalog rc
     WHERE rc.dataset = %s
       AND tr.runcat = rc.id
    """
    self.database.cursor.execute(newsources_qry, (self.dataset.id,))
    newsources = get_db_rows_as_dicts(self.database.cursor)
    self.assertEqual(len(newsources), 1)

Developer ID: ajstewart, Project: tkp, Lines of code: 58, Source file: test_skyregion.py
Example 6: test_marginal_transient

def test_marginal_transient(self):
    """
    flux1 > rms_min0*(det0 + margin)
    but flux1 < rms_max0*(det0 + margin)
    --> Possible transient

    If it was in a region of rms_min, we would (almost certainly) have seen
    it in the first image. So new source --> Possible transient.
    But if it was in a region of rms_max, then perhaps we would have missed
    it. In which case, new source --> Just seeing deeper.

    Note that if we are tiling overlapping images, then the first time
    a field is processed with image-centre at the edge of the old field,
    we may get a bunch of unhelpful 'possible transients'.

    Furthermore, this will pick up fluctuating sources near the
    image-margins even with a fixed field of view.
    But without a more complex store of image-rms-per-position, we cannot
    do better.
    Hopefully we can use a 'distance from centre' feature to separate out
    the good and bad candidates in this case.
    """
    img_params = self.img_params

    # Must pick flux value carefully to fire correct logic branch:
    marginal_transient_flux = self.reliably_detected_at_image_centre_flux
    marginal_transient = MockSource(
        example_extractedsource_tuple(ra=img_params[0]['centre_ra'],
                                      dec=img_params[0]['centre_decl']),
        lightcurve={img_params[1]['taustart_ts']: marginal_transient_flux}
    )

    # First, check that we've set up the test correctly
    rms_min0 = img_params[0]['rms_min']
    rms_max0 = img_params[0]['rms_max']
    det0 = img_params[0]['detection_thresh']
    self.assertTrue(marginal_transient_flux <
                    rms_max0 * (det0 + self.new_source_sigma_margin))
    self.assertTrue(marginal_transient_flux >
                    rms_min0 * (det0 + self.new_source_sigma_margin))

    for pars in self.img_params:
        img = tkp.db.Image(data=pars, dataset=self.dataset)
        xtr = marginal_transient.simulate_extraction(img,
                                                     extraction_type='blind')
        if xtr is not None:
            insert_extracted_sources(img._id, [xtr], 'blind')
        associate_extracted_sources(img._id, deRuiter_r, self.new_source_sigma_margin)

    newsources = get_newsources_for_dataset(self.dataset.id)
    # Should have one 'possible' transient
    self.assertEqual(len(newsources), 1)
    self.assertTrue(
        newsources[0]['low_thresh_sigma'] > self.new_source_sigma_margin)
    self.assertTrue(
        newsources[0]['high_thresh_sigma'] < self.new_source_sigma_margin)

Developer ID: ajstewart, Project: tkp, Lines of code: 58, Source file: test_transients.py
Example 7: test_single_fixed_source

def test_single_fixed_source(self):
    """test_single_fixed_source

    - Pretend to extract the same source in each of a series of images.
    - Perform source association
    - Check the image source listing works
    - Check runcat, assocxtrsource.
    """
    fixed_src_runcat_id = None
    for img_idx, im in enumerate(self.im_params):
        self.db_imgs.append(Image(data=im, dataset=self.dataset))
        last_img = self.db_imgs[-1]

        insert_extracted_sources(last_img._id,
                                 [db_subs.example_extractedsource_tuple()], 'blind')
        associate_extracted_sources(last_img._id, deRuiter_r,
                                    new_source_sigma_margin)

        running_cat = columns_from_table(table="runningcatalog",
                                         keywords=['id', 'datapoints'],
                                         where={"dataset": self.dataset.id})
        self.assertEqual(len(running_cat), 1)
        self.assertEqual(running_cat[0]['datapoints'], img_idx + 1)

        # Check runcat ID does not change for a steady single source
        if img_idx == 0:
            fixed_src_runcat_id = running_cat[0]['id']
            self.assertIsNotNone(fixed_src_runcat_id, "No runcat id assigned to source")
        self.assertEqual(running_cat[0]['id'], fixed_src_runcat_id,
                         "Multiple runcat ids for same fixed source")

        runcat_flux = columns_from_table(table="runningcatalog_flux",
                                         keywords=['f_datapoints'],
                                         where={"runcat": fixed_src_runcat_id})
        self.assertEqual(len(runcat_flux), 1)
        self.assertEqual(img_idx + 1, runcat_flux[0]['f_datapoints'])

        last_img.update()
        last_img.update_sources()
        img_xtrsrc_ids = [src.id for src in last_img.sources]
        self.assertEqual(len(img_xtrsrc_ids), 1)

        # Get the association row for most recent extraction:
        assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
                                               keywords=['runcat', 'xtrsrc'],
                                               where={"xtrsrc": img_xtrsrc_ids[0]})
        # print "ImageID:", last_img.id
        # print "Imgs sources:", img_xtrsrc_ids
        # print "Assoc entries:", assocxtrsrcs_rows
        # print "First extracted source id:", ds_source_ids[0]
        # if len(assocxtrsrcs_rows):
        #     print "Associated source:", assocxtrsrcs_rows[0]['xtrsrc']
        self.assertEqual(len(assocxtrsrcs_rows), 1,
                         msg="No entries in assocxtrsrcs for image number " + str(img_idx))
        self.assertEqual(assocxtrsrcs_rows[0]['runcat'], fixed_src_runcat_id,
                         "Mismatched runcat id in assocxtrsrc table")

Developer ID: ajstewart, Project: tkp, Lines of code: 57, Source file: test_algorithms.py
Example 8: test_only_first_epoch_source

def test_only_first_epoch_source(self):
    """test_only_first_epoch_source

    - Pretend to extract a source only from the first image.
    - Run source association for each image, as we would in TraP.
    - Check the image source listing works
    - Check runcat and assocxtrsource are correct.
    """
    first_epoch = True
    extracted_source_ids = []
    for im in self.im_params:
        self.db_imgs.append(Image(data=im, dataset=self.dataset))
        last_img = self.db_imgs[-1]

        if first_epoch:
            insert_extracted_sources(last_img._id,
                                     [db_subs.example_extractedsource_tuple()], 'blind')

        associate_extracted_sources(last_img._id, deRuiter_r,
                                    new_source_sigma_margin)

        # First, check the runcat has been updated correctly
        running_cat = columns_from_table(table="runningcatalog",
                                         keywords=['datapoints'],
                                         where={"dataset": self.dataset.id})
        self.assertEqual(len(running_cat), 1)
        self.assertEqual(running_cat[0]['datapoints'], 1)

        last_img.update()
        last_img.update_sources()
        img_xtrsrc_ids = [src.id for src in last_img.sources]

        if first_epoch:
            self.assertEqual(len(img_xtrsrc_ids), 1)
            extracted_source_ids.extend(img_xtrsrc_ids)
            assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
                                                   keywords=['runcat', 'xtrsrc'],
                                                   where={"xtrsrc": img_xtrsrc_ids[0]})
            self.assertEqual(len(assocxtrsrcs_rows), 1)
            self.assertEqual(assocxtrsrcs_rows[0]['xtrsrc'], img_xtrsrc_ids[0])
        else:
            self.assertEqual(len(img_xtrsrc_ids), 0)

        first_epoch = False

    # Assocxtrsources still ok after multiple images?
    self.assertEqual(len(extracted_source_ids), 1)
    assocxtrsrcs_rows = columns_from_table(table="assocxtrsource",
                                           keywords=['runcat', 'xtrsrc'],
                                           where={"xtrsrc": extracted_source_ids[0]})
    self.assertEqual(len(assocxtrsrcs_rows), 1)
    self.assertEqual(assocxtrsrcs_rows[0]['xtrsrc'], extracted_source_ids[0],
                     "Runcat xtrsrc entry must match the only extracted source")

Developer ID: ajstewart, Project: tkp, Lines of code: 56, Source file: test_algorithms.py
Example 9: test_rejected_initial_image

def test_rejected_initial_image(self):
    """
    An image which is rejected should not be taken into account when
    deciding whether a patch of sky has been previously observed, and
    hence whether any detections in that area are (potential) transients.

    Here, we create a database with two images. The first
    (chronologically) is rejected; the second contains a source. That
    source should not be marked as a transient.
    """
    dataset = tkp.db.DataSet(data={"description": "Trans:" + self._testMethodName},
                             database=tkp.db.Database())

    # We use a dataset with two images.
    # NB the routine in db_subs automatically increments time between
    # images.
    n_images = 2
    db_imgs = [
        tkp.db.Image(data=im_params, dataset=dataset)
        for im_params in db_subs.generate_timespaced_dbimages_data(n_images)
    ]

    # The first image is rejected for an arbitrary reason
    # (for the sake of argument, we use an unacceptable RMS).
    db_quality.reject(
        imageid=db_imgs[0].id,
        reason=db_quality.reject_reasons["rms"],
        comment=self._testMethodName,
        session=self.session,
    )
    # Have to commit here: old DB code makes queries in a separate transaction.
    self.session.commit()

    # Since we rejected the first image, we only find a source in the
    # second.
    source = db_subs.example_extractedsource_tuple()
    insert_extracted_sources(db_imgs[1]._id, [source])

    # Standard source association procedure etc.
    associate_extracted_sources(db_imgs[1].id, deRuiter_r=3.7, new_source_sigma_margin=3)

    # Our source should _not_ be a transient. That is, there should be no
    # entries in the newsource table for this dataset.
    cursor = tkp.db.execute(
        """\
        SELECT t.id FROM newsource t, runningcatalog rc
         WHERE t.runcat = rc.id
           AND rc.dataset = %(ds_id)s
        """,
        {"ds_id": dataset.id},
    )
    self.assertEqual(cursor.rowcount, 0)

Developer ID: transientskp, Project: tkp, Lines of code: 52, Source file: test_reject.py
Example 10: test_two_field_overlap_nulling_src

def test_two_field_overlap_nulling_src(self):
    """Similar to above, but one source disappears:

    Two overlapping fields, 4 sources:
    one steady source only in lower field,
    one steady source in both fields,
    one steady source only in upper field,
    one transient source in both fields but only at *1st* timestep.
    """
    n_images = 2
    xtr_radius = 1.5
    im_params = db_subs.generate_timespaced_dbimages_data(n_images,
                                                          xtr_radius=xtr_radius)
    im_params[1]['centre_decl'] += xtr_radius * 1

    imgs = []
    lower_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] - 0.5 * xtr_radius)
    upper_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[1]['centre_ra'],
        dec=im_params[1]['centre_decl'] + 0.5 * xtr_radius)
    overlap_steady_src = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] + 0.2 * xtr_radius)
    overlap_transient = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'] + 0.8 * xtr_radius)

    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[0]))
    imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[1]))

    insert_extracted_sources(imgs[0]._id, [lower_steady_src, overlap_steady_src,
                                           overlap_transient])
    associate_extracted_sources(imgs[0]._id, deRuiter_r=0.1,
                                new_source_sigma_margin=new_source_sigma_margin)
    nd_posns = dbnd.get_nulldetections(imgs[0].id)
    self.assertEqual(len(nd_posns), 0)

    insert_extracted_sources(imgs[1]._id, [upper_steady_src, overlap_steady_src])
    associate_extracted_sources(imgs[1]._id, deRuiter_r=0.1,
                                new_source_sigma_margin=new_source_sigma_margin)

    # This time we don't expect to get an immediate transient detection,
    # but we *do* expect to get a null-source forced extraction request:
    nd_posns = dbnd.get_nulldetections(imgs[1].id)
    self.assertEqual(len(nd_posns), 1)

    runcats = columns_from_table('runningcatalog',
                                 where={'dataset': self.dataset.id})
    self.assertEqual(len(runcats), 4)  # sanity check.

Developer ID: ajstewart, Project: tkp, Lines of code: 50, Source file: test_skyregion.py
Example 11: insert_image_and_simulated_sources

def insert_image_and_simulated_sources(dataset, image_params, mock_sources,
                                       new_source_sigma_margin,
                                       deruiter_radius=3.7):
    """
    Simulates the standard database image-and-source insertion logic using mock
    sources.

    Args:
        dataset: The dataset object
        image_params (dict): Contains the image properties.
        mock_sources (list of MockSource): The mock sources to simulate.
        new_source_sigma_margin (float): Parameter passed to source-association
            routines.
        deruiter_radius (float): Parameter passed to source-association
            routines.

    Returns:
        3-tuple (image, list of blind extractions, list of forced fits).
    """
    image = tkp.db.Image(data=image_params, dataset=dataset)
    blind_extractions = []
    for src in mock_sources:
        xtr = src.simulate_extraction(image, extraction_type='blind')
        if xtr is not None:
            blind_extractions.append(xtr)
    image.insert_extracted_sources(blind_extractions, 'blind')
    image.associate_extracted_sources(deRuiter_r=deruiter_radius,
                                      new_source_sigma_margin=new_source_sigma_margin)

    nd_ids_posns = nulldetections.get_nulldetections(image.id)
    nd_posns = [(ra, decl) for ids, ra, decl in nd_ids_posns]
    forced_fits = []
    for posn in nd_posns:
        for src in mock_sources:
            eps = 1e-13
            if (math.fabs(posn[0] - src.base_source.ra) < eps and
                    math.fabs(posn[1] - src.base_source.dec) < eps):
                forced_fits.append(
                    src.simulate_extraction(image, extraction_type='ff_nd')
                )
    if len(nd_posns) != len(forced_fits):
        raise LookupError("Something went wrong, nulldetection position did "
                          "not match a mock source.")
    # image.insert_extracted_sources(forced_fits, 'ff_nd')
    dbgen.insert_extracted_sources(image.id, forced_fits, 'ff_nd',
                                   ff_runcat_ids=[ids for ids, ra, decl in nd_ids_posns])
    nulldetections.associate_nd(image.id)

    return image, blind_extractions, forced_fits

Developer ID: gijzelaerr, Project: tkp-1, Lines of code: 49, Source file: db_subs.py
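A sketch of driving the helper above for a two-epoch dataset with a single steady mock source. This is illustrative only: it assumes MockSource and the image-parameter generator live in the same db_subs test module (as the other examples on this page suggest), and that dataset is an existing tkp.db.DataSet.

from collections import defaultdict

im_params = db_subs.generate_timespaced_dbimages_data(n_images=2)
steady_src = db_subs.MockSource(
    db_subs.example_extractedsource_tuple(ra=im_params[0]['centre_ra'],
                                          dec=im_params[0]['centre_decl']),
    lightcurve=defaultdict(lambda: 10.0))  # always-detectable flux, value illustrative

for pars in im_params:
    image, blind_fits, forced_fits = insert_image_and_simulated_sources(
        dataset, pars, [steady_src], new_source_sigma_margin=3)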
Example 12: test_new_skyregion_insertion

def test_new_skyregion_insertion(self):
    """Here we test the association logic executed upon insertion of a
    new skyregion.

    We expect that any pre-existing entries in the runningcatalog
    which lie within the field of view will be marked as
    'within this region', through the presence of an entry in table
    ``assocskyrgn``.
    Conversely sources outside the FoV should not be marked as related.

    We begin with img0, with a source at centre.
    Then we add 2 more (empty) images/fields at varying positions.
    """
    n_images = 6
    im_params = db_subs.generate_timespaced_dbimages_data(n_images)

    src_in_img0 = db_subs.example_extractedsource_tuple(
        ra=im_params[0]['centre_ra'],
        dec=im_params[0]['centre_decl'],)

    # First image
    image0 = tkp.db.Image(dataset=self.dataset, data=im_params[0])
    insert_extracted_sources(image0._id, [src_in_img0])
    associate_extracted_sources(image0._id, deRuiter_r, new_source_sigma_margin)
    image0.update()
    runcats = columns_from_table('runningcatalog',
                                 where={'dataset': self.dataset.id})
    self.assertEqual(len(runcats), 1)  # Just a sanity check.

    ## Second, different *But overlapping* image:
    idx = 1
    im_params[idx]['centre_decl'] += im_params[idx]['xtr_radius'] * 0.9
    image1 = tkp.db.Image(dataset=self.dataset, data=im_params[idx])
    image1.update()
    assocs = columns_from_table('assocskyrgn',
                                where={'skyrgn': image1._data['skyrgn']})
    self.assertEqual(len(assocs), 1)
    self.assertEqual(assocs[0]['runcat'], runcats[0]['id'])

    ## Third, different *and NOT overlapping* image:
    idx = 2
    im_params[idx]['centre_decl'] += im_params[idx]['xtr_radius'] * 1.1
    image2 = tkp.db.Image(dataset=self.dataset, data=im_params[idx])
    image2.update()
    assocs = columns_from_table('assocskyrgn',
                                where={'skyrgn': image2._data['skyrgn']})
    self.assertEqual(len(assocs), 0)

Developer ID: ajstewart, Project: tkp, Lines of code: 48, Source file: test_skyregion.py
Example 13: test_null_case_sequential

def test_null_case_sequential(self):
    """test_null_case_sequential

    - Check extractedsource insertion routines can deal with empty input!
    - Check source association can too
    """
    for im in self.im_params:
        self.db_imgs.append(Image(data=im, dataset=self.dataset))
        insert_extracted_sources(self.db_imgs[-1]._id, [], 'blind')
        associate_extracted_sources(self.db_imgs[-1]._id, deRuiter_r,
                                    new_source_sigma_margin)
        running_cat = columns_from_table(table="runningcatalog",
                                         keywords="*",
                                         where={"dataset": self.dataset.id})
        self.assertEqual(len(running_cat), 0)

Developer ID: ajstewart, Project: tkp, Lines of code: 16, Source file: test_algorithms.py
Example 14: test_one2oneflux

def test_one2oneflux(self):
    dataset = tkp.db.DataSet(database=self.database,
                             data={'description': 'flux test set: 1-1'})
    n_images = 3
    im_params = db_subs.generate_timespaced_dbimages_data(n_images)

    src_list = []
    src = db_subs.example_extractedsource_tuple()
    src0 = src._replace(flux=2.0)
    src_list.append(src0)
    src1 = src._replace(flux=2.5)
    src_list.append(src1)
    src2 = src._replace(flux=2.4)
    src_list.append(src2)

    for idx, im in enumerate(im_params):
        image = tkp.db.Image(database=self.database, dataset=dataset, data=im)
        insert_extracted_sources(image._id, [src_list[idx]])
        associate_extracted_sources(image.id, deRuiter_r=3.717)

    query = """\
    SELECT rf.avg_f_int
      FROM runningcatalog r
          ,runningcatalog_flux rf
     WHERE r.dataset = %(dataset)s
       AND r.id = rf.runcat
    """
    self.database.cursor.execute(query, {'dataset': dataset.id})
    result = zip(*self.database.cursor.fetchall())
    avg_f_int = result[0]
    self.assertEqual(len(avg_f_int), 1)
    py_metrics = db_subs.lightcurve_metrics(src_list)
    self.assertAlmostEqual(avg_f_int[0], py_metrics[-1]['avg_f_int'])

    runcat_id = columns_from_table('runningcatalog',
                                   where={'dataset': dataset.id})
    self.assertEqual(len(runcat_id), 1)
    runcat_id = runcat_id[0]['id']

    # Check evolution of variability indices
    db_metrics = db_queries.get_assoc_entries(self.database, runcat_id)
    self.assertEqual(len(db_metrics), n_images)
    # Compare the python- and db-calculated values
    for i in range(len(db_metrics)):
        for key in ('v_int', 'eta_int'):
            self.assertAlmostEqual(db_metrics[i][key], py_metrics[i][key])

Developer ID: ajstewart, Project: tkp, Lines of code: 44, Source file: test_fluxes.py
Example 15: test_two_field_basic_case

def test_two_field_basic_case(self):
    """
    Here we create 2 disjoint image fields, with one source at centre of
    each, and check that the second source inserted does not get flagged as
    newsource.
    """
    n_images = 2
    xtr_radius = 1.5
    im_params = db_subs.generate_timespaced_dbimages_data(n_images,
                                                          xtr_radius=xtr_radius)
    im_params[1]['centre_decl'] += xtr_radius * 2 + 0.5

    imgs = []
    for idx in range(len(im_params)):
        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))

    for idx in range(len(im_params)):
        central_src = db_subs.example_extractedsource_tuple(
            ra=im_params[idx]['centre_ra'],
            dec=im_params[idx]['centre_decl'])
        imgs.append(tkp.db.Image(dataset=self.dataset, data=im_params[idx]))
        insert_extracted_sources(imgs[idx]._id, [central_src])
        associate_extracted_sources(imgs[idx]._id, deRuiter_r, new_source_sigma_margin)

    runcats = columns_from_table('runningcatalog',
                                 where={'dataset': self.dataset.id})
    self.assertEqual(len(runcats), 2)  # Just a sanity check.

    newsources_qry = """\
    SELECT *
      FROM newsource tr
          ,runningcatalog rc
     WHERE rc.dataset = %s
       AND tr.runcat = rc.id
    """
    self.database.cursor.execute(newsources_qry, (self.dataset.id,))
    newsources = get_db_rows_as_dicts(self.database.cursor)
    self.assertEqual(len(newsources), 0)

Developer ID: ajstewart, Project: tkp, Lines of code: 40, Source file: test_skyregion.py
Example 16: test_certain_transient

def test_certain_transient(self):
    """
    flux1 > rms_max0*(det0 + margin)
    --> Definite transient

    Nice and bright, must be new - mark it definite transient.
    """
    img_params = self.img_params

    bright_transient = MockSource(
        example_extractedsource_tuple(ra=img_params[0]['centre_ra'],
                                      dec=img_params[0]['centre_decl']),
        lightcurve={img_params[1]['taustart_ts']:
                        self.always_detectable_flux}
    )
    # First, check that we've set up the test correctly:
    rms_max0 = img_params[0]['rms_max']
    det0 = img_params[0]['detection_thresh']
    self.assertTrue(bright_transient.lightcurve.values()[0] >
                    rms_max0 * (det0 + self.new_source_sigma_margin))

    for pars in self.img_params:
        img = tkp.db.Image(data=pars, dataset=self.dataset)
        xtr = bright_transient.simulate_extraction(img,
                                                   extraction_type='blind')
        if xtr is not None:
            insert_extracted_sources(img._id, [xtr], 'blind')
        associate_extracted_sources(img._id, deRuiter_r, self.new_source_sigma_margin)

    newsources = get_newsources_for_dataset(self.dataset.id)
    # Should have one 'definite' transient
    self.assertEqual(len(newsources), 1)
    self.assertTrue(
        newsources[0]['low_thresh_sigma'] > self.new_source_sigma_margin)
    self.assertTrue(
        newsources[0]['high_thresh_sigma'] > self.new_source_sigma_margin)
    self.assertTrue(
        newsources[0]['low_thresh_sigma'] >
        newsources[0]['high_thresh_sigma'])

Developer ID: ajstewart, Project: tkp, Lines of code: 40, Source file: test_transients.py
Example 17: test_one2one_flux_infinite_error

def test_one2one_flux_infinite_error(self):
    dataset = tkp.db.DataSet(database=self.database,
                             data={'description': 'flux test set: 1-1'})
    n_images = 3
    im_params = db_subs.generate_timespaced_dbimages_data(n_images)

    src_list = []
    src = db_subs.example_extractedsource_tuple()
    src0 = src._replace(flux=2.0)
    src_list.append(src0)
    src1 = src._replace(flux=2.5)
    src_list.append(src1)
    src2 = src._replace(flux=0.0001, flux_err=float('inf'),
                        peak=0.0001, peak_err=float('inf'))
    src_list.append(src2)

    for idx, im in enumerate(im_params):
        image = tkp.db.Image(database=self.database, dataset=dataset, data=im)
        insert_extracted_sources(image._id, [src_list[idx]])
        associate_extracted_sources(image.id, deRuiter_r=3.717)

    query = """\
    SELECT rf.avg_f_int
          ,rf.f_datapoints
      FROM runningcatalog r
          ,runningcatalog_flux rf
     WHERE r.dataset = %(dataset)s
       AND r.id = rf.runcat
    """
    cursor = tkp.db.execute(query, {'dataset': dataset.id})
    results = db_subs.get_db_rows_as_dicts(cursor)

    self.assertEqual(len(results), 1)
    self.assertEqual(results[0]['f_datapoints'], 2)
    self.assertAlmostEqual(results[0]['avg_f_int'],
                           (src0.flux + src1.flux) / 2.0)

Developer ID: ajstewart, Project: tkp, Lines of code: 36, Source file: test_fluxes.py
Example 18: insert_forcedfits_into_extractedsource

def insert_forcedfits_into_extractedsource(image_id, results, extract):
    general.insert_extracted_sources(image_id, results, extract)

Developer ID: jdswinbank, Project: tkp, Lines of code: 2, Source file: monitoringlist.py
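The same underlying call handles forced fits as well as blind extractions. A sketch of feeding monitoring-list forced-fit results back into the database through this wrapper; image and forced_fit_results are assumed to come from a preceding forced-fitting step, and 'ff_ms' marks monitoring-source fits (as opposed to 'ff_nd' for null detections):

# forced_fit_results: list of fit tuples/ExtractionResults from forced fitting
# at the monitoring positions (hypothetical variable names).
insert_forcedfits_into_extractedsource(image.id, forced_fit_results, 'ff_ms')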
Example 19: run

    #......... part of the code omitted here .........
    if job_config.persistence.dataset_id == -1:
        store_config(job_config, dataset_id)  # new data set
        if supplied_mon_coords:
            dbgen.insert_monitor_positions(dataset_id, supplied_mon_coords)
    else:
        job_config_from_db = fetch_config(dataset_id)  # existing data set
        if check_job_configs_match(job_config, job_config_from_db):
            logger.debug("Job configs from file / database match OK.")
        else:
            logger.warn("Job config file has changed since dataset was "
                        "first loaded into database. ")
            logger.warn("Using job config settings loaded from database, see "
                        "log dir for details")
            job_config = job_config_from_db
        if supplied_mon_coords:
            logger.warn("Monitor positions supplied will be ignored. "
                        "(Previous dataset specified)")

    dump_configs_to_logdir(log_dir, job_config, pipe_config)

    logger.info("performing persistence step")
    image_cache_params = pipe_config.image_cache
    imgs = [[img] for img in all_images]

    rms_est_sigma = job_config.persistence.rms_est_sigma
    rms_est_fraction = job_config.persistence.rms_est_fraction
    metadatas = runner.map("persistence_node_step", imgs,
                           [image_cache_params, rms_est_sigma, rms_est_fraction])
    metadatas = [m[0] for m in metadatas if m]

    logger.info("Storing images")
    image_ids = store_images(metadatas,
                             job_config.source_extraction.extraction_radius_pix,
                             dataset_id)
    db_images = [Image(id=image_id) for image_id in image_ids]

    logger.info("performing quality check")
    urls = [img.url for img in db_images]
    arguments = [job_config]
    rejecteds = runner.map("quality_reject_check", urls, arguments)

    good_images = []
    for image, rejected in zip(db_images, rejecteds):
        if rejected:
            reason, comment = rejected
            steps.quality.reject_image(image.id, reason, comment)
        else:
            good_images.append(image)
    if not good_images:
        logger.warn("No good images under these quality checking criteria")
        return

    grouped_images = group_per_timestep(good_images)
    timestep_num = len(grouped_images)
    for n, (timestep, images) in enumerate(grouped_images):
        msg = "processing %s images in timestep %s (%s/%s)"
        logger.info(msg % (len(images), timestep, n + 1, timestep_num))

        logger.info("performing source extraction")
        urls = [img.url for img in images]
        arguments = [se_parset]
        extraction_results = runner.map("extract_sources", urls, arguments)

        logger.info("storing extracted sources to database")
        # we also set the image max,min RMS values which were calculated during
        # source extraction
        for image, results in zip(images, extraction_results):
            #......... remainder of the code omitted .........