This article collects typical usage examples of the merge.merge function in Python. If you have been wondering how to use merge, what merge does, or what working merge code looks like, the curated examples below should help.
It presents 20 code examples of the merge function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
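A note before the examples: merge is not a standard-library module. Each snippet below imports its own project-local merge.py, so the signature and semantics of merge.merge differ from project to project (sorted-list merging, PDF page interleaving, dict merging, XML feed merging, and so on). For the common case of combining sorted iterables, the closest standard-library analogue is heapq.merge, shown here only for orientation; none of the examples below use it:

# Standard-library analogue for the sorted-sequence flavour of merge.
import heapq

print(list(heapq.merge([1, 2, 4], [3, 6])))  # -> [1, 2, 3, 4, 6]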
Example 1: create_po
def create_po():
    tm = datetime.datetime.now()
    postfix = '%s%02d%02d.%02d%02d%02d' % tuple(tm.timetuple())[:6]
    home = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    os.chdir(os.path.join(home, 'scripts'))

    cmd = "python %s %s %s" % ('pygettext.py', os.path.join(home, 'ui', '*.py'), os.path.join(home, 'updater.py'))
    print cmd
    os.system(cmd)

    cmd = 'move messages.pot youmoney.pot'
    print cmd
    shutil.move('messages.pot', 'youmoney.pot')

    #dstnames = ['youmoney_zh_CN', 'youmoney_ja_JP']
    global dstnames
    for name in dstnames:
        dstfile = name + '.po'
        print dstfile
        # backup old file
        if os.path.isfile(dstfile):
            shutil.move(dstfile, '%s.%s.%d' % (dstfile, postfix, random.randint(0, 10000)))
        merge.merge(name + '.sample', "youmoney.pot", dstfile)
Developer: Castlely | Project: PYTHON | Lines: 26 | Source: make.py
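The merge.merge(sample, pot, dst) call at the end presumably plays the role of GNU msgmerge, folding existing translations into the freshly extracted template. A rough stand-in under that assumption (the project's actual merge module is not shown here):

# Hypothetical stand-in: delegate to GNU msgmerge, which combines an
# existing .po file with a new .pot template into an output .po.
import subprocess

def merge(def_po, ref_pot, out_po):
    subprocess.check_call(['msgmerge', def_po, ref_pot, '-o', out_po])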
Example 2: testNotSubset
def testNotSubset(self):
    master = parse(merge.merge(header + "<group a='a'>\n <implementation id='sha1=123' version='1'/>\n </group>" + footer, local_file))
    assert master.uri == 'http://test/hello.xml', master
    assert len(master.implementations) == 2
    assert master.implementations['sha1=123'].metadata.get('a', None) == 'a'
    assert master.implementations['sha1=002'].metadata.get('a', None) == None

    master = parse(merge.merge(header + """\n
<group>
 <requires interface='http://foo' meta='foo'/>
 <implementation id='sha1=004' version='1'/>
</group>
<group>
 <requires interface='http://foo'>
  <version before='1'/>
 </requires>
 <implementation id='sha1=001' version='1'/>
</group>""" + footer, local_file_req))
    assert len(master.implementations['sha1=001'].requires[0].restrictions) == 1
    assert len(master.implementations['sha1=003'].requires[0].restrictions) == 0
    assert master.implementations['sha1=004'].requires[0].metadata.get('meta', None) == 'foo'
    assert master.implementations['sha1=003'].requires[0].metadata.get('meta', None) == None
    assert master.implementations['sha1=003'].main == 'hello'
Developer: timdiels | Project: 0publish | Lines: 25 | Source: testlocal.py
Example 3: testMergeBest
def testMergeBest(self):
    master_xml = merge.merge(header + """\n
<group>
 <implementation id='sha1=123' version='1'/>
</group>
<group>
 <requires interface='http://foo'/>
 <implementation id='sha1=002' version='2'/>
</group>""" + footer, local_file_req)
    master = parse(master_xml)
    assert master.uri == 'http://test/hello.xml', master
    assert len(master.implementations) == 3
    deps = master.implementations['sha1=003'].requires
    assert len(deps) == 1
    assert deps[0].interface == 'http://foo', deps[0]

    assert len(minidom.parseString(master_xml).documentElement.getElementsByTagNameNS(XMLNS_IFACE, 'group')) == 2

    # Again, but with the groups the other way around
    master_xml = merge.merge(header + """\n
<group>
 <requires interface='http://foo'/>
 <implementation id='sha1=002' version='2'/>
</group>
<group>
 <implementation id='sha1=123' version='1'/>
</group>""" + footer, local_file_req)
    master = parse(master_xml)
    assert master.uri == 'http://test/hello.xml', master
    assert len(master.implementations) == 3
    deps = master.implementations['sha1=003'].requires
    assert len(deps) == 1
    assert deps[0].interface == 'http://foo', deps[0]

    assert len(minidom.parseString(master_xml).documentElement.getElementsByTagNameNS(XMLNS_IFACE, 'group')) == 2
Developer: timdiels | Project: 0publish | Lines: 35 | Source: testlocal.py
Example 4: merge
def merge(lib_img_path, macro, detract = 0):
    libimg = Image.open(dropBoxDir + lib_img_path)
    targimg = Image.open(dropBoxDir + 'target.jpg')
    backedby = Meme.objects.filter(classification = macro).count()
    m.merge(libimg, targimg, backedby - detract)
    if ".jpg" not in (dropBoxDir + lib_img_path):
        libimg.save(dropBoxDir + lib_img_path + ".jpg")
    libimg.save(dropBoxDir + lib_img_path)
Developer: rkass | Project: memestat.bak | Lines: 8 | Source: parsePage.py
Example 5: test_merged_file_contains_all_pages
def test_merged_file_contains_all_pages(self):
    front_pages = MockPdfReader()
    back_pages = MockPdfReader()

    merge.merge(front_pages, back_pages, 'fake_out', True, False)

    expected_len = len(front_pages.pages) + len(back_pages.pages)
    self.assertEqual(expected_len, len(self.outfile.pages))
Developer: mgarriott | Project: PDFMerger | Lines: 8 | Source: merge_test.py
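The PDFMerger tests in this article (Examples 5, 10, and 13) rely on fixtures not shown in these excerpts: MockPdfReader and self.outfile. A plausible minimal version of the mock, inferred purely from how the tests use it rather than from the repository's actual code — self.outfile is presumably a similarly mocked writer that captures merge's output:

# Hypothetical reconstruction of the test fixture: a fake PDF reader
# exposing only the .pages list the tests inspect.
class MockPdfReader(object):
    def __init__(self, pages=None):
        # Default to two dummy "pages" so len(self.pages) is meaningful.
        self.pages = pages if pages is not None else [object(), object()]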
Example 6: merge
def merge(self, other):
    if isinstance(other, Version):
        merge.merge(self, other)
    elif isinstance(other, Model):
        merge.merge(self, other.version)
    else:
        raise TypeError('Expected instance of %s or %s' % \
            (Version, self.__class__))
Developer: jbenet | Project: py-dronestore | Lines: 8 | Source: model.py
Example 7: _sort
def _sort(destination, source, first, last):
    length = last - first
    if length <= 1: return
    middle = first + length//2

    _sort(source, destination, first, middle)
    _sort(source, destination, middle, last)

    if source[middle - 1] > source[middle]:
        merge.merge(destination, source, first, middle, last)
Developer: goldsborough | Project: algs4 | Lines: 10 | Source: top_bottom.py
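The merge.merge(destination, source, first, middle, last) this sort calls is the classic index-based merge of algs4-style top-down mergesort, where the two arrays swap roles at each recursion level. A minimal sketch of a compatible implementation, inferred from the call site rather than taken from the repository:

# Merge source[first:middle] and source[middle:last] into
# destination[first:last]; both halves are assumed already sorted.
def merge(destination, source, first, middle, last):
    i, j = first, middle
    for k in range(first, last):
        if i >= middle:                      # left half exhausted
            destination[k] = source[j]; j += 1
        elif j >= last:                      # right half exhausted
            destination[k] = source[i]; i += 1
        elif source[j] < source[i]:          # take the smaller head
            destination[k] = source[j]; j += 1
        else:
            destination[k] = source[i]; i += 1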
Example 8: main
def main():
    global stop_set, docs_max, file_count, cont
    global dict_title, dict_infobox, dict_category, dict_ref, dict_links, dict_body
    global file_infobox, file_title, file_category, file_ref, file_body, file_links
    # global total_no_document
    stop_words_set()
    # create an XMLReader
    parser = xml.sax.make_parser()
    # turn off namespaces
    parser.setFeature(xml.sax.handler.feature_namespaces, 0)
    # override the default ContextHandler
    Handler = My_Wiki_Handler()
    parser.setContentHandler(Handler)
    parser.parse("corpus2.xml")

    string_infobox = ""
    string_title = ""
    string_category = ""
    string_body = ""
    string_ref = ""
    string_links = ""

    for word in sorted(dict_title.keys()):
        string_title = word + "=" + dict_title[word] + '\n'
        file_title.write(string_title)
    for word in sorted(dict_infobox.keys()):
        string_infobox = word + "=" + dict_infobox[word] + '\n'
        file_infobox.write(string_infobox)
    for word in sorted(dict_category.keys()):
        string_category = word + "=" + dict_category[word] + '\n'
        file_category.write(string_category)
    for word in sorted(dict_body.keys()):
        string_body = word + "=" + dict_body[word] + '\n'
        file_body.write(string_body)
    for word in sorted(dict_ref.keys()):
        string_ref = word + "=" + dict_ref[word] + '\n'
        file_ref.write(string_ref)
    for word in sorted(dict_links.keys()):
        string_links = word + "=" + dict_links[word] + '\n'
        file_links.write(string_links)

    clear_dict()
    close_file()

    print "Merging Begins"
    merge.merge(file_count)
    os.system("rm index/body* index/category* index/infobox* index/title* index/ref* index/links*")
    print cont
Developer: apaargarg | Project: Wikipedia-Search-Engine | Lines: 54 | Source: index.py
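Here merge.merge(file_count) evidently combines the sorted partial index files written above into final indexes (the raw index/title*, index/body*, etc. files are deleted right afterwards). One common way to do such a k-way merge is heapq.merge over the sorted parts; the sketch below is only an illustration of that idea, and the 'index/title%d' naming is invented, not the project's scheme:

# Hypothetical k-way merge of sorted partial index files.
import heapq

def merge(file_count):
    parts = [open('index/title%d' % i) for i in range(file_count)]
    with open('index/title_merged', 'w') as out:
        out.writelines(heapq.merge(*parts))  # lines are pre-sorted by key
    for f in parts:
        f.close()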
Example 9: test_list_key_type_mismatch
def test_list_key_type_mismatch(self):
    input = {
        '__all__': {
            'groups': ['sudo'],
            'shell': '/bin/bash'
        },
        'alice': {},
        'bob': {
            'groups': 'users',
        }
    }
    with self.assertRaises(errors.AnsibleFilterError):
        merge(input)
Developer: ahes | Project: ansible-filter-merge | Lines: 13 | Source: test_merge.py
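Inferring from this test and test_only_defaults_key (Example 18 below), the filter merges the '__all__' defaults into every other key and rejects an override whose type differs from the default's. One implementation consistent with those tests — a guess, not the plugin's actual source:

# Hypothetical reconstruction of the ansible-filter-merge behaviour:
# apply '__all__' defaults to each entry, raising on type mismatch.
from ansible import errors

def merge(data):
    defaults = data.get('__all__', {})
    result = {}
    for name, overrides in data.items():
        if name == '__all__':
            continue
        merged = dict(defaults)
        for key, value in overrides.items():
            if key in defaults and type(defaults[key]) is not type(value):
                raise errors.AnsibleFilterError(
                    "type mismatch for key '%s'" % key)
            merged[key] = value
        result[name] = merged
    return result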
Example 10: test_merged_file_contains_pages_in_correct_order
def test_merged_file_contains_pages_in_correct_order(self):
    front_pages = MockPdfReader([MockPdfReader() for i in range(3)])
    back_pages = MockPdfReader([MockPdfReader() for i in range(3)])

    merge.merge(front_pages, back_pages, 'fake_out', True, False)

    for i, page in enumerate(self.outfile.pages):
        if i % 2 == 0:
            expected_page = front_pages.pages[i / 2]
        else:
            expected_page = back_pages.pages[i / 2]
        self.assertEqual(expected_page, page)
Developer: mgarriott | Project: PDFMerger | Lines: 13 | Source: merge_test.py
Example 11: test_identity
def test_identity(self):
    identity = {}

    other = {}
    merged = merge(identity, other)
    self.assertDictEqual(merged, other)

    other = {'foo': 1}
    merged = merge(identity, other)
    self.assertDictEqual(merged, other)

    other = {'foo': {'bar': 1}}
    merged = merge(identity, other)
    self.assertDictEqual(merged, other)
Developer: shawnsi | Project: ansible-merge-filter | Lines: 14 | Source: test.py
Example 12: test_merge
def test_merge():
    A = [1,2,4,None,None]
    B = [3,6]
    merge(A,B)
    assert(A == [1,2,3,4,6])

    A = [12,14,15,None,None]
    B = [1,2]
    merge(A,B)
    assert(A == [1,2,12,14,15])
Developer: prathamtandon | Project: python-algorithms-and-data-structures | Lines: 14 | Source: merge_test.py
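This test expects an in-place merge where A carries len(B) trailing None placeholders — the classic merge-sorted-array exercise. A sketch that passes both assertions above, filling A from the back; an assumed implementation, not necessarily the repository's:

# Merge sorted B into sorted A in place; A ends with len(B) None slots.
def merge(A, B):
    i = len(A) - len(B) - 1  # index of A's last real element
    j = len(B) - 1           # index of B's last element
    k = len(A) - 1           # next write position, from the back
    while j >= 0:
        if i >= 0 and A[i] > B[j]:
            A[k] = A[i]; i -= 1
        else:
            A[k] = B[j]; j -= 1
        k -= 1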
Example 13: test_merging_fed_backwards_correctly_orders_pages
def test_merging_fed_backwards_correctly_orders_pages(self):
    front_pages = MockPdfReader([MockPdfReader() for i in range(3)])
    back_pages = MockPdfReader([MockPdfReader() for i in range(3)])

    merge.merge(front_pages, back_pages, 'fake_out', True, True)

    bp_last_index = len(back_pages.pages) - 1
    for i, page in enumerate(self.outfile.pages):
        if i % 2 == 0:
            expected_page = front_pages.pages[i / 2]
        else:
            expected_page = back_pages.pages[bp_last_index - i / 2]
        self.assertEqual(expected_page, page)
Developer: mgarriott | Project: PDFMerger | Lines: 14 | Source: merge_test.py
Example 14: mask_bright_objs
def mask_bright_objs(image, master, lower_limit, bg): #This routine generates masks for n number of objects
    #for y in range (0,n_obj): #Dictates how many loops of the cycle are done
    while image.max() > lower_limit:
        max = image.max() #finds the max value in the image
        list = image.flatten() #flattens the image into a 1D list
        location = np.where(list == max)[0] #finds the position of all the maxima
        length = location.size #calculates how many maxima are present
        for z in range(0, length): #Loop which repeats as many times as there are maxima
            ycoord = int(location[z]/2570) #calculates the x and y co-ordinates
            xcoord = location[z]-(2570*ycoord) #using the fact we know the shape of the original image
            pos = [ycoord, xcoord] #stores the xy co-ordinates in pos
            #print pos #print position so we know which pixel is the problem if program fails
            new_mask = bld.obj_mask(image, pos, bg) #creates a circular mask over the image
            master = merge.merge(master, new_mask) #merges the most recent mask to the master
            image.mask = master #applies the mask to the image so we don't count the same objects when we repeat the loop
    return master #returns the master to mosaic
Developer: scottadams | Project: astronomical-imaging | Lines: 25 | Source: bleeder.py
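Here merge.merge(master, new_mask) only needs to combine two boolean pixel masks so that a pixel masked in either input stays masked. Assuming that semantics (the project's merge module is not shown), it is equivalent to an element-wise OR:

# Assumed behaviour of the project's mask merge: element-wise OR of
# two boolean mask arrays of the same shape.
import numpy as np

def merge(master, new_mask):
    return np.logical_or(master, new_mask)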
Example 15: createAlgsList
def createAlgsList(self):
    # First we populate the list of algorithms with those created
    # extending GeoAlgorithm directly (those that execute GDAL
    # using the console)
    self.preloadedAlgs = [nearblack(), information(), warp(), translate(),
                          rgb2pct(), pct2rgb(), merge(), buildvrt(), polygonize(), gdaladdo(),
                          ClipByExtent(), ClipByMask(), contour(), rasterize(), proximity(),
                          sieve(), fillnodata(), ExtractProjection(), gdal2xyz(),
                          hillshade(), slope(), aspect(), tri(), tpi(), roughness(),
                          ColorRelief(), GridInvDist(), GridAverage(), GridNearest(),
                          GridDataMetrics(), gdaltindex(), gdalcalc(), rasterize_over(),
                          # ----- OGR tools -----
                          OgrInfo(), Ogr2Ogr(), Ogr2OgrClip(), Ogr2OgrClipExtent(),
                          Ogr2OgrToPostGis(), Ogr2OgrToPostGisList(), Ogr2OgrPointsOnLines(),
                          Ogr2OgrBuffer(), Ogr2OgrDissolve(), Ogr2OgrOneSideBuffer(),
                          Ogr2OgrTableToPostGisList(), OgrSql(),
                          ]
    # And then we add those that are created as python scripts
    folder = self.scriptsFolder()
    if os.path.exists(folder):
        for descriptionFile in os.listdir(folder):
            if descriptionFile.endswith('py'):
                try:
                    fullpath = os.path.join(self.scriptsFolder(),
                                            descriptionFile)
                    alg = GdalScriptAlgorithm(fullpath)
                    self.preloadedAlgs.append(alg)
                except WrongScriptException as e:
                    ProcessingLog.addToLog(ProcessingLog.LOG_ERROR, e.msg)
Developer: Geoneer | Project: QGIS | Lines: 30 | Source: GdalOgrAlgorithmProvider.py
Example 16: _find_graphs_helper
def _find_graphs_helper(args):
    merge_rules, time, min_length, lite = args
    files, dest = merge_rules
    log = logging.getLogger("brorecords")

    # First check and see if there is already a pickled version of
    # extracted graphs from this given work set. If so, we can quick out
    # here. For simplicity's sake, we just append .pickle to the name of
    # the path for the combined bro records
    tmp_path = "{0}.pickles.tmp".format(dest)
    final_path = "{0}.pickles".format(dest)
    if os.path.isfile(final_path):
        log.info("Found pickled records already at {0}".format(final_path))
        return final_path

    log.info("Merging {0} files into {1}".format(len(files), dest))
    if not merge.merge(files, dest):
        return None

    log.info("{0}: Beginning parsing".format(dest))
    graph_count = 0
    with open(dest, 'r') as source_h, open(tmp_path, 'w') as dest_h:
        try:
            for g in graphs(source_h, time=time, record_filter=record_filter):
                graph_count += 1
                if len(g) < min_length:
                    continue
                pickle.dump(g, dest_h)
        except Exception, e:
            err = "Ignoring {0}: formatting errors in the log".format(dest)
            log.error(err)
            raise e
            return None
Developer: snyderp | Project: bro-tools | Lines: 34 | Source: reports.py
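In this snippet merge.merge(files, dest) combines several bro log files into one on-disk file and reports success; the caller bails out with None on a falsy return. A naive stand-in that matches that contract — the real module presumably also handles bro headers and record ordering:

# Hypothetical stand-in: concatenate the input logs into dest,
# returning True on success and False on I/O failure.
def merge(files, dest):
    try:
        with open(dest, 'w') as out:
            for path in files:
                with open(path) as source:
                    out.write(source.read())
        return True
    except IOError:
        return False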
Example 17: sort
def sort(a):
    if(len(a) < 7):
        return insert.sort(a)
    mid = len(a)/2
    left = merge.sort(a[:mid])
    right = merge.sort(a[mid:])
    return merge.merge(left,right)
Developer: linc01n | Project: sort-demo | Lines: 8 | Source: improve.py
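The merge.merge(left, right) used by this hybrid insertion/merge sort is the standard two-way merge of sorted lists. A compatible sketch, assumed from the call site rather than taken from the repository:

# Merge two sorted lists into a new sorted list.
def merge(left, right):
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            result.append(left[i]); i += 1
        else:
            result.append(right[j]); j += 1
    result.extend(left[i:])   # at most one of these two
    result.extend(right[j:])  # extends is non-empty
    return result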
Example 18: test_only_defaults_key
def test_only_defaults_key(self):
    input = {
        '__all__': {
            'groups': ['sudo'],
        },
    }
    expected_output = {}
    self.assertEqual(merge(input), expected_output)
Developer: ahes | Project: ansible-filter-merge | Lines: 8 | Source: test_merge.py
Example 19: generate_log
def generate_log(by_uri, to_uri, data_uri, policy_uri,
                 by_label = None, to_label = None, data_label = None):
    logging.debug("data_uri: %s", data_uri)
    g = rdflib.Graph()
    g.bind("", "http://dig.csail.mit.edu/2010/DHS-fusion/common/fusion_ONT#")
    g.bind("rdfify", "{0}#".format(_rdfify_prefix))
    trans = rdflib.URIRef(_transaction_uri)
    g.add((trans, rdflib.namespace.RDF.type, rdflib.URIRef("{0}#Request".format(policy_uri))))
    g.add((trans, rdflib.namespace.RDF.type, rdflib.URIRef("{0}#Disseminate".format(policy_uri))))
    g.add((trans, rdflib.URIRef("{0}#by".format(policy_uri)), rdflib.URIRef(by_uri)))
    g.add((trans, rdflib.URIRef("{0}#to".format(policy_uri)), rdflib.URIRef(to_uri)))
    g.add((trans, rdflib.URIRef("{0}#data".format(policy_uri)), rdflib.URIRef(data_uri)))
    g.add((rdflib.URIRef(by_uri), rdflib.URIRef(_label_predicate), rdflib.Literal(by_label)))
    g.add((rdflib.URIRef(to_uri), rdflib.URIRef(_label_predicate), rdflib.Literal(to_label)))
    g.add((rdflib.URIRef(data_uri), rdflib.URIRef(_label_predicate), rdflib.Literal(data_label)))

    g2, s = merge.merge([by_uri, to_uri, data_uri], ["http://link.csail.mit.edu/projects/devel/2015/air-niem-compatibility-revised/xsd/niem/niem-core/3.0/niem-core.xsd"])

    stmp = tempfile.NamedTemporaryFile()
    gtmp = tempfile.NamedTemporaryFile()
    stmp.write(s.serialize(format='n3'))
    gtmp.write(g2.serialize(format='n3'))
    stmp.flush()
    gtmp.flush()
    g.parse(stmp.name, format='n3')
    g.parse(gtmp.name, format='n3')

    g2tmp = tempfile.NamedTemporaryFile(delete=False)
    g.add((rdflib.URIRef("{0}#this_graph".format(_rdfify_prefix)), rdflib.URIRef("{0}#uri".format(_rdfify_prefix)), rdflib.URIRef("file://{0}".format(g2tmp.name))))

    ## bzy HACK HACK HACK HACK HACK
    ## cwm gives us no way to up-cast URIs from strings
    ## so we have to do it the slow way ...
    to_remove = set()
    for s, p, o in g.triples((None, rdflib.URIRef("{0}PRIb7Policy".format(_extern_prefix)), None)):
        if isinstance(o, rdflib.Literal):
            to_remove.add((s, p, o))
            g.add((s, p, rdflib.URIRef(o.value)))
    for t in to_remove:
        g.remove(t)

    gstr = g.serialize(format='n3')
    g2tmp.write(gstr)
    g2tmp.flush()

    return gstr
Developer: bzy-xyz | Project: air-niem-2015 | Lines: 57 | Source: dhs_air_integrated.py
Example 20: _build
def _build(self, env, output_path, force, no_filters, parent_filters=[]):
    """Internal recursive build method.
    """
    # TODO: We could support a nested bundle downgrading its debug
    # setting from "filters" to "merge only", i.e. enabling
    # ``no_filters``. We cannot support downgrading to
    # "full debug/no merge" (debug=True), of course.
    #
    # Right now we simply use the debug setting of the root bundle
    # we build, and it overrides all the nested bundles. If we
    # allow nested bundles to overwrite the debug value of parent
    # bundles, as described above, then we should also deal with
    # a child bundle enabling debug=True during a merge, i.e.
    # raising an error rather than ignoring it as we do now.
    resolved_contents = self.resolve_contents(env)
    if not resolved_contents:
        raise BuildError('empty bundle cannot be built')

    # Ensure that the filters are ready
    for filter in self.filters:
        filter.set_environment(env)

    # Apply input filters to all the contents. Note that we use
    # both this bundle's filters as well as those given to us by
    # the parent. We ONLY do this for the input filters, because
    # we need them to be applied before we apply our own output
    # filters.
    # TODO: Note that merge_filters() removes duplicates. Is this
    # really the right thing to do, or does it just confuse things
    # due to there now being different kinds of behavior...
    combined_filters = merge_filters(self.filters, parent_filters)
    cache = get_cache(env)
    hunks = []
    for c in resolved_contents:
        if isinstance(c, Bundle):
            hunk = c._build(env, output_path, force, no_filters,
                            combined_filters)
            hunks.append(hunk)
        else:
            if is_url(c):
                hunk = UrlHunk(c)
            else:
                hunk = FileHunk(env.abspath(c))
            if no_filters:
                hunks.append(hunk)
            else:
                hunks.append(apply_filters(
                    hunk, combined_filters, 'input', cache,
                    output_path=output_path))

    # Return all source hunks as one, with output filters applied
    final = merge(hunks)
    if no_filters:
        return final
    else:
        return apply_filters(final, self.filters, 'output', cache)
Developer: obiwanus | Project: webassets | Lines: 57 | Source: bundle.py
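In this last example, merge(hunks) is webassets' internal concatenation of hunk contents into a single in-memory hunk. Conceptually it amounts to the following — a simplification for illustration; the real helper returns a hunk object rather than a plain string:

# Simplified idea of webassets' merge(): join the data of each hunk.
def merge(hunks, separator='\n'):
    return separator.join(hunk.data() for hunk in hunks)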
Note: the merge.merge function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.