def _write_to_artifact_cache(self, vts, sources_by_target):
    """Write per-target depfiles and generated class files to the artifact cache.

    Splits the partition-wide depfile into one depfile per target, then pairs each
    versioned target with its own depfile plus the class files produced from its sources,
    and hands the pairs to the artifact cache.

    :param vts: VersionedTargetSet covering the targets just compiled.
    :param sources_by_target: dict mapping each target to its compiled sources.
    """
    self._ensure_depfile_tmpdir()
    # Generator expression instead of a throwaway list inside dict().
    vt_by_target = dict((vt.target, vt) for vt in vts.versioned_targets)
    # Compute each target's per-target depfile path exactly once: it is needed both as
    # a split destination and as the first artifact of each target.
    depfile_by_target = dict(
        (target, JavaCompile.create_depfile_path(self._depfile_tmpdir, [target]))
        for target in sources_by_target)
    # This work can happen in the background, if there's a measurable benefit to that.
    # Split the depfile into per-target files.
    splits = [(sources, depfile_by_target[target])
              for target, sources in sources_by_target.items()]
    deps = Dependencies(self._classes_dir)
    if os.path.exists(self._depfile):
        deps.load(self._depfile)
    deps.split(splits)
    # Gather up the artifacts: each target's depfile plus its class files.
    vts_artifactfiles_pairs = []
    for target, sources in sources_by_target.items():
        artifacts = [depfile_by_target[target]]
        for source in sources:
            for cls in deps.classes_by_source.get(source, []):
                artifacts.append(os.path.join(self._classes_dir, cls))
        vt = vt_by_target.get(target)
        if vt is not None:
            vts_artifactfiles_pairs.append((vt, artifacts))
    # Write to the artifact cache.
    self.update_artifact_cache(vts_artifactfiles_pairs)
def execute_single_compilation(self, vt, cp):
    """Compile one VersionedTargetSet partition and optionally cache its artifacts.

    :param vt: VersionedTargetSet for the partition to compile.
    :param cp: classpath as a list of (conf, jar) pairs; only confs in self._confs are used.
    Raises TaskError if the underlying compiler returns a non-zero result.
    """
    depfile = self.create_depfile_path(vt.targets)
    self.merge_depfile(vt)  # Get what we can from previous builds.
    sources_by_target, fingerprint = self.calculate_sources(vt.targets)
    if sources_by_target:
        sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
        if not sources:
            self.context.log.warn('Skipping java compile for targets with no sources:\n %s' %
                                  '\n '.join(str(t) for t in sources_by_target.keys()))
        else:
            classpath = [jar for conf, jar in cp if conf in self._confs]
            result = self.compile(classpath, sources, fingerprint, depfile)
            if result != 0:
                default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
                raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))
            # Split the partition depfile into per-target depfiles for caching.
            self.split_depfile(vt)

            # The whole-partition artifact starts with the merged depfile; per-target
            # class files are accumulated into it below.
            all_artifact_files = [depfile]

            if self._artifact_cache and self.context.options.write_to_artifact_cache:
                deps = Dependencies(self._classes_dir)
                deps.load(depfile)
                vts_artifactfile_pairs = []
                for single_vt in vt.versioned_targets:
                    per_target_depfile = self.create_depfile_path([single_vt.target])
                    per_target_artifact_files = [per_target_depfile]
                    for _, classes_by_source in deps.findclasses([single_vt.target]).items():
                        for _, classes in classes_by_source.items():
                            classfile_paths = [os.path.join(self._classes_dir, cls) for cls in classes]
                            per_target_artifact_files.extend(classfile_paths)
                            all_artifact_files.extend(classfile_paths)
                    vts_artifactfile_pairs.append((single_vt, per_target_artifact_files))
                # Also cache the whole partition as a single artifact.
                vts_artifactfile_pairs.append((vt, all_artifact_files))
                self.update_artifact_cache(vts_artifactfile_pairs)
def _post_process(self, target, cp):
    """Must be called on all targets, whether they needed compilation or not.

    Prepends this target's classes dir to the classpath (for downstream tasks) and,
    when the 'classes' product is required and a depfile exists, maps generated classes
    (and scalac plugin info files) to their owning target and sources.
    """
    classes_dir, depfile, _ = self._output_paths([target])

    # Update the classpath, for the benefit of tasks downstream from us.
    if os.path.exists(classes_dir):
        for conf in self._confs:
            cp.insert(0, (conf, classes_dir))

    # Make note of the classes generated by this target.
    if os.path.exists(depfile) and self.context.products.isrequired('classes'):
        self.context.log.debug('Reading dependencies from ' + depfile)
        deps = Dependencies(classes_dir)
        deps.load(depfile)

        genmap = self.context.products.get('classes')
        for classes_by_source in deps.findclasses([target]).values():
            for source, classes in classes_by_source.items():
                genmap.add(source, classes_dir, classes)
                genmap.add(target, classes_dir, classes)

        # TODO(John Sirois): Map target.resources in the same way

        # Create and Map scala plugin info files to the owning targets.
        # NOTE(review): kept inside the depfile/'classes' guard, since it uses genmap —
        # confirm this nesting against the original (indentation was lost upstream).
        if is_scalac_plugin(target) and target.classname:
            basedir, plugin_info_file = self._zinc_utils.write_plugin_info(self._resources_dir, target)
            genmap.add(target, basedir, [plugin_info_file])
def execute(self, targets):
    """Compile all java targets, update the 'classes' product, and write apt processor info.

    Compiles invalid target partitions, merges the resulting depfiles into the global
    dependency map, populates the 'classes' product mapping if required, and produces
    a monolithic annotation-processor service info file.
    """
    java_targets = filter(_is_java, targets)
    if java_targets:
        safe_mkdir(self._classes_dir)
        safe_mkdir(self._depfile_dir)

        # Register our output dirs on the exclusives-group classpath for downstream tasks.
        egroups = self.context.products.get_data('exclusives_groups')
        group_id = egroups.get_group_key_for_target(java_targets[0])
        for conf in self._confs:
            egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])
            egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])

        with self.invalidated(java_targets, invalidate_dependents=True,
                              partition_size_hint=self._partition_size_hint) as invalidation_check:
            for vt in invalidation_check.invalid_vts_partitioned:
                # Compile, using partitions for efficiency.
                exclusives_classpath = egroups.get_classpath_for_group(group_id)
                self.execute_single_compilation(vt, exclusives_classpath)
                if not self.dry_run:
                    vt.update()

            for vt in invalidation_check.all_vts:
                depfile = self.create_depfile_path(vt.targets)
                if not self.dry_run and os.path.exists(depfile):
                    # Read in the deps created either just now or by a previous run on these targets.
                    deps = Dependencies(self._classes_dir)
                    deps.load(depfile)
                    self._deps.merge(deps)

        if not self.dry_run:
            if self.context.products.isrequired('classes'):
                genmap = self.context.products.get('classes')

                # Map generated classes to the owning targets and sources.
                for target, classes_by_source in self._deps.findclasses(java_targets).items():
                    for source, classes in classes_by_source.items():
                        genmap.add(source, self._classes_dir, classes)
                        genmap.add(target, self._classes_dir, classes)

                # TODO(John Sirois): Map target.resources in the same way

                # 'Map' (rewrite) annotation processor service info files to the owning targets.
                for target in java_targets:
                    if is_apt(target) and target.processors:
                        basedir = os.path.join(self._resources_dir, Target.maybe_readable_identify([target]))
                        processor_info_file = os.path.join(basedir, _PROCESSOR_INFO_FILE)
                        self.write_processor_info(processor_info_file, target.processors)
                        genmap.add(target, basedir, [_PROCESSOR_INFO_FILE])

            # Produce a monolithic apt processor service info file for further compilation rounds
            # and the unit test classpath.
            all_processors = set()
            for target in java_targets:
                if is_apt(target) and target.processors:
                    all_processors.update(target.processors)
            processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
            if os.path.exists(processor_info_file):
                # Preserve processors recorded by earlier compilation rounds.
                with safe_open(processor_info_file, 'r') as f:
                    for processor in f:
                        all_processors.add(processor.strip())
            self.write_processor_info(processor_info_file, all_processors)
def post_process(self, versioned_targets):
    """Split the deps recorded for these targets and fold them into the global map."""
    path = self.create_depfile_path(versioned_targets.targets)
    # Guard clause: nothing to do on a dry run or when no depfile was produced.
    if self.dry_run or not os.path.exists(path):
        return
    # Read in the deps created either just now or by a previous compiler run on these targets.
    loaded = Dependencies(self._classes_dir)
    loaded.load(path)
    self.split_depfile(loaded, versioned_targets)
    self._deps.merge(loaded)
def split_artifact(self, deps, versioned_target_set):
    """Split an artifact covering several targets into per-target artifacts.

    For each target: creates its own output dir, depfile and analysis cache, hard-links
    class files out of the combined output dir, then uses zinc to split the combined
    analysis cache and rebase each resulting per-target cache.

    :param deps: Dependencies loaded from the combined depfile.
    :param versioned_target_set: the VersionedTargetSet whose artifact is being split.
    Raises TaskError if zinc fails to split or rebase the analysis files.
    """
    if len(versioned_target_set.targets) <= 1:
        return  # A single target needs no splitting.
    buildroot = get_buildroot()
    classes_by_source_by_target = deps.findclasses(versioned_target_set.targets)
    src_output_dir, _, src_analysis_cache = self.create_output_paths(versioned_target_set.targets)
    analysis_splits = []  # List of triples of (list of sources, destination output dir, destination analysis cache).
    # for dependency analysis, we need to record the cache files that we create in the split
    for target in versioned_target_set.targets:
        classes_by_source = classes_by_source_by_target.get(target, {})
        dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths([target])
        safe_rmtree(dst_output_dir)
        safe_mkdir(dst_output_dir)
        sources = []
        dst_deps = Dependencies(dst_output_dir)
        for source, classes in classes_by_source.items():
            src = os.path.join(target.target_base, source)
            dst_deps.add(src, classes)
            source_abspath = os.path.join(buildroot, target.target_base, source)
            sources.append(source_abspath)
            for cls in classes:
                # Hard-link (not copy) the class file into the per-target output dir.
                dst = os.path.join(dst_output_dir, cls)
                safe_mkdir(os.path.dirname(dst))
                os.link(os.path.join(src_output_dir, cls), dst)
        dst_deps.save(dst_depfile)
        analysis_splits.append((sources, dst_output_dir, dst_analysis_cache))
        self.generated_caches.add(os.path.join(dst_output_dir, dst_analysis_cache))
    # Use zinc to split the analysis files.
    if os.path.exists(src_analysis_cache):
        analysis_args = []
        analysis_args.extend(self._zinc_jar_args)
        analysis_args.extend([
            '-log-level', self.context.options.log_level or 'info',
            '-analysis',
            '-mirror-analysis'
        ])
        split_args = analysis_args + [
            '-cache', src_analysis_cache,
            '-split', ','.join(['{%s}:%s' % (':'.join(x[0]), x[2]) for x in analysis_splits]),
        ]
        if self.runjava(self._main, classpath=self._zinc_classpath, args=split_args, jvmargs=self._jvm_args):
            # Exception raised in call form: `raise X, y` is Python-2-only syntax.
            raise TaskError('zinc failed to split analysis files %s from %s' %
                            (':'.join([x[2] for x in analysis_splits]), src_analysis_cache))
        # Now rebase the newly created analysis files.
        for split in analysis_splits:
            dst_analysis_cache = split[2]
            if os.path.exists(dst_analysis_cache):
                rebase_args = analysis_args + [
                    '-cache', dst_analysis_cache,
                    '-rebase', '%s:%s' % (src_output_dir, split[1]),
                ]
                if self.runjava(self._main, classpath=self._zinc_classpath, args=rebase_args, jvmargs=self._jvm_args):
                    raise TaskError('In split_artifact: zinc failed to rebase analysis file %s' % dst_analysis_cache)
def _split_artifact(self, deps, versioned_target_set):
    """Splits an artifact representing several targets into target-by-target artifacts.

    Creates an output classes dir, a depfile and an analysis file for each target.
    Note that it's not OK to create incomplete artifacts here: this is run *after* a zinc
    invocation, and the expectation is that the result is complete.

    NOTE: This method is reentrant.

    Raises TaskError if zinc fails to split or rebase an analysis file.
    """
    if len(versioned_target_set.targets) <= 1:
        return  # A single target needs no splitting.
    classes_by_source_by_target = deps.findclasses(versioned_target_set.targets)
    src_classes_dir, _, src_analysis_file = self._output_paths(versioned_target_set.targets)

    # Specifies that the list of sources defines a split to the classes dir and analysis file.
    SplitInfo = namedtuple('SplitInfo', ['sources', 'dst_classes_dir', 'dst_analysis_file'])

    analysis_splits = []  # List of SplitInfos.
    portable_analysis_splits = []  # The same, for the portable version of the analysis cache.

    # Prepare the split arguments.
    for target in versioned_target_set.targets:
        classes_by_source = classes_by_source_by_target.get(target, {})
        dst_classes_dir, dst_depfile, dst_analysis_file = self._output_paths([target])
        safe_rmtree(dst_classes_dir)
        safe_mkdir(dst_classes_dir)
        sources = []
        dst_deps = Dependencies(dst_classes_dir)
        for source, classes in classes_by_source.items():
            src = os.path.join(target.target_base, source)
            dst_deps.add(src, classes)
            sources.append(src)  # Reuse src rather than re-joining the same path.
            for cls in classes:
                # Hard-link the class file into the per-target classes dir.
                dst = os.path.join(dst_classes_dir, cls)
                safe_mkdir(os.path.dirname(dst))
                os.link(os.path.join(src_classes_dir, cls), dst)
        dst_deps.save(dst_depfile)
        analysis_splits.append(SplitInfo(sources, dst_classes_dir, dst_analysis_file))
        portable_analysis_splits.append(SplitInfo(sources, dst_classes_dir, _portable(dst_analysis_file)))

    def do_split(src_analysis_file, splits):
        # Split the combined analysis file, then rebase each piece to its own classes dir.
        if os.path.exists(src_analysis_file):
            if self._zinc_utils.run_zinc_split(src_analysis_file, [(x.sources, x.dst_analysis_file) for x in splits]):
                # Exception raised in call form: `raise X, y` is Python-2-only syntax.
                raise TaskError('zinc failed to split analysis files %s from %s' %
                                (':'.join([x.dst_analysis_file for x in splits]), src_analysis_file))
            for split in splits:
                if os.path.exists(split.dst_analysis_file):
                    if self._zinc_utils.run_zinc_rebase(split.dst_analysis_file,
                                                       [(src_classes_dir, split.dst_classes_dir)]):
                        raise TaskError(
                            'In split_artifact: zinc failed to rebase analysis file %s' % split.dst_analysis_file)

    # Now rebase the newly created analysis file(s) to reflect the split classes dirs.
    do_split(src_analysis_file, analysis_splits)
    do_split(_portable(src_analysis_file), portable_analysis_splits)
def _compile(self, versioned_target_set, classpath, upstream_analysis_files):
    """Actually compile some targets.

    May be invoked concurrently on independent target sets.

    Postcondition: The individual targets in versioned_target_set are up-to-date, as if each
    were compiled individually.

    :param versioned_target_set: the partition of targets to compile together.
    :param classpath: list of classpath entries for the compiler.
    :param upstream_analysis_files: zinc analysis files from upstream compilations.
    Raises TaskError on compile failure or analysis-relativization failure.
    """
    # Note: We actually compile all the targets in the set in a single zinc call, because
    # compiler invocation overhead is high, but this fact is not exposed outside this method.
    classes_dir, depfile, analysis_file = self._output_paths(versioned_target_set.targets)
    safe_mkdir(classes_dir)

    # Get anything we have from previous builds.
    self._merge_artifact(versioned_target_set)

    # Compute the sources we need to compile.
    sources_by_target = ScalaCompile._calculate_sources(versioned_target_set.targets)

    if sources_by_target:
        sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
        if not sources:
            self.context.log.warn('Skipping scala compile for targets with no sources:\n %s' %
                                  '\n '.join(str(t) for t in sources_by_target.keys()))
        else:
            # Invoke the compiler.
            self.context.log.info('Compiling targets %s' % versioned_target_set.targets)
            if self._zinc_utils.compile(classpath, sources, classes_dir, analysis_file,
                                        upstream_analysis_files, depfile):
                raise TaskError('Compile failed.')

            # Read in the deps we just created.
            self.context.log.debug('Reading dependencies from ' + depfile)
            deps = Dependencies(classes_dir)
            deps.load(depfile)

            # Split the artifact into per-target artifacts.
            self._split_artifact(deps, versioned_target_set)

            # Write to artifact cache, if needed.
            for vt in versioned_target_set.versioned_targets:
                vt_classes_dir, vt_depfile, vt_analysis_file = self._output_paths(vt.targets)
                vt_portable_analysis_file = _portable(vt_analysis_file)
                if self._artifact_cache and self.context.options.write_to_artifact_cache:
                    # Relativize the analysis.
                    # TODO: Relativize before splitting? This will require changes to Zinc, which currently
                    # eliminates paths it doesn't recognize (including our placeholders) when splitting.
                    if os.path.exists(vt_analysis_file) and \
                       self._zinc_utils.relativize_analysis_file(vt_analysis_file, vt_portable_analysis_file):
                        raise TaskError('Zinc failed to relativize analysis file: %s' % vt_analysis_file)
                    # Write the per-target artifacts to the cache.
                    artifacts = [vt_classes_dir, vt_depfile, vt_portable_analysis_file]
                    self.update_artifact_cache(vt, artifacts)
                else:
                    safe_rmtree(vt_portable_analysis_file)  # Don't leave cruft lying around.
def execute(self, targets):
    """Compile invalid java target partitions, update products, and write apt processor info."""
    java_targets = [t for t in targets if t.has_sources(".java")]
    if not java_targets:
        return

    # Get the exclusives group for the targets to compile.
    # Group guarantees that they'll be a single exclusives key for them.
    egroups = self.context.products.get_data("exclusives_groups")
    group_id = egroups.get_group_key_for_target(java_targets[0])

    # Add classes and resource dirs to the classpath for us and for downstream tasks.
    for conf in self._confs:
        egroups.update_compatible_classpaths(group_id, [(conf, self._classes_dir)])
        egroups.update_compatible_classpaths(group_id, [(conf, self._resources_dir)])

    # Get the classpath generated by upstream JVM tasks (including previous calls to execute()).
    cp = egroups.get_classpath_for_group(group_id)

    with self.invalidated(
        java_targets, invalidate_dependents=True, partition_size_hint=self._partition_size_hint
    ) as invalidation_check:
        if not self.dry_run:
            for vts in invalidation_check.invalid_vts_partitioned:
                # Compile, using partitions for efficiency.
                sources_by_target = self._process_target_partition(vts, cp)
                # TODO: Check for missing dependencies. See ScalaCompile for an example.
                # Will require figuring out what the actual deps of a class file are.
                vts.update()
                if self.artifact_cache_writes_enabled():
                    self._write_to_artifact_cache(vts, sources_by_target)

            # Provide the target->class and source->class mappings to downstream tasks if needed.
            if self.context.products.isrequired("classes"):
                if os.path.exists(self._depfile):
                    sources_by_target = self._compute_sources_by_target(java_targets)
                    deps = Dependencies(self._classes_dir)
                    deps.load(self._depfile)
                    self._add_all_products_to_genmap(sources_by_target, deps.classes_by_source)

            # Produce a monolithic apt processor service info file for further compilation rounds
            # and the unit test classpath.
            all_processors = set()
            for target in java_targets:
                if target.is_apt and target.processors:
                    all_processors.update(target.processors)
            processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
            if os.path.exists(processor_info_file):
                # Preserve processors recorded by earlier compilation rounds.
                with safe_open(processor_info_file, "r") as f:
                    for processor in f:
                        all_processors.add(processor.strip())
            self.write_processor_info(processor_info_file, all_processors)
def _merge_artifact(self, versioned_target_set):
    """Merges artifacts representing the individual targets in a VersionedTargetSet into one artifact for that set.

    Creates an output classes dir, depfile and analysis file for the VersionedTargetSet.
    Note that the merged artifact may be incomplete (e.g., if we have no previous artifacts for some of the
    individual targets). That's OK: We run this right before we invoke zinc, which will fill in what's missing.
    This method is not required for correctness, only for efficiency: it can prevent zinc from doing superfluous work.

    NOTE: This method is reentrant.
    """
    if len(versioned_target_set.targets) <= 1:
        return  # Nothing to do.

    with temporary_dir() as tmpdir:
        dst_classes_dir, dst_depfile, dst_analysis_file = self._output_paths(versioned_target_set.targets)
        safe_rmtree(dst_classes_dir)
        safe_mkdir(dst_classes_dir)
        src_analysis_files = []

        # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
        dst_deps = Dependencies(dst_classes_dir)

        for target in versioned_target_set.targets:
            src_classes_dir, src_depfile, src_analysis_file = self._output_paths([target])
            if os.path.exists(src_depfile):
                src_deps = Dependencies(src_classes_dir)
                src_deps.load(src_depfile)
                dst_deps.merge(src_deps)

                classes_by_source = src_deps.findclasses([target]).get(target, {})
                for source, classes in classes_by_source.items():
                    for cls in classes:
                        src = os.path.join(src_classes_dir, cls)
                        dst = os.path.join(dst_classes_dir, cls)
                        # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
                        # it's missing and rebuild it.
                        # dst may already exist if we have overlapping targets. It's not a good idea
                        # to have those, but until we enforce it, we must allow it here.
                        if os.path.exists(src) and not os.path.exists(dst):
                            # Copy the class file.
                            safe_mkdir(os.path.dirname(dst))
                            os.link(src, dst)

                # Rebase a copy of the per-target analysis files to reflect the merged classes dir.
                if os.path.exists(src_analysis_file):
                    src_analysis_file_tmp = \
                        os.path.join(tmpdir, os.path.relpath(src_analysis_file, self._analysis_files_base))
                    shutil.copyfile(src_analysis_file, src_analysis_file_tmp)
                    src_analysis_files.append(src_analysis_file_tmp)
                    if self._zinc_utils.run_zinc_rebase(src_analysis_file_tmp, [(src_classes_dir, dst_classes_dir)]):
                        # Rebase failure is non-fatal here: we just lose the incremental-build benefit.
                        self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. '\
                                              'Target may require a full rebuild.' %\
                                              src_analysis_file_tmp)

        dst_deps.save(dst_depfile)

        if self._zinc_utils.run_zinc_merge(src_analysis_files, dst_analysis_file):
            # Merge failure is likewise non-fatal; zinc rebuilds from scratch if needed.
            self.context.log.warn('zinc failed to merge analysis files %s to %s. '\
                                  'Target may require a full rebuild.' %\
                                  (':'.join(src_analysis_files), dst_analysis_file))
def post_process_cached_vts(cached_vts):
    # NOTE(review): this function references `self` but takes no `self` parameter —
    # presumably it was defined as a closure inside a method of the task class;
    # confirm its original scope before using it at this level.
    # Merge the cached analyses into the existing global one.
    if cached_vts:
        with self.context.new_workunit(name='merge-dependencies'):
            global_deps = Dependencies(self._classes_dir)
            if os.path.exists(self._depfile):
                global_deps.load(self._depfile)
            for vt in cached_vts:
                for target in vt.targets:
                    depfile = JavaCompile.create_depfile_path(self._depfile_tmpdir, [target])
                    if os.path.exists(depfile):
                        deps = Dependencies(self._classes_dir)
                        deps.load(depfile)
                        global_deps.merge(deps)
            global_deps.save(self._depfile)
def execute_single_compilation(self, java_targets, cp):
    """Compile the given java targets against classpath cp, recording deps in a depfile.

    :param java_targets: targets whose changed members are (re)compiled.
    :param cp: classpath as (conf, jar) pairs; only confs in self._confs are used.
    Raises TaskError if the compiler returns a non-zero result.
    """
    self.context.log.info('Compiling targets %s' % str(java_targets))

    # Compute the id of this compilation. We try to make it human-readable.
    if len(java_targets) == 1:
        compilation_id = java_targets[0].id
    else:
        compilation_id = self.context.identify(java_targets)

    if self._flatten:
        # If compiling in flat mode, we let all dependencies aggregate into a single well-known depfile. This
        # allows us to build different targets in different invocations without losing dependency information
        # from any of them.
        depfile = os.path.join(self._depfile_dir, 'dependencies.flat')
    else:
        # If not in flat mode, we let each compilation have its own depfile, to avoid quadratic behavior (each
        # compilation will read in the entire depfile, add its stuff to it and write it out again).
        depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'

    with self.changed(java_targets, invalidate_dependants=True) as changed:
        sources_by_target, processors, fingerprint = self.calculate_sources(changed)
        if sources_by_target:
            sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
            if not sources:
                self.context.log.warn('Skipping java compile for targets with no sources:\n %s' %
                                      '\n '.join(str(t) for t in sources_by_target.keys()))
            else:
                classpath = [jar for conf, jar in cp if conf in self._confs]
                result = self.compile(classpath, sources, fingerprint, depfile)
                if result != 0:
                    default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
                    raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))

                if processors:
                    # Produce a monolithic apt processor service info file for further compilation rounds
                    # and the unit test classpath.
                    processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
                    if os.path.exists(processor_info_file):
                        # Preserve processors recorded by earlier rounds.
                        with safe_open(processor_info_file, 'r') as f:
                            for processor in f:
                                processors.add(processor.strip())
                    self.write_processor_info(processor_info_file, processors)

        # Read in the deps created either just now or by a previous compiler run on these targets.
        deps = Dependencies(self._classes_dir)
        deps.load(depfile)
        self._deps.merge(deps)
def execute_single_compilation(self, versioned_targets, cp):
    """Compile a VersionedTargetSet if invalid, then merge its deps into the global map.

    :param versioned_targets: VersionedTargetSet; compilation is skipped when it is valid.
    :param cp: classpath as (conf, jar) pairs; only confs in self._confs are used.
    Raises TaskError if the compiler returns a non-zero result.
    """
    compilation_id = Target.maybe_readable_identify(versioned_targets.targets)

    # TODO: Use the artifact cache. In flat mode we may want to look for the artifact for all targets,
    # not just the invalid ones, as it might be more likely to be present. Or we could look for both.

    if self._flatten:
        # If compiling in flat mode, we let all dependencies aggregate into a single well-known depfile. This
        # allows us to build different targets in different invocations without losing dependency information
        # from any of them.
        depfile = os.path.join(self._depfile_dir, 'dependencies.flat')
    else:
        # If not in flat mode, we let each compilation have its own depfile, to avoid quadratic behavior (each
        # compilation will read in the entire depfile, add its stuff to it and write it out again).
        depfile = os.path.join(self._depfile_dir, compilation_id) + '.dependencies'

    if not versioned_targets.valid:
        self.context.log.info('Compiling targets %s' % str(versioned_targets.targets))
        sources_by_target, processors, fingerprint = self.calculate_sources(versioned_targets.targets)
        if sources_by_target:
            sources = reduce(lambda all, sources: all.union(sources), sources_by_target.values())
            if not sources:
                touch(depfile)  # Create an empty depfile, since downstream code may assume that one exists.
                self.context.log.warn('Skipping java compile for targets with no sources:\n %s' %
                                      '\n '.join(str(t) for t in sources_by_target.keys()))
            else:
                classpath = [jar for conf, jar in cp if conf in self._confs]
                result = self.compile(classpath, sources, fingerprint, depfile)
                if result != 0:
                    default_message = 'Unexpected error - %s returned %d' % (_JMAKE_MAIN, result)
                    raise TaskError(_JMAKE_ERROR_CODES.get(result, default_message))

                if processors:
                    # Produce a monolithic apt processor service info file for further compilation rounds
                    # and the unit test classpath.
                    processor_info_file = os.path.join(self._classes_dir, _PROCESSOR_INFO_FILE)
                    if os.path.exists(processor_info_file):
                        # Preserve processors recorded by earlier rounds.
                        with safe_open(processor_info_file, 'r') as f:
                            for processor in f:
                                processors.add(processor.strip())
                    self.write_processor_info(processor_info_file, processors)

    # Read in the deps created either just now or by a previous compiler run on these targets.
    deps = Dependencies(self._classes_dir)
    deps.load(depfile)
    self._deps.merge(deps)
def merge_artifact(self, versioned_target_set):
    """Merge per-target artifacts into a single artifact for the whole target set.

    Hard-links class files into a combined output dir, merges depfiles, and uses zinc
    to rebase and merge the per-target analysis caches. A partial merge is acceptable:
    zinc fills in whatever is missing on the next compile.
    """
    if len(versioned_target_set.targets) <= 1:
        return  # A single target needs no merging.

    with temporary_dir() as tmpdir:
        dst_output_dir, dst_depfile, dst_analysis_cache = self.create_output_paths(versioned_target_set.targets)
        safe_rmtree(dst_output_dir)
        safe_mkdir(dst_output_dir)
        src_analysis_caches = []

        # TODO: Do we actually need to merge deps? Zinc will stomp them anyway on success.
        dst_deps = Dependencies(dst_output_dir)

        for target in versioned_target_set.targets:
            src_output_dir, src_depfile, src_analysis_cache = self.create_output_paths([target])
            if os.path.exists(src_depfile):
                src_deps = Dependencies(src_output_dir)
                src_deps.load(src_depfile)
                dst_deps.merge(src_deps)

                classes_by_source = src_deps.findclasses([target]).get(target, {})
                for source, classes in classes_by_source.items():
                    for cls in classes:
                        src = os.path.join(src_output_dir, cls)
                        dst = os.path.join(dst_output_dir, cls)
                        # src may not exist if we aborted a build in the middle. That's OK: zinc will notice that
                        # it's missing and rebuild it.
                        # dst may already exist if we have overlapping targets. It's not a good idea
                        # to have those, but until we enforce it, we must allow it here.
                        if os.path.exists(src) and not os.path.exists(dst):
                            # Copy the class file.
                            safe_mkdir(os.path.dirname(dst))
                            os.link(src, dst)

                # Rebase a copy of the per-target analysis files prior to merging.
                if os.path.exists(src_analysis_cache):
                    src_analysis_cache_tmp = \
                        os.path.join(tmpdir, os.path.relpath(src_analysis_cache, self._analysis_cache_dir))
                    shutil.copyfile(src_analysis_cache, src_analysis_cache_tmp)
                    src_analysis_caches.append(src_analysis_cache_tmp)
                    if self._zinc_utils.run_zinc_rebase(cache=src_analysis_cache_tmp, rebasings=[(src_output_dir, dst_output_dir)]):
                        # Rebase failure is non-fatal; we just lose incremental-build benefit.
                        self.context.log.warn('In merge_artifact: zinc failed to rebase analysis file %s. ' \
                                              'Target may require a full rebuild.' % \
                                              src_analysis_cache_tmp)

        dst_deps.save(dst_depfile)

        if self._zinc_utils.run_zinc_merge(src_caches=src_analysis_caches, dst_cache=dst_analysis_cache):
            # Merge failure is likewise non-fatal.
            self.context.log.warn('zinc failed to merge analysis files %s to %s. ' \
                                  'Target may require a full rebuild.' % \
                                  (':'.join(src_analysis_caches), dst_analysis_cache))
# (stray page footer from the original source listing; Chinese for "please leave a comment")