diff --git a/eng/pipelines/coreclr/superpmi-diffs.yml b/eng/pipelines/coreclr/superpmi-diffs.yml
index 45e402c5cd1da9..b057a3471cc181 100644
--- a/eng/pipelines/coreclr/superpmi-diffs.yml
+++ b/eng/pipelines/coreclr/superpmi-diffs.yml
@@ -122,3 +122,19 @@ extends:
diffType: tpdiff
baseJitOptions: ${{ parameters.spmi_jitoptions_base }}
diffJitOptions: ${{ parameters.spmi_jitoptions_diff }}
+
+ - template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/coreclr/templates/superpmi-diffs-job.yml
+ buildConfig: checked
+ platforms:
+ - windows_x64
+ - windows_x86
+ - linux_x64
+ helixQueueGroup: superpmi-diffs
+ helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
+ jobParameters:
+ condition: not(eq(stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_jiteeversionguid.containsChange'], true))
+ diffType: metricdiff
+ baseJitOptions: ${{ parameters.spmi_jitoptions_base }}
+ diffJitOptions: ${{ parameters.spmi_jitoptions_diff }}
diff --git a/eng/pipelines/coreclr/templates/run-superpmi-diffs-job.yml b/eng/pipelines/coreclr/templates/run-superpmi-diffs-job.yml
index 19a6f7778047a6..710c1d39734349 100644
--- a/eng/pipelines/coreclr/templates/run-superpmi-diffs-job.yml
+++ b/eng/pipelines/coreclr/templates/run-superpmi-diffs-job.yml
@@ -16,7 +16,7 @@ parameters:
enableTelemetry: false # optional -- enable for telemetry
liveLibrariesBuildConfig: '' # optional -- live-live libraries configuration to use for the run
helixQueues: '' # required -- Helix queues
- diffType: 'asmdiffs' # required -- 'asmdiffs', 'tpdiff', or 'all'
+ diffType: 'asmdiffs' # required -- 'asmdiffs', 'tpdiff', 'metricdiff', or 'all'
baseJitOptions: '' # e.g. JitStressModeNames=STRESS_PHYSICAL_PROMOTION;JitFullyInt=1
diffJitOptions: ''
@@ -84,6 +84,9 @@ jobs:
- ${{ if eq(parameters.diffType, 'tpdiff') }}:
- name: SetupScriptDirs
value: '-release_directory $(releaseProductRootFolderPath)'
+ - ${{ if eq(parameters.diffType, 'metricdiff') }}:
+ - name: SetupScriptDirs
+ value: '-release_directory $(releaseProductRootFolderPath)'
- ${{ if eq(parameters.diffType, 'all') }}:
- name: SetupScriptDirs
value: '-checked_directory $(buildProductRootFolderPath) -release_directory $(releaseProductRootFolderPath)'
diff --git a/eng/pipelines/coreclr/templates/superpmi-diffs-job.yml b/eng/pipelines/coreclr/templates/superpmi-diffs-job.yml
index dd298abd395aff..2f029d5d98ebba 100644
--- a/eng/pipelines/coreclr/templates/superpmi-diffs-job.yml
+++ b/eng/pipelines/coreclr/templates/superpmi-diffs-job.yml
@@ -9,7 +9,7 @@ parameters:
variables: {}
helixQueues: ''
runJobTemplate: '/eng/pipelines/coreclr/templates/run-superpmi-diffs-job.yml'
- diffType: 'asmdiffs' # required -- 'asmdiffs', 'tpdiff', or 'all'
+ diffType: 'asmdiffs' # required -- 'asmdiffs', 'tpdiff', 'metricdiff', or 'all'
baseJitOptions: '' # e.g. JitStressModeNames=STRESS_PHYSICAL_PROMOTION;JitFullyInt=1
diffJitOptions: ''
@@ -32,14 +32,14 @@ jobs:
dependsOn:
- ${{ if in(parameters.diffType, 'asmdiffs', 'all') }}:
- 'build_${{ parameters.osGroup }}${{ parameters.osSubgroup }}_${{ parameters.archType }}_checked_'
- - ${{ if in(parameters.diffType, 'tpdiff', 'all') }}:
+ - ${{ if in(parameters.diffType, 'tpdiff', 'metricdiff', 'all') }}:
- 'build_${{ parameters.osGroup }}${{ parameters.osSubgroup }}_${{ parameters.archType }}_release_'
variables:
- ${{ each variable in parameters.variables }}:
- ${{insert}}: ${{ variable }}
- - ${{ if in(parameters.diffType, 'tpdiff', 'all') }}:
+ - ${{ if in(parameters.diffType, 'tpdiff', 'metricdiff', 'all') }}:
- ${{ if eq(parameters.osGroup, 'windows') }}:
- name: releaseProductRootFolderPath
value: '$(Build.SourcesDirectory)\artifacts\bin\coreclr\$(osGroup).$(archType).Release'
@@ -59,7 +59,7 @@ jobs:
displayName: 'JIT checked build'
cleanUnpackFolder: false
- - ${{ if in(parameters.diffType, 'tpdiff', 'all') }}:
+ - ${{ if in(parameters.diffType, 'tpdiff', 'metricdiff', 'all') }}:
# Download jit release builds
- template: /eng/pipelines/common/download-artifact-step.yml
parameters:
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index df6aa03d76f80a..1e2f05fb94797d 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -6590,7 +6590,12 @@ void Compiler::compCompileFinish()
#endif
}
}
-#endif // DEBUG
+#else // DEBUG
+ if (JitConfig.JitReportMetrics())
+ {
+ Metrics.report(this);
+ }
+#endif // !DEBUG
}
#ifdef PSEUDORANDOM_NOP_INSERTION
diff --git a/src/coreclr/scripts/superpmi-diffs.proj b/src/coreclr/scripts/superpmi-diffs.proj
index 1dbed048b540ce..886fe51f734f12 100644
--- a/src/coreclr/scripts/superpmi-diffs.proj
+++ b/src/coreclr/scripts/superpmi-diffs.proj
@@ -79,7 +79,8 @@
$(WorkItemTimeout)
superpmi_download_%(Identity).log;superpmi_asmdiffs_%(Identity).log;superpmi_asmdiffs_summary_%(Identity).json;Asmdiffs_%(Identity).zip
superpmi_download_%(Identity).log;superpmi_tpdiff_%(Identity).log;superpmi_tpdiff_summary_%(Identity).json
- superpmi_download_%(Identity).log;superpmi_asmdiffs_%(Identity).log;superpmi_asmdiffs_summary_%(Identity).json;Asmdiffs_%(Identity).zip;superpmi_tpdiff_%(Identity).log;superpmi_tpdiff_summary_%(Identity).json
+ superpmi_download_%(Identity).log;superpmi_metricdiff_%(Identity).log;superpmi_metricdiff_summary_%(Identity).json
+ superpmi_download_%(Identity).log;superpmi_asmdiffs_%(Identity).log;superpmi_asmdiffs_summary_%(Identity).json;Asmdiffs_%(Identity).zip;superpmi_tpdiff_%(Identity).log;superpmi_tpdiff_summary_%(Identity).json;superpmi_metricdiff_%(Identity).log;superpmi_metricdiff_summary_%(Identity).json
diff --git a/src/coreclr/scripts/superpmi.py b/src/coreclr/scripts/superpmi.py
index da10ef5dc50a6f..057a130765a2b3 100644
--- a/src/coreclr/scripts/superpmi.py
+++ b/src/coreclr/scripts/superpmi.py
@@ -103,6 +103,10 @@
Measure throughput using PIN on one or more collections.
"""
+metricdiff_description = """\
+Measure JIT metric differences (for all metrics in the -details CSV, such as memory allocations) on one or more collections.
+"""
+
upload_description = """\
Upload a collection to SuperPMI Azure storage.
"""
@@ -131,7 +135,7 @@
Summarize multiple .json summaries created by --summary_as_json into a single .md file.
"""
-summary_type_help = "Type of summaries: asmdiffs or tpdiff"
+summary_type_help = "Type of summaries: asmdiffs, tpdiff, or metricdiff"
summaries_help = "List of .json files to summarize"
@@ -376,6 +380,10 @@ def add_core_root_arguments(parser, build_type_default, build_type_help):
throughput_parser = subparsers.add_parser("tpdiff", description=throughput_description, parents=[target_parser, superpmi_common_parser, replay_common_parser, base_diff_parser])
add_core_root_arguments(throughput_parser, "Release", throughput_build_type_help)
+# subparser for metricdiff
+metricdiff_parser = subparsers.add_parser("metricdiff", description=metricdiff_description, parents=[target_parser, superpmi_common_parser, replay_common_parser, base_diff_parser])
+add_core_root_arguments(metricdiff_parser, "Release", throughput_build_type_help)
+
# subparser for upload
upload_parser = subparsers.add_parser("upload", description=upload_description, parents=[core_root_parser, target_parser])
@@ -2587,7 +2595,7 @@ def create_exception():
if os.path.isfile(overall_md_summary_file):
os.remove(overall_md_summary_file)
- with open(overall_md_summary_file, "w") as write_fh:
+ with open(overall_md_summary_file, "w", encoding="utf-8") as write_fh:
write_asmdiffs_markdown_summary(write_fh, base_jit_options, diff_jit_options, summarizable_asm_diffs, True)
logging.info(" Summary Markdown file: %s", overall_md_summary_file)
@@ -2595,7 +2603,7 @@ def create_exception():
if os.path.isfile(short_md_summary_file):
os.remove(short_md_summary_file)
- with open(short_md_summary_file, "w") as write_fh:
+ with open(short_md_summary_file, "w", encoding="utf-8") as write_fh:
write_asmdiffs_markdown_summary(write_fh, base_jit_options, diff_jit_options, summarizable_asm_diffs, False)
logging.info(" Short Summary Markdown file: %s", short_md_summary_file)
@@ -3185,7 +3193,7 @@ def replay_with_throughput_diff(self):
if os.path.isfile(overall_md_summary_file):
os.remove(overall_md_summary_file)
- with open(overall_md_summary_file, "w") as write_fh:
+ with open(overall_md_summary_file, "w", encoding="utf-8") as write_fh:
write_tpdiff_markdown_summary(write_fh, base_jit_build_string_decoded, diff_jit_build_string_decoded, base_jit_options, diff_jit_options, tp_diffs, True)
logging.info(" Summary Markdown file: %s", overall_md_summary_file)
@@ -3194,7 +3202,7 @@ def replay_with_throughput_diff(self):
if os.path.isfile(short_md_summary_file):
os.remove(short_md_summary_file)
- with open(short_md_summary_file, "w") as write_fh:
+ with open(short_md_summary_file, "w", encoding="utf-8") as write_fh:
write_tpdiff_markdown_summary(write_fh, base_jit_build_string_decoded, diff_jit_build_string_decoded, base_jit_options, diff_jit_options, tp_diffs, False)
logging.info(" Short Summary Markdown file: %s", short_md_summary_file)
@@ -3284,6 +3292,338 @@ def write_pivot_section(row):
compute_and_format_pct(base_instructions, diff_instructions)))
write_fh.write("\n")
+################################################################################
+# SuperPMI Metric Diff
+################################################################################
+
+
+class SuperPMIReplayMetricDiff:
+ """ SuperPMI Replay metric diff class
+
+ Notes:
+ The object is responsible for replaying the mch files given to the
+ instance of the class and measuring JIT metric differences.
+ """
+
+ def __init__(self, coreclr_args, mch_files, base_jit_path, diff_jit_path):
+ """ Constructor
+
+ Args:
+ coreclr_args (CoreclrArguments) : parsed args
+ mch_files (list) : list of MCH files to replay
+ base_jit_path (str) : path to baseline clrjit
+ diff_jit_path (str) : path to diff clrjit
+
+ """
+
+ self.base_jit_path = base_jit_path
+ self.diff_jit_path = diff_jit_path
+ self.mch_files = mch_files
+ self.superpmi_path = determine_superpmi_tool_path(coreclr_args)
+
+ self.coreclr_args = coreclr_args
+ self.diff_mcl_contents = None
+
+ ############################################################################
+ # Instance Methods
+ ############################################################################
+
+ def replay_with_metric_diff(self):
+ """ Replay SuperPMI collections measuring JIT metric differences.
+
+ Returns:
+ (bool) True on success; False otherwise
+ """
+
+ target_flags = []
+ if self.coreclr_args.arch != self.coreclr_args.target_arch:
+ target_flags += [ "-target", self.coreclr_args.target_arch ]
+
+ base_option_flags = []
+ if self.coreclr_args.base_jit_option:
+ for o in self.coreclr_args.base_jit_option:
+ base_option_flags += "-jitoption", o
+ if self.coreclr_args.jitoption:
+ for o in self.coreclr_args.jitoption:
+ base_option_flags += "-jitoption", o
+
+ diff_option_flags = []
+ if self.coreclr_args.diff_jit_option:
+ for o in self.coreclr_args.diff_jit_option:
+ diff_option_flags += "-jit2option", o
+ if self.coreclr_args.jitoption:
+ for o in self.coreclr_args.jitoption:
+ diff_option_flags += "-jit2option", o
+
+ metric_diffs = []
+
+ with TempDir(None, self.coreclr_args.skip_cleanup) as temp_location:
+ logging.debug("")
+ logging.debug("Temp Location: %s", temp_location)
+ logging.debug("")
+
+ for mch_file in self.mch_files:
+
+ logging.info("Running metric diff of %s", mch_file)
+
+ if self.coreclr_args.details:
+ details_info_file = self.coreclr_args.details
+ else:
+ details_info_file = os.path.join(temp_location, os.path.basename(mch_file) + "_details.csv")
+
+ flags = [
+ "-applyDiff",
+ "-v", "ewi",
+ "-details", details_info_file,
+ ]
+ flags += target_flags
+ flags += base_option_flags
+ flags += diff_option_flags
+
+ if not self.coreclr_args.sequential and not self.coreclr_args.compile:
+ if not self.coreclr_args.parallelism:
+ flags += [ "-p" ]
+ else:
+ flags += [ "-p", self.coreclr_args.parallelism ]
+
+ if self.coreclr_args.break_on_assert:
+ flags += [ "-boa" ]
+
+ if self.coreclr_args.break_on_error:
+ flags += [ "-boe" ]
+
+ if self.coreclr_args.compile:
+ flags += [ "-c", self.coreclr_args.compile ]
+
+ if self.coreclr_args.spmi_log_file is not None:
+ flags += [ "-w", self.coreclr_args.spmi_log_file ]
+
+ if self.coreclr_args.error_limit is not None:
+ flags += ["-failureLimit", self.coreclr_args.error_limit]
+
+ with ChangeDir(self.coreclr_args.core_root):
+ command = [self.superpmi_path] + flags + [self.base_jit_path, self.diff_jit_path, mch_file]
+ return_code = run_and_log(command)
+
+ print_superpmi_error_result(return_code, self.coreclr_args)
+
+ metrics_list = None
+ base_metrics = None
+ diff_metrics = None
+ try:
+ if os.path.isfile(details_info_file):
+ (metrics_list, base_metrics, diff_metrics) = aggregate_metric_diff_metrics(details_info_file)
+ else:
+ logging.warning("Details info file '%s' not found; skipping metric diff aggregation", details_info_file)
+ except Exception as ex:
+ logging.warning("Failed to aggregate metric diff metrics from '%s': %s", details_info_file, ex)
+
+ if base_metrics is not None and diff_metrics is not None:
+ if return_code == 0:
+ logging.info("Clean SuperPMI diff ({} contexts processed)".format(base_metrics["Successful compiles"]))
+ elif return_code == 3:
+ logging.warning("SuperPMI encountered missing data for {} contexts".format(base_metrics["Missing compiles"]))
+
+ has_any_data = any(base_metrics[m] != 0 or diff_metrics[m] != 0 for m in metrics_list)
+ if has_any_data:
+ for metric in metrics_list:
+ base_val = base_metrics[metric]
+ diff_val = diff_metrics[metric]
+ if base_val != 0 or diff_val != 0:
+ delta = diff_val - base_val
+ fmt = "{:,.2f}" if isinstance(base_val, float) else "{:,d}"
+ if base_val != 0:
+ logging.info(" {}: base={}, diff={}, delta={} ({:.2%})".format(
+ metric, fmt.format(base_val), fmt.format(diff_val), fmt.format(delta), delta / base_val))
+ else:
+ logging.info(" {}: base={}, diff={}, delta={}".format(
+ metric, fmt.format(base_val), fmt.format(diff_val), fmt.format(delta)))
+ metric_diffs.append((os.path.basename(mch_file), metrics_list, base_metrics, diff_metrics))
+ else:
+ logging.warning("One compilation failed to produce any results")
+ else:
+ logging.warning("No metric files present?")
+
+ ################################################################################################ end of for mch_file in self.mch_files
+
+ # Report the overall results summary of the metricdiff run
+
+ logging.info("Metric diff summary:")
+
+ # Construct overall summary files.
+
+ if self.coreclr_args.summary_as_json:
+ if not os.path.isdir(self.coreclr_args.spmi_location):
+ os.makedirs(self.coreclr_args.spmi_location)
+
+ (base_jit_options, diff_jit_options) = get_base_diff_jit_options(self.coreclr_args)
+
+ overall_json_summary_file = create_unique_file_name(self.coreclr_args.spmi_location, "metricdiff_summary", "json")
+ if os.path.isfile(overall_json_summary_file):
+ os.remove(overall_json_summary_file)
+
+            with open(overall_json_summary_file, "w", encoding="utf-8") as write_fh:
+ json.dump((base_jit_options, diff_jit_options, metric_diffs), write_fh)
+ logging.info(" Summary JSON file: %s", overall_json_summary_file)
+ elif len(metric_diffs) > 0:
+ if not os.path.isdir(self.coreclr_args.spmi_location):
+ os.makedirs(self.coreclr_args.spmi_location)
+
+ (base_jit_options, diff_jit_options) = get_base_diff_jit_options(self.coreclr_args)
+
+ overall_md_summary_file = create_unique_file_name(self.coreclr_args.spmi_location, "metricdiff_summary", "md")
+
+ if os.path.isfile(overall_md_summary_file):
+ os.remove(overall_md_summary_file)
+
+ with open(overall_md_summary_file, "w", encoding="utf-8") as write_fh:
+ write_metricdiff_markdown_summary(write_fh, base_jit_options, diff_jit_options, metric_diffs, True)
+ logging.info(" Summary Markdown file: %s", overall_md_summary_file)
+
+ short_md_summary_file = create_unique_file_name(self.coreclr_args.spmi_location, "metricdiff_short_summary", "md")
+
+ if os.path.isfile(short_md_summary_file):
+ os.remove(short_md_summary_file)
+
+ with open(short_md_summary_file, "w", encoding="utf-8") as write_fh:
+ write_metricdiff_markdown_summary(write_fh, base_jit_options, diff_jit_options, metric_diffs, False)
+ logging.info(" Short Summary Markdown file: %s", short_md_summary_file)
+ return True
+ ################################################################################################ end of replay_with_metric_diff()
+
+
+def discover_metrics_from_csv(details_file):
+ """ Read the CSV header and return all metric column names (those with 'Base X' / 'Diff X' pairs). """
+ with open(details_file, encoding="utf-8") as f:
+ reader = csv.DictReader(f)
+ headers = reader.fieldnames
+ metrics = []
+ for h in headers:
+ if h.startswith("Base ") and ("Diff " + h[5:]) in headers:
+ name = h[5:]
+ if name not in ("result", "instructions"):
+ metrics.append(name)
+ return metrics
+
+def aggregate_metric_diff_metrics(details_file):
+ """ Given the path to a CSV details file output by SPMI for a diff, aggregate all metrics.
+
+ Returns:
+ (metrics_list, base_metrics, diff_metrics)
+ """
+
+ metrics_list = discover_metrics_from_csv(details_file)
+ base_totals = {m: 0 for m in metrics_list}
+ diff_totals = {m: 0 for m in metrics_list}
+ base_compile_stats = {"Successful compiles": 0, "Missing compiles": 0, "Failing compiles": 0}
+ diff_compile_stats = {"Successful compiles": 0, "Missing compiles": 0, "Failing compiles": 0}
+
+ for row in read_csv(details_file):
+ base_result = row["Base result"]
+ diff_result = row["Diff result"]
+
+ if base_result == "Success":
+ base_compile_stats["Successful compiles"] += 1
+ elif base_result == "Miss":
+ base_compile_stats["Missing compiles"] += 1
+ else:
+ base_compile_stats["Failing compiles"] += 1
+
+ if diff_result == "Success":
+ diff_compile_stats["Successful compiles"] += 1
+ elif diff_result == "Miss":
+ diff_compile_stats["Missing compiles"] += 1
+ else:
+ diff_compile_stats["Failing compiles"] += 1
+
+ if base_result == "Success" and diff_result == "Success":
+ for metric in metrics_list:
+ base_val = row["Base " + metric]
+ diff_val = row["Diff " + metric]
+ base_totals[metric] += float(base_val) if '.' in base_val else int(base_val)
+ diff_totals[metric] += float(diff_val) if '.' in diff_val else int(diff_val)
+
+ base_totals.update(base_compile_stats)
+ diff_totals.update(diff_compile_stats)
+
+ return (metrics_list, base_totals, diff_totals)
+
+
+def write_metricdiff_markdown_summary(write_fh, base_jit_options, diff_jit_options, metric_diffs, include_details):
+
+ def fmt_val(v):
+ return "{:,.2f}".format(v) if isinstance(v, float) else "{:,d}".format(v)
+
+ write_jit_options(base_jit_options, diff_jit_options, write_fh)
+
+ # Collect the union of all metrics across all collections (preserving first-seen order)
+ all_metrics = list(dict.fromkeys(m for (_, metrics_list, _, _) in metric_diffs for m in metrics_list))
+
+ any_significant = False
+ metrics_with_diffs = set()
+ for metric in all_metrics:
+ significant_diffs = [(mch, base, diff) for (mch, _, base, diff) in metric_diffs
+ if metric in base and base[metric] != diff[metric]]
+ if not significant_diffs:
+ continue
+
+ any_significant = True
+ metrics_with_diffs.add(metric)
+ pcts = [compute_pct(base[metric], diff[metric]) for (_, base, diff) in significant_diffs
+ if base[metric] != 0]
+ all_base_zero = all(base[metric] == 0 for (_, base, _) in significant_diffs)
+ if all_base_zero:
+ all_positive = all(diff[metric] > 0 for (_, _, diff) in significant_diffs)
+ all_negative = all(diff[metric] < 0 for (_, _, diff) in significant_diffs)
+ if all_positive:
+ header = "{} (+\u221e)".format(metric)
+ elif all_negative:
+ header = "{} (-\u221e)".format(metric)
+ else:
+ header = "{} (\u00b1\u221e)".format(metric)
+ elif pcts:
+ min_pct_str = format_pct(min(pcts))
+ max_pct_str = format_pct(max(pcts))
+ if min_pct_str == max_pct_str:
+ header = "{} ({})".format(metric, min_pct_str)
+ else:
+ header = "{} ({} to {})".format(metric, min_pct_str, max_pct_str)
+ else:
+ header = metric
+
+ with DetailsSection(write_fh, header):
+ write_fh.write("|Collection|Base|Diff|PDIFF|\n")
+ write_fh.write("|---|--:|--:|--:|\n")
+ for mch_file, base, diff in significant_diffs:
+ write_fh.write("|{}|{}|{}|{}|\n".format(
+ mch_file, fmt_val(base[metric]), fmt_val(diff[metric]),
+ compute_and_format_pct(base[metric], diff[metric])))
+
+ if not any_significant:
+ if include_details:
+ write_fh.write("No significant metric differences found\n")
+
+ if include_details:
+ no_diff_metrics = [m for m in all_metrics if m not in metrics_with_diffs]
+ no_diff_rows_by_metric = []
+ for metric in no_diff_metrics:
+ rows = [(mch, base.get(metric, 0), diff.get(metric, 0)) for (mch, _, base, diff) in metric_diffs
+ if base.get(metric, 0) != 0 or diff.get(metric, 0) != 0]
+ if rows:
+ no_diff_rows_by_metric.append((metric, rows))
+
+ if no_diff_rows_by_metric:
+ write_fh.write("\n")
+ with DetailsSection(write_fh, "Metrics with no diffs"):
+ for metric, rows in no_diff_rows_by_metric:
+ with DetailsSection(write_fh, metric):
+ write_fh.write("|Collection|Base|Diff|PDIFF|\n")
+ write_fh.write("|---|--:|--:|--:|\n")
+ for mch_file, base_val, diff_val in rows:
+ write_fh.write("|{}|{}|{}|{}|\n".format(
+ mch_file, fmt_val(base_val), fmt_val(diff_val),
+ compute_and_format_pct(base_val, diff_val)))
+
################################################################################
# Argument handling helpers
################################################################################
@@ -4240,8 +4580,21 @@ def summarize_json_summaries(coreclr_args):
for file in coreclr_args.summaries:
logging.info(" {}".format(file))
- file_name_prefix = "diff" if coreclr_args.summary_type == "asmdiffs" else "tpdiff"
-
+ summary_type_to_prefix = {
+ "asmdiffs": "diff",
+ "metricdiff": "metricdiff",
+ "tpdiff": "tpdiff",
+ }
+
+ if coreclr_args.summary_type not in summary_type_to_prefix:
+ logging.error(
+ "Invalid summary_type '%s'. Valid values are: %s",
+ coreclr_args.summary_type,
+ ", ".join(sorted(summary_type_to_prefix.keys())),
+ )
+ raise ValueError(f"Invalid summary_type: {coreclr_args.summary_type}")
+
+ file_name_prefix = summary_type_to_prefix[coreclr_args.summary_type]
if coreclr_args.output_long_summary_path:
overall_md_summary_file = coreclr_args.output_long_summary_path
else:
@@ -4266,13 +4619,36 @@ def summarize_json_summaries(coreclr_args):
# Sort by collection name
summarizable_asm_diffs.sort(key=lambda t: t[0])
- with open(overall_md_summary_file, "w") as write_fh:
+ with open(overall_md_summary_file, "w", encoding="utf-8") as write_fh:
write_asmdiffs_markdown_summary(write_fh, base_jit_options, diff_jit_options, summarizable_asm_diffs, True)
logging.info(" Summary Markdown file: %s", overall_md_summary_file)
- with open(short_md_summary_file, "w") as write_fh:
+ with open(short_md_summary_file, "w", encoding="utf-8") as write_fh:
write_asmdiffs_markdown_summary(write_fh, base_jit_options, diff_jit_options, summarizable_asm_diffs, False)
logging.info(" Short Summary Markdown file: %s", short_md_summary_file)
+ elif coreclr_args.summary_type == "metricdiff":
+ base_jit_options = []
+ diff_jit_options = []
+ summarizable_metric_diffs = []
+
+ for json_file in coreclr_args.summaries:
+            with open(json_file, "r", encoding="utf-8") as fh:
+ data = json.load(fh)
+ if isinstance(data, str):
+ continue
+ (base_jit_options, diff_jit_options, metric_diffs) = data
+ summarizable_metric_diffs.extend(metric_diffs)
+
+ # Sort by collection name
+ summarizable_metric_diffs.sort(key=lambda t: t[0])
+
+ with open(overall_md_summary_file, "w", encoding="utf-8") as write_fh:
+ write_metricdiff_markdown_summary(write_fh, base_jit_options, diff_jit_options, summarizable_metric_diffs, True)
+ logging.info(" Summary Markdown file: %s", overall_md_summary_file)
+
+ with open(short_md_summary_file, "w", encoding="utf-8") as write_fh:
+ write_metricdiff_markdown_summary(write_fh, base_jit_options, diff_jit_options, summarizable_metric_diffs, False)
+ logging.info(" Short Summary Markdown file: %s", short_md_summary_file)
else:
base_jit_build_string_decoded = ""
diff_jit_build_string_decoded = ""
@@ -4288,11 +4664,11 @@ def summarize_json_summaries(coreclr_args):
# Sort by collection name
summarizable_tp_diffs.sort(key=lambda t: t[0])
- with open(overall_md_summary_file, "w") as write_fh:
+ with open(overall_md_summary_file, "w", encoding="utf-8") as write_fh:
write_tpdiff_markdown_summary(write_fh, base_jit_build_string_decoded, diff_jit_build_string_decoded, base_jit_options, diff_jit_options, summarizable_tp_diffs, True)
logging.info(" Summary Markdown file: %s", overall_md_summary_file)
- with open(short_md_summary_file, "w") as write_fh:
+ with open(short_md_summary_file, "w", encoding="utf-8") as write_fh:
write_tpdiff_markdown_summary(write_fh, base_jit_build_string_decoded, diff_jit_build_string_decoded, base_jit_options, diff_jit_options, summarizable_tp_diffs, False)
logging.info(" Short Summary Markdown file: %s", short_md_summary_file)
@@ -5279,6 +5655,20 @@ def verify_base_diff_args():
process_base_jit_path_arg(coreclr_args)
download_clrjit_pintool(coreclr_args)
+ elif coreclr_args.mode == "metricdiff":
+
+ verify_target_args()
+ verify_superpmi_common_args()
+ verify_replay_common_args()
+ verify_base_diff_args()
+
+ coreclr_args.verify(determine_coredis_tools(coreclr_args),
+ "coredistools_location",
+ os.path.isfile,
+ "Unable to find coredistools.")
+
+ process_base_jit_path_arg(coreclr_args)
+
elif coreclr_args.mode == "upload":
verify_target_args()
@@ -5384,7 +5774,7 @@ def verify_base_diff_args():
lambda unused: True,
"Unable to set output_long_summary_path")
- if coreclr_args.mode == "replay" or coreclr_args.mode == "asmdiffs" or coreclr_args.mode == "tpdiff" or coreclr_args.mode == "download":
+ if coreclr_args.mode == "replay" or coreclr_args.mode == "asmdiffs" or coreclr_args.mode == "tpdiff" or coreclr_args.mode == "metricdiff" or coreclr_args.mode == "download":
if hasattr(coreclr_args, "private_store") and coreclr_args.private_store is not None:
logging.info("Using private stores:")
for path in coreclr_args.private_store:
@@ -5533,6 +5923,37 @@ def main(args):
logging.debug("Finish time: %s", end_time.strftime("%H:%M:%S"))
logging.debug("Elapsed time: %s", elapsed_time)
+ elif coreclr_args.mode == "metricdiff":
+ local_mch_paths = process_mch_files_arg(coreclr_args)
+ mch_files = get_mch_files_for_replay(local_mch_paths, coreclr_args.filter)
+ if mch_files is None:
+ return 1
+
+ begin_time = datetime.datetime.now()
+
+ logging.info("SuperPMI metric diff")
+ logging.debug("------------------------------------------------------------")
+ logging.debug("Start time: %s", begin_time.strftime("%H:%M:%S"))
+
+ base_jit_path = coreclr_args.base_jit_path
+ diff_jit_path = coreclr_args.diff_jit_path
+
+ logging.info("Base JIT Path: %s", base_jit_path)
+ logging.info("Diff JIT Path: %s", diff_jit_path)
+
+ logging.info("Using MCH files:")
+ for mch_file in mch_files:
+ logging.info(" %s", mch_file)
+
+ metric_diff = SuperPMIReplayMetricDiff(coreclr_args, mch_files, base_jit_path, diff_jit_path)
+ success = metric_diff.replay_with_metric_diff()
+
+ end_time = datetime.datetime.now()
+ elapsed_time = end_time - begin_time
+
+ logging.debug("Finish time: %s", end_time.strftime("%H:%M:%S"))
+ logging.debug("Elapsed time: %s", elapsed_time)
+
elif coreclr_args.mode == "upload":
begin_time = datetime.datetime.now()
diff --git a/src/coreclr/scripts/superpmi_diffs.py b/src/coreclr/scripts/superpmi_diffs.py
index 00ab175f41d850..5e20527602a0c3 100644
--- a/src/coreclr/scripts/superpmi_diffs.py
+++ b/src/coreclr/scripts/superpmi_diffs.py
@@ -22,7 +22,7 @@
parser = argparse.ArgumentParser(description="description")
-parser.add_argument("-type", help="Type of diff (asmdiffs, tpdiff, all)")
+parser.add_argument("-type", help="Type of diff (asmdiffs, tpdiff, metricdiff, all)")
parser.add_argument("-partition_info", help="Path to partition info file")
parser.add_argument("-base_jit_directory", help="path to the directory containing base clrjit binaries")
parser.add_argument("-diff_jit_directory", help="path to the directory containing diff clrjit binaries")
@@ -51,7 +51,7 @@ def setup_args(args):
coreclr_args.verify(args,
"type",
- lambda type: type in ["asmdiffs", "tpdiff", "all"],
+ lambda type: type in ["asmdiffs", "tpdiff", "metricdiff", "all"],
"Invalid type \"{}\"".format)
coreclr_args.verify(args,
@@ -318,6 +318,48 @@ def do_tpdiff(self):
print("Failed during tpdiff. Log file: {}".format(log_file))
self.failed = True
+ def do_metricdiff(self):
+ """ Run metricdiff
+ """
+
+ print("Running metricdiff")
+
+ # Figure out which JITs to use
+ jit_name = determine_jit_name(self.host_os, self.target_os, self.host_arch_name, self.arch_name, use_cross_compile_jit=True)
+ base_release_jit_path = os.path.join(self.coreclr_args.base_jit_directory, "release", jit_name)
+ diff_release_jit_path = os.path.join(self.coreclr_args.diff_jit_directory, "release", jit_name)
+
+ log_file = os.path.join(self.log_directory, "superpmi_metricdiff_{}.log".format(self.target))
+
+ # This is the summary file name and location written by superpmi.py. If the file exists, remove it to ensure superpmi.py doesn't create a numbered version.
+ overall_json_metricdiff_summary_file = os.path.join(self.spmi_location, "metricdiff_summary.json")
+ if os.path.isfile(overall_json_metricdiff_summary_file):
+ os.remove(overall_json_metricdiff_summary_file)
+
+ overall_json_metricdiff_summary_file_target = os.path.join(self.log_directory, "superpmi_metricdiff_summary_{}.json".format(self.target))
+ self.summary_json_files.append((overall_json_metricdiff_summary_file, overall_json_metricdiff_summary_file_target))
+
+ _, _, return_code = run_command([
+ self.python_path,
+ os.path.join(self.script_dir, "superpmi.py"),
+ "metricdiff",
+ "--no_progress",
+ "-core_root", self.core_root_dir,
+ "-target_os", self.target_os,
+ "-target_arch", self.arch_name,
+ "-arch", self.host_arch_name,
+ "-base_jit_path", base_release_jit_path,
+ "-diff_jit_path", diff_release_jit_path,
+ "-spmi_location", self.spmi_location,
+ "-error_limit", "100",
+ "--summary_as_json",
+ "-log_level", "debug",
+ "-log_file", log_file] + self.create_jit_options_args())
+
+ if return_code != 0:
+ print("Failed during metricdiff. Log file: {}".format(log_file))
+ self.failed = True
+
def create_jit_options_args(self):
options = []
if self.coreclr_args.base_jit_options is not None:
@@ -365,13 +407,17 @@ def main(main_args):
do_asmdiffs = False
do_tpdiff = False
+ do_metricdiff = False
if coreclr_args.type == 'asmdiffs':
do_asmdiffs = True
if coreclr_args.type == 'tpdiff':
do_tpdiff = True
+ if coreclr_args.type == 'metricdiff':
+ do_metricdiff = True
if coreclr_args.type == 'all':
do_asmdiffs = True
do_tpdiff = True
+ do_metricdiff = True
diff = Diff(coreclr_args)
@@ -381,6 +427,8 @@ def main(main_args):
diff.do_asmdiffs()
if do_tpdiff:
diff.do_tpdiff()
+ if do_metricdiff:
+ diff.do_metricdiff()
diff.summarize()
diff --git a/src/coreclr/scripts/superpmi_diffs_setup.py b/src/coreclr/scripts/superpmi_diffs_setup.py
index 2f7d6b70ecb4e1..dcecb367dc534f 100644
--- a/src/coreclr/scripts/superpmi_diffs_setup.py
+++ b/src/coreclr/scripts/superpmi_diffs_setup.py
@@ -32,7 +32,7 @@
parser.add_argument("-arch", required=True, help="Architecture")
parser.add_argument("-platform", required=True, help="OS platform")
-parser.add_argument("-type", required=True, help="Type of diff (asmdiffs, tpdiff, all)")
+parser.add_argument("-type", required=True, help="Type of diff (asmdiffs, tpdiff, metricdiff, all)")
parser.add_argument("-source_directory", required=True, help="Path to the root directory of the dotnet/runtime source tree")
parser.add_argument("-checked_directory", help="Path to the directory containing built checked binaries (e.g., /artifacts/bin/coreclr/windows.x64.Checked)")
parser.add_argument("-release_directory", help="Path to the directory containing built release binaries (e.g., /artifacts/bin/coreclr/windows.x64.Release)")
@@ -67,7 +67,7 @@ def setup_args(args):
coreclr_args.verify(args,
"type",
- lambda type: type in ["asmdiffs", "tpdiff", "all"],
+ lambda type: type in ["asmdiffs", "tpdiff", "metricdiff", "all"],
"Invalid type \"{}\"".format)
coreclr_args.verify(args,
@@ -87,18 +87,24 @@ def setup_args(args):
do_asmdiffs = False
do_tpdiff = False
+ do_metricdiff = False
if coreclr_args.type == 'asmdiffs':
do_asmdiffs = True
if coreclr_args.type == 'tpdiff':
do_tpdiff = True
+ if coreclr_args.type == 'metricdiff':
+ do_metricdiff = True
if coreclr_args.type == 'all':
do_asmdiffs = True
do_tpdiff = True
+ do_metricdiff = True
use_checked = False
use_release = False
if do_asmdiffs:
use_checked = True
+ if do_metricdiff:
+ use_release = True
if do_tpdiff:
use_release = True
@@ -328,18 +334,24 @@ def main(main_args):
do_asmdiffs = False
do_tpdiff = False
+ do_metricdiff = False
if coreclr_args.type == 'asmdiffs':
do_asmdiffs = True
if coreclr_args.type == 'tpdiff':
do_tpdiff = True
+ if coreclr_args.type == 'metricdiff':
+ do_metricdiff = True
if coreclr_args.type == 'all':
do_asmdiffs = True
do_tpdiff = True
+ do_metricdiff = True
use_checked = False
use_release = False
if do_asmdiffs:
use_checked = True
+ if do_metricdiff:
+ use_release = True
if do_tpdiff:
use_release = True
diff --git a/src/coreclr/scripts/superpmi_diffs_summarize.py b/src/coreclr/scripts/superpmi_diffs_summarize.py
index dc900797e60232..a9a0cae716c715 100644
--- a/src/coreclr/scripts/superpmi_diffs_summarize.py
+++ b/src/coreclr/scripts/superpmi_diffs_summarize.py
@@ -25,7 +25,7 @@
parser.add_argument("-diff_summary_dir", required=True, help="Path to diff summary directory")
parser.add_argument("-arch", required=True, help="Architecture")
parser.add_argument("-platform", required=True, help="OS platform")
-parser.add_argument("-type", required=True, help="Type of diff (asmdiffs, tpdiff, all)")
+parser.add_argument("-type", required=True, help="Type of diff (asmdiffs, tpdiff, metricdiff, all)")
parser.add_argument("-source_directory", required=True, help="Path to the root directory of the dotnet/runtime source tree")
def setup_args(args):
@@ -58,7 +58,7 @@ def setup_args(args):
coreclr_args.verify(args,
"type",
- lambda type: type in ["asmdiffs", "tpdiff", "all"],
+ lambda type: type in ["asmdiffs", "tpdiff", "metricdiff", "all"],
"Invalid type \"{}\"".format)
coreclr_args.verify(args,
@@ -88,10 +88,11 @@ def append_diff_file(f, file_name, full_file_path):
# What platform is this file summarizing? We parse the filename itself, which is of the form:
# superpmi_asmdiffs_summary__.md
# superpmi_tpdiff_summary__.md
+ # superpmi_metricdiff_summary_<os>_<arch>.md
diff_os = "unknown"
diff_arch = "unknown"
- match_obj = re.search(r'^superpmi_(tpdiff|asmdiffs)_summary_(.*)_(.*).md', file_name)
+ match_obj = re.search(r'^superpmi_(tpdiff|asmdiffs|metricdiff)_summary_([^_]+)_([^_]+)\.md$', file_name)
if match_obj is not None:
diff_os = match_obj.group(2)
diff_arch = match_obj.group(3)
@@ -131,13 +132,17 @@ def main(main_args):
do_asmdiffs = False
do_tpdiff = False
+ do_metricdiff = False
if coreclr_args.type == 'asmdiffs':
do_asmdiffs = True
if coreclr_args.type == 'tpdiff':
do_tpdiff = True
+ if coreclr_args.type == 'metricdiff':
+ do_metricdiff = True
if coreclr_args.type == 'all':
do_asmdiffs = True
do_tpdiff = True
+ do_metricdiff = True
superpmi_scripts_directory = os.path.join(source_directory, 'src', 'coreclr', 'scripts')
python_path = sys.executable
@@ -176,7 +181,7 @@ def main(main_args):
return return_code
# Consolidate all superpmi_asmdiffs_summary_*.json and superpmi_tpdiff_summary_*.json
- # into overall__summary__.md.
+ # into overall_<prefix>_<type>_summary_<platform>_<arch>.md.
# (Don't name it "superpmi_xxx.md" or we might consolidate it into itself.)
# If there are no summary files found, add a "No diffs found" text to be explicit about that.
#
@@ -185,7 +190,11 @@ def main(main_args):
# We should create a job that depends on all the diff jobs, downloads all the .md file artifacts,
# and consolidates everything together in one file.
- final_md_path = os.path.join(diff_summary_dir, "overall_{}_summary_{}_{}.md".format(coreclr_args.type, platform_name, arch))
+ # Use a numeric prefix so tabs sort in the desired order on the AzDO Extensions page:
+ # asmdiffs first, tpdiff second, metricdiff last.
+ type_sort_prefix = {"asmdiffs": "1", "tpdiff": "2", "metricdiff": "3"}
+ prefix = type_sort_prefix.get(coreclr_args.type, "0")
+ final_md_path = os.path.join(diff_summary_dir, "overall_{}_{}_summary_{}_{}.md".format(prefix, coreclr_args.type, platform_name, arch))
print("Consolidating final {}".format(final_md_path))
with open(final_md_path, "a") as f:
@@ -220,6 +229,21 @@ def main(main_args):
if not any_tpdiff_found:
f.write("No throughput diffs found\n")
+ if do_metricdiff:
+ f.write("# JIT metric diffs on {} {}\n\n".format(platform_name, arch))
+ f.write("The following shows the impact on JIT metrics.\n\n")
+
+ any_metricdiff_found = False
+ for dirpath, _, files in os.walk(diff_summary_dir):
+ for file_name in files:
+ if file_name.startswith("superpmi_metricdiff") and file_name.endswith(".md"):
+ full_file_path = os.path.join(dirpath, file_name)
+ if append_diff_file(f, file_name, full_file_path):
+ any_metricdiff_found = True
+
+ if not any_metricdiff_found:
+ f.write("No metric diffs found\n")
+
with open(final_md_path, "r") as f:
print(f.read())