|
| 1 | +import copy |
| 2 | + |
| 3 | +from omnetpp.scave.results import * |
| 4 | + |
| 5 | +from inet.common.util import * |
| 6 | +from inet.simulation.task import * |
| 7 | +from inet.test.fingerprint.task import * |
| 8 | + |
| 9 | +__sphinx_mock__ = True # ignore this module in documentation |
| 10 | + |
| 11 | +_logger = logging.getLogger(__name__) |
| 12 | + |
class CompareSimulationsTask(MultipleSimulationTasks):
    """Runs exactly two simulation tasks configured so that their executions can be compared.

    Each task records an eventlog plus scalar/vector result files under unique,
    per-task file names so the two runs don't overwrite each other's results.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        num_tasks = len(self.tasks)
        if num_tasks != 2:
            # ValueError is a subclass of Exception, so existing handlers still match.
            raise ValueError(f"Found {num_tasks} simulation tasks instead of two")
        for index, task in enumerate(self.tasks, start=1):
            task.record_eventlog = True
            # Unique per-task base name: <config>-#<run>-<1|2>
            base_name = f"results/{task.simulation_config.config}-#{task.run_number}-{index}"
            task.eventlog_file_path = f"{base_name}.elog"
            task.scalar_file_path = f"{base_name}.sca"
            task.vector_file_path = f"{base_name}.vec"

    def run_protected(self, ingredients="tplx", **kwargs):
        """Run both simulations with fingerprint calculation enabled.

        The all-zero expected fingerprint forces the simulator to compute and
        record the fingerprint trajectory for the given ingredients, which is
        later used to locate the point of divergence between the two runs.
        """
        append_args = ["--fingerprint=0000-0000/" + ingredients] + get_ingredients_append_args(ingredients)
        multiple_task_results = super().run_protected(append_args=append_args, **kwargs)
        return multiple_task_results
| 31 | + |
class CompareSimulationsTaskResult(MultipleTaskResults):
    """Result of a CompareSimulationsTask comparing two simulation runs.

    When both runs complete ("DONE"), this locates the first event where the two
    fingerprint trajectories diverge and collects the statistical results that
    differ between the runs; the overall result is reported as "DIVERGENT" or
    "IDENTICAL" accordingly.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.result == "DONE":
            self.divergence_position = self._find_divergence_position(self)
            # numTicks is excluded because it is expected to differ between runs.
            self.different_statistical_results = self._get_different_statistical_results(exclude_result_name_filter="numTicks")
            if self.divergence_position:
                self.result = "DIVERGENT"
                self.color = COLOR_YELLOW
            else:
                self.result = "IDENTICAL"
        else:
            # The runs didn't finish cleanly; there is nothing meaningful to compare.
            self.divergence_position = None
            self.different_statistical_results = pd.DataFrame()

    def __repr__(self):
        divergence_description = "\n" + self.divergence_position.__repr__() if self.divergence_position else ""
        statistical_description = "\nFound " + COLOR_YELLOW + str(len(self.different_statistical_results)) + COLOR_RESET + " different statistical results" if not self.different_statistical_results.empty else ""
        return MultipleTaskResults.__repr__(self) + divergence_description + statistical_description

    def debug_at_divergence_position(self, **kwargs):
        """Re-run both simulations under a debugger, breaking at the divergent event.

        The original tasks are shallow-copied so the stored tasks stay unmodified.
        """
        if self.divergence_position:
            task_1 = copy.copy(self.multiple_tasks.tasks[0])
            task_1.debug = True
            task_1.mode = "debug"
            task_1.break_at_event_number = self.divergence_position.simulation_event_1.event_number
            task_2 = copy.copy(self.multiple_tasks.tasks[1])
            task_2.debug = True
            task_2.mode = "debug"
            task_2.break_at_event_number = self.divergence_position.simulation_event_2.event_number
            multiple_tasks = copy.copy(self.multiple_tasks)
            multiple_tasks.tasks = [task_1, task_2]
            multiple_tasks.run(**kwargs)

    def run_until_divergence_position(self, **kwargs):
        """Launch both simulations in the Qtenv GUI without waiting for them to finish.

        Despite the name, no break point is set here; copies of the tasks are
        simply started interactively so the user can step to the divergence.
        """
        if self.divergence_position:
            for task in self.multiple_tasks.tasks:
                # Copy so the interactive settings don't leak into the stored tasks.
                task = copy.copy(task)
                task.user_interface = "Qtenv"
                task.wait = False
                task.run(**kwargs)

    # NOTE(review): method name misspells "position"; kept as-is because renaming
    # would break existing callers.
    def show_divergence_posisiton_in_sequence_chart(self):
        """Open both eventlogs in sequence chart editors positioned at the divergent events."""
        if self.divergence_position:
            simulation_event_1 = self.divergence_position.simulation_event_1
            simulation_event_2 = self.divergence_position.simulation_event_2
            project_name1 = simulation_event_1.simulation_result.task.simulation_config.simulation_project.get_name()
            project_name2 = simulation_event_2.simulation_result.task.simulation_config.simulation_project.get_name()
            path_name1 = "/" + project_name1 + "/" + simulation_event_1.simulation_result.task.simulation_config.working_directory + "/" + simulation_event_1.simulation_result.eventlog_file_path
            path_name2 = "/" + project_name2 + "/" + simulation_event_2.simulation_result.task.simulation_config.working_directory + "/" + simulation_event_2.simulation_result.eventlog_file_path
            editor1 = open_editor(path_name1)
            editor2 = open_editor(path_name2)
            goto_event_number(editor1, simulation_event_1.event_number)
            goto_event_number(editor2, simulation_event_2.event_number)

    def print_different_statistical_results(self):
        """Print the data frame of statistical results that differ between the two runs."""
        print(self.different_statistical_results)

    def _find_divergence_position(self, multiple_task_results):
        """Return the first position where the two fingerprint trajectories differ, or None.

        Trajectories are deduplicated first, then compared index by index up to
        the length of the shorter one.
        """
        fingerprint_trajectory_1 = multiple_task_results.results[0].get_fingerprint_trajectory().get_unique()
        fingerprint_trajectory_2 = multiple_task_results.results[1].get_fingerprint_trajectory().get_unique()
        min_size = min(len(fingerprint_trajectory_1.fingerprints), len(fingerprint_trajectory_2.fingerprints))
        for i in range(0, min_size):
            trajectory_fingerprint_1 = fingerprint_trajectory_1.fingerprints[i]
            trajectory_fingerprint_2 = fingerprint_trajectory_2.fingerprints[i]
            if trajectory_fingerprint_1.fingerprint != trajectory_fingerprint_2.fingerprint:
                return FingerprintTrajectoryDivergencePosition(SimulationEvent(fingerprint_trajectory_1.simulation_result, fingerprint_trajectory_1.event_numbers[i]),
                                                               SimulationEvent(fingerprint_trajectory_2.simulation_result, fingerprint_trajectory_2.event_numbers[i]))
        return None

    def _get_different_statistical_results(self, result_name_filter=None, exclude_result_name_filter=None, result_module_filter=None, exclude_result_module_filter=None, full_match=False):
        """Return a data frame of scalar results that differ between the two runs.

        Results are matched on (experiment, measurement, replication, module, name);
        rows present in only one run or with unequal values are kept, absolute and
        relative errors are computed, the optional name/module filters are applied,
        and rows are sorted by descending relative error. Returns an empty frame
        when the two result sets are identical.
        """
        df_1 = self._get_result_data_frame(self.results[0])
        df_2 = self._get_result_data_frame(self.results[1])
        if not df_1.equals(df_2):
            merged = df_1.merge(df_2, on=['experiment', 'measurement', 'replication', 'module', 'name'], how='outer', suffixes=('_1', '_2'))
            # Keep rows where exactly one side is missing or the values differ,
            # then drop rows missing on both sides.
            df = merged[
                (merged['value_1'].isna() & merged['value_2'].notna()) |
                (merged['value_1'].notna() & merged['value_2'].isna()) |
                (merged['value_1'] != merged['value_2'])].dropna(subset=['value_1', 'value_2'], how='all').copy()
            df["absolute_error"] = df.apply(lambda row: abs(row["value_2"] - row["value_1"]), axis=1)
            # Relative error is infinite when the baseline is zero but the other value isn't.
            df["relative_error"] = df.apply(lambda row: row["absolute_error"] / abs(row["value_1"]) if row["value_1"] != 0 else (float("inf") if row["value_2"] != 0 else 0), axis=1)
            df = df[df.apply(lambda row: matches_filter(row["name"], result_name_filter, exclude_result_name_filter, full_match) and \
                                         matches_filter(row["module"], result_module_filter, exclude_result_module_filter, full_match), axis=1)]
            sorted_df = df.sort_values(by="relative_error", ascending=False)
            return sorted_df
        else:
            return pd.DataFrame()

    def _get_result_file_name(self, simulation_task, extension):
        """Return the result file name for the given task: <ini>-<config>-#<run>.<extension>."""
        simulation_config = simulation_task.simulation_config
        return f"{simulation_config.ini_file}-{simulation_config.config}-#{simulation_task.run_number}.{extension}"

    def _read_scalar_result_file(self, file_name):
        """Read a scalar result file into a data frame with a fixed set of columns."""
        df = read_result_files(file_name, include_fields_as_scalars=True)
        df = get_scalars(df, include_runattrs=True)
        df = df if df.empty else df[["experiment", "measurement", "replication", "module", "name", "value"]]
        return df

    def _get_result_data_frame(self, simulation_task_result):
        """Return the scalar results of one simulation run as a data frame.

        If a vector file exists, its vectors are first converted to statistics
        scalars (appended to the scalar file via opp_scavetool) and the vector
        file is removed, so all results end up in the scalar file.
        """
        simulation_task = simulation_task_result.task
        simulation_config = simulation_task.simulation_config
        simulation_project = simulation_config.simulation_project
        working_directory = simulation_config.working_directory
        scalar_file_path = simulation_project.get_full_path(os.path.join(working_directory, simulation_task_result.scalar_file_path))
        vector_file_path = simulation_project.get_full_path(os.path.join(working_directory, simulation_task_result.vector_file_path))
        if os.path.exists(vector_file_path):
            run_command_with_logging(["opp_scavetool", "x", "--type", "sth", "-w", vector_file_path, "-o", scalar_file_path])
            os.remove(vector_file_path)
        _logger.debug(f"Reading result file {scalar_file_path}")
        return self._read_scalar_result_file(scalar_file_path)
| 143 | + |
def compare_simulations(task1, task2, **kwargs):
    """Run the two given simulation tasks and return a comparison of their results.

    Extra keyword arguments are forwarded both to the comparison task's
    constructor and to its run method.
    """
    tasks = [task1, task2]
    comparison_task = CompareSimulationsTask(tasks=tasks, multiple_task_results_class=CompareSimulationsTaskResult, **kwargs)
    return comparison_task.run(**kwargs)
0 commit comments