#!/usr/bin/env python3
"""Sparkle command to generate a report for an executed experiment."""
import sys
import argparse
from pathlib import Path, PurePath

from sparkle.CLI.help import global_variables as gv
from sparkle.solver.ablation import AblationScenario
from sparkle.platform import generate_report_for_selection as sgfs
from sparkle.platform import \
    generate_report_for_configuration as sgrfch
from sparkle.CLI.help import logging as sl
from sparkle.platform.settings_objects import Settings, SettingState
from sparkle.CLI.help import argparse_custom as ac
from sparkle.CLI.help.reporting_scenario import Scenario
from sparkle.platform import \
    generate_report_for_parallel_portfolio as sgrfpph
from sparkle.solver import Solver
from sparkle.solver.validator import Validator
from sparkle.instance import Instance_Set
from sparkle.structures import PerformanceDataFrame, FeatureDataFrame
from sparkle.platform.output.configuration_output import ConfigurationOutput
from sparkle.platform.output.selection_output import SelectionOutput
from sparkle.platform.output.parallel_portfolio_output import ParallelPortfolioOutput

from sparkle.platform import CommandName, COMMAND_DEPENDENCIES
from sparkle.CLI.initialise import check_for_initialise
from sparkle.CLI.help.nicknames import resolve_object_name

def parser_function() -> argparse.ArgumentParser:
    """Define the command line arguments."""
    parser = argparse.ArgumentParser(
        description="Without any arguments a report for the most recent algorithm "
                    "selection or algorithm configuration procedure is generated.",
        epilog="Note that if a test instance set is given, the training instance set "
               "must also be given.")
    # Configuration arguments
    parser.add_argument(*ac.SolverReportArgument.names,
                        **ac.SolverReportArgument.kwargs)
    parser.add_argument(*ac.InstanceSetTrainReportArgument.names,
                        **ac.InstanceSetTrainReportArgument.kwargs)
    parser.add_argument(*ac.InstanceSetTestReportArgument.names,
                        **ac.InstanceSetTestReportArgument.kwargs)
    parser.add_argument(*ac.NoAblationReportArgument.names,
                        **ac.NoAblationReportArgument.kwargs)
    # Selection arguments
    parser.add_argument(*ac.SelectionReportArgument.names,
                        **ac.SelectionReportArgument.kwargs)
    parser.add_argument(*ac.TestCaseDirectoryArgument.names,
                        **ac.TestCaseDirectoryArgument.kwargs)
    # Common arguments
    parser.add_argument(*ac.SparkleObjectiveArgument.names,
                        **ac.SparkleObjectiveArgument.kwargs)
    parser.add_argument(*ac.SettingsFileArgument.names,
                        **ac.SettingsFileArgument.kwargs)
    parser.add_argument(*ac.GenerateJSONArgument.names,
                        **ac.GenerateJSONArgument.kwargs)
    return parser


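# main() dispatches between three report types:
#   1. Selection: the selection flag or a test case directory was given, or
#      the latest scenario was a selection run.
#   2. Parallel portfolio: the latest scenario was a parallel portfolio run.
#   3. Configuration: the default; requires a solver, and a training set
#      whenever a test set is given.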
def main(argv: list[str]) -> None:
    """Generate a report for an executed experiment."""
    prev_settings = Settings(PurePath("Settings/latest.ini"))

    # Log command call
    sl.log_command(sys.argv)

    # Define command line arguments
    parser = parser_function()

    # Process command line arguments
    args = parser.parse_args(argv)

    check_for_initialise(COMMAND_DEPENDENCIES[CommandName.GENERATE_REPORT])

    # Do this first, so other command line options can override settings from the file
    if ac.set_by_user(args, "settings_file"):
        gv.settings().read_settings_ini(
            args.settings_file, SettingState.CMD_LINE
        )
    if args.objectives is not None:
        gv.settings().set_general_sparkle_objectives(
            args.objectives, SettingState.CMD_LINE)
    selection = args.selection
    test_case_dir = args.test_case_directory
    only_json = args.only_json

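    # Resolve CLI values that may be nicknames or paths into platform objects;
    # arguments that were not given resolve to None and may be filled in from
    # the latest scenario below.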
    solver = resolve_object_name(args.solver,
                                 gv.solver_nickname_mapping,
                                 gv.settings().DEFAULT_solver_dir, Solver)
    instance_set_train = resolve_object_name(
        args.instance_set_train,
        gv.file_storage_data_mapping[gv.instances_nickname_path],
        gv.settings().DEFAULT_instance_dir, Instance_Set)
    instance_set_test = resolve_object_name(
        args.instance_set_test,
        gv.file_storage_data_mapping[gv.instances_nickname_path],
        gv.settings().DEFAULT_instance_dir, Instance_Set)

    Settings.check_settings_changes(gv.settings(), prev_settings)
    # If no arguments are set, get the latest scenario
    if not selection and test_case_dir is None and solver is None:
        scenario = gv.latest_scenario().get_latest_scenario()
        if scenario == Scenario.SELECTION:
            selection = True
            test_case_dir = gv.latest_scenario().get_selection_test_case_directory()
        elif scenario == Scenario.CONFIGURATION:
            solver = gv.latest_scenario().get_config_solver()
            instance_set_train = gv.latest_scenario().get_config_instance_set_train()
            instance_set_test = gv.latest_scenario().get_config_instance_set_test()
        elif scenario == Scenario.PARALLEL_PORTFOLIO:
            parallel_portfolio_path = gv.latest_scenario().get_parallel_portfolio_path()
            pap_instance_set = \
                gv.latest_scenario().get_parallel_portfolio_instance_set()

    flag_instance_set_train = instance_set_train is not None
    flag_instance_set_test = instance_set_test is not None

    # Reporting for algorithm selection
    if selection or test_case_dir is not None:
        objective = gv.settings().get_general_sparkle_objectives()[0]
        if not objective.time:
            print("ERROR: The selection report is not implemented for "
                  "non-runtime objectives!")
            sys.exit(-1)
        selection_scenario = gv.latest_scenario().get_selection_scenario_path()
        actual_portfolio_selector_path = selection_scenario / "portfolio_selector"
        if not actual_portfolio_selector_path.is_file():
            print("Before generating a Sparkle report, please first construct the "
                  "Sparkle portfolio selector. Not generating a Sparkle report, "
                  "stopping execution!")
            sys.exit(-1)

        print("Generating report for selection...")
        train_data = PerformanceDataFrame(gv.settings().DEFAULT_performance_data_path)
        feature_data = FeatureDataFrame(gv.settings().DEFAULT_feature_data_path)
        test_data = None
        test_case_path = Path(test_case_dir) if test_case_dir is not None else None
        if test_case_dir is not None and (test_case_path
                                          / "performance_data.csv").exists():
            test_data = PerformanceDataFrame(test_case_path / "performance_data.csv")
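        # test_data stays None when no test case directory with a
        # performance_data.csv is given; the selection report then only
        # covers the training data.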
        # Create machine readable selection output
        instance_dirs = set(Path(instance).parent for instance in train_data.instances)
        instance_sets = []
        for instance_dir in instance_dirs:
            instance_sets.append(Instance_Set(instance_dir))
        test_set = None if test_case_dir is None else Instance_Set(Path(test_case_dir))
        cutoff_time = gv.settings().get_general_target_cutoff_time()
        output = gv.settings().DEFAULT_selection_output_analysis
        selection_output = SelectionOutput(
            selection_scenario, train_data, feature_data,
            instance_sets, test_set, objective, cutoff_time,
            output)
        selection_output.write_output()
        print("Machine readable output is placed at:", selection_output.output)

        if not only_json:
            sgfs.generate_report_selection(
                gv.settings().DEFAULT_selection_output_analysis,
                gv.settings().DEFAULT_latex_source,
                "template-Sparkle-for-selection.tex",
                gv.settings().DEFAULT_latex_bib,
                gv.settings().DEFAULT_extractor_dir,
                selection_scenario,
                feature_data,
                train_data,
                objective,
                gv.settings().get_general_extractor_cutoff_time(),
                gv.settings().get_general_target_cutoff_time(),
                test_data
            )
            if test_case_dir is None:
                print("Report generated ...")
            else:
                print("Report for test generated ...")

    elif gv.latest_scenario().get_latest_scenario() == Scenario.PARALLEL_PORTFOLIO:
        # Reporting for parallel portfolio
        # Machine readable output
        cutoff_time = gv.settings().get_general_target_cutoff_time()
        objective = gv.settings().get_general_sparkle_objectives()[0]
        output = gv.settings().DEFAULT_parallel_portfolio_output_analysis
        parallel_portfolio_output = ParallelPortfolioOutput(parallel_portfolio_path,
                                                            pap_instance_set,
                                                            objective,
                                                            output)
        parallel_portfolio_output.write_output()
        print("Machine readable output is placed at:", parallel_portfolio_output.output)

        if not only_json:
            sgrfpph.generate_report_parallel_portfolio(
                parallel_portfolio_path,
                gv.settings().DEFAULT_parallel_portfolio_output_analysis,
                gv.settings().DEFAULT_latex_source,
                gv.settings().DEFAULT_latex_bib,
                gv.settings().get_general_sparkle_objectives()[0],
                gv.settings().get_general_target_cutoff_time(),
                pap_instance_set)
            print("Parallel portfolio report generated ...")
    else:
        # Reporting for algorithm configuration
        if solver is None:
            print("Error! No Solver found for configuration report generation.")
            sys.exit(-1)

        # If only the testing set is given, return an error
        if not flag_instance_set_train and flag_instance_set_test:
            print("Argument Error! Only a testing set was provided, please also "
                  "provide a training set.")
            print(f"Usage: {sys.argv[0]} --solver <solver> [--instance-set-train "
                  "<instance-set-train>] [--instance-set-test <instance-set-test>]")
            sys.exit(-1)
        # Generate a report depending on which instance sets are provided
        if flag_instance_set_train or flag_instance_set_test:
            # Check if there are results to generate a report from
            validator = Validator(gv.settings().DEFAULT_validation_output)
            train_res = validator.get_validation_results(
                solver, instance_set_train)
            if instance_set_test is not None:
                test_res = validator.get_validation_results(solver,
                                                            instance_set_test)
            if len(train_res) == 0 or (instance_set_test is not None
                                       and len(test_res) == 0):
                print("Error: Results not found for the given solver and instance "
                      'set(s) combination. Make sure the "configure_solver" and '
                      '"validate_configured_vs_default" commands were correctly '
                      "executed.")
                sys.exit(-1)
        else:
            print("Error: No results from validate_configured_vs_default found that "
                  "can be used in the report!")
            sys.exit(-1)
        # Extract config scenario data for the report; ideally this would be read
        # from the scenario file instead, since at this point we cannot know
        # whether features were used or not
        configurator = gv.settings().get_general_sparkle_configurator()
        config_scenario = gv.latest_scenario().get_configuration_scenario(
            configurator.scenario_class)
        ablation_scenario = None
        if args.flag_ablation:
            ablation_scenario = AblationScenario(
                solver, instance_set_train, instance_set_test,
                gv.settings().DEFAULT_ablation_output)
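        # ablation_scenario stays None when ablation reporting is disabled and
        # is then passed as ablation=None to the report generator below.
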
        # Create machine readable output
        output = gv.settings().DEFAULT_configuration_output_analysis
        config_output = ConfigurationOutput(config_scenario.directory,
                                            configurator,
                                            config_scenario,
                                            instance_set_test,
                                            output)
        config_output.write_output()
        print("Machine readable output is placed at:", config_output.output)

        if not only_json:
            sgrfch.generate_report_for_configuration(
                solver,
                gv.settings().get_general_sparkle_configurator(),
                Validator(gv.settings().DEFAULT_validation_output),
                gv.settings().DEFAULT_extractor_dir,
                gv.settings().DEFAULT_configuration_output_analysis,
                gv.settings().DEFAULT_latex_source,
                gv.settings().DEFAULT_latex_bib,
                gv.settings().get_general_extractor_cutoff_time(),
                config_scenario,
                instance_set_test,
                ablation=ablation_scenario
            )

    # Write used settings to file
    gv.settings().write_used_settings()
    sys.exit(0)


if __name__ == "__main__":
    main(sys.argv[1:])