Coverage for sparkle/CLI/generate_report.py: 0%

149 statements  

« prev     ^ index     » next       coverage.py v7.6.1, created at 2024-09-27 09:10 +0000

1#!/usr/bin/env python3 

2"""Sparkle command to generate a report for an executed experiment.""" 

3 

4import sys 

5import argparse 

6from pathlib import Path, PurePath 

7 

8from sparkle.CLI.help import global_variables as gv 

9from sparkle.solver.ablation import AblationScenario 

10from sparkle.platform import generate_report_for_selection as sgfs 

11from sparkle.platform import \ 

12 generate_report_for_configuration as sgrfch 

13from sparkle.CLI.help import logging as sl 

14from sparkle.platform.settings_objects import Settings, SettingState 

15from sparkle.CLI.help import argparse_custom as ac 

16from sparkle.CLI.help.reporting_scenario import Scenario 

17from sparkle.platform import \ 

18 generate_report_for_parallel_portfolio as sgrfpph 

19from sparkle.solver import Solver 

20from sparkle.solver.validator import Validator 

21from sparkle.instance import instance_set 

22from sparkle.structures import PerformanceDataFrame, FeatureDataFrame 

23from sparkle.platform.output.configuration_output import ConfigurationOutput 

24from sparkle.platform.output.selection_output import SelectionOutput 

25from sparkle.platform.output.parallel_portfolio_output import ParallelPortfolioOutput 

26 

27from sparkle.platform import CommandName, COMMAND_DEPENDENCIES 

28from sparkle.CLI.initialise import check_for_initialise 

29from sparkle.CLI.help.nicknames import resolve_object_name 

30 

31 

def parser_function() -> argparse.ArgumentParser:
    """Build the argument parser for the generate-report command.

    Returns:
        The configured ArgumentParser instance.
    """
    parser = argparse.ArgumentParser(
        description=("Without any arguments a report for the most recent algorithm "
                     "selection or algorithm configuration procedure is generated."),
        epilog=("Note that if a test instance set is given, the training instance set "
                "must also be given."))
    # All arguments share the same registration shape (*names, **kwargs),
    # so declare them once and register them in a single loop.
    report_arguments = (
        # Configuration arguments
        ac.SolverReportArgument,
        ac.InstanceSetTrainReportArgument,
        ac.InstanceSetTestReportArgument,
        ac.NoAblationReportArgument,
        # Selection arguments
        ac.SelectionReportArgument,
        ac.TestCaseDirectoryArgument,
        # Common arguments
        ac.SparkleObjectiveArgument,
        ac.SettingsFileArgument,
        ac.GenerateJSONArgument,
    )
    for argument in report_arguments:
        parser.add_argument(*argument.names, **argument.kwargs)
    return parser

61 

62 

if __name__ == "__main__":
    # Compare current settings to latest.ini
    prev_settings = Settings(PurePath("Settings/latest.ini"))

    # Log command call
    sl.log_command(sys.argv)

    # Define command line arguments
    parser = parser_function()

    # Process command line arguments
    args = parser.parse_args()
    selection = args.selection
    test_case_dir = args.test_case_directory
    only_json = args.only_json

    solver = resolve_object_name(args.solver,
                                 gv.solver_nickname_mapping,
                                 gv.settings().DEFAULT_solver_dir, Solver)
    instance_set_train = resolve_object_name(
        args.instance_set_train,
        gv.file_storage_data_mapping[gv.instances_nickname_path],
        gv.settings().DEFAULT_instance_dir, instance_set)
    # BUG FIX: this previously resolved args.instance_set_train a second time,
    # so a test instance set given on the command line was silently ignored.
    instance_set_test = resolve_object_name(
        args.instance_set_test,
        gv.file_storage_data_mapping[gv.instances_nickname_path],
        gv.settings().DEFAULT_instance_dir, instance_set)

    check_for_initialise(COMMAND_DEPENDENCIES[CommandName.GENERATE_REPORT])

    # Do first, so other command line options can override settings from the file
    if ac.set_by_user(args, "settings_file"):
        gv.settings().read_settings_ini(
            args.settings_file, SettingState.CMD_LINE
        )
    if args.objectives is not None:
        gv.settings().set_general_sparkle_objectives(
            args.objectives, SettingState.CMD_LINE)

    Settings.check_settings_changes(gv.settings(), prev_settings)
    # If no arguments are set get the latest scenario
    if not selection and test_case_dir is None and solver is None:
        scenario = gv.latest_scenario().get_latest_scenario()
        if scenario == Scenario.SELECTION:
            selection = True
            test_case_dir = gv.latest_scenario().get_selection_test_case_directory()
        elif scenario == Scenario.CONFIGURATION:
            solver = gv.latest_scenario().get_config_solver()
            instance_set_train = gv.latest_scenario().get_config_instance_set_train()
            instance_set_test = gv.latest_scenario().get_config_instance_set_test()
        elif scenario == Scenario.PARALLEL_PORTFOLIO:
            parallel_portfolio_path =\
                gv.latest_scenario().get_parallel_portfolio_path()
            pap_instance_set =\
                gv.latest_scenario().get_parallel_portfolio_instance_set()

    flag_instance_set_train = instance_set_train is not None
    flag_instance_set_test = instance_set_test is not None

    # Reporting for algorithm selection
    if selection or test_case_dir is not None:
        objective = gv.settings().get_general_sparkle_objectives()[0]
        if not objective.time:
            print("ERROR: The selection report is not implemented for "
                  "non-runtime objectives!")
            sys.exit(-1)
        selection_scenario = gv.latest_scenario().get_selection_scenario_path()
        actual_portfolio_selector_path = selection_scenario / "portfolio_selector"
        if not actual_portfolio_selector_path.is_file():
            print("Before generating a Sparkle report, please first construct the "
                  "Sparkle portfolio selector. Not generating a Sparkle report, stopping"
                  " execution!")
            sys.exit(-1)

        print("Generating report for selection...")
        train_data = PerformanceDataFrame(gv.settings().DEFAULT_performance_data_path)
        feature_data = FeatureDataFrame(gv.settings().DEFAULT_feature_data_path)
        test_data = None
        test_case_path = Path(test_case_dir) if test_case_dir is not None else None
        if test_case_dir is not None and (test_case_path
                                          / "performance_data.csv").exists():
            test_data = PerformanceDataFrame(test_case_path / "performance_data.csv")
        # Create machine readable selection output
        # (one instance set per distinct parent directory of the train instances)
        instance_folders = set(Path(instance).parent
                               for instance in train_data.instances)
        # Comprehension avoids the previous loop that shadowed the
        # builtins `set` and `dir`.
        instance_sets = [instance_set(folder) for folder in instance_folders]
        test_set = None if test_case_dir is None else instance_set(Path(test_case_dir))
        cutoff_time = gv.settings().get_general_target_cutoff_time()
        output = gv.settings().DEFAULT_selection_output_analysis
        selection_output = SelectionOutput(
            selection_scenario, train_data, feature_data,
            instance_sets, test_set, objective, cutoff_time,
            output)
        selection_output.write_output()
        print("Machine readable output is placed at: ", selection_output.output)

        if not only_json:
            sgfs.generate_report_selection(
                gv.settings().DEFAULT_selection_output_analysis,
                gv.settings().DEFAULT_latex_source,
                "template-Sparkle-for-selection.tex",
                gv.settings().DEFAULT_latex_bib,
                gv.settings().DEFAULT_extractor_dir,
                selection_scenario,
                feature_data,
                train_data,
                objective,
                gv.settings().get_general_extractor_cutoff_time(),
                gv.settings().get_general_target_cutoff_time(),
                test_data
            )
            if test_case_dir is None:
                print("Report generated ...")
            else:
                print("Report for test generated ...")

    elif gv.latest_scenario().get_latest_scenario() == Scenario.PARALLEL_PORTFOLIO:
        # Reporting for parallel portfolio
        # NOTE(review): parallel_portfolio_path / pap_instance_set are only bound
        # when the no-argument fallback above ran; reaching this branch otherwise
        # (e.g. with --solver set) would raise NameError — confirm intended flow.
        # Machine readable Output
        cutoff_time = gv.settings().get_general_target_cutoff_time()
        objective = gv.settings().get_general_sparkle_objectives()[0]
        output = gv.settings().DEFAULT_parallel_portfolio_output_analysis
        parallel_portfolio_output = ParallelPortfolioOutput(parallel_portfolio_path,
                                                            pap_instance_set,
                                                            objective,
                                                            output)
        parallel_portfolio_output.write_output()
        print("Machine readable output is placed at: ", parallel_portfolio_output.output)

        if not only_json:
            sgrfpph.generate_report_parallel_portfolio(
                parallel_portfolio_path,
                gv.settings().DEFAULT_parallel_portfolio_output_analysis,
                gv.settings().DEFAULT_latex_source,
                gv.settings().DEFAULT_latex_bib,
                gv.settings().get_general_sparkle_objectives()[0],
                gv.settings().get_general_target_cutoff_time(),
                pap_instance_set)
            print("Parallel portfolio report generated ...")
    else:
        # Reporting for algorithm configuration
        if solver is None:
            print("Error! No Solver found for configuration report generation.")
            sys.exit(-1)

        # If only the testing set is given return an error
        if not flag_instance_set_train and flag_instance_set_test:
            print("Argument Error! Only a testing set was provided, please also "
                  "provide a training set")
            print(f"Usage: {sys.argv[0]} --solver <solver> [--instance-set-train "
                  "<instance-set-train>] [--instance-set-test <instance-set-test>]")
            sys.exit(-1)
        # (removed unused local `instance_set_train_name`, which could also raise
        # AttributeError when no training set was resolved)
        gv.settings().get_general_sparkle_configurator()\
            .set_scenario_dirs(solver, instance_set_train)
        # Generate a report depending on which instance sets are provided
        if flag_instance_set_train or flag_instance_set_test:
            # Check if there are results to generate a report from
            validator = Validator(gv.settings().DEFAULT_validation_output)
            train_res = validator.get_validation_results(
                solver, instance_set_train)
            if instance_set_test is not None:
                test_res = validator.get_validation_results(solver,
                                                            instance_set_test)
            if len(train_res) == 0 or (instance_set_test is not None
                                       and len(test_res) == 0):
                print("Error: Results not found for the given solver and instance set(s)"
                      ' combination. Make sure the "configure_solver" and "validate_'
                      'configured_vs_default" commands were correctly executed. ')
                sys.exit(-1)
        else:
            print("Error: No results from validate_configured_vs_default found that "
                  "can be used in the report!")
            sys.exit(-1)
        # Extract config scenario data for report, but this should be read from the
        # scenario file instead as we can't know whether features were used or not now
        number_of_runs = gv.settings().get_config_number_of_runs()
        solver_calls = gv.settings().get_config_solver_calls()
        cpu_time = gv.settings().get_config_cpu_time()
        wallclock_time = gv.settings().get_config_wallclock_time()
        cutoff_time = gv.settings().get_general_target_cutoff_time()
        cutoff_length = gv.settings().get_configurator_target_cutoff_length()
        sparkle_objectives =\
            gv.settings().get_general_sparkle_objectives()
        configurator = gv.settings().get_general_sparkle_configurator()
        configurator.scenario = configurator.scenario_class(
            solver, instance_set_train, number_of_runs, solver_calls, cpu_time,
            wallclock_time, cutoff_time, cutoff_length, sparkle_objectives)
        configurator.scenario._set_paths(configurator.output_path)
        ablation_scenario = None
        if args.flag_ablation:
            ablation_scenario = AblationScenario(
                solver, instance_set_train, instance_set_test,
                gv.settings().DEFAULT_ablation_output)

        # Create machine readable output
        # (removed unused locals `solver_name` / `instance_set_name`, which could
        # raise AttributeError when no latest configuration scenario exists)
        output = gv.settings().DEFAULT_configuration_output_analysis
        config_output = ConfigurationOutput(configurator.scenario.directory,
                                            solver, configurator,
                                            instance_set_train,
                                            instance_set_test,
                                            output)
        config_output.write_output()
        print("Machine readable output is placed at: ", config_output.output)

        if not only_json:
            sgrfch.generate_report_for_configuration(
                solver,
                gv.settings().get_general_sparkle_configurator(),
                Validator(gv.settings().DEFAULT_validation_output),
                gv.settings().DEFAULT_extractor_dir,
                gv.settings().DEFAULT_configuration_output_analysis,
                gv.settings().DEFAULT_latex_source,
                gv.settings().DEFAULT_latex_bib,
                instance_set_train,
                gv.settings().get_general_extractor_cutoff_time(),
                instance_set_test,
                ablation=ablation_scenario
            )

    # Write used settings to file
    gv.settings().write_used_settings()