Coverage for sparkle/CLI/run_portfolio_selector.py: 0%

79 statements  

coverage.py v7.6.1, created at 2024-09-27 09:10 +0000

#!/usr/bin/env python3
"""Sparkle command to execute a portfolio selector."""

import sys
import argparse
from pathlib import PurePath, Path

import runrunner as rrr
from runrunner import Runner

from sparkle.CLI.help import global_variables as gv
from sparkle.CLI.help import logging as sl
from sparkle.platform.settings_objects import Settings, SettingState
from sparkle.CLI.help import argparse_custom as ac
from sparkle.structures import PerformanceDataFrame, FeatureDataFrame
from sparkle.platform import CommandName, COMMAND_DEPENDENCIES
from sparkle.CLI.help.reporting_scenario import Scenario
from sparkle.CLI.initialise import check_for_initialise
from sparkle.CLI.help.nicknames import resolve_object_name
from sparkle.instance import instance_set
from sparkle.CLI.compute_features import compute_features


def parser_function() -> argparse.ArgumentParser:
    """Define the command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(*ac.InstancePathPositional.names,
                        **ac.InstancePathPositional.kwargs)
    parser.add_argument(*ac.RunOnArgument.names,
                        **ac.RunOnArgument.kwargs)
    parser.add_argument(*ac.SettingsFileArgument.names,
                        **ac.SettingsFileArgument.kwargs)
    parser.add_argument(*ac.SparkleObjectiveArgument.names,
                        **ac.SparkleObjectiveArgument.kwargs)

    return parser


if __name__ == "__main__":
    # Log command call
    sl.log_command(sys.argv)

    # Define command line arguments
    parser = parser_function()

    # Process command line arguments
    args = parser.parse_args()

    if args.run_on is not None:
        gv.settings().set_run_on(args.run_on.value, SettingState.CMD_LINE)
    run_on = gv.settings().get_run_on()

    data_set = resolve_object_name(
        args.instance_path,
        gv.file_storage_data_mapping[gv.instances_nickname_path],
        gv.settings().DEFAULT_instance_dir, instance_set)

    check_for_initialise(COMMAND_DEPENDENCIES[CommandName.RUN_PORTFOLIO_SELECTOR])

    if ac.set_by_user(args, "settings_file"):
        gv.settings().read_settings_ini(
            args.settings_file, SettingState.CMD_LINE
        )  # Do first, so other command line options can override settings from the file
    if ac.set_by_user(args, "objectives"):
        gv.settings().set_general_sparkle_objectives(args.objectives,
                                                     SettingState.CMD_LINE)

    # Compare current settings to latest.ini
    prev_settings = Settings(PurePath("Settings/latest.ini"))
    Settings.check_settings_changes(gv.settings(), prev_settings)
    objectives = gv.settings().get_general_sparkle_objectives()
    # NOTE: Is this still relevant?
    if not objectives[0].time:
        print("ERROR: The run_portfolio_selector command is not yet implemented"
              " for the QUALITY_ABSOLUTE performance measure!")
        sys.exit(-1)

    selector_scenario = gv.latest_scenario().get_selection_scenario_path()
    selector_path = selector_scenario / "portfolio_selector"
    if not selector_path.exists() or not selector_path.is_file():
        print("ERROR: The portfolio selector could not be found. Please make sure to "
              "first construct a portfolio selector.")
        sys.exit(-1)
    if len([p for p in gv.settings().DEFAULT_extractor_dir.iterdir()]) == 0:
        print("ERROR: No feature extractor added to Sparkle.")
        sys.exit(-1)

    # Compute the features of the incoming instances
    test_case_path = selector_scenario / data_set.name
    test_case_path.mkdir(exist_ok=True)
    feature_dataframe = FeatureDataFrame(gv.settings().DEFAULT_feature_data_path)
    feature_dataframe.remove_instances(feature_dataframe.instances)
    feature_dataframe.csv_filepath = test_case_path / "feature_data.csv"
    feature_dataframe.add_instances(data_set.instance_paths)
    feature_dataframe.save_csv()
    feature_run = compute_features(feature_dataframe, recompute=False, run_on=run_on)

    if run_on == Runner.LOCAL:
        feature_run.wait()

    # Prepare performance data
    performance_data = PerformanceDataFrame(
        test_case_path / "performance_data.csv",
        objectives=objectives)
    for instance_name in data_set.instance_names:
        if instance_name not in performance_data.instances:
            performance_data.add_instance(instance_name)
    performance_data.add_solver(selector_path.name)
    performance_data.save_csv()
    # Update latest scenario
    gv.latest_scenario().set_selection_test_case_directory(test_case_path)
    gv.latest_scenario().set_latest_scenario(Scenario.SELECTION)
    # Write used scenario to file
    gv.latest_scenario().write_scenario_ini()

    run_core = Path(__file__).parent.parent.resolve() /\
        "CLI" / "core" / "run_portfolio_selector_core.py"
    cmd_list = [f"python {run_core} "
                f"--selector {selector_path} "
                f"--feature-data-csv {feature_dataframe.csv_filepath} "
                f"--performance-data-csv {performance_data.csv_filepath} "
                f"--instance {instance_path} "
                f"--log-dir {sl.caller_log_dir}"
                for instance_path in data_set.instance_paths]

    selector_run = rrr.add_to_queue(
        runner=run_on,
        cmd=cmd_list,
        name=CommandName.RUN_PORTFOLIO_SELECTOR,
        base_dir=sl.caller_log_dir,
        stdout=None,
        dependencies=feature_run if run_on == Runner.SLURM else None,
        sbatch_options=gv.settings().get_slurm_extra_options(as_args=True))

    if run_on == Runner.LOCAL:
        selector_run.wait()
        for job in selector_run.jobs:
            print(job.stdout)
        print("Running Sparkle portfolio selector done!")
    else:
        print("Sparkle portfolio selector is running ...")

    # Write used settings to file
    gv.settings().write_used_settings()
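
Assuming the platform has been initialised, a portfolio selector has been constructed, and at least one feature extractor has been added, the script can be invoked directly on an instance set or a single instance file. A minimal invocation sketch follows; the positional instance path and the --run-on option are inferred from the parser defined above (the exact option names come from sparkle.CLI.help.argparse_custom), and Instances/PTN2 is a placeholder for an instance set known to the platform:

    # hypothetical example invocation, paths are placeholders
    python sparkle/CLI/run_portfolio_selector.py Instances/PTN2 --run-on local

With --run-on slurm, the selector jobs are submitted with a dependency on the feature-computation jobs; with local execution the script waits for each stage and prints the job output directly.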