Coverage for sparkle/solver/extractor.py: 57%
65 statements
coverage.py v7.8.0, created at 2025-04-03 10:42 +0000
1"""Methods regarding feature extractors."""
2from __future__ import annotations
3from pathlib import Path
4import ast
5import subprocess
6from sparkle.types import SparkleCallable, SolverStatus
7from sparkle.structures import FeatureDataFrame
8from sparkle.tools import RunSolver


class Extractor(SparkleCallable):
    """Extractor base class for extracting features from instances."""
    wrapper = "sparkle_extractor_wrapper.py"

    def __init__(self: Extractor,
                 directory: Path,
                 runsolver_exec: Path = None,
                 raw_output_directory: Path = None,
                 ) -> None:
        """Initialize the extractor.

        Args:
            directory: Directory of the extractor.
            runsolver_exec: Path to the runsolver executable.
                By default, runsolver in directory.
            raw_output_directory: Directory where the extractor will write its
                raw output. Defaults to directory / tmp.
        """
        super().__init__(directory, runsolver_exec, raw_output_directory)
        self._features = None
        self._feature_groups = None
        self._groupwise_computation = None

    @property
    def features(self: Extractor) -> list[tuple[str, str]]:
        """Determines the features of the extractor."""
        if self._features is None:
            extractor_process = subprocess.run(
                [self.directory / Extractor.wrapper, "-features"], capture_output=True)
            self._features = ast.literal_eval(extractor_process.stdout.decode())
        return self._features

    @property
    def feature_groups(self: Extractor) -> list[str]:
        """Returns the various feature groups the Extractor has."""
        if self._feature_groups is None:
            self._feature_groups = list(set([group for group, _ in self.features]))
        return self._feature_groups

    @property
    def output_dimension(self: Extractor) -> int:
        """The size of the output vector of the extractor."""
        return len(self.features)

    @property
    def groupwise_computation(self: Extractor) -> bool:
        """Determines if you can call the extractor per group for parallelisation."""
        if self._groupwise_computation is None:
            extractor_help = subprocess.run([self.directory / Extractor.wrapper, "-h"],
                                            capture_output=True)
            # Not the cleanest / most precise way to determine this
            self._groupwise_computation = (
                "-feature_group" in extractor_help.stdout.decode())
        return self._groupwise_computation

    def build_cmd(self: Extractor,
                  instance: Path | list[Path],
                  feature_group: str = None,
                  output_file: Path = None,
                  cutoff_time: int = None,
                  log_dir: Path = None,
                  ) -> list[str]:
        """Builds the command to run the extractor as a list of arguments.

        Args:
            instance: The instance (or list of instance files) to run on.
            feature_group: The optional feature group to run the extractor for.
            output_file: Optional file to write the output to.
            cutoff_time: CPU cutoff time in seconds. If not present,
                the extractor is run without runsolver.
            log_dir: Directory for the runsolver logs.

        Returns:
            The command separated per item in the list.
        """
        if not isinstance(instance, list):
            instance = [instance]
        cmd_list_extractor = [f"{self.directory / Extractor.wrapper}",
                              "-extractor_dir", f"{self.directory}/",
                              "-instance_file"] + [str(file) for file in instance]
        if feature_group is not None:
            cmd_list_extractor += ["-feature_group", feature_group]
        if output_file is not None:
            cmd_list_extractor += ["-output_file", str(output_file)]
        if cutoff_time is not None:
            # Extractor handles the output file itself
            return RunSolver.wrap_command(self.runsolver_exec,
                                          cmd_list_extractor,
                                          cutoff_time,
                                          log_dir,
                                          log_name_base=self.name,
                                          raw_results_file=False)
        return cmd_list_extractor

    def run(self: Extractor,
            instance: Path | list[Path],
            feature_group: str = None,
            output_file: Path = None,
            cutoff_time: int = None,
            log_dir: Path = None) -> list | None:
        """Runs the extractor on an instance.

        Args:
            instance: Path to the instance to run on.
            feature_group: The feature group to compute. Must be supported by the
                extractor to be used.
            output_file: Target output file. If None, the features are parsed from
                the extractor's standard output.
            cutoff_time: CPU cutoff time in seconds.
            log_dir: Directory to write logs. Defaults to self.raw_output_directory.

        Returns:
            The features, or None if an output file is used or the features
            cannot be found.
        """
        if log_dir is None:
            log_dir = self.raw_output_directory
        if feature_group is not None and not self.groupwise_computation:
            # This extractor cannot handle groups, compute all features
            feature_group = None
        cmd_extractor = self.build_cmd(
            instance, feature_group, output_file, cutoff_time, log_dir)
        extractor = subprocess.run(cmd_extractor, capture_output=True)
        if output_file is None:
            try:
                features = ast.literal_eval(
                    extractor.stdout.decode().split(maxsplit=1)[1])
                return features
            except Exception:
                return None
        return None

    def get_feature_vector(self: Extractor,
                           result: Path,
                           runsolver_values: Path = None) -> list[str]:
        """Extracts a feature vector from an output file.

        Args:
            result: Path to the raw output file of the extractor.
            runsolver_values: Path to the runsolver values file.

        Returns:
            A list of features. Vector of missing values upon failure.
        """
        if result.exists() and RunSolver.get_status(runsolver_values,
                                                    None) != SolverStatus.TIMEOUT:
            feature_values = ast.literal_eval(result.read_text())
            return [str(value) for _, _, value in feature_values]
        return [FeatureDataFrame.missing_value] * self.output_dimension
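
For reference, a minimal usage sketch of the class covered above (not part of the measured module). The extractor directory, instance file, output file names, and cutoff value are placeholder assumptions; the import path follows the file location reported in the header.

# Illustrative sketch only; assumes an extractor directory that ships the
# expected sparkle_extractor_wrapper.py.
from pathlib import Path

from sparkle.solver.extractor import Extractor

extractor = Extractor(Path("Extractors/MyExtractor"))  # placeholder directory

# Feature metadata is queried lazily through the wrapper script.
print(extractor.output_dimension)   # total number of features
print(extractor.feature_groups)     # distinct feature group names

instance = Path("Instances/example.cnf")  # placeholder instance
if extractor.groupwise_computation:
    # The wrapper advertises -feature_group, so a single group can be computed,
    # e.g. to parallelise feature computation across groups.
    features = extractor.run(instance,
                             feature_group=extractor.feature_groups[0],
                             cutoff_time=60,
                             log_dir=Path("Logs"))
else:
    features = extractor.run(instance, cutoff_time=60, log_dir=Path("Logs"))
print(features)  # parsed feature list, or None on failure

# If the extractor wrote its results to a file under runsolver, the raw files
# can be converted back into a feature vector (placeholder file names):
vector = extractor.get_feature_vector(Path("Output/example.features"),
                                      Path("Output/example.val"))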