Coverage for sparkle/solver/extractor.py: 32%
65 statements
« prev ^ index » next coverage.py v7.6.1, created at 2024-09-27 09:10 +0000
1"""Methods regarding feature extractors."""
2from __future__ import annotations
3from pathlib import Path
4import ast
5import subprocess
6from sparkle.types import SparkleCallable, SolverStatus
7from sparkle.structures import FeatureDataFrame
8from sparkle.tools.runsolver_parsing import get_status
class Extractor(SparkleCallable):
    """Extractor base class for extracting features from instances."""
    # Entry-point script expected inside every extractor directory.
    wrapper = "sparkle_extractor_wrapper.py"

    def __init__(self: Extractor,
                 directory: Path,
                 runsolver_exec: Path = None,
                 raw_output_directory: Path = None,
                 ) -> None:
        """Initialize the extractor.

        Args:
            directory: Directory of the extractor.
            runsolver_exec: Path to the runsolver executable.
                By default, runsolver in directory.
            raw_output_directory: Directory where the extractor will write its
                raw output. Defaults to directory / tmp
        """
        super().__init__(directory, runsolver_exec, raw_output_directory)
        # Lazily-computed caches backing the properties below.
        self._features = None
        self._feature_groups = None
        self._output_dimension = None
        self._groupwise_computation = None

    @property
    def features(self: Extractor) -> list[tuple[str, str]]:
        """Determines the features of the extractor.

        Returns:
            List of (feature_group, feature_name) tuples as reported by the
            extractor wrapper's ``-features`` flag.
        """
        if self._features is None:
            # Ask the wrapper to print its feature list; the output is a
            # Python literal parsed safely with ast.literal_eval.
            extractor_process = subprocess.run(
                [self.directory / Extractor.wrapper, "-features"],
                capture_output=True)
            self._features = ast.literal_eval(extractor_process.stdout.decode())
        return self._features

    @property
    def feature_groups(self: Extractor) -> list[str]:
        """Returns the various feature groups the Extractor has."""
        if self._feature_groups is None:
            # dict.fromkeys deduplicates while keeping a deterministic
            # (first-seen) order, unlike the nondeterministic list(set(...)).
            self._feature_groups = list(dict.fromkeys(
                group for group, _ in self.features))
        return self._feature_groups

    @property
    def output_dimension(self: Extractor) -> int:
        """The size of the output vector of the extractor."""
        if self._output_dimension is None:
            # Cache the value; the attribute existed before but was never used.
            self._output_dimension = len(self.features)
        return self._output_dimension

    @property
    def groupwise_computation(self: Extractor) -> bool:
        """Determines if you can call the extractor per group for parallelisation."""
        if self._groupwise_computation is None:
            extractor_help = subprocess.run(
                [self.directory / Extractor.wrapper, "-h"],
                capture_output=True)
            # Not the cleanest / most precise way to determine this:
            # we assume group support iff the help text mentions the flag.
            self._groupwise_computation = (
                "-feature_group" in extractor_help.stdout.decode())
        return self._groupwise_computation

    def build_cmd(self: Extractor,
                  instance: Path | list[Path],
                  feature_group: str = None,
                  output_file: Path = None,
                  runsolver_args: list[str | Path] = None,
                  ) -> list[str]:
        """Builds a command line string separated by space.

        Args:
            instance: The instance to run on
            feature_group: The optional feature group to run the extractor for.
            output_file: Optional file to write the output to.
            runsolver_args: The arguments for runsolver. If not present,
                will run the extractor without runsolver.

        Returns:
            The command separated per item in the list.
        """
        cmd_list_extractor = []
        if not isinstance(instance, list):
            instance = [instance]
        if runsolver_args is not None:
            # Ensure stringification of runsolver configuration is done correctly
            cmd_list_extractor += [str(self.runsolver_exec.absolute())]
            cmd_list_extractor += [str(runsolver_config)
                                   for runsolver_config in runsolver_args]
        cmd_list_extractor += [f"{self.directory / Extractor.wrapper}",
                               "-extractor_dir", f"{self.directory}/",
                               "-instance_file"] + [str(file) for file in instance]
        if feature_group is not None:
            cmd_list_extractor += ["-feature_group", feature_group]
        if output_file is not None:
            cmd_list_extractor += ["-output_file", str(output_file)]
        return cmd_list_extractor

    def run(self: Extractor,
            instance: Path | list[Path],
            feature_group: str = None,
            output_file: Path = None,
            runsolver_args: list[str | Path] = None) -> list | None:
        """Runs an extractor job with Runrunner.

        Args:
            instance: Path to the instance to run on
            feature_group: The feature group to compute. Must be supported by the
                extractor to use.
            output_file: Target output. If None, the extractor's stdout is
                parsed and returned.
            runsolver_args: List of run solver args, each word a separate item.

        Returns:
            The features or None if an output file is used, or features can not
            be found.
        """
        if feature_group is not None and not self.groupwise_computation:
            # This extractor cannot handle groups, compute all features
            feature_group = None
        cmd_extractor = self.build_cmd(
            instance, feature_group, output_file, runsolver_args)
        extractor = subprocess.run(cmd_extractor, capture_output=True)
        if output_file is None:
            try:
                return ast.literal_eval(extractor.stdout.decode())
            except (ValueError, SyntaxError):
                # Only the expected parse failures (bad/empty output) are
                # swallowed; unrelated bugs propagate instead of being hidden.
                return None
        return None

    def get_feature_vector(self: Extractor,
                           result: Path,
                           runsolver_values: Path = None) -> list[str]:
        """Extracts feature vector from an output file.

        Args:
            result: The raw output of the extractor
            runsolver_values: The output of runsolver.

        Returns:
            A list of features. Vector of missing values upon failure.
        """
        # NOTE(review): get_status is assumed to accept a None path and to
        # report TIMEOUT on runsolver timeout — confirm against its definition.
        if result.exists() and get_status(runsolver_values,
                                          None) != SolverStatus.TIMEOUT:
            # The result file holds a literal list of (group, name, value).
            feature_values = ast.literal_eval(result.read_text())
            return [str(value) for _, _, value in feature_values]
        return [FeatureDataFrame.missing_value] * self.output_dimension