Processor #

Bases: ABC

Abstract base class for model processors that handle preprocessing and postprocessing.

This class defines the interface for processing inputs and outputs for different model types. Subclasses must implement the abstract methods to provide model-specific processing logic.

Attributes:

- config (ModelConfig): Configuration object containing model-specific settings.
- training (bool): Flag indicating whether the processor is in training mode.

Source code in focoos/processor/base_processor.py
class Processor(ABC):
    """Abstract base class for model processors that handle preprocessing and postprocessing.

    This class defines the interface for processing inputs and outputs for different model types.
    Subclasses must implement the abstract methods to provide model-specific processing logic.

    Attributes:
        config (ModelConfig): Configuration object containing model-specific settings.
        training (bool): Flag indicating whether the processor is in training mode.
    """

    def __init__(self, config: ModelConfig):
        """Initialize the processor with the given configuration.

        Args:
            config (ModelConfig): Model configuration containing settings and parameters.
        """
        self.config = config
        self.training = False

    def eval(self):
        """Set the processor to evaluation mode.

        Returns:
            Processor: Self reference for method chaining.
        """
        self.training = False
        return self

    def train(self, training: bool = True):
        """Set the processor training mode.

        Args:
            training (bool, optional): Whether to set training mode. Defaults to True.

        Returns:
            Processor: Self reference for method chaining.
        """
        self.training = training
        return self

    @abstractmethod
    def preprocess(
        self,
        inputs: Union[torch.Tensor, np.ndarray, Image.Image, list[Image.Image], list[np.ndarray], list[torch.Tensor]],
        device: Union[Literal["cuda", "cpu"], torch.device] = "cuda",
        dtype: torch.dtype = torch.float32,
        image_size: Optional[int] = None,
    ) -> tuple[torch.Tensor, Any]:
        """Preprocess input data for model inference.

        This method must be implemented by subclasses to handle model-specific preprocessing
        such as resizing, normalization, and tensor formatting.

        Args:
            inputs: Input data which can be single or multiple images in various formats.
            device: Target device for tensor placement. Defaults to "cuda".
            dtype: Target data type for tensors. Defaults to torch.float32.
            image_size: Optional target image size for resizing. Defaults to None.

        Returns:
            tuple[torch.Tensor, Any]: Preprocessed tensor and any additional metadata.

        Raises:
            NotImplementedError: If not implemented by subclass.
        """
        raise NotImplementedError("Pre-processing is not implemented for this model.")

    @abstractmethod
    def postprocess(
        self,
        outputs: ModelOutput,
        inputs: Union[torch.Tensor, np.ndarray, Image.Image, list[Image.Image], list[np.ndarray], list[torch.Tensor]],
        class_names: list[str] = [],
        threshold: float = 0.5,
        **kwargs,
    ) -> list[FocoosDetections]:
        """Postprocess model outputs to generate final detection results.

        This method must be implemented by subclasses to convert raw model outputs
        into structured detection results.

        Args:
            outputs (ModelOutput): Raw outputs from the model.
            inputs: Original input data for reference during postprocessing.
            class_names (list[str], optional): List of class names for detection labels.
                Defaults to empty list.
            threshold (float, optional): Confidence threshold for detections. Defaults to 0.5.
            **kwargs: Additional keyword arguments for model-specific postprocessing.

        Returns:
            list[FocoosDetections]: List of detection results for each input.

        Raises:
            NotImplementedError: If not implemented by subclass.
        """
        raise NotImplementedError("Post-processing is not implemented for this model.")

    @abstractmethod
    def export_postprocess(
        self,
        output: Union[list[torch.Tensor], list[np.ndarray]],
        inputs: Union[
            torch.Tensor,
            np.ndarray,
            Image.Image,
            list[Image.Image],
            list[np.ndarray],
            list[torch.Tensor],
        ],
        threshold: Optional[float] = None,
        **kwargs,
    ) -> list[FocoosDetections]:
        """Postprocess outputs from exported model for inference.

        This method handles postprocessing for models that have been exported
        (e.g., to ONNX format) and may have different output formats.

        Args:
            output: Raw outputs from exported model as tensors or numpy arrays.
            inputs: Original input data for reference during postprocessing.
            threshold: Optional confidence threshold for detections. Defaults to None.
            **kwargs: Additional keyword arguments for export-specific postprocessing.

        Returns:
            list[FocoosDetections]: List of detection results for each input.

        Raises:
            NotImplementedError: If not implemented by subclass.
        """
        raise NotImplementedError("Export post-processing is not implemented for this model.")

    @abstractmethod
    def get_dynamic_axes(self) -> DynamicAxes:
        """Get dynamic axes configuration for model export.

        This method defines which axes can vary in size during model export,
        typically used for ONNX export with dynamic batch sizes or image dimensions.

        Returns:
            DynamicAxes: Configuration specifying which axes are dynamic.

        Raises:
            NotImplementedError: If not implemented by subclass.
        """
        raise NotImplementedError("Export axes are not implemented for this model.")

    @abstractmethod
    def eval_postprocess(self, outputs: ModelOutput, inputs: list[DatasetEntry]):
        """Postprocess model outputs for evaluation purposes.

        This method handles postprocessing specifically for model evaluation,
        which may differ from inference postprocessing.

        Args:
            outputs (ModelOutput): Raw outputs from the model.
            inputs (list[DatasetEntry]): List of dataset entries used as inputs.

        Raises:
            NotImplementedError: If not implemented by subclass.
        """
        raise NotImplementedError("Post-processing is not implemented for this model.")

    def get_image_sizes(
        self,
        inputs: Union[torch.Tensor, np.ndarray, Image.Image, list[Image.Image], list[np.ndarray], list[torch.Tensor]],
    ):
        """Extract image dimensions from various input formats.

        This utility method determines the height and width of images from different
        input types including tensors, numpy arrays, and PIL images.

        Args:
            inputs: Input data containing one or more images in various formats.

        Returns:
            list[tuple[int, int]]: List of (height, width) tuples for each image.

        Raises:
            ValueError: If input type is not supported.
        """
        image_sizes = []

        if isinstance(inputs, (torch.Tensor, np.ndarray)):
            # Single tensor/array input
            if isinstance(inputs, torch.Tensor):
                height, width = inputs.shape[-2:]
            else:  # numpy array
                height, width = inputs.shape[-3:-1] if inputs.ndim > 3 else inputs.shape[:2]
            image_sizes.append((height, width))
        elif isinstance(inputs, Image.Image):
            # Single PIL image
            width, height = inputs.size
            image_sizes.append((height, width))
        elif isinstance(inputs, list):
            # List of inputs
            for img in inputs:
                if isinstance(img, torch.Tensor):
                    height, width = img.shape[-2:]
                elif isinstance(img, np.ndarray):
                    height, width = img.shape[-3:-1] if img.ndim > 3 else img.shape[:2]
                elif isinstance(img, Image.Image):
                    width, height = img.size
                else:
                    raise ValueError(f"Unsupported input type in list: {type(img)}")
                image_sizes.append((height, width))
        else:
            raise ValueError(f"Unsupported input type: {type(inputs)}")
        return image_sizes

    def get_tensors(
        self,
        inputs: Union[torch.Tensor, np.ndarray, Image.Image, list[Image.Image], list[np.ndarray], list[torch.Tensor]],
    ) -> torch.Tensor:
        """Convert various input formats to a batched PyTorch tensor.

        This utility method standardizes different input types (PIL Images, numpy arrays,
        PyTorch tensors) into a single batched tensor with consistent format (BCHW).

        Args:
            inputs: Input data containing one or more images in various formats.

        Returns:
            torch.Tensor: Batched tensor with shape (B, C, H, W) where:
                - B is batch size
                - C is number of channels (typically 3 for RGB)
                - H is height
                - W is width

        Note:
            This method may break with different image sizes as it uses torch.cat
            which requires consistent dimensions across inputs.
        """
        if isinstance(inputs, (Image.Image, np.ndarray, torch.Tensor)):
            inputs_list = [inputs]
        else:
            inputs_list = inputs

        # Process each input based on its type
        processed_inputs = []
        for inp in inputs_list:
            # TODO: check for tensors that already have 4 dimensions.
            if isinstance(inp, Image.Image):
                inp = np.array(inp)
            if isinstance(inp, np.ndarray):
                inp = torch.from_numpy(inp)

            # Ensure input has correct shape and type
            if inp.dim() == 3:  # Add batch dimension if missing
                inp = inp.unsqueeze(0)
            if inp.shape[1] != 3 and inp.shape[-1] == 3:  # Convert HWC to CHW if needed
                inp = inp.permute(0, 3, 1, 2)

            processed_inputs.append(inp)

        # Stack all inputs into a single batch tensor
        # TODO: this will break with different image sizes
        images_torch = torch.cat(processed_inputs, dim=0)

        return images_torch
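
To make the contract concrete, here is a minimal sketch of a subclass. The class name IdentityProcessor and its trivial logic are hypothetical, for illustration only; real focoos processors implement model-specific resizing, normalization, and output decoding in these methods.

import torch
from focoos.processor.base_processor import Processor

class IdentityProcessor(Processor):
    """Hypothetical subclass: batches inputs and stubs everything else."""

    def preprocess(self, inputs, device="cuda", dtype=torch.float32, image_size=None):
        # Reuse the base-class utilities: batch the inputs, then move/cast them.
        images = self.get_tensors(inputs).to(device=device, dtype=dtype)
        sizes = self.get_image_sizes(inputs)  # kept as metadata for postprocessing
        return images, sizes

    def postprocess(self, outputs, inputs, class_names=[], threshold=0.5, **kwargs):
        # A real subclass decodes `outputs` into FocoosDetections here.
        raise NotImplementedError

    def export_postprocess(self, output, inputs, threshold=None, **kwargs):
        raise NotImplementedError

    def get_dynamic_axes(self):
        raise NotImplementedError

    def eval_postprocess(self, outputs, inputs):
        raise NotImplementedError

Overriding all five abstract methods, even as stubs, is what makes the subclass instantiable; leaving any of them undefined keeps the class abstract.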

__init__(config) #

Initialize the processor with the given configuration.

Parameters:

- config (ModelConfig): Model configuration containing settings and parameters. Required.
Source code in focoos/processor/base_processor.py
def __init__(self, config: ModelConfig):
    """Initialize the processor with the given configuration.

    Args:
        config (ModelConfig): Model configuration containing settings and parameters.
    """
    self.config = config
    self.training = False

eval() #

Set the processor to evaluation mode.

Returns:

- Processor: Self reference for method chaining.

Source code in focoos/processor/base_processor.py
def eval(self):
    """Set the processor to evaluation mode.

    Returns:
        Processor: Self reference for method chaining.
    """
    self.training = False
    return self

eval_postprocess(outputs, inputs) abstractmethod #

Postprocess model outputs for evaluation purposes.

This method handles postprocessing specifically for model evaluation, which may differ from inference postprocessing.

Parameters:

- outputs (ModelOutput): Raw outputs from the model. Required.
- inputs (list[DatasetEntry]): List of dataset entries used as inputs. Required.

Raises:

- NotImplementedError: If not implemented by subclass.

Source code in focoos/processor/base_processor.py
@abstractmethod
def eval_postprocess(self, outputs: ModelOutput, inputs: list[DatasetEntry]):
    """Postprocess model outputs for evaluation purposes.

    This method handles postprocessing specifically for model evaluation,
    which may differ from inference postprocessing.

    Args:
        outputs (ModelOutput): Raw outputs from the model.
        inputs (list[DatasetEntry]): List of dataset entries used as inputs.

    Raises:
        NotImplementedError: If not implemented by subclass.
    """
    raise NotImplementedError("Post-processing is not implemented for this model.")

export_postprocess(output, inputs, threshold=None, **kwargs) abstractmethod #

Postprocess outputs from an exported model for inference.

This method handles postprocessing for models that have been exported (e.g., to ONNX format) and may have different output formats.

Parameters:

- output (Union[list[Tensor], list[ndarray]]): Raw outputs from the exported model as tensors or numpy arrays. Required.
- inputs (Union[Tensor, ndarray, Image, list[Image], list[ndarray], list[Tensor]]): Original input data for reference during postprocessing. Required.
- threshold (Optional[float]): Optional confidence threshold for detections. Defaults to None.
- **kwargs: Additional keyword arguments for export-specific postprocessing.

Returns:

- list[FocoosDetections]: List of detection results for each input.

Raises:

- NotImplementedError: If not implemented by subclass.

Source code in focoos/processor/base_processor.py
@abstractmethod
def export_postprocess(
    self,
    output: Union[list[torch.Tensor], list[np.ndarray]],
    inputs: Union[
        torch.Tensor,
        np.ndarray,
        Image.Image,
        list[Image.Image],
        list[np.ndarray],
        list[torch.Tensor],
    ],
    threshold: Optional[float] = None,
    **kwargs,
) -> list[FocoosDetections]:
    """Postprocess outputs from exported model for inference.

    This method handles postprocessing for models that have been exported
    (e.g., to ONNX format) and may have different output formats.

    Args:
        output: Raw outputs from exported model as tensors or numpy arrays.
        inputs: Original input data for reference during postprocessing.
        threshold: Optional confidence threshold for detections. Defaults to None.
        **kwargs: Additional keyword arguments for export-specific postprocessing.

    Returns:
        list[FocoosDetections]: List of detection results for each input.

    Raises:
        NotImplementedError: If not implemented by subclass.
    """
    raise NotImplementedError("Export post-processing is not implemented for this model.")

get_dynamic_axes() abstractmethod #

Get dynamic axes configuration for model export.

This method defines which axes can vary in size during model export, typically used for ONNX export with dynamic batch sizes or image dimensions.

Returns:

- DynamicAxes: Configuration specifying which axes are dynamic.

Raises:

- NotImplementedError: If not implemented by subclass.

Source code in focoos/processor/base_processor.py
@abstractmethod
def get_dynamic_axes(self) -> DynamicAxes:
    """Get dynamic axes configuration for model export.

    This method defines which axes can vary in size during model export,
    typically used for ONNX export with dynamic batch sizes or image dimensions.

    Returns:
        DynamicAxes: Configuration specifying which axes are dynamic.

    Raises:
        NotImplementedError: If not implemented by subclass.
    """
    raise NotImplementedError("Export axes are not implemented for this model.")

get_image_sizes(inputs) #

Extract image dimensions from various input formats.

This utility method determines the height and width of images from different input types including tensors, numpy arrays, and PIL images.

Parameters:

- inputs (Union[Tensor, ndarray, Image, list[Image], list[ndarray], list[Tensor]]): Input data containing one or more images in various formats. Required.

Returns:

- list[tuple[int, int]]: List of (height, width) tuples for each image.

Raises:

- ValueError: If input type is not supported.

Source code in focoos/processor/base_processor.py
def get_image_sizes(
    self,
    inputs: Union[torch.Tensor, np.ndarray, Image.Image, list[Image.Image], list[np.ndarray], list[torch.Tensor]],
):
    """Extract image dimensions from various input formats.

    This utility method determines the height and width of images from different
    input types including tensors, numpy arrays, and PIL images.

    Args:
        inputs: Input data containing one or more images in various formats.

    Returns:
        list[tuple[int, int]]: List of (height, width) tuples for each image.

    Raises:
        ValueError: If input type is not supported.
    """
    image_sizes = []

    if isinstance(inputs, (torch.Tensor, np.ndarray)):
        # Single tensor/array input
        if isinstance(inputs, torch.Tensor):
            height, width = inputs.shape[-2:]
        else:  # numpy array
            height, width = inputs.shape[-3:-1] if inputs.ndim > 3 else inputs.shape[:2]
        image_sizes.append((height, width))
    elif isinstance(inputs, Image.Image):
        # Single PIL image
        width, height = inputs.size
        image_sizes.append((height, width))
    elif isinstance(inputs, list):
        # List of inputs
        for img in inputs:
            if isinstance(img, torch.Tensor):
                height, width = img.shape[-2:]
            elif isinstance(img, np.ndarray):
                height, width = img.shape[-3:-1] if img.ndim > 3 else img.shape[:2]
            elif isinstance(img, Image.Image):
                width, height = img.size
            else:
                raise ValueError(f"Unsupported input type in list: {type(img)}")
            image_sizes.append((height, width))
    else:
        raise ValueError(f"Unsupported input type: {type(inputs)}")
    return image_sizes
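
A quick usage sketch, assuming processor is an instance of a concrete subclass (such as the hypothetical IdentityProcessor above): mixed input formats all yield (height, width) tuples, even though PIL reports size as (width, height).

from PIL import Image
import numpy as np
import torch

pil_img = Image.new("RGB", (640, 480))            # PIL size is (width, height)
np_img = np.zeros((480, 640, 3), dtype=np.uint8)  # HWC array
t_img = torch.zeros(3, 480, 640)                  # CHW tensor

sizes = processor.get_image_sizes([pil_img, np_img, t_img])
# sizes == [(480, 640), (480, 640), (480, 640)]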

get_tensors(inputs) #

Convert various input formats to a batched PyTorch tensor.

This utility method standardizes different input types (PIL Images, numpy arrays, PyTorch tensors) into a single batched tensor with consistent format (BCHW).

Parameters:

- inputs (Union[Tensor, ndarray, Image, list[Image], list[ndarray], list[Tensor]]): Input data containing one or more images in various formats. Required.

Returns:

- torch.Tensor: Batched tensor with shape (B, C, H, W), where B is the batch size, C is the number of channels (typically 3 for RGB), H is the height, and W is the width.

Note:

This method may break with different image sizes, as it uses torch.cat, which requires consistent dimensions across inputs.

Source code in focoos/processor/base_processor.py
def get_tensors(
    self,
    inputs: Union[torch.Tensor, np.ndarray, Image.Image, list[Image.Image], list[np.ndarray], list[torch.Tensor]],
) -> torch.Tensor:
    """Convert various input formats to a batched PyTorch tensor.

    This utility method standardizes different input types (PIL Images, numpy arrays,
    PyTorch tensors) into a single batched tensor with consistent format (BCHW).

    Args:
        inputs: Input data containing one or more images in various formats.

    Returns:
        torch.Tensor: Batched tensor with shape (B, C, H, W) where:
            - B is batch size
            - C is number of channels (typically 3 for RGB)
            - H is height
            - W is width

    Note:
        This method may break with different image sizes as it uses torch.cat
        which requires consistent dimensions across inputs.
    """
    if isinstance(inputs, (Image.Image, np.ndarray, torch.Tensor)):
        inputs_list = [inputs]
    else:
        inputs_list = inputs

    # Process each input based on its type
    processed_inputs = []
    for inp in inputs_list:
        # TODO: check for tensors that already have 4 dimensions.
        if isinstance(inp, Image.Image):
            inp = np.array(inp)
        if isinstance(inp, np.ndarray):
            inp = torch.from_numpy(inp)

        # Ensure input has correct shape and type
        if inp.dim() == 3:  # Add batch dimension if missing
            inp = inp.unsqueeze(0)
        if inp.shape[1] != 3 and inp.shape[-1] == 3:  # Convert HWC to CHW if needed
            inp = inp.permute(0, 3, 1, 2)

        processed_inputs.append(inp)

    # Stack all inputs into a single batch tensor
    # TODO: this will break with different image sizes
    images_torch = torch.cat(processed_inputs, dim=0)

    return images_torch
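
A short sketch (again assuming processor is a concrete subclass instance): an HWC numpy image and a CHW tensor of the same size and dtype are normalized into one BCHW batch.

import numpy as np
import torch

np_img = np.zeros((480, 640, 3), dtype=np.uint8)     # HWC numpy image
t_img = torch.zeros(3, 480, 640, dtype=torch.uint8)  # CHW tensor, same size

batch = processor.get_tensors([np_img, t_img])
print(batch.shape)  # torch.Size([2, 3, 480, 640])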

postprocess(outputs, inputs, class_names=[], threshold=0.5, **kwargs) abstractmethod #

Postprocess model outputs to generate final detection results.

This method must be implemented by subclasses to convert raw model outputs into structured detection results.

Parameters:

- outputs (ModelOutput): Raw outputs from the model. Required.
- inputs (Union[Tensor, ndarray, Image, list[Image], list[ndarray], list[Tensor]]): Original input data for reference during postprocessing. Required.
- class_names (list[str]): List of class names for detection labels. Defaults to an empty list.
- threshold (float): Confidence threshold for detections. Defaults to 0.5.
- **kwargs: Additional keyword arguments for model-specific postprocessing.

Returns:

- list[FocoosDetections]: List of detection results for each input.

Raises:

- NotImplementedError: If not implemented by subclass.

Source code in focoos/processor/base_processor.py
@abstractmethod
def postprocess(
    self,
    outputs: ModelOutput,
    inputs: Union[torch.Tensor, np.ndarray, Image.Image, list[Image.Image], list[np.ndarray], list[torch.Tensor]],
    class_names: list[str] = [],
    threshold: float = 0.5,
    **kwargs,
) -> list[FocoosDetections]:
    """Postprocess model outputs to generate final detection results.

    This method must be implemented by subclasses to convert raw model outputs
    into structured detection results.

    Args:
        outputs (ModelOutput): Raw outputs from the model.
        inputs: Original input data for reference during postprocessing.
        class_names (list[str], optional): List of class names for detection labels.
            Defaults to empty list.
        threshold (float, optional): Confidence threshold for detections. Defaults to 0.5.
        **kwargs: Additional keyword arguments for model-specific postprocessing.

    Returns:
        list[FocoosDetections]: List of detection results for each input.

    Raises:
        NotImplementedError: If not implemented by subclass.
    """
    raise NotImplementedError("Post-processing is not implemented for this model.")

preprocess(inputs, device='cuda', dtype=torch.float32, image_size=None) abstractmethod #

Preprocess input data for model inference.

This method must be implemented by subclasses to handle model-specific preprocessing such as resizing, normalization, and tensor formatting.

Parameters:

- inputs (Union[Tensor, ndarray, Image, list[Image], list[ndarray], list[Tensor]]): Input data, which can be one or more images in various formats. Required.
- device (Union[Literal["cuda", "cpu"], torch.device]): Target device for tensor placement. Defaults to "cuda".
- dtype (torch.dtype): Target data type for tensors. Defaults to torch.float32.
- image_size (Optional[int]): Optional target image size for resizing. Defaults to None.

Returns:

- tuple[torch.Tensor, Any]: Preprocessed tensor and any additional metadata.

Raises:

- NotImplementedError: If not implemented by subclass.

Source code in focoos/processor/base_processor.py
@abstractmethod
def preprocess(
    self,
    inputs: Union[torch.Tensor, np.ndarray, Image.Image, list[Image.Image], list[np.ndarray], list[torch.Tensor]],
    device: Union[Literal["cuda", "cpu"], torch.device] = "cuda",
    dtype: torch.dtype = torch.float32,
    image_size: Optional[int] = None,
) -> tuple[torch.Tensor, Any]:
    """Preprocess input data for model inference.

    This method must be implemented by subclasses to handle model-specific preprocessing
    such as resizing, normalization, and tensor formatting.

    Args:
        inputs: Input data which can be single or multiple images in various formats.
        device: Target device for tensor placement. Defaults to "cuda".
        dtype: Target data type for tensors. Defaults to torch.float32.
        image_size: Optional target image size for resizing. Defaults to None.

    Returns:
        tuple[torch.Tensor, Any]: Preprocessed tensor and any additional metadata.

    Raises:
        NotImplementedError: If not implemented by subclass.
    """
    raise NotImplementedError("Pre-processing is not implemented for this model.")

train(training=True) #

Set the processor training mode.

Parameters:

- training (bool): Whether to set training mode. Defaults to True.

Returns:

- Processor: Self reference for method chaining.

Source code in focoos/processor/base_processor.py
def train(self, training: bool = True):
    """Set the processor training mode.

    Args:
        training (bool, optional): Whether to set training mode. Defaults to True.

    Returns:
        Processor: Self reference for method chaining.
    """
    self.training = training
    return self
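
Since both train() and eval() return self, mode switches can be chained; a small sketch, with processor being any concrete subclass instance:

processor.train()                  # training mode
assert processor.training is True

processor.eval()                   # evaluation mode
assert processor.training is False

processor.train(False).eval()      # returns self, so calls can be chained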

ProcessorManager #

Automatic processor manager with lazy loading

Source code in focoos/processor/processor_manager.py
class ProcessorManager:
    """Automatic processor manager with lazy loading"""

    _PROCESSOR_MAPPING: Dict[str, Callable[[], Type[Processor]]] = {}

    @classmethod
    def register_processor(cls, model_family: ModelFamily, processor_loader: Callable[[], Type[Processor]]):
        """
        Register a loader for a specific processor
        """
        cls._PROCESSOR_MAPPING[model_family.value] = processor_loader

    @classmethod
    def _ensure_family_registered(cls, model_family: ModelFamily):
        """Ensure the processor family is registered, importing if needed."""
        if model_family.value not in cls._PROCESSOR_MAPPING:
            family_module = importlib.import_module(f"focoos.models.{model_family.value}")
            for attr_name in dir(family_module):
                if attr_name.startswith("_register"):
                    register_func = getattr(family_module, attr_name)
                    if callable(register_func):
                        register_func()

    @classmethod
    def get_processor(cls, model_family: ModelFamily, model_config: ModelConfig) -> Processor:
        """
        Get a processor instance for the given model family.
        """
        cls._ensure_family_registered(model_family)
        if model_family.value not in cls._PROCESSOR_MAPPING:
            raise ValueError(f"Processor for {model_family} not supported")
        processor_class = cls._PROCESSOR_MAPPING[model_family.value]()
        return processor_class(config=model_config)

get_processor(model_family, model_config) classmethod #

Get a processor instance for the given model family.

Source code in focoos/processor/processor_manager.py
@classmethod
def get_processor(cls, model_family: ModelFamily, model_config: ModelConfig) -> Processor:
    """
    Get a processor instance for the given model family.
    """
    cls._ensure_family_registered(model_family)
    if model_family.value not in cls._PROCESSOR_MAPPING:
        raise ValueError(f"Processor for {model_family} not supported")
    processor_class = cls._PROCESSOR_MAPPING[model_family.value]()
    return processor_class(config=model_config)

register_processor(model_family, processor_loader) classmethod #

Register a loader for a specific processor

Source code in focoos/processor/processor_manager.py
@classmethod
def register_processor(cls, model_family: ModelFamily, processor_loader: Callable[[], Type[Processor]]):
    """
    Register a loader for a specific processor
    """
    cls._PROCESSOR_MAPPING[model_family.value] = processor_loader
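
A usage sketch. Here model_family, model_config, and MyProcessor are placeholders: get_processor lazily imports focoos.models.<family> and runs its _register* hooks on first use, so explicit registration is only needed for custom processors.

from focoos.processor.processor_manager import ProcessorManager

# Built-in families are registered lazily on first request:
processor = ProcessorManager.get_processor(model_family, model_config)

# Custom processors use a zero-argument loader, so the class is only
# imported when it is actually needed:
ProcessorManager.register_processor(model_family, lambda: MyProcessor)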