ports

ApiKey #

Bases: PydanticBase

API key for authentication.

Source code in focoos/ports.py
class ApiKey(PydanticBase):
    """API key for authentication."""

    key: str  # type: ignore

ArtifactName #

Bases: str, Enum

Model artifact type.

Source code in focoos/ports.py
class ArtifactName(str, Enum):
    """Model artifact type."""

    WEIGHTS = "model_final.pth"
    ONNX = "model.onnx"
    PT = "model.pt"
    INFO = "model_info.json"
    METRICS = "metrics.json"
    LOGS = "log.txt"
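
A minimal sketch (assuming `ArtifactName` is importable from `focoos.ports`, as the source path above suggests) of how the enum values can be used to build artifact paths; the output directory is hypothetical:

```python
import os

from focoos.ports import ArtifactName  # assumed import path

# Each member's value is the conventional file name of that artifact.
run_dir = "./runs/my-model"  # hypothetical output directory
weights_path = os.path.join(run_dir, ArtifactName.WEIGHTS.value)  # ./runs/my-model/model_final.pth
metrics_path = os.path.join(run_dir, ArtifactName.METRICS.value)  # ./runs/my-model/metrics.json
print(weights_path, metrics_path)
```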

DatasetLayout #

Bases: str, Enum

Supported dataset formats in Focoos.

Values
  • ROBOFLOW_COCO: (Detection,Instance Segmentation)
  • ROBOFLOW_SEG: (Semantic Segmentation)

Example:

- ROBOFLOW_COCO: (Detection,Instance Segmentation) Roboflow COCO format:
    root/
        train/
            - _annotations.coco.json
            - img_1.jpg
            - img_2.jpg
        valid/
            - _annotations.coco.json
            - img_3.jpg
            - img_4.jpg
- ROBOFLOW_SEG: (Semantic Segmentation) Roboflow segmentation format:
    root/
        train/
            - _classes.csv (comma separated csv)
            - img_1.jpg
            - img_2.jpg
        valid/
            - _classes.csv (comma separated csv)
            - img_3_mask.png
            - img_4_mask.png

Source code in focoos/ports.py
class DatasetLayout(str, Enum):
    """Supported dataset formats in Focoos.

    Values:
        - ROBOFLOW_COCO: (Detection,Instance Segmentation)
        - ROBOFLOW_SEG: (Semantic Segmentation)
    Example:
        ```python
        - ROBOFLOW_COCO: (Detection,Instance Segmentation) Roboflow COCO format:
            root/
                train/
                    - _annotations.coco.json
                    - img_1.jpg
                    - img_2.jpg
                valid/
                    - _annotations.coco.json
                    - img_3.jpg
                    - img_4.jpg
        - ROBOFLOW_SEG: (Semantic Segmentation) Roboflow segmentation format:
            root/
                train/
                    - _classes.csv (comma separated csv)
                    - img_1.jpg
                    - img_2.jpg
                valid/
                    - _classes.csv (comma separated csv)
                    - img_3_mask.png
                    - img_4_mask.png
        ```
    """

    ROBOFLOW_COCO = "roboflow_coco"
    ROBOFLOW_SEG = "roboflow_seg"
    CATALOG = "catalog"
    CLS_FOLDER = "cls_folder"
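
As a quick illustration (a sketch, assuming the enum is importable from `focoos.ports`), the layout is chosen to match how the dataset folder is organized:

```python
from focoos.ports import DatasetLayout  # assumed import path

# Detection / instance segmentation datasets exported from Roboflow use COCO JSON annotations.
layout = DatasetLayout.ROBOFLOW_COCO
print(layout.value)               # "roboflow_coco" (str-backed enum, so it serializes as a plain string)
print(layout == "roboflow_coco")  # True, because DatasetLayout subclasses str
```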

DatasetMetadata dataclass #

Dataclass for storing dataset metadata.

Source code in focoos/ports.py
@dataclass
class DatasetMetadata:
    """Dataclass for storing dataset metadata."""

    num_classes: int
    task: Task
    count: Optional[int] = None
    name: Optional[str] = None
    image_root: Optional[str] = None
    thing_classes: Optional[List[str]] = None
    _thing_colors: Optional[List[Tuple]] = None
    stuff_classes: Optional[List[str]] = None
    _stuff_colors: Optional[List[Tuple]] = None
    sem_seg_root: Optional[str] = None
    panoptic_root: Optional[str] = None
    ignore_label: Optional[int] = None
    thing_dataset_id_to_contiguous_id: Optional[dict] = None
    stuff_dataset_id_to_contiguous_id: Optional[dict] = None
    json_file: Optional[str] = None
    keypoints: Optional[List[str]] = None
    keypoints_skeleton: Optional[List[Tuple[int, int]]] = None

    @property
    def classes(self) -> List[str]:  #!TODO: check if this is correct
        if self.task == Task.DETECTION or self.task == Task.INSTANCE_SEGMENTATION:
            assert self.thing_classes is not None, "thing_classes is required for detection and instance segmentation"
            return self.thing_classes
        if self.task == Task.SEMSEG:
            # fixme: not sure for panoptic
            assert self.stuff_classes is not None, "stuff_classes is required for semantic segmentation"
            return self.stuff_classes
        if self.task == Task.CLASSIFICATION:
            assert self.thing_classes is not None, "thing_classes is required for classification"
            return self.thing_classes
        if self.task == Task.KEYPOINT:
            assert self.thing_classes is not None, "thing_classes is required for keypoint"
            return self.thing_classes

        raise ValueError(f"Task {self.task} not supported")

    @property
    def stuff_colors(self):
        if self._stuff_colors is not None:
            return self._stuff_colors
        if self.stuff_classes is None:
            return []
        return [((i * 64) % 255, (i * 128) % 255, (i * 32) % 255) for i in range(len(self.stuff_classes))]

    @stuff_colors.setter
    def stuff_colors(self, colors):
        self._stuff_colors = colors

    @property
    def thing_colors(self):
        if self._thing_colors is not None:
            return self._thing_colors
        if self.thing_classes is None:
            return []
        return [((i * 64) % 255, (i * 128) % 255, (i * 32) % 255) for i in range(1, len(self.thing_classes) + 1)]

    @thing_colors.setter
    def thing_colors(self, colors):
        self._thing_colors = colors

    @classmethod
    def from_dict(cls, metadata: dict):
        """Create DatasetMetadata from a dictionary.

        Args:
            metadata (dict): Dictionary containing metadata.

        Returns:
            DatasetMetadata: Instance of DatasetMetadata.
        """
        metadata = {k: v for k, v in metadata.items() if k in inspect.signature(cls).parameters}
        metadata["task"] = Task(metadata["task"])
        return cls(**metadata)

    @classmethod
    def from_json(cls, path: str):
        """Create DatasetMetadata from a json file.

        Args:
            path (str): Path to json file.

        Returns:
            DatasetMetadata: Instance of DatasetMetadata.
        """
        with open(path, encoding="utf-8") as f:
            metadata = json.load(f)
        metadata["task"] = Task(metadata["task"])
        return cls(**metadata)

    def dump_json(self, path: str):
        """Dump DatasetMetadata to a json file.

        Args:
            path (str): Path to json file.
        """
        with open(path, "w", encoding="utf-8") as f:
            json.dump(asdict(self), f, ensure_ascii=False, indent=4)

    def get(self, attr, default=None):
        if hasattr(self, attr):
            return getattr(self, attr)
        else:
            return default
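
A minimal usage sketch (assuming `DatasetMetadata` and `Task` are importable from `focoos.ports`, and that `Task.DETECTION` serializes as a string like the other str-backed enums in this module):

```python
from focoos.ports import DatasetMetadata, Task  # assumed import path

# Metadata for a hypothetical 2-class detection dataset.
meta = DatasetMetadata(
    num_classes=2,
    task=Task.DETECTION,
    name="pets",
    thing_classes=["cat", "dog"],
)

print(meta.classes)       # ["cat", "dog"]  (thing_classes is used for detection)
print(meta.thing_colors)  # auto-generated RGB tuples, since no explicit colors were set

# Round-trip through JSON.
meta.dump_json("pets_metadata.json")
restored = DatasetMetadata.from_json("pets_metadata.json")
```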

dump_json(path) #

Dump DatasetMetadata to a json file.

Parameters:

  • path (str): Path to json file. Required.
Source code in focoos/ports.py
def dump_json(self, path: str):
    """Dump DatasetMetadata to a json file.

    Args:
        path (str): Path to json file.
    """
    with open(path, "w", encoding="utf-8") as f:
        json.dump(asdict(self), f, ensure_ascii=False, indent=4)

from_dict(metadata) classmethod #

Create DatasetMetadata from a dictionary.

Parameters:

  • metadata (dict): Dictionary containing metadata. Required.

Returns:

  • DatasetMetadata: Instance of DatasetMetadata.

Source code in focoos/ports.py
@classmethod
def from_dict(cls, metadata: dict):
    """Create DatasetMetadata from a dictionary.

    Args:
        metadata (dict): Dictionary containing metadata.

    Returns:
        DatasetMetadata: Instance of DatasetMetadata.
    """
    metadata = {k: v for k, v in metadata.items() if k in inspect.signature(cls).parameters}
    metadata["task"] = Task(metadata["task"])
    return cls(**metadata)

from_json(path) classmethod #

Create DatasetMetadata from a json file.

Parameters:

  • path (str): Path to json file. Required.

Returns:

  • DatasetMetadata: Instance of DatasetMetadata.

Source code in focoos/ports.py
@classmethod
def from_json(cls, path: str):
    """Create DatasetMetadata from a json file.

    Args:
        path (str): Path to json file.

    Returns:
        DatasetMetadata: Instance of DatasetMetadata.
    """
    with open(path, encoding="utf-8") as f:
        metadata = json.load(f)
    metadata["task"] = Task(metadata["task"])
    return cls(**metadata)

DatasetPreview #

Bases: PydanticBase

Preview information for a Focoos dataset.

This class provides metadata about a dataset in the Focoos platform, including its identification, task type, and layout format.

Attributes:

  • ref (str): Unique reference ID for the dataset.
  • name (str): Human-readable name of the dataset.
  • task (FocoosTask): The computer vision task this dataset is designed for.
  • layout (DatasetLayout): The structural format of the dataset (e.g., ROBOFLOW_COCO, ROBOFLOW_SEG).
  • description (Optional[str]): Optional description of the dataset's purpose or contents.
  • spec (Optional[DatasetSpec]): Detailed specifications about the dataset's composition and size.

Source code in focoos/ports.py
class DatasetPreview(PydanticBase):
    """Preview information for a Focoos dataset.

    This class provides metadata about a dataset in the Focoos platform,
    including its identification, task type, and layout format.

    Attributes:
        ref (str): Unique reference ID for the dataset.
        name (str): Human-readable name of the dataset.
        task (FocoosTask): The computer vision task this dataset is designed for.
        layout (DatasetLayout): The structural format of the dataset (e.g., ROBOFLOW_COCO, ROBOFLOW_SEG).
        description (Optional[str]): Optional description of the dataset's purpose or contents.
        spec (Optional[DatasetSpec]): Detailed specifications about the dataset's composition and size.
    """

    ref: str
    name: str
    task: Task
    layout: DatasetLayout
    description: Optional[str] = None
    spec: Optional[DatasetSpec] = None

DatasetSpec #

Bases: PydanticBase

Specification details for a dataset in the Focoos platform.

This class provides information about the dataset's size and composition, including the number of samples in training and validation sets and the total size.

Attributes:

  • train_length (int): Number of samples in the training set.
  • valid_length (int): Number of samples in the validation set.
  • size_mb (float): Total size of the dataset in megabytes.

Source code in focoos/ports.py
class DatasetSpec(PydanticBase):
    """Specification details for a dataset in the Focoos platform.

    This class provides information about the dataset's size and composition,
    including the number of samples in training and validation sets and the total size.

    Attributes:
        train_length (int): Number of samples in the training set.
        valid_length (int): Number of samples in the validation set.
        size_mb (float): Total size of the dataset in megabytes.
    """

    train_length: int
    valid_length: int
    size_mb: float
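
A short sketch (assuming both classes are importable from `focoos.ports`) showing how a DatasetSpec typically accompanies a DatasetPreview; the reference ID and numbers below are hypothetical:

```python
from focoos.ports import DatasetLayout, DatasetPreview, DatasetSpec, Task  # assumed import path

spec = DatasetSpec(train_length=800, valid_length=200, size_mb=512.0)
preview = DatasetPreview(
    ref="ds-123",                       # hypothetical dataset reference
    name="pets-detection",
    task=Task.DETECTION,
    layout=DatasetLayout.ROBOFLOW_COCO,
    description="Cats and dogs, exported from Roboflow",
    spec=spec,
)
print(preview.spec.train_length + preview.spec.valid_length)  # 1000 samples in total
```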

DictClass #

Bases: OrderedDict

Source code in focoos/ports.py
class DictClass(OrderedDict):
    def to_tuple(self) -> tuple[Any]:
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(
            self[k] for k in self.keys() if self[k] is not None
        )  # without this check we are unable to export models with None values

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def __post_init__(self):
        """Check the BasicContainer dataclass.

        Only occurs if @dataclass decorator has been used.
        """
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")

        for _field in class_fields:
            v = getattr(self, _field.name)
            # if v is not None:  # without this check we are unable to export models with None values
            #    self[_field.name] = v
            self[_field.name] = v

    def __reduce__(self):
        state_dict = {field.name: getattr(self, field.name) for field in fields(self)}
        return (self.__class__.__new__, (self.__class__,), state_dict)
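
DictClass is meant to be combined with @dataclass (as the ModelOutput dataclass later on this page does): fields become both attributes and ordered-dict entries, and positional indexing / to_tuple() skip None values. A minimal sketch with a hypothetical container, assuming DictClass is importable from `focoos.ports`:

```python
from dataclasses import dataclass
from typing import Optional

from focoos.ports import DictClass  # assumed import path

@dataclass
class Prediction(DictClass):  # hypothetical container, mirroring how ModelOutput is defined
    logits: Optional[list] = None
    boxes: Optional[list] = None

p = Prediction(logits=[0.1, 0.9])
print(p["logits"])   # [0.1, 0.9]      key access (dict behaviour)
print(p[0])          # [0.1, 0.9]      positional access over non-None values
print(p.to_tuple())  # ([0.1, 0.9],)   boxes is None, so it is skipped
p.boxes = [[0, 0, 10, 10]]             # attribute assignment also updates the dict entry
print(len(p.to_tuple()))               # 2
```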

__post_init__() #

Check the BasicContainer dataclass.

Only occurs if @dataclass decorator has been used.

Source code in focoos/ports.py
def __post_init__(self):
    """Check the BasicContainer dataclass.

    Only occurs if @dataclass decorator has been used.
    """
    class_fields = fields(self)

    # Safety and consistency checks
    if not len(class_fields):
        raise ValueError(f"{self.__class__.__name__} has no fields.")

    for _field in class_fields:
        v = getattr(self, _field.name)
        # if v is not None:  # without this check we are unable to export models with None values
        #    self[_field.name] = v
        self[_field.name] = v

to_tuple() #

Convert self to a tuple containing all the attributes/keys that are not None.

Source code in focoos/ports.py
def to_tuple(self) -> tuple[Any]:
    """
    Convert self to a tuple containing all the attributes/keys that are not `None`.
    """
    return tuple(
        self[k] for k in self.keys() if self[k] is not None
    )  # without this check we are unable to export models with None values

DynamicAxes dataclass #

Dynamic axes for model export.

Source code in focoos/ports.py
@dataclass
class DynamicAxes:
    """Dynamic axes for model export."""

    input_names: list[str]
    output_names: list[str]
    dynamic_axes: dict

ExportCfg dataclass #

Configuration for model export.

Parameters:

  • out_dir (str): Output directory for exported model. Required.
  • onnx_opset (int): ONNX opset version to use. Default: 17.
  • onnx_dynamic (bool): Whether to use dynamic axes in ONNX export. Default: True.
  • onnx_simplify (bool): Whether to simplify ONNX model. Default: True.
  • model_fuse (bool): Whether to fuse model layers. Default: True.
  • format (Literal['onnx', 'torchscript']): Export format ("onnx" or "torchscript"). Default: 'onnx'.
  • device (Optional[str]): Device to use for export. Default: 'cuda'.
Source code in focoos/ports.py
@dataclass
class ExportCfg:
    """Configuration for model export.

    Args:
        out_dir: Output directory for exported model
        onnx_opset: ONNX opset version to use
        onnx_dynamic: Whether to use dynamic axes in ONNX export
        onnx_simplify: Whether to simplify ONNX model
        model_fuse: Whether to fuse model layers
        format: Export format ("onnx" or "torchscript")
        device: Device to use for export
    """

    out_dir: str
    onnx_opset: int = 17
    onnx_dynamic: bool = True
    onnx_simplify: bool = True
    model_fuse: bool = True
    format: Literal["onnx", "torchscript"] = "onnx"
    device: Optional[str] = "cuda"
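
A brief sketch (assuming ExportCfg is importable from `focoos.ports`) of a hypothetical TorchScript export configuration:

```python
from focoos.ports import ExportCfg  # assumed import path

cfg = ExportCfg(
    out_dir="./exported",  # hypothetical output directory
    format="torchscript",
    device="cpu",
)
print(cfg.model_fuse)  # True  (default)
print(cfg.onnx_opset)  # 17    (default; only relevant when format == "onnx")
```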

ExportFormat #

Bases: str, Enum

Available export formats for model inference.

Values
  • ONNX: ONNX format
  • TORCHSCRIPT: TorchScript format
Source code in focoos/ports.py
class ExportFormat(str, Enum):
    """Available export formats for model inference.

    Values:
        - ONNX: ONNX format
        - TORCHSCRIPT: TorchScript format

    """

    ONNX = "onnx"
    TORCHSCRIPT = "torchscript"

FocoosDet dataclass #

Represents a single result from a Focoos model.

This dataclass encapsulates all relevant information for a detected object, including:

  • Bounding box coordinates (bbox) in [x1, y1, x2, y2] format, where (x1, y1) is the top-left and (x2, y2) is the bottom-right.
  • Confidence score (conf) between 0 and 1.
  • Class ID (cls_id) corresponding to the model's class list.
  • Human-readable label (label) for the detected class.
  • Optional segmentation mask (mask) as a base64-encoded PNG, cropped to the bbox region.
  • Optional keypoints for pose estimation or similar tasks.

Notes
  • The mask field is only present for instance or semantic segmentation models.
  • The mask is a base64-encoded PNG string, with its origin at the top-left of the bbox and dimensions matching the bbox.
  • Keypoints, if present, are a list of (x, y, visibility) tuples.

Attributes:

  • bbox (Optional[list[int]]): Bounding box [x1, y1, x2, y2].
  • conf (Optional[float]): Detection confidence score.
  • cls_id (Optional[int]): Class index.
  • label (Optional[str]): Class label.
  • mask (Optional[str]): Base64-encoded PNG mask (cropped to bbox).
  • keypoints (Optional[list[tuple[int, int, float]]]): Optional keypoints.

Source code in focoos/ports.py
@dataclass
class FocoosDet:
    """
    Represents a single result from a Focoos model.

    This dataclass encapsulates all relevant information for a detected object, including:
      - Bounding box coordinates (bbox) in [x1, y1, x2, y2] format, where (x1, y1) is the top-left and (x2, y2) is the bottom-right.
      - Confidence score (conf) between 0 and 1.
      - Class ID (cls_id) corresponding to the model's class list.
      - Human-readable label (label) for the detected class.
      - Optional segmentation mask (mask) as a base64-encoded PNG, cropped to the bbox region.
      - Optional keypoints for pose estimation or similar tasks.

    Notes:
        - The mask field is only present for instance or semantic segmentation models.
        - The mask is a base64-encoded PNG string, with its origin at the top-left of the bbox and dimensions matching the bbox.
        - Keypoints, if present, are a list of (x, y, visibility) tuples.

    Attributes:
        bbox (Optional[list[int]]): Bounding box [x1, y1, x2, y2].
        conf (Optional[float]): Detection confidence score.
        cls_id (Optional[int]): Class index.
        label (Optional[str]): Class label.
        mask (Optional[str]): Base64-encoded PNG mask (cropped to bbox).
        keypoints (Optional[list[tuple[int, int, float]]]): Optional keypoints.
    """

    bbox: Optional[list[int]] = None
    conf: Optional[float] = None
    cls_id: Optional[int] = None
    label: Optional[str] = None
    mask: Optional[str] = None
    keypoints: Optional[list[tuple[int, int, float]]] = None  # TODO: check if float visibility is used or not

    @classmethod
    def from_json(cls, data: Union[str, dict]):
        if isinstance(data, str):
            with open(data, encoding="utf-8") as f:
                data_dict = json.load(f)
        else:
            data_dict = data

        bbox = data_dict.get("bbox")
        if bbox is not None:  # Retrocompatibility fix for remote results with float bbox, !TODO remove asap
            data_dict["bbox"] = list(map(int, bbox))

        return cls(**data_dict)

    def __repr__(self):
        # Show "hidden" if mask is not None, else show the actual mask value
        mask_repr = "hidden" if self.mask is not None else self.mask
        return (
            f"FocoosDet(bbox={self.bbox}, conf={self.conf}, cls_id={self.cls_id}, "
            f"label={self.label}, mask={mask_repr}, keypoints={self.keypoints})"
        )
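
A small sketch (assuming FocoosDet is importable from `focoos.ports`) showing direct construction and the from_json retro-compatibility cast of float bounding boxes to int; the payload below is hypothetical:

```python
from focoos.ports import FocoosDet  # assumed import path

det = FocoosDet(bbox=[10, 20, 110, 220], conf=0.92, cls_id=0, label="cat")
print(det)  # mask is None, so __repr__ prints it as-is

# Rebuilding a detection from a serialized payload with float coordinates.
payload = {"bbox": [10.4, 20.7, 110.2, 219.9], "conf": 0.92, "cls_id": 0, "label": "cat"}
restored = FocoosDet.from_json(payload)
print(restored.bbox)  # [10, 20, 110, 219]  coordinates are cast back to int
```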

FocoosDetections dataclass #

Represents a collection of detection or segmentation results from a Focoos model.

This dataclass holds a list of FocoosDet objects, and optionally:

  • The image (as a base64 string or numpy array) associated with the detections.
  • Latency information for the inference process, such as time spent in preprocessing, inference, postprocessing, and annotation.

Attributes:

  • detections (list[FocoosDet]): List of detection results.
  • image (Optional[Union[str, np.ndarray]]): The image associated with the detections, either as a base64-encoded string or a numpy array. If present, the string is typically a base64-encoded annotated image.
  • latency (Optional[dict]): Dictionary with timing information for each inference step. Keys may include 'inference', 'preprocess', 'postprocess', and 'annotate', with values in seconds.

Source code in focoos/ports.py
@dataclass
class FocoosDetections:
    """
    Represents a collection of detection or segmentation results from a Focoos model.

    This dataclass holds a list of FocoosDet objects, and optionally:
      - The image (as a base64 string or numpy array) associated with the detections.
      - Latency information for the inference process, such as time spent in preprocessing,
        inference, postprocessing, and annotation.

    Attributes:
        detections (list[FocoosDet]): List of detection results.
        image (Optional[Union[str, np.ndarray]]): The image associated with the detections,
            either as a base64-encoded string or a numpy array. If present, the string is
            typically a base64-encoded annotated image.
        latency (Optional[dict]): Dictionary with timing information for each inference step.
            Keys may include 'inference', 'preprocess', 'postprocess', and 'annotate', with
            values in seconds.
    """

    detections: list[FocoosDet]
    image: Optional[Union[str, np.ndarray]] = None  # can be Base64 encoded image or numpy array
    latency: Optional[InferLatency] = None

    def __len__(self):
        return len(self.detections)

    def model_dump(self):
        return {
            "detections": [asdict(det) for det in self.detections],
            "image": self.image if isinstance(self.image, str) else None,
            "latency": asdict(self.latency) if self.latency is not None else None,
        }

    def __repr__(self):
        # Show "hidden" if image is not None, else exclude 'image'
        fields = []
        for field_name in self.__dataclass_fields__:
            if field_name == "image":
                value = getattr(self, field_name)
                if value is not None:
                    fields.append(f"{field_name}=hidden")
                continue
            value = getattr(self, field_name)
            fields.append(f"{field_name}={value!r}")
        return f"{self.__class__.__name__}({', '.join(fields)})"

    def infer_print(self):
        """Print a formatted summary of the detections and timing information."""
        num_detections = len(self.detections)

        # Handle special case of zero detections
        if num_detections == 0:
            print("\nNo detections!")
        else:
            # Count detections by class
            class_counts = {}
            for det in self.detections:
                # Determine class key with fallback logic
                if det.label is not None and det.label != "":
                    class_key = det.label
                elif det.cls_id is not None:
                    class_key = f"(id_class={det.cls_id})"
                else:
                    class_key = "unknown"

                class_counts[class_key] = class_counts.get(class_key, 0) + 1

            # Format class counts as comma-separated string
            if not class_counts:
                formatted_classes = "no_classes"
            else:
                sorted_classes = sorted(class_counts.items())
                formatted_classes = ", ".join(f"{count} {class_name}" for class_name, count in sorted_classes)

            print(f"\n{formatted_classes}")

        # Print latency information with total time at the end
        if self.latency is not None:
            times = [
                self.latency.imload or 0,
                self.latency.preprocess or 0,
                self.latency.inference or 0,
                self.latency.postprocess or 0,
                self.latency.annotate or 0,
            ]
            total_time_ms = sum(times) * 1000
            total_time_str = f"{total_time_ms:.0f}ms"

            latency_parts = []
            if self.latency.imload is not None:
                latency_parts.append(f"imload {self.latency.imload * 1000:.0f}ms")
            if self.latency.preprocess is not None:
                latency_parts.append(f"preprocess {self.latency.preprocess * 1000:.0f}ms")
            if self.latency.inference is not None:
                latency_parts.append(f"inference {self.latency.inference * 1000:.0f}ms")
            if self.latency.postprocess is not None:
                latency_parts.append(f"postprocess {self.latency.postprocess * 1000:.0f}ms")
            if self.latency.annotate is not None:
                latency_parts.append(f"annotate {self.latency.annotate * 1000:.0f}ms")

            if latency_parts:
                print(f"Latency: {', '.join(latency_parts)}, total {total_time_str}")
            else:
                print(f"Latency: total {total_time_str}")

    def pprint(self):
        print("\n" + "=" * 50)
        print("DETECTION RESULTS")
        print("=" * 50)

        num_detections = len(self.detections)
        print(f"Found {num_detections} detections{': ' if num_detections > 0 else ''}")
        print()

        for i, det in enumerate(self.detections):
            # Get values from FocoosDet object
            x1, y1, x2, y2 = det.bbox if det.bbox else [-1, -1, -1, -1]
            conf = det.conf if det.conf is not None else -1

            print(f"  {i + 1}. {det.label or f'Class {det.cls_id}'}")
            print(f"     Confidence: {conf:.3f}")
            print(f"     Bbox: [{x1}, {y1}, {x2}, {y2}]")
            print(f"     Size: {x2 - x1} x {y2 - y1}")
            if det.mask:
                print("     Has mask: Yes (base64 encoded)")
            print()

        # Print latency information if available
        if self.latency is not None:
            print("Latencies:")
            print("-" * 50)
            for key, value in asdict(self.latency).items():
                if isinstance(value, (int, float)):
                    print(f"  {key}: {value:.3f}s")
                else:
                    print(f"  {key}: {value}")
        print()
        print("=" * 50 + "\n")

infer_print() #

Print a formatted summary of the detections and timing information.

Source code in focoos/ports.py
def infer_print(self):
    """Print a formatted summary of the detections and timing information."""
    num_detections = len(self.detections)

    # Handle special case of zero detections
    if num_detections == 0:
        print("\nNo detections!")
    else:
        # Count detections by class
        class_counts = {}
        for det in self.detections:
            # Determine class key with fallback logic
            if det.label is not None and det.label != "":
                class_key = det.label
            elif det.cls_id is not None:
                class_key = f"(id_class={det.cls_id})"
            else:
                class_key = "unknown"

            class_counts[class_key] = class_counts.get(class_key, 0) + 1

        # Format class counts as comma-separated string
        if not class_counts:
            formatted_classes = "no_classes"
        else:
            sorted_classes = sorted(class_counts.items())
            formatted_classes = ", ".join(f"{count} {class_name}" for class_name, count in sorted_classes)

        print(f"\n{formatted_classes}")

    # Print latency information with total time at the end
    if self.latency is not None:
        times = [
            self.latency.imload or 0,
            self.latency.preprocess or 0,
            self.latency.inference or 0,
            self.latency.postprocess or 0,
            self.latency.annotate or 0,
        ]
        total_time_ms = sum(times) * 1000
        total_time_str = f"{total_time_ms:.0f}ms"

        latency_parts = []
        if self.latency.imload is not None:
            latency_parts.append(f"imload {self.latency.imload * 1000:.0f}ms")
        if self.latency.preprocess is not None:
            latency_parts.append(f"preprocess {self.latency.preprocess * 1000:.0f}ms")
        if self.latency.inference is not None:
            latency_parts.append(f"inference {self.latency.inference * 1000:.0f}ms")
        if self.latency.postprocess is not None:
            latency_parts.append(f"postprocess {self.latency.postprocess * 1000:.0f}ms")
        if self.latency.annotate is not None:
            latency_parts.append(f"annotate {self.latency.annotate * 1000:.0f}ms")

        if latency_parts:
            print(f"Latency: {', '.join(latency_parts)}, total {total_time_str}")
        else:
            print(f"Latency: total {total_time_str}")

GPUDevice #

Bases: PydanticBase

Information about a GPU device.

Source code in focoos/ports.py
class GPUDevice(PydanticBase):
    """Information about a GPU device."""

    gpu_id: Optional[int] = None
    gpu_name: Optional[str] = None
    gpu_memory_total_gb: Optional[float] = None
    gpu_memory_used_percentage: Optional[float] = None
    gpu_temperature: Optional[float] = None
    gpu_load_percentage: Optional[float] = None

GPUInfo #

Bases: PydanticBase

Information about a GPU driver.

Source code in focoos/ports.py
class GPUInfo(PydanticBase):
    """Information about a GPU driver."""

    gpu_count: Optional[int] = None
    gpu_driver: Optional[str] = None
    gpu_cuda_version: Optional[str] = None
    total_gpu_memory_gb: Optional[float] = None
    devices: Optional[list[GPUDevice]] = None

InferLatency dataclass #

Represents the latency data for a Focoos model.

Source code in focoos/ports.py
@dataclass
class InferLatency:
    """
    Represents the latency data for a Focoos model.
    """

    imload: Optional[float] = None
    preprocess: Optional[float] = None
    inference: Optional[float] = None
    postprocess: Optional[float] = None
    annotate: Optional[float] = None

LatencyMetrics dataclass #

Performance metrics for model inference.

This class provides performance metrics for model inference, including frames per second (FPS), engine used, minimum latency, maximum latency, mean latency, standard deviation of latency, input image size, and device type.

Attributes:

  • fps (int): Frames per second (FPS) of the inference process.
  • engine (str): The inference engine used (e.g., "onnx", "torchscript").
  • min (float): Minimum latency in milliseconds.
  • max (float): Maximum latency in milliseconds.
  • mean (float): Mean latency in milliseconds.
  • std (float): Standard deviation of latency in milliseconds.
  • im_size (int): Input image size.
  • device (str): Device type.

Source code in focoos/ports.py
@dataclass
class LatencyMetrics:
    """Performance metrics for model inference.

    This class provides performance metrics for model inference, including frames per second (FPS),
    engine used, minimum latency, maximum latency, mean latency, standard deviation of latency,
    input image size, and device type.

    Attributes:
        fps (int): Frames per second (FPS) of the inference process.
        engine (str): The inference engine used (e.g., "onnx", "torchscript").
        min (float): Minimum latency in milliseconds.
        max (float): Maximum latency in milliseconds.
        mean (float): Mean latency in milliseconds.
        std (float): Standard deviation of latency in milliseconds.
        im_size (int): Input image size.
        device (str): Device type.
    """

    fps: int
    engine: str
    min: float
    max: float
    mean: float
    std: float
    im_size: int
    device: str

Metrics dataclass #

Collection of training and inference metrics.

Source code in focoos/ports.py
@dataclass
class Metrics:
    """
    Collection of training and inference metrics.
    """

    infer_metrics: list[dict] = field(default_factory=list)
    valid_metrics: list[dict] = field(default_factory=list)
    train_metrics: list[dict] = field(default_factory=list)
    iterations: Optional[int] = None
    best_valid_metric: Optional[dict] = None

ModelExtension #

Bases: str, Enum

Supported model extension.

Values
  • ONNX: ONNX format
  • TORCHSCRIPT: TorchScript format
  • WEIGHTS: Weights format
Source code in focoos/ports.py
class ModelExtension(str, Enum):
    """Supported model extension.

    Values:
        - ONNX: ONNX format
        - TORCHSCRIPT: TorchScript format
        - WEIGHTS: Weights format
    """

    ONNX = "onnx"
    TORCHSCRIPT = "pt"
    WEIGHTS = "pth"

    @classmethod
    def from_runtime_type(cls, runtime_type: RuntimeType):
        if runtime_type in [
            RuntimeType.ONNX_CUDA32,
            RuntimeType.ONNX_TRT32,
            RuntimeType.ONNX_TRT16,
            RuntimeType.ONNX_CPU,
            RuntimeType.ONNX_COREML,
        ]:
            return cls.ONNX
        elif runtime_type == RuntimeType.TORCHSCRIPT_32:
            return cls.TORCHSCRIPT
        else:
            raise ValueError(f"Invalid runtime type: {runtime_type}")
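
For illustration (assuming RuntimeType is importable from `focoos.ports` alongside ModelExtension), from_runtime_type maps a runtime to the file extension of the artifact it loads:

```python
from focoos.ports import ModelExtension, RuntimeType  # assumed import path

print(ModelExtension.from_runtime_type(RuntimeType.ONNX_CPU).value)        # "onnx"
print(ModelExtension.from_runtime_type(RuntimeType.TORCHSCRIPT_32).value)  # "pt"
```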

ModelFamily #

Bases: str, Enum

Enumeration of the available model families.

Source code in focoos/ports.py
class ModelFamily(str, Enum):
    """Enumerazione delle famiglie di modelli disponibili"""

    DETR = "fai_detr"
    MASKFORMER = "fai_mf"
    BISENETFORMER = "bisenetformer"
    IMAGE_CLASSIFIER = "fai_cls"
    RTMO = "rtmo"

ModelInfo dataclass #

Bases: DictClass

Comprehensive metadata for a Focoos model.

This dataclass encapsulates all relevant information required to identify, configure, and evaluate a model within the Focoos platform. It is used for serialization, deserialization, and programmatic access to model properties.

Attributes:

  • name (str): Human-readable name or unique identifier for the model.
  • model_family (ModelFamily): The model's architecture family (e.g., RTDETR, M2F).
  • classes (list[str]): List of class names that the model can detect or segment.
  • im_size (int): Input image size (usually square, e.g., 640).
  • task (Task): Computer vision task performed by the model (e.g., detection, segmentation).
  • config (dict): Model-specific configuration parameters.
  • ref (Optional[str]): Optional unique reference string for the model.
  • focoos_model (Optional[str]): Optional Focoos base model identifier.
  • status (Optional[ModelStatus]): Current status of the model (e.g., training, ready).
  • description (Optional[str]): Optional human-readable description of the model.
  • train_args (Optional[TrainerArgs]): Optional training arguments used to train the model.
  • weights_uri (Optional[str]): Optional URI or path to the model weights.
  • val_dataset (Optional[str]): Optional name or reference of the validation dataset.
  • val_metrics (Optional[dict]): Optional dictionary of validation metrics (e.g., mAP, accuracy).
  • focoos_version (Optional[str]): Optional Focoos version string.
  • latency (Optional[list[LatencyMetrics]]): Optional list of latency measurements for different runtimes.
  • updated_at (Optional[str]): Optional ISO timestamp of the last update.

Source code in focoos/ports.py
@dataclass
class ModelInfo(DictClass):
    """
    Comprehensive metadata for a Focoos model.

    This dataclass encapsulates all relevant information required to identify, configure, and evaluate a model
    within the Focoos platform. It is used for serialization, deserialization, and programmatic access to model
    properties.

    Attributes:
        name (str): Human-readable name or unique identifier for the model.
        model_family (ModelFamily): The model's architecture family (e.g., RTDETR, M2F).
        classes (list[str]): List of class names that the model can detect or segment.
        im_size (int): Input image size (usually square, e.g., 640).
        task (Task): Computer vision task performed by the model (e.g., detection, segmentation).
        config (dict): Model-specific configuration parameters.
        ref (Optional[str]): Optional unique reference string for the model.
        focoos_model (Optional[str]): Optional Focoos base model identifier.
        status (Optional[ModelStatus]): Current status of the model (e.g., training, ready).
        description (Optional[str]): Optional human-readable description of the model.
        train_args (Optional[TrainerArgs]): Optional training arguments used to train the model.
        weights_uri (Optional[str]): Optional URI or path to the model weights.
        val_dataset (Optional[str]): Optional name or reference of the validation dataset.
        val_metrics (Optional[dict]): Optional dictionary of validation metrics (e.g., mAP, accuracy).
        focoos_version (Optional[str]): Optional Focoos version string.
        latency (Optional[list[LatencyMetrics]]): Optional list of latency measurements for different runtimes.
        updated_at (Optional[str]): Optional ISO timestamp of the last update.
    """

    name: str
    model_family: ModelFamily
    classes: list[str]
    im_size: int
    task: Task
    config: dict
    ref: Optional[str] = None
    focoos_model: Optional[str] = None
    status: Optional[ModelStatus] = None
    description: Optional[str] = None
    train_args: Optional[TrainerArgs] = None
    weights_uri: Optional[str] = None
    val_dataset: Optional[str] = None
    val_metrics: Optional[dict] = None  # TODO: Consider making metrics explicit in the future
    focoos_version: Optional[str] = None
    latency: Optional[list[LatencyMetrics]] = None
    training_info: Optional[TrainingInfo] = None
    updated_at: Optional[str] = None

    @classmethod
    def from_json(cls, data: Union[str, dict]):
        """
        Load ModelInfo from a JSON file.

        Args:
            path (Optional[str]): Path to the JSON file containing model metadata.
            data (Optional[dict]): Dictionary containing model metadata.

        Returns:
            ModelInfo: An instance of ModelInfo populated with data from the file.
        """
        assert isinstance(data, dict) or isinstance(data, str), "data must be a dictionary or a path to a JSON file"
        if isinstance(data, str):
            with open(data, encoding="utf-8") as f:
                model_info_json = json.load(f)
        else:
            model_info_json = data

        training_info = None
        if "training_info" in model_info_json and model_info_json["training_info"] is not None:
            training_info = TrainingInfo(
                **{k: v for k, v in model_info_json["training_info"].items() if k in TrainingInfo.__dataclass_fields__}
            )

        model_info = cls(
            name=model_info_json["name"],
            ref=model_info_json.get("ref", None),
            model_family=ModelFamily(model_info_json["model_family"]),
            classes=model_info_json["classes"],
            im_size=int(model_info_json["im_size"]),
            status=ModelStatus(model_info_json.get("status")) if model_info_json.get("status") else None,
            task=Task(model_info_json["task"]),
            focoos_model=model_info_json.get("focoos_model", None),
            config=model_info_json["config"],
            description=model_info_json.get("description", None),
            train_args=TrainerArgs(
                **{k: v for k, v in model_info_json["train_args"].items() if k in TrainerArgs.__dataclass_fields__}
            )
            if "train_args" in model_info_json and model_info_json["train_args"] is not None
            else None,
            weights_uri=model_info_json.get("weights_uri", None),
            val_dataset=model_info_json.get("val_dataset", None),
            latency=[LatencyMetrics(**latency) for latency in model_info_json.get("latency", [])]
            if "latency" in model_info_json and model_info_json["latency"] is not None
            else None,
            updated_at=model_info_json.get("updated_at", None),
            focoos_version=model_info_json.get("focoos_version", None),
            val_metrics=model_info_json.get("val_metrics", None),
            training_info=training_info,
        )
        return model_info

    def dump_json(self, path: str):
        """
        Serialize ModelInfo to a JSON file.

        Args:
            path (str): Path where the JSON file will be saved.
        """
        data = asdict(self)
        # Note: config_class is not included; if needed, convert to string here.

        with open(path, "w", encoding="utf-8") as f:
            json.dump(data, f, ensure_ascii=False, indent=4)

    def pprint(self):
        """
        Pretty-print the main model information using the Focoos logger.
        """
        from focoos.utils.logger import get_logger

        logger = get_logger("model_info")
        logger.info(
            f"""
            📋 Name: {self.name}
            📝 Description: {self.description}
            👪 Family: {self.model_family}
            🔗 Focoos Model: {self.focoos_model}
            🎯 Task: {self.task}
            🏷️ Classes: {self.classes}
            🖼️ Im size: {self.im_size}
            """
        )
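
A minimal sketch (assuming the classes are importable from `focoos.ports`, and that Task and ModelFamily serialize as strings like the other str-backed enums here) of writing and reloading a hypothetical model card:

```python
from focoos.ports import ModelFamily, ModelInfo, Task  # assumed import path

info = ModelInfo(
    name="my-detector",
    model_family=ModelFamily.DETR,
    classes=["cat", "dog"],
    im_size=640,
    task=Task.DETECTION,
    config={"num_queries": 300},  # hypothetical config payload
)

info.dump_json("model_info.json")
restored = ModelInfo.from_json("model_info.json")
restored.pprint()  # logs name, family, task, classes and im_size via the Focoos logger
```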

dump_json(path) #

Serialize ModelInfo to a JSON file.

Parameters:

  • path (str): Path where the JSON file will be saved. Required.
Source code in focoos/ports.py
def dump_json(self, path: str):
    """
    Serialize ModelInfo to a JSON file.

    Args:
        path (str): Path where the JSON file will be saved.
    """
    data = asdict(self)
    # Note: config_class is not included; if needed, convert to string here.

    with open(path, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=4)

from_json(data) classmethod #

Load ModelInfo from a JSON file or dictionary.

Parameters:

  • data (Union[str, dict]): Path to a JSON file containing model metadata, or a dictionary of model metadata. Required.

Returns:

  • ModelInfo: An instance of ModelInfo populated with the provided data.

Source code in focoos/ports.py
@classmethod
def from_json(cls, data: Union[str, dict]):
    """
    Load ModelInfo from a JSON file.

    Args:
        path (Optional[str]): Path to the JSON file containing model metadata.
        data (Optional[dict]): Dictionary containing model metadata.

    Returns:
        ModelInfo: An instance of ModelInfo populated with data from the file.
    """
    assert isinstance(data, dict) or isinstance(data, str), "data must be a dictionary or a path to a JSON file"
    if isinstance(data, str):
        with open(data, encoding="utf-8") as f:
            model_info_json = json.load(f)
    else:
        model_info_json = data

    training_info = None
    if "training_info" in model_info_json and model_info_json["training_info"] is not None:
        training_info = TrainingInfo(
            **{k: v for k, v in model_info_json["training_info"].items() if k in TrainingInfo.__dataclass_fields__}
        )

    model_info = cls(
        name=model_info_json["name"],
        ref=model_info_json.get("ref", None),
        model_family=ModelFamily(model_info_json["model_family"]),
        classes=model_info_json["classes"],
        im_size=int(model_info_json["im_size"]),
        status=ModelStatus(model_info_json.get("status")) if model_info_json.get("status") else None,
        task=Task(model_info_json["task"]),
        focoos_model=model_info_json.get("focoos_model", None),
        config=model_info_json["config"],
        description=model_info_json.get("description", None),
        train_args=TrainerArgs(
            **{k: v for k, v in model_info_json["train_args"].items() if k in TrainerArgs.__dataclass_fields__}
        )
        if "train_args" in model_info_json and model_info_json["train_args"] is not None
        else None,
        weights_uri=model_info_json.get("weights_uri", None),
        val_dataset=model_info_json.get("val_dataset", None),
        latency=[LatencyMetrics(**latency) for latency in model_info_json.get("latency", [])]
        if "latency" in model_info_json and model_info_json["latency"] is not None
        else None,
        updated_at=model_info_json.get("updated_at", None),
        focoos_version=model_info_json.get("focoos_version", None),
        val_metrics=model_info_json.get("val_metrics", None),
        training_info=training_info,
    )
    return model_info

pprint() #

Pretty-print the main model information using the Focoos logger.

Source code in focoos/ports.py
def pprint(self):
    """
    Pretty-print the main model information using the Focoos logger.
    """
    from focoos.utils.logger import get_logger

    logger = get_logger("model_info")
    logger.info(
        f"""
        📋 Name: {self.name}
        📝 Description: {self.description}
        👪 Family: {self.model_family}
        🔗 Focoos Model: {self.focoos_model}
        🎯 Task: {self.task}
        🏷️ Classes: {self.classes}
        🖼️ Im size: {self.im_size}
        """
    )

ModelNotFound #

Bases: Exception

Exception raised when a requested model is not found.

Source code in focoos/ports.py
class ModelNotFound(Exception):
    """Exception raised when a requested model is not found."""

    def __init__(self, message: str):
        self.message = message
        super().__init__(self.message)

ModelOutput dataclass #

Bases: DictClass

Model output base container.

Source code in focoos/ports.py
@dataclass
class ModelOutput(DictClass):
    """Model output base container."""

    loss: Optional[dict]

ModelPreview #

Bases: PydanticBase

Preview information for a Focoos model.

This class provides a lightweight preview of model information in the Focoos platform, containing essential details like reference ID, name, task type, and status.

Attributes:

  • ref (str): Unique reference ID for the model.
  • name (str): Human-readable name of the model.
  • task (FocoosTask): The computer vision task this model is designed for.
  • description (Optional[str]): Optional description of the model's purpose or capabilities.
  • status (ModelStatus): Current status of the model (e.g., training, ready, failed).
  • focoos_model (str): The base model architecture identifier.

Source code in focoos/ports.py
class ModelPreview(PydanticBase):
    """Preview information for a Focoos model.

    This class provides a lightweight preview of model information in the Focoos platform,
    containing essential details like reference ID, name, task type, and status.

    Attributes:
        ref (str): Unique reference ID for the model.
        name (str): Human-readable name of the model.
        task (FocoosTask): The computer vision task this model is designed for.
        description (Optional[str]): Optional description of the model's purpose or capabilities.
        status (ModelStatus): Current status of the model (e.g., training, ready, failed).
        focoos_model (str): The base model architecture identifier.
    """

    ref: str
    name: str
    task: Task
    description: Optional[str] = None
    status: ModelStatus
    focoos_model: str

ModelStatus #

Bases: str, Enum

Status of a Focoos model during its lifecycle.

Values
  • CREATED: Model has been created
  • TRAINING_STARTING: Training is about to start
  • TRAINING_RUNNING: Training is in progress
  • TRAINING_ERROR: Training encountered an error
  • TRAINING_COMPLETED: Training finished successfully
  • TRAINING_STOPPED: Training was stopped
  • DEPLOYED: Model is deployed
  • DEPLOY_ERROR: Deployment encountered an error
Example
from focoos import Focoos

focoos = Focoos(api_key="<YOUR-API-KEY>")
model = focoos.get_remote_model("my-model")

if model.status == ModelStatus.DEPLOYED:
    print("Model is deployed and ready for inference")
elif model.status == ModelStatus.TRAINING_RUNNING:
    print("Model is currently training")
elif model.status == ModelStatus.TRAINING_ERROR:
    print("Model training encountered an error")
Source code in focoos/ports.py
class ModelStatus(str, Enum):
    """Status of a Focoos model during its lifecycle.

    Values:
        - CREATED: Model has been created
        - TRAINING_STARTING: Training is about to start
        - TRAINING_RUNNING: Training is in progress
        - TRAINING_ERROR: Training encountered an error
        - TRAINING_COMPLETED: Training finished successfully
        - TRAINING_STOPPED: Training was stopped
        - DEPLOYED: Model is deployed
        - DEPLOY_ERROR: Deployment encountered an error

    Example:
        ```python
        from focoos import Focoos

        focoos = Focoos(api_key="<YOUR-API-KEY>")
        model = focoos.get_remote_model("my-model")

        if model.status == ModelStatus.DEPLOYED:
            print("Model is deployed and ready for inference")
        elif model.status == ModelStatus.TRAINING_RUNNING:
            print("Model is currently training")
        elif model.status == ModelStatus.TRAINING_ERROR:
            print("Model training encountered an error")
        ```
    """

    CREATED = "CREATED"
    TRAINING_STARTING = "TRAINING_STARTING"
    TRAINING_RUNNING = "TRAINING_RUNNING"
    TRAINING_ERROR = "TRAINING_ERROR"
    TRAINING_COMPLETED = "TRAINING_COMPLETED"
    TRAINING_STOPPED = "TRAINING_STOPPED"
    DEPLOYED = "DEPLOYED"
    DEPLOY_ERROR = "DEPLOY_ERROR"

OnnxRuntimeOpts dataclass #

ONNX runtime configuration options.

This class provides configuration options for the ONNX runtime used for model inference.

Attributes:

  • fp16 (Optional[bool]): Enable FP16 precision. Default is False.
  • cuda (Optional[bool]): Enable CUDA acceleration for GPU inference. Default is False.
  • vino (Optional[bool]): Enable OpenVINO acceleration for Intel hardware. Default is False.
  • verbose (Optional[bool]): Enable verbose logging during inference. Default is False.
  • trt (Optional[bool]): Enable TensorRT acceleration for NVIDIA GPUs. Default is False.
  • coreml (Optional[bool]): Enable CoreML acceleration for Apple hardware. Default is False.
  • warmup_iter (int): Number of warmup iterations to run before benchmarking. Default is 0.

Source code in focoos/ports.py
@dataclass
class OnnxRuntimeOpts:
    """ONNX runtime configuration options.

    This class provides configuration options for the ONNX runtime used for model inference.

    Attributes:
        fp16 (Optional[bool]): Enable FP16 precision. Default is False.
        cuda (Optional[bool]): Enable CUDA acceleration for GPU inference. Default is False.
        vino (Optional[bool]): Enable OpenVINO acceleration for Intel hardware. Default is False.
        verbose (Optional[bool]): Enable verbose logging during inference. Default is False.
        trt (Optional[bool]): Enable TensorRT acceleration for NVIDIA GPUs. Default is False.
        coreml (Optional[bool]): Enable CoreML acceleration for Apple hardware. Default is False.
        warmup_iter (int): Number of warmup iterations to run before benchmarking. Default is 0.

    """

    fp16: Optional[bool] = False
    cuda: Optional[bool] = False
    vino: Optional[bool] = False
    verbose: Optional[bool] = False
    trt: Optional[bool] = False
    coreml: Optional[bool] = False
    warmup_iter: int = 0
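
A short sketch (assuming OnnxRuntimeOpts is importable from `focoos.ports`) of a hypothetical CUDA + FP16 runtime configuration:

```python
from focoos.ports import OnnxRuntimeOpts  # assumed import path

opts = OnnxRuntimeOpts(cuda=True, fp16=True, warmup_iter=5)
print(opts.trt, opts.coreml, opts.vino)  # False False False (defaults)
```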

Quotas #

Bases: PydanticBase

Usage quotas and limits for a user account.

Attributes:

  • total_inferences (int): Total number of inferences allowed.
  • max_inferences (int): Maximum number of inferences allowed.
  • used_storage_gb (float): Used storage in gigabytes.
  • max_storage_gb (float): Maximum storage in gigabytes.
  • active_training_jobs (list[str]): List of active training job IDs.
  • max_active_training_jobs (int): Maximum number of active training jobs allowed.

Source code in focoos/ports.py
class Quotas(PydanticBase):
    """Usage quotas and limits for a user account.

    Attributes:
        total_inferences (int): Total number of inferences allowed.
        max_inferences (int): Maximum number of inferences allowed.
        used_storage_gb (float): Used storage in gigabytes.
        max_storage_gb (float): Maximum storage in gigabytes.
        active_training_jobs (list[str]): List of active training job IDs.
        max_active_training_jobs (int): Maximum number of active training jobs allowed.
    """

    # INFERENCE
    total_inferences: int
    max_inferences: int
    # STORAGE
    used_storage_gb: float
    max_storage_gb: float
    # TRAINING
    active_training_jobs: list[str]
    max_active_training_jobs: int

    # ML_G4DN_XLARGE TRAINING HOURS
    used_mlg4dnxlarge_training_jobs_hours: float
    max_mlg4dnxlarge_training_jobs_hours: float

RemoteModelInfo #

Bases: PydanticBase

Complete metadata for a Focoos model.

This class contains comprehensive information about a model in the Focoos platform, including its identification, configuration, performance metrics, and training details.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| ref | str | Unique reference ID for the model. |
| name | str | Human-readable name of the model. |
| description | Optional[str] | Optional description of the model's purpose or capabilities. |
| owner_ref | str | Reference ID of the model owner. |
| focoos_model | str | The base model architecture used. |
| task | Task | The task type the model is designed for (e.g., DETECTION, SEMSEG). |
| created_at | datetime | Timestamp when the model was created. |
| updated_at | datetime | Timestamp when the model was last updated. |
| status | ModelStatus | Current status of the model (e.g., TRAINING, DEPLOYED). |
| metrics | Optional[dict] | Performance metrics of the model (e.g., mAP, accuracy). |
| classes | Optional[list[str]] | List of class names the model can detect or segment. |
| im_size | Optional[int] | Input image size the model expects. |
| training_info | Optional[TrainingInfo] | Information about the training process. |
| dataset | Optional[DatasetPreview] | Information about the dataset used for training. |

Source code in focoos/ports.py, lines 239-282
class RemoteModelInfo(PydanticBase):
    """Complete metadata for a Focoos model.

    This class contains comprehensive information about a model in the Focoos platform,
    including its identification, configuration, performance metrics, and training details.

    Attributes:
        ref (str): Unique reference ID for the model.
        name (str): Human-readable name of the model.
        description (Optional[str]): Optional description of the model's purpose or capabilities.
        owner_ref (str): Reference ID of the model owner.
        focoos_model (str): The base model architecture used.
        task (FocoosTask): The task type the model is designed for (e.g., DETECTION, SEMSEG).
        created_at (datetime): Timestamp when the model was created.
        updated_at (datetime): Timestamp when the model was last updated.
        status (ModelStatus): Current status of the model (e.g., TRAINING, DEPLOYED).
        metrics (Optional[dict]): Performance metrics of the model (e.g., mAP, accuracy).
        latencies (Optional[list[dict]]): Inference latency measurements across different configurations.
        classes (Optional[list[str]]): List of class names the model can detect or segment.
        im_size (Optional[int]): Input image size the model expects.
        training_info (Optional[TrainingInfo]): Information about the training process.
        location (Optional[str]): Storage location of the model.
        dataset (Optional[DatasetPreview]): Information about the dataset used for training.
    """

    ref: str
    name: str
    description: Optional[str] = None
    is_managed: bool
    owner_ref: str
    focoos_model: str
    config: Optional[dict] = None
    task: Task
    created_at: datetime
    updated_at: datetime
    status: ModelStatus
    model_family: Optional[str] = None
    metrics: Optional[dict] = None
    classes: Optional[list[str]] = None
    im_size: Optional[int] = None
    training_info: Optional[TrainingInfo] = None
    dataset: Optional[DatasetPreview] = None
    hyperparameters: Optional[dict] = None
    focoos_version: Optional[str] = None

RuntimeType #

Bases: str, Enum

Available runtime configurations for model inference.

Values
  • ONNX_CUDA32: ONNX with CUDA FP32
  • ONNX_TRT32: ONNX with TensorRT FP32
  • ONNX_TRT16: ONNX with TensorRT FP16
  • ONNX_CPU: ONNX on CPU
  • ONNX_COREML: ONNX with CoreML
  • TORCHSCRIPT_32: TorchScript FP32

Source code in focoos/ports.py, lines 581-611
class RuntimeType(str, Enum):
    """Available runtime configurations for model inference.

    Values:
        - ONNX_CUDA32: ONNX with CUDA FP32
        - ONNX_TRT32: ONNX with TensorRT FP32
        - ONNX_TRT16: ONNX with TensorRT FP16
        - ONNX_CPU: ONNX on CPU
        - ONNX_COREML: ONNX with CoreML
        - TORCHSCRIPT_32: TorchScript FP32

    """

    ONNX_CUDA32 = "onnx_cuda32"
    ONNX_TRT32 = "onnx_trt32"
    ONNX_TRT16 = "onnx_trt16"
    ONNX_CPU = "onnx_cpu"
    ONNX_COREML = "onnx_coreml"
    TORCHSCRIPT_32 = "torchscript_32"

    def __str__(self) -> str:
        return self.value

    def __repr__(self) -> str:
        return self.value

    def to_export_format(self) -> ExportFormat:
        if self == RuntimeType.TORCHSCRIPT_32:
            return ExportFormat.TORCHSCRIPT
        else:
            return ExportFormat.ONNX
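
A short usage sketch: the enum is string-valued, and to_export_format() maps each runtime to the export format it expects.

```python
from focoos.ports import ExportFormat, RuntimeType

runtime = RuntimeType.ONNX_TRT16
print(runtime)  # "onnx_trt16" -- the enum is string-valued

# Each runtime maps to the export format it expects.
assert runtime.to_export_format() == ExportFormat.ONNX
assert RuntimeType.TORCHSCRIPT_32.to_export_format() == ExportFormat.TORCHSCRIPT
```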

SystemInfo #

Bases: PydanticBase

System information including hardware and software details.

Source code in focoos/ports.py, lines 664-765
class SystemInfo(PydanticBase):
    """System information including hardware and software details."""

    focoos_host: Optional[str] = None
    focoos_version: Optional[str] = None
    python_version: Optional[str] = None
    system: Optional[str] = None
    system_name: Optional[str] = None
    cpu_type: Optional[str] = None
    cpu_cores: Optional[int] = None
    memory_gb: Optional[float] = None
    memory_used_percentage: Optional[float] = None
    available_onnx_providers: Optional[list[str]] = None
    disk_space_total_gb: Optional[float] = None
    disk_space_used_percentage: Optional[float] = None
    pytorch_info: Optional[str] = None
    gpu_info: Optional[GPUInfo] = None
    packages_versions: Optional[dict[str, str]] = None
    environment: Optional[dict[str, str]] = None

    def pprint(self, level: Literal["INFO", "DEBUG"] = "DEBUG"):
        """Pretty print the system info."""
        from focoos.utils.logger import get_logger

        logger = get_logger("SystemInfo", level=level)

        output_lines = ["\n================ 🔍 SYSTEM INFO 🔍 ===================="]
        model_data = self.model_dump()

        if "focoos_host" in model_data and "focoos_version" in model_data:
            output_lines.append(f"focoos: {model_data.get('focoos_host')} (v{model_data.get('focoos_version')})")
            model_data.pop("focoos_host", None)
            model_data.pop("focoos_version", None)

        if "system" in model_data and "system_name" in model_data:
            output_lines.append(f"system: {model_data.get('system')} ({model_data.get('system_name')})")
            model_data.pop("system", None)
            model_data.pop("system_name", None)

        if "cpu_type" in model_data and "cpu_cores" in model_data:
            output_lines.append(f"cpu: {model_data.get('cpu_type')} ({model_data.get('cpu_cores')} cores)")
            model_data.pop("cpu_type", None)
            model_data.pop("cpu_cores", None)

        if "memory_gb" in model_data and "memory_used_percentage" in model_data:
            output_lines.append(
                f"memory_gb: {model_data.get('memory_gb')} ({model_data.get('memory_used_percentage')}% used)"
            )
            model_data.pop("memory_gb", None)
            model_data.pop("memory_used_percentage", None)

        if "disk_space_total_gb" in model_data and "disk_space_used_percentage" in model_data:
            output_lines.append(
                f"disk_space_total_gb: {model_data.get('disk_space_total_gb')} ({model_data.get('disk_space_used_percentage')}% used)"
            )
            model_data.pop("disk_space_total_gb", None)
            model_data.pop("disk_space_used_percentage", None)

        for key, value in model_data.items():
            if key == "gpu_info" and value is not None:
                output_lines.append(f"{key}:")
                output_lines.append(f"  - gpu_count: {value.get('gpu_count')}")
                output_lines.append(f"  - total_memory_gb: {value.get('total_gpu_memory_gb')} GB")
                output_lines.append(f"  - gpu_driver: {value.get('gpu_driver')}")
                output_lines.append(f"  - gpu_cuda_version: {value.get('gpu_cuda_version')}")
                if value.get("devices"):
                    output_lines.append("  - devices:")
                    for device in value.get("devices", []):
                        gpu_memory_used = (
                            f"{device.get('gpu_memory_used_percentage')}%"
                            if device.get("gpu_memory_used_percentage") is not None
                            else "N/A"
                        )
                        gpu_load = (
                            f"{device.get('gpu_load_percentage')}%"
                            if device.get("gpu_load_percentage") is not None
                            else "N/A"
                        )
                        gpu_memory_total = (
                            f"{device.get('gpu_memory_total_gb')} GB"
                            if device.get("gpu_memory_total_gb") is not None
                            else "N/A"
                        )

                        output_lines.append(
                            f"    - GPU {device.get('gpu_id')}: {device.get('gpu_name')}, Memory: {gpu_memory_total} ({gpu_memory_used} used), Load: {gpu_load}"
                        )
            elif isinstance(value, list):
                output_lines.append(f"{key}: {value}")
            elif isinstance(value, dict) and key == "packages_versions":  # Special formatting for packages_versions
                output_lines.append(f"{key}:")
                for pkg_name, pkg_version in value.items():
                    output_lines.append(f"  - {pkg_name}: {pkg_version}")
            elif isinstance(value, dict) and key == "environment":  # Special formatting for environment
                output_lines.append(f"{key}:")
                for env_key, env_value in value.items():
                    output_lines.append(f"  - {env_key}: {env_value}")
            else:
                output_lines.append(f"{key}: {value}")
        output_lines.append("================================================")

        logger.info("\n".join(output_lines))

pprint(level='DEBUG') #

Pretty print the system info.

Source code in focoos/ports.py, lines 684-765
def pprint(self, level: Literal["INFO", "DEBUG"] = "DEBUG"):
    """Pretty print the system info."""
    from focoos.utils.logger import get_logger

    logger = get_logger("SystemInfo", level=level)

    output_lines = ["\n================ 🔍 SYSTEM INFO 🔍 ===================="]
    model_data = self.model_dump()

    if "focoos_host" in model_data and "focoos_version" in model_data:
        output_lines.append(f"focoos: {model_data.get('focoos_host')} (v{model_data.get('focoos_version')})")
        model_data.pop("focoos_host", None)
        model_data.pop("focoos_version", None)

    if "system" in model_data and "system_name" in model_data:
        output_lines.append(f"system: {model_data.get('system')} ({model_data.get('system_name')})")
        model_data.pop("system", None)
        model_data.pop("system_name", None)

    if "cpu_type" in model_data and "cpu_cores" in model_data:
        output_lines.append(f"cpu: {model_data.get('cpu_type')} ({model_data.get('cpu_cores')} cores)")
        model_data.pop("cpu_type", None)
        model_data.pop("cpu_cores", None)

    if "memory_gb" in model_data and "memory_used_percentage" in model_data:
        output_lines.append(
            f"memory_gb: {model_data.get('memory_gb')} ({model_data.get('memory_used_percentage')}% used)"
        )
        model_data.pop("memory_gb", None)
        model_data.pop("memory_used_percentage", None)

    if "disk_space_total_gb" in model_data and "disk_space_used_percentage" in model_data:
        output_lines.append(
            f"disk_space_total_gb: {model_data.get('disk_space_total_gb')} ({model_data.get('disk_space_used_percentage')}% used)"
        )
        model_data.pop("disk_space_total_gb", None)
        model_data.pop("disk_space_used_percentage", None)

    for key, value in model_data.items():
        if key == "gpu_info" and value is not None:
            output_lines.append(f"{key}:")
            output_lines.append(f"  - gpu_count: {value.get('gpu_count')}")
            output_lines.append(f"  - total_memory_gb: {value.get('total_gpu_memory_gb')} GB")
            output_lines.append(f"  - gpu_driver: {value.get('gpu_driver')}")
            output_lines.append(f"  - gpu_cuda_version: {value.get('gpu_cuda_version')}")
            if value.get("devices"):
                output_lines.append("  - devices:")
                for device in value.get("devices", []):
                    gpu_memory_used = (
                        f"{device.get('gpu_memory_used_percentage')}%"
                        if device.get("gpu_memory_used_percentage") is not None
                        else "N/A"
                    )
                    gpu_load = (
                        f"{device.get('gpu_load_percentage')}%"
                        if device.get("gpu_load_percentage") is not None
                        else "N/A"
                    )
                    gpu_memory_total = (
                        f"{device.get('gpu_memory_total_gb')} GB"
                        if device.get("gpu_memory_total_gb") is not None
                        else "N/A"
                    )

                    output_lines.append(
                        f"    - GPU {device.get('gpu_id')}: {device.get('gpu_name')}, Memory: {gpu_memory_total} ({gpu_memory_used} used), Load: {gpu_load}"
                    )
        elif isinstance(value, list):
            output_lines.append(f"{key}: {value}")
        elif isinstance(value, dict) and key == "packages_versions":  # Special formatting for packages_versions
            output_lines.append(f"{key}:")
            for pkg_name, pkg_version in value.items():
                output_lines.append(f"  - {pkg_name}: {pkg_version}")
        elif isinstance(value, dict) and key == "environment":  # Special formatting for environment
            output_lines.append(f"{key}:")
            for env_key, env_value in value.items():
                output_lines.append(f"  - {env_key}: {env_value}")
        else:
            output_lines.append(f"{key}: {value}")
    output_lines.append("================================================")

    logger.info("\n".join(output_lines))
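
All fields are optional, so a partially populated instance is valid; in practice the SDK gathers this information for you. A minimal sketch of pprint, with illustrative field values:

```python
from focoos.ports import SystemInfo

info = SystemInfo(
    focoos_version="0.1.0",    # illustrative values
    python_version="3.10.12",
    system="Linux",
    cpu_cores=8,
    memory_gb=32.0,
)
info.pprint(level="INFO")  # logs a formatted summary of the populated fields
```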

Task #

Bases: str, Enum

Types of computer vision tasks supported by Focoos.

Values
  • DETECTION: Object detection
  • SEMSEG: Semantic segmentation
  • INSTANCE_SEGMENTATION: Instance segmentation
  • CLASSIFICATION: Image classification
  • KEYPOINT: Keypoint detection

Source code in focoos/ports.py, lines 118-133
class Task(str, Enum):
    """Types of computer vision tasks supported by Focoos.

    Values:
        - DETECTION: Object detection
        - SEMSEG: Semantic segmentation
        - INSTANCE_SEGMENTATION: Instance segmentation
        - CLASSIFICATION: Image classification
        - KEYPOINT: Keypoint detection
    """

    DETECTION = "detection"
    SEMSEG = "semseg"
    INSTANCE_SEGMENTATION = "instseg"
    CLASSIFICATION = "classification"
    KEYPOINT = "keypoint"
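
Because the enum is string-valued, members can be looked up from their wire values and compared directly to plain strings:

```python
from focoos.ports import Task

assert Task("detection") is Task.DETECTION
assert Task.INSTANCE_SEGMENTATION.value == "instseg"
assert Task.SEMSEG == "semseg"  # str/Enum members compare equal to their value
```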

TorchscriptRuntimeOpts dataclass #

TorchScript runtime configuration options.

This class provides configuration options for the TorchScript runtime used for model inference.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| warmup_iter | int | Number of warmup iterations to run before benchmarking. Default is 0. |
| optimize_for_inference | bool | Enable inference optimizations. Default is True. |
| set_fusion_strategy | bool | Enable operator fusion. Default is True. |

Source code in focoos/ports.py, lines 522-536
@dataclass
class TorchscriptRuntimeOpts:
    """TorchScript runtime configuration options.

    This class provides configuration options for the TorchScript runtime used for model inference.

    Attributes:
        warmup_iter (int): Number of warmup iterations to run before benchmarking. Default is 0.
        optimize_for_inference (bool): Enable inference optimizations. Default is True.
        set_fusion_strategy (bool): Enable operator fusion. Default is True.
    """

    warmup_iter: int = 0
    optimize_for_inference: bool = True
    set_fusion_strategy: bool = True
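
A minimal construction sketch with illustrative values; like OnnxRuntimeOpts, these options are passed to the runtime wrapper documented elsewhere.

```python
from focoos.ports import TorchscriptRuntimeOpts

# Skip the optimize_for_inference graph pass and run 10 warmup iterations.
opts = TorchscriptRuntimeOpts(optimize_for_inference=False, warmup_iter=10)
print(opts)
```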

TrainerArgs dataclass #

Configuration class for unified model training.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| run_name | str | Name of the training run |
| output_dir | str | Directory to save outputs |
| ckpt_dir | Optional[str] | Directory for checkpoints |
| init_checkpoint | Optional[str] | Initial checkpoint to load |
| resume | bool | Whether to resume from checkpoint |
| num_gpus | int | Number of GPUs to use |
| device | str | Device to use (cuda/cpu) |
| workers | int | Number of data loading workers |
| amp_enabled | bool | Whether to use automatic mixed precision |
| ddp_broadcast_buffers | bool | Whether to broadcast buffers in DDP |
| ddp_find_unused | bool | Whether to find unused parameters in DDP |
| checkpointer_period | int | How often to save checkpoints |
| checkpointer_max_to_keep | int | Maximum checkpoints to keep |
| eval_period | int | How often to evaluate |
| log_period | int | How often to log |
| samples | int | Number of samples for visualization |
| seed | int | Random seed |
| early_stop | bool | Whether to use early stopping |
| patience | int | Early stopping patience |
| ema_enabled | bool | Whether to use EMA |
| ema_decay | float | EMA decay rate |
| ema_warmup | int | EMA warmup period |
| learning_rate | float | Base learning rate |
| weight_decay | float | Weight decay |
| max_iters | int | Maximum training iterations |
| batch_size | int | Batch size |
| scheduler | SchedulerType | Learning rate scheduler type |
| scheduler_extra | Optional[dict] | Extra scheduler parameters |
| optimizer | OptimizerType | Optimizer type |
| optimizer_extra | Optional[dict] | Extra optimizer parameters |
| weight_decay_norm | float | Weight decay for normalization layers |
| weight_decay_embed | float | Weight decay for embeddings |
| backbone_multiplier | float | Learning rate multiplier for backbone |
| decoder_multiplier | float | Learning rate multiplier for decoder |
| head_multiplier | float | Learning rate multiplier for head |
| freeze_bn | bool | Whether to freeze batch norm |
| clip_gradients | float | Gradient clipping value |
| size_divisibility | int | Input size divisibility requirement |
| gather_metric_period | int | How often to gather metrics |
| zero_grad_before_forward | bool | Whether to zero gradients before forward pass |
| sync_to_hub | bool | Whether to sync the training run to the Focoos Hub |

Source code in focoos/ports.py, lines 948-1042
@dataclass
class TrainerArgs:
    """Configuration class for unified model training.

    Attributes:
        run_name (str): Name of the training run
        output_dir (str): Directory to save outputs
        ckpt_dir (Optional[str]): Directory for checkpoints
        init_checkpoint (Optional[str]): Initial checkpoint to load
        resume (bool): Whether to resume from checkpoint
        num_gpus (int): Number of GPUs to use
        device (str): Device to use (cuda/cpu)
        workers (int): Number of data loading workers
        amp_enabled (bool): Whether to use automatic mixed precision
        ddp_broadcast_buffers (bool): Whether to broadcast buffers in DDP
        ddp_find_unused (bool): Whether to find unused parameters in DDP
        checkpointer_period (int): How often to save checkpoints
        checkpointer_max_to_keep (int): Maximum checkpoints to keep
        eval_period (int): How often to evaluate
        log_period (int): How often to log
        vis_period (int): How often to visualize
        samples (int): Number of samples for visualization
        seed (int): Random seed
        early_stop (bool): Whether to use early stopping
        patience (int): Early stopping patience
        ema_enabled (bool): Whether to use EMA
        ema_decay (float): EMA decay rate
        ema_warmup (int): EMA warmup period
        learning_rate (float): Base learning rate
        weight_decay (float): Weight decay
        max_iters (int): Maximum training iterations
        batch_size (int): Batch size
        scheduler (str): Learning rate scheduler type
        scheduler_extra (Optional[dict]): Extra scheduler parameters
        optimizer (str): Optimizer type
        optimizer_extra (Optional[dict]): Extra optimizer parameters
        weight_decay_norm (float): Weight decay for normalization layers
        weight_decay_embed (float): Weight decay for embeddings
        backbone_multiplier (float): Learning rate multiplier for backbone
        decoder_multiplier (float): Learning rate multiplier for decoder
        head_multiplier (float): Learning rate multiplier for head
        freeze_bn (bool): Whether to freeze batch norm
        clip_gradients (float): Gradient clipping value
        size_divisibility (int): Input size divisibility requirement
        gather_metric_period (int): How often to gather metrics
        zero_grad_before_forward (bool): Whether to zero gradients before forward pass
    """

    run_name: str
    output_dir: str = MODELS_DIR
    ckpt_dir: Optional[str] = None
    init_checkpoint: Optional[str] = None
    resume: bool = False
    # Logistics params
    num_gpus: int = get_gpus_count()
    device: str = "cuda"
    workers: int = 4
    amp_enabled: bool = True
    ddp_broadcast_buffers: bool = False
    ddp_find_unused: bool = True
    checkpointer_period: int = 1000
    checkpointer_max_to_keep: int = 1
    eval_period: int = 50
    log_period: int = 20
    samples: int = 9
    seed: int = 42
    early_stop: bool = True
    patience: int = 10
    # EMA
    ema_enabled: bool = False
    ema_decay: float = 0.999
    ema_warmup: int = 2000
    # Hyperparameters
    learning_rate: float = 5e-4
    weight_decay: float = 0.02
    max_iters: int = 3000
    batch_size: int = 16
    scheduler: SchedulerType = "MULTISTEP"
    scheduler_extra: Optional[dict] = None
    optimizer: OptimizerType = "ADAMW"
    optimizer_extra: Optional[dict] = None
    weight_decay_norm: float = 0.0
    weight_decay_embed: float = 0.0
    backbone_multiplier: float = 0.1
    decoder_multiplier: float = 1.0
    head_multiplier: float = 1.0
    freeze_bn: bool = False
    clip_gradients: float = 0.1
    size_divisibility: int = 0
    # Training specific
    gather_metric_period: int = 1
    zero_grad_before_forward: bool = False

    # Sync to hub
    sync_to_hub: bool = False
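
A short configuration sketch with illustrative hyperparameters; any field not set keeps the default shown in the source above, and the trainer that consumes TrainerArgs is documented elsewhere.

```python
from focoos.ports import TrainerArgs

args = TrainerArgs(
    run_name="my-first-run",  # the only required field
    batch_size=8,
    max_iters=1500,
    learning_rate=1e-4,
    early_stop=True,
    patience=5,
)
print(args.output_dir)  # defaults to MODELS_DIR
```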

TrainingInfo dataclass #

Information about a model's training process.

This class contains details about the training job configuration, status, and timing.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| algorithm_name | Optional[str] | The name of the training algorithm used. |
| instance_device | Optional[str] | Device used by the training instance. |
| instance_type | Optional[str] | The compute instance type used for training. |
| volume_size | Optional[int] | The storage volume size in GB allocated for the training job. |
| main_status | Optional[str] | The primary status of the training job (e.g., "InProgress", "Completed"). |
| failure_reason | Optional[str] | Description of why the training job failed, if applicable. |
| status_transitions | Optional[list[dict]] | List of status change events during the training process. |
| start_time | Optional[str] | Timestamp when the training job started. |
| end_time | Optional[str] | Timestamp when the training job completed or failed. |
| artifact_location | Optional[str] | Storage location of the training artifacts and model outputs. |

Source code in focoos/ports.py, lines 143-173
@dataclass
class TrainingInfo:
    """Information about a model's training process.

    This class contains details about the training job configuration, status, and timing.

    Attributes:
        algorithm_name: The name of the training algorithm used.
        instance_type: The compute instance type used for training.
        volume_size: The storage volume size in GB allocated for the training job.
        max_runtime_in_seconds: Maximum allowed runtime for the training job in seconds.
        main_status: The primary status of the training job (e.g., "InProgress", "Completed").
        secondary_status: Additional status information about the training job.
        failure_reason: Description of why the training job failed, if applicable.
        elapsed_time: Time elapsed since the start of the training job in seconds.
        status_transitions: List of status change events during the training process.
        start_time: Timestamp when the training job started.
        end_time: Timestamp when the training job completed or failed.
        artifact_location: Storage location of the training artifacts and model outputs.
    """

    algorithm_name: Optional[str] = ""  # todo: remove
    instance_device: Optional[str] = None
    instance_type: Optional[str] = None
    volume_size: Optional[int] = None
    main_status: Optional[str] = None
    failure_reason: Optional[str] = None
    status_transitions: Optional[list[dict]] = None
    start_time: Optional[str] = None
    end_time: Optional[str] = None
    artifact_location: Optional[str] = None
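
All fields are optional; in practice the platform populates this object and attaches it to RemoteModelInfo.training_info. A small sketch with illustrative values:

```python
from focoos.ports import TrainingInfo

info = TrainingInfo(
    main_status="InProgress",        # illustrative values
    instance_type="ml.g4dn.xlarge",
    start_time="2024-06-01T10:00:00Z",
)
if info.main_status != "Completed":
    print(f"training started at {info.start_time}, status: {info.main_status}")
```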

User #

Bases: PydanticBase

User account information.

This class represents a user account in the Focoos platform, containing personal information, API key, and usage quotas.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| email | str | The user's email address. |
| created_at | datetime | When the user account was created. |
| updated_at | datetime | When the user account was last updated. |
| company | Optional[str] | The user's company name, if provided. |
| api_key | ApiKey | The API key associated with the user account. |
| quotas | Quotas | Usage quotas and limits for the user account. |

Source code in focoos/ports.py, lines 801-821
class User(PydanticBase):
    """User account information.

    This class represents a user account in the Focoos platform, containing
    personal information, API key, and usage quotas.

    Attributes:
        email (str): The user's email address.
        created_at (datetime): When the user account was created.
        updated_at (datetime): When the user account was last updated.
        company (Optional[str]): The user's company name, if provided.
        api_key (ApiKey): The API key associated with the user account.
        quotas (Quotas): Usage quotas and limits for the user account.
    """

    email: str
    created_at: datetime
    updated_at: datetime
    company: Optional[str] = None
    api_key: ApiKey
    quotas: Quotas
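
In practice a User is returned by the platform when you query account information. A parsing sketch with an illustrative payload, assuming PydanticBase subclasses Pydantic v2's BaseModel (model_dump is used elsewhere on this page):

```python
from focoos.ports import User

payload = {  # illustrative payload, shaped like the fields above
    "email": "jane@example.com",
    "created_at": "2024-01-01T00:00:00Z",
    "updated_at": "2024-06-01T00:00:00Z",
    "company": "ACME",
    "api_key": {"key": "fk-xxxx"},
    "quotas": {
        "total_inferences": 10,
        "max_inferences": 1000,
        "used_storage_gb": 0.5,
        "max_storage_gb": 10.0,
        "active_training_jobs": [],
        "max_active_training_jobs": 2,
        "used_mlg4dnxlarge_training_jobs_hours": 0.0,
        "max_mlg4dnxlarge_training_jobs_hours": 10.0,
    },
}

user = User.model_validate(payload)
print(user.email, user.quotas.max_inferences)
```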