Skip to content

declearn.quickrun.ExperimentConfig

Bases: TomlConfig

Dataclass providing kwargs to FederatedServer and FederatedClient.

Attributes:

Name Type Description
metrics MetricSet or None

Optional MetricSet instance, defining evaluation metrics to compute in addition to the model's loss. It may be parsed from a list of Metric names or (name, config) tuples, or from a MetricSet config dict.

checkpoint str or None

The checkpoint folder path where to save the server's and client-wise outputs (round-wise model weights, evaluation metrics, logs, etc.).

Source code in declearn/quickrun/_config.py
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
@dataclasses.dataclass
class ExperimentConfig(TomlConfig):
    """Dataclass providing kwargs to `FederatedServer` and `FederatedClient`.

    Attributes
    ----------
    metrics: MetricSet or None
        Optional MetricSet instance, defining evaluation metrics to compute
        in addition to the model's loss. It may be parsed from a list of
        Metric names or (name, config) tuples, or from a MetricSet config
        dict.
    checkpoint: str or None
        The checkpoint folder path where to save the server's and client-wise
        outputs (round-wise model weights, evaluation metrics, logs, etc.).
    """

    metrics: Optional[MetricSet] = None
    checkpoint: Optional[str] = None

    def parse_metrics(
        self,
        inputs: Union[MetricSet, Dict[str, Any], List[MetricInputType], None],
    ) -> Optional[MetricSet]:
        """Parse the 'metrics' field's input into an optional MetricSet.

        Parameters
        ----------
        inputs:
            Either None, a ready-made MetricSet (passed through unchanged),
            a list/tuple of metric specs (fed to `MetricSet.from_specs`),
            or a MetricSet config dict (fed to `MetricSet.from_config`).

        Returns
        -------
        metrics: MetricSet or None

        Raises
        ------
        TypeError
            If `inputs` is of an unsupported type, or fails to be parsed
            into a MetricSet instance.
        """
        # Values that require no parsing are passed through as-is.
        # BUGFIX: this previously returned None, silently discarding a
        # caller-provided MetricSet instance.
        if inputs is None or isinstance(inputs, MetricSet):
            return inputs
        try:
            # Case of a manual listing of metrics (most expected).
            if isinstance(inputs, (tuple, list)):
                return MetricSet.from_specs(inputs)
            # Case of a MetricSet config dict (unexpected but supported).
            if isinstance(inputs, dict):
                return MetricSet.from_config(inputs)
        except (TypeError, ValueError) as exc:
            # Re-raise parsing failures under a single exception type,
            # preserving the original cause for debugging.
            raise TypeError(
                f"Failed to parse inputs for field 'metrics': {exc}."
            ) from exc
        raise TypeError(
            "Failed to parse inputs for field 'metrics': improper type."
        )

parse_metrics(inputs)

Parser for metrics.

Source code in declearn/quickrun/_config.py
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
def parse_metrics(
    self,
    inputs: Union[MetricSet, Dict[str, Any], List[MetricInputType], None],
) -> Optional[MetricSet]:
    """Parse the 'metrics' field's input into an optional MetricSet.

    Parameters
    ----------
    inputs:
        Either None, a ready-made MetricSet (passed through unchanged),
        a list/tuple of metric specs (fed to `MetricSet.from_specs`),
        or a MetricSet config dict (fed to `MetricSet.from_config`).

    Returns
    -------
    metrics: MetricSet or None

    Raises
    ------
    TypeError
        If `inputs` is of an unsupported type, or fails to be parsed
        into a MetricSet instance.
    """
    # Values that require no parsing are passed through as-is.
    # BUGFIX: this previously returned None, silently discarding a
    # caller-provided MetricSet instance.
    if inputs is None or isinstance(inputs, MetricSet):
        return inputs
    try:
        # Case of a manual listing of metrics (most expected).
        if isinstance(inputs, (tuple, list)):
            return MetricSet.from_specs(inputs)
        # Case of a MetricSet config dict (unexpected but supported).
        if isinstance(inputs, dict):
            return MetricSet.from_config(inputs)
    except (TypeError, ValueError) as exc:
        # Re-raise parsing failures under a single exception type,
        # preserving the original cause for debugging.
        raise TypeError(
            f"Failed to parse inputs for field 'metrics': {exc}."
        ) from exc
    raise TypeError(
        "Failed to parse inputs for field 'metrics': improper type."
    )