CachedModel

Model Examples

Examples of using the Model Class are in the Examples section at the bottom of this page. AWS Model setup and deployment are quite complicated to do manually, but the Workbench Model Class makes it a breeze!

CachedModel: Caches the method results for Workbench Models

CachedModel

Bases: CachedArtifactMixin, ModelCore

CachedModel: Caches the method results for Workbench Models

Note: Cached method values may lag underlying Model changes.

Common Usage
my_model = CachedModel(name)
my_model.details()
my_model.health_check()
my_model.workbench_meta()
Source code in src/workbench/cached/cached_model.py
class CachedModel(CachedArtifactMixin, ModelCore):
    """CachedModel: Caches the method results for Workbench Models

    Note: Cached method values may lag underlying Model changes.

    Common Usage:
        ```python
        my_model = CachedModel(name)
        my_model.details()
        my_model.health_check()
        my_model.workbench_meta()
        ```
    """

    def __init__(self, uuid: str):
        """CachedModel Initialization"""
        ModelCore.__init__(self, model_uuid=uuid, use_cached_meta=True)

    @CachedArtifactMixin.cache_result
    def summary(self, **kwargs) -> dict:
        """Retrieve the CachedModel Details.

        Returns:
            dict: A dictionary of details about the CachedModel
        """
        return super().summary(**kwargs)

    @CachedArtifactMixin.cache_result
    def details(self, **kwargs) -> dict:
        """Retrieve the CachedModel Details.

        Returns:
            dict: A dictionary of details about the CachedModel
        """
        return super().details(**kwargs)

    @CachedArtifactMixin.cache_result
    def health_check(self, **kwargs) -> dict:
        """Retrieve the CachedModel Health Check.

        Returns:
            dict: A dictionary of health check details for the CachedModel
        """
        return super().health_check(**kwargs)

    @CachedArtifactMixin.cache_result
    def workbench_meta(self) -> Union[str, None]:
        """Retrieve the Enumerated Model Type (REGRESSOR, CLASSIFER, etc).

        Returns:
            str: The Enumerated Model Type
        """
        return super().workbench_meta()

    @CachedArtifactMixin.cache_result
    def get_endpoint_inference_path(self) -> Union[str, None]:
        """Retrieve the Endpoint Inference Path.

        Returns:
            str: The Endpoint Inference Path
        """
        return super().get_endpoint_inference_path()

    @CachedArtifactMixin.cache_result
    def list_inference_runs(self) -> list[str]:
        """Retrieve the captured prediction results for this model

        Returns:
            list[str]: List of Inference Runs
        """
        return super().list_inference_runs()

    @CachedArtifactMixin.cache_result
    def get_inference_metrics(self, capture_uuid: str = "latest") -> Union[pd.DataFrame, None]:
        """Retrieve the captured prediction results for this model

        Args:
            capture_uuid (str, optional): Specific capture_uuid (default: latest)

        Returns:
            pd.DataFrame: DataFrame of the Captured Metrics (might be None)
        """
        return super().get_inference_metrics(capture_uuid=capture_uuid)

    @CachedArtifactMixin.cache_result
    def get_inference_predictions(self, capture_uuid: str = "auto_inference") -> Union[pd.DataFrame, None]:
        """Retrieve the captured prediction results for this model

        Args:
            capture_uuid (str, optional): Specific capture_uuid (default: auto_inference)

        Returns:
            pd.DataFrame: DataFrame of the Captured Predictions (might be None)
        """
        # Note: This method can generate larger dataframes, so we'll sample if needed
        df = super().get_inference_predictions(capture_uuid=capture_uuid)
        if df is not None and len(df) > 5000:
            self.log.warning(f"{self.uuid}:{capture_uuid} Sampling Inference Predictions to 5000 rows")
            return df.sample(5000)
        return df

    @CachedArtifactMixin.cache_result
    def confusion_matrix(self, capture_uuid: str = "latest") -> Union[pd.DataFrame, None]:
        """Retrieve the confusion matrix for the model

        Args:
            capture_uuid (str, optional): Specific capture_uuid (default: latest)

        Returns:
            pd.DataFrame: DataFrame of the Confusion Matrix (might be None)
        """
        return super().confusion_matrix(capture_uuid=capture_uuid)

__init__(uuid)

CachedModel Initialization

Source code in src/workbench/cached/cached_model.py
def __init__(self, uuid: str):
    """CachedModel Initialization"""
    ModelCore.__init__(self, model_uuid=uuid, use_cached_meta=True)
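
A minimal usage sketch, borrowing the abalone-regression model name from the Examples section below (any existing Workbench model name works the same way):

from workbench.cached.cached_model import CachedModel

# Construct with the model uuid; metadata is read through the cached meta layer
model = CachedModel("abalone-regression")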

confusion_matrix(capture_uuid='latest')

Retrieve the confusion matrix for the model

Parameters:

    capture_uuid (str): Specific capture_uuid (default: 'latest')

Returns:

    Union[pd.DataFrame, None]: DataFrame of the Confusion Matrix (might be None)

Source code in src/workbench/cached/cached_model.py
@CachedArtifactMixin.cache_result
def confusion_matrix(self, capture_uuid: str = "latest") -> Union[pd.DataFrame, None]:
    """Retrieve the confusion matrix for the model

    Args:
        capture_uuid (str, optional): Specific capture_uuid (default: latest)

    Returns:
        pd.DataFrame: DataFrame of the Confusion Matrix (might be None)
    """
    return super().confusion_matrix(capture_uuid=capture_uuid)
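
A hedged usage sketch: a confusion matrix only applies to classification models, so the model name below is hypothetical; a regressor like abalone-regression would return None:

from workbench.cached.cached_model import CachedModel

# Hypothetical classifier model name, for illustration only
model = CachedModel("wine-classification")

# Confusion matrix for the latest inference run (None if not available)
cm_df = model.confusion_matrix()
print(cm_df)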

details(**kwargs)

Retrieve the CachedModel Details.

Returns:

    dict: A dictionary of details about the CachedModel

Source code in src/workbench/cached/cached_model.py
@CachedArtifactMixin.cache_result
def details(self, **kwargs) -> dict:
    """Retrieve the CachedModel Details.

    Returns:
        dict: A dictionary of details about the CachedModel
    """
    return super().details(**kwargs)
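
A usage sketch; the exact keys in the returned dictionary depend on the model:

from workbench.cached.cached_model import CachedModel

model = CachedModel("abalone-regression")

# Cached details dictionary (keys vary by model)
details = model.details()
print(sorted(details.keys()))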

get_endpoint_inference_path()

Retrieve the Endpoint Inference Path.

Returns:

    Union[str, None]: The Endpoint Inference Path

Source code in src/workbench/cached/cached_model.py
@CachedArtifactMixin.cache_result
def get_endpoint_inference_path(self) -> Union[str, None]:
    """Retrieve the Endpoint Inference Path.

    Returns:
        str: The Endpoint Inference Path
    """
    return super().get_endpoint_inference_path()
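
A usage sketch; the returned value is a path string (or None if no inference results have been captured):

from workbench.cached.cached_model import CachedModel

model = CachedModel("abalone-regression")

# Path where inference results for this model's endpoint are captured (or None)
print(model.get_endpoint_inference_path())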

get_inference_metrics(capture_uuid='latest')

Retrieve the captured inference metrics for this model

Parameters:

    capture_uuid (str): Specific capture_uuid (default: 'latest')

Returns:

    Union[pd.DataFrame, None]: DataFrame of the Captured Metrics (might be None)

Source code in src/workbench/cached/cached_model.py
@CachedArtifactMixin.cache_result
def get_inference_metrics(self, capture_uuid: str = "latest") -> Union[pd.DataFrame, None]:
    """Retrieve the captured prediction results for this model

    Args:
        capture_uuid (str, optional): Specific capture_uuid (default: latest)

    Returns:
        pd.DataFrame: DataFrame of the Captured Metrics (might be None)
    """
    return super().get_inference_metrics(capture_uuid=capture_uuid)
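
A usage sketch; pass a specific capture_uuid from list_inference_runs() to pin a particular run:

from workbench.cached.cached_model import CachedModel

model = CachedModel("abalone-regression")

# Metrics for the latest inference run (may be None if nothing was captured)
metrics_df = model.get_inference_metrics()
if metrics_df is not None:
    print(metrics_df)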

get_inference_predictions(capture_uuid='auto_inference')

Retrieve the captured prediction results for this model

Parameters:

    capture_uuid (str): Specific capture_uuid (default: 'auto_inference')

Returns:

    Union[pd.DataFrame, None]: DataFrame of the Captured Predictions (might be None)

Source code in src/workbench/cached/cached_model.py
@CachedArtifactMixin.cache_result
def get_inference_predictions(self, capture_uuid: str = "auto_inference") -> Union[pd.DataFrame, None]:
    """Retrieve the captured prediction results for this model

    Args:
        capture_uuid (str, optional): Specific capture_uuid (default: auto_inference)

    Returns:
        pd.DataFrame: DataFrame of the Captured Predictions (might be None)
    """
    # Note: This method can generate larger dataframes, so we'll sample if needed
    df = super().get_inference_predictions(capture_uuid=capture_uuid)
    if df is not None and len(df) > 5000:
        self.log.warning(f"{self.uuid}:{capture_uuid} Sampling Inference Predictions to 5000 rows")
        return df.sample(5000)
    return df
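
Since results over 5000 rows are randomly sampled (see the source above), a sketch of guarding downstream code against partial data:

from workbench.cached.cached_model import CachedModel

model = CachedModel("abalone-regression")

# Predictions may be sampled down to 5000 rows, so don't assume full coverage
preds = model.get_inference_predictions("auto_inference")
if preds is not None and len(preds) == 5000:
    print("Predictions may have been sampled down to 5000 rows")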

health_check(**kwargs)

Retrieve the CachedModel Health Check.

Returns:

    dict: A dictionary of health check details for the CachedModel

Source code in src/workbench/cached/cached_model.py
@CachedArtifactMixin.cache_result
def health_check(self, **kwargs) -> dict:
    """Retrieve the CachedModel Health Check.

    Returns:
        dict: A dictionary of health check details for the CachedModel
    """
    return super().health_check(**kwargs)
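
A usage sketch; an empty result generally means no health issues were flagged:

from workbench.cached.cached_model import CachedModel

model = CachedModel("abalone-regression")

# Cached health check results for this model
print(model.health_check())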

list_inference_runs()

Retrieve the list of captured inference runs for this model

Returns:

    list[str]: List of Inference Runs

Source code in src/workbench/cached/cached_model.py
@CachedArtifactMixin.cache_result
def list_inference_runs(self) -> list[str]:
    """Retrieve the captured prediction results for this model

    Returns:
        list[str]: List of Inference Runs
    """
    return super().list_inference_runs()
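
A usage sketch; the returned run names feed directly into get_inference_metrics() and get_inference_predictions():

from workbench.cached.cached_model import CachedModel

model = CachedModel("abalone-regression")

# Iterate the captured runs and pull the metrics for each one
for run in model.list_inference_runs():
    print(run)
    print(model.get_inference_metrics(run))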

summary(**kwargs)

Retrieve the CachedModel Summary.

Returns:

    dict: A summary dictionary for the CachedModel

Source code in src/workbench/cached/cached_model.py
@CachedArtifactMixin.cache_result
def summary(self, **kwargs) -> dict:
    """Retrieve the CachedModel Details.

    Returns:
        dict: A dictionary of details about the CachedModel
    """
    return super().summary(**kwargs)
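
A usage sketch; like details(), the result is cached after the first call:

from workbench.cached.cached_model import CachedModel

model = CachedModel("abalone-regression")

# Cached summary dictionary for the model
print(model.summary())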

workbench_meta()

Retrieve the Enumerated Model Type (REGRESSOR, CLASSIFIER, etc).

Returns:

    Union[str, None]: The Enumerated Model Type

Source code in src/workbench/cached/cached_model.py
@CachedArtifactMixin.cache_result
def workbench_meta(self) -> Union[str, None]:
    """Retrieve the Enumerated Model Type (REGRESSOR, CLASSIFER, etc).

    Returns:
        str: The Enumerated Model Type
    """
    return super().workbench_meta()
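
A usage sketch; per the docstring above, the return is the enumerated model type (or None):

from workbench.cached.cached_model import CachedModel

model = CachedModel("abalone-regression")

# e.g. "REGRESSOR" for this model, "CLASSIFIER" for classifiers
print(model.workbench_meta())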

Examples

All of the Workbench Examples are in the Workbench Repository under the examples/ directory. For a full code listing of any example, please visit our Workbench Examples.

Pull Inference Run

from workbench.cached.cached_model import CachedModel

# Grab a Model
model = CachedModel("abalone-regression")

# List the inference runs
model.list_inference_runs()
['auto_inference', 'model_training']

# Grab specific inference results
model.get_inference_predictions("auto_inference")
     class_number_of_rings  prediction    id
0                       16   10.516158     7
1                        9    9.031365     8
..                     ...         ...   ...
831                      8    7.693689  4158
832                      9    7.542521  4167