Diffstat (limited to '.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image')
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/__init__.py  35
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image.py  244
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image_classification_base.py  439
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image_object_detection_base.py  524
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_job.py  244
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_multilabel_job.py  252
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_search_space.py  437
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_instance_segmentation_job.py  249
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_limit_settings.py  117
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_model_settings.py  876
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_object_detection_job.py  240
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_object_detection_search_space.py  899
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_sweep_settings.py  86
13 files changed, 4642 insertions, 0 deletions
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/__init__.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/__init__.py
new file mode 100644
index 00000000..46964086
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/__init__.py
@@ -0,0 +1,35 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from .automl_image import AutoMLImage
+from .image_classification_job import ImageClassificationJob
+from .image_classification_multilabel_job import ImageClassificationMultilabelJob
+from .image_classification_search_space import ImageClassificationSearchSpace
+from .image_instance_segmentation_job import ImageInstanceSegmentationJob
+from .image_limit_settings import ImageLimitSettings
+from .image_model_settings import (
+    ImageModelSettingsClassification,
+    ImageModelSettingsObjectDetection,
+    LogTrainingMetrics,
+    LogValidationLoss,
+)
+from .image_object_detection_job import ImageObjectDetectionJob
+from .image_object_detection_search_space import ImageObjectDetectionSearchSpace
+from .image_sweep_settings import ImageSweepSettings
+
+__all__ = [
+    "AutoMLImage",
+    "LogTrainingMetrics",
+    "LogValidationLoss",
+    "ImageClassificationJob",
+    "ImageClassificationMultilabelJob",
+    "ImageClassificationSearchSpace",
+    "ImageInstanceSegmentationJob",
+    "ImageLimitSettings",
+    "ImageObjectDetectionJob",
+    "ImageObjectDetectionSearchSpace",
+    "ImageSweepSettings",
+    "ImageModelSettingsClassification",
+    "ImageModelSettingsObjectDetection",
+]
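Note on usage: the entities exported above are surfaced to users through the public ~azure.ai.ml.automl namespace (the :paramtype references in the docstrings below point there). A minimal sketch, assuming a recent azure-ai-ml release that re-exports these names; the values are illustrative only:

    from azure.ai.ml import automl

    # Limit and sweep settings as exported from this package (illustrative values).
    limits = automl.ImageLimitSettings(max_trials=10, max_concurrent_trials=2, timeout_minutes=60)
    sweep = automl.ImageSweepSettings(sampling_algorithm="Random")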
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image.py
new file mode 100644
index 00000000..a07bba4a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image.py
@@ -0,0 +1,244 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from abc import ABC
+from typing import Any, Dict, Optional, Union
+
+from azure.ai.ml._restclient.v2023_04_01_preview.models import LogVerbosity, SamplingAlgorithmType
+from azure.ai.ml._utils.utils import camel_to_snake
+from azure.ai.ml.entities._inputs_outputs import Input
+from azure.ai.ml.entities._job.automl.automl_vertical import AutoMLVertical
+from azure.ai.ml.entities._job.automl.image.image_limit_settings import ImageLimitSettings
+from azure.ai.ml.entities._job.automl.image.image_sweep_settings import ImageSweepSettings
+from azure.ai.ml.entities._job.sweep.early_termination_policy import (
+    BanditPolicy,
+    MedianStoppingPolicy,
+    TruncationSelectionPolicy,
+)
+from azure.ai.ml.exceptions import ErrorCategory, ErrorTarget, ValidationException
+
+
+class AutoMLImage(AutoMLVertical, ABC):
+    """Base class for all AutoML Image jobs.
+    You should not instantiate this class directly.
+    Instead, instantiate one of its task-specific subclasses.
+
+    :keyword task_type: Required. Type of task to run.
+    Possible values include: "ImageClassification", "ImageClassificationMultilabel",
+                              "ImageObjectDetection", "ImageInstanceSegmentation"
+    :paramtype task_type: str
+    :keyword limits: Limit settings for all AutoML Image jobs. Defaults to None.
+    :paramtype limits: Optional[~azure.ai.ml.automl.ImageLimitSettings]
+    :keyword sweep: Sweep settings for all AutoML Image jobs. Defaults to None.
+    :paramtype sweep: Optional[~azure.ai.ml.automl.ImageSweepSettings]
+    :keyword kwargs: Additional keyword arguments for AutoMLImage.
+    :paramtype kwargs: Dict[str, Any]
+    """
+
+    def __init__(
+        self,
+        *,
+        task_type: str,
+        limits: Optional[ImageLimitSettings] = None,
+        sweep: Optional[ImageSweepSettings] = None,
+        **kwargs: Any,
+    ) -> None:
+        self.log_verbosity = kwargs.pop("log_verbosity", LogVerbosity.INFO)
+        self.target_column_name = kwargs.pop("target_column_name", None)
+        self.validation_data_size = kwargs.pop("validation_data_size", None)
+
+        super().__init__(
+            task_type=task_type,
+            training_data=kwargs.pop("training_data", None),
+            validation_data=kwargs.pop("validation_data", None),
+            **kwargs,
+        )
+
+        # Set default value for self._limits as it is a required property in rest object.
+        self._limits = limits or ImageLimitSettings()
+        self._sweep = sweep
+
+    @property
+    def log_verbosity(self) -> LogVerbosity:
+        """Returns the verbosity of the logger.
+
+        :return: The log verbosity.
+        :rtype: ~azure.ai.ml._restclient.v2023_04_01_preview.models.LogVerbosity
+        """
+        return self._log_verbosity
+
+    @log_verbosity.setter
+    def log_verbosity(self, value: Union[str, LogVerbosity]) -> None:
+        """Sets the verbosity of the logger.
+
+        :param value: The value to set the log verbosity to.
+                      Possible values include: "NotSet", "Debug", "Info", "Warning", "Error", "Critical".
+        :type value: Union[str, ~azure.ai.ml._restclient.v2023_04_01_preview.models.LogVerbosity]
+        """
+        self._log_verbosity = None if value is None else LogVerbosity[camel_to_snake(value).upper()]
+
+    @property
+    def limits(self) -> ImageLimitSettings:
+        """Returns the limit settings for all AutoML Image jobs.
+
+        :return: The limit settings.
+        :rtype: ~azure.ai.ml.automl.ImageLimitSettings
+        """
+        return self._limits
+
+    @limits.setter
+    def limits(self, value: Union[Dict, ImageLimitSettings]) -> None:
+        if isinstance(value, ImageLimitSettings):
+            self._limits = value
+        else:
+            if not isinstance(value, dict):
+                msg = "Expected a dictionary for limit settings."
+                raise ValidationException(
+                    message=msg,
+                    no_personal_data_message=msg,
+                    target=ErrorTarget.AUTOML,
+                    error_category=ErrorCategory.USER_ERROR,
+                )
+            self.set_limits(**value)
+
+    @property
+    def sweep(self) -> Optional[ImageSweepSettings]:
+        """Returns the sweep settings for all AutoML Image jobs.
+
+        :return: The sweep settings.
+        :rtype: ~azure.ai.ml.automl.ImageSweepSettings
+        """
+        return self._sweep
+
+    @sweep.setter
+    def sweep(self, value: Union[Dict, ImageSweepSettings]) -> None:
+        """Sets the sweep settings for all AutoML Image jobs.
+
+        :param value: The value to set the sweep settings to.
+        :type value: Union[Dict, ~azure.ai.ml.automl.ImageSweepSettings]
+        :raises ~azure.ai.ml.exceptions.ValidationException: If value is not a dictionary.
+        :return: None
+        """
+        if isinstance(value, ImageSweepSettings):
+            self._sweep = value
+        else:
+            if not isinstance(value, dict):
+                msg = "Expected a dictionary for sweep settings."
+                raise ValidationException(
+                    message=msg,
+                    no_personal_data_message=msg,
+                    target=ErrorTarget.AUTOML,
+                    error_category=ErrorCategory.USER_ERROR,
+                )
+            self.set_sweep(**value)
+
+    def set_data(
+        self,
+        *,
+        training_data: Input,
+        target_column_name: str,
+        validation_data: Optional[Input] = None,
+        validation_data_size: Optional[float] = None,
+    ) -> None:
+        """Data settings for all AutoML Image jobs.
+
+        :keyword training_data: Required. Training data.
+        :type training_data: ~azure.ai.ml.entities.Input
+        :keyword target_column_name: Required. Target column name.
+        :type target_column_name: str
+        :keyword validation_data: Optional. Validation data.
+        :type validation_data: Optional[~azure.ai.ml.entities.Input]
+        :keyword validation_data_size: Optional. The fraction of training dataset that needs to be set aside for
+                                      validation purpose. Values should be in range (0.0 , 1.0).
+                                      Applied only when validation dataset is not provided.
+        :type validation_data_size: Optional[float]
+        :return: None
+        """
+        self.target_column_name = self.target_column_name if target_column_name is None else target_column_name
+        self.training_data = self.training_data if training_data is None else training_data
+        self.validation_data = self.validation_data if validation_data is None else validation_data
+        self.validation_data_size = self.validation_data_size if validation_data_size is None else validation_data_size
+
+    def set_limits(
+        self,
+        *,
+        max_concurrent_trials: Optional[int] = None,
+        max_trials: Optional[int] = None,
+        timeout_minutes: Optional[int] = None,
+    ) -> None:
+        """Limit settings for all AutoML Image Jobs.
+
+        :keyword max_concurrent_trials: Maximum number of trials to run concurrently. Defaults to None.
+        :type max_concurrent_trials: Optional[int]
+        :keyword max_trials: Maximum number of trials to run. Defaults to None.
+        :type max_trials: Optional[int]
+        :keyword timeout_minutes: AutoML job timeout in minutes. Defaults to None.
+        :type timeout_minutes: Optional[int]
+        :return: None
+        """
+        self._limits = self._limits or ImageLimitSettings()
+        self._limits.max_concurrent_trials = (
+            max_concurrent_trials if max_concurrent_trials is not None else self._limits.max_concurrent_trials
+        )
+        self._limits.max_trials = max_trials if max_trials is not None else self._limits.max_trials
+        self._limits.timeout_minutes = timeout_minutes if timeout_minutes is not None else self._limits.timeout_minutes
+
+    def set_sweep(
+        self,
+        *,
+        sampling_algorithm: Union[str, SamplingAlgorithmType],
+        early_termination: Optional[Union[BanditPolicy, MedianStoppingPolicy, TruncationSelectionPolicy]] = None,
+    ) -> None:
+        """Sweep settings for all AutoML Image jobs.
+
+        :keyword sampling_algorithm: Required. Type of the hyperparameter sampling
+            algorithms. Possible values include: "Grid", "Random", "Bayesian".
+        :type sampling_algorithm: str or ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
+        :keyword early_termination: Type of early termination policy.
+        :type early_termination: Union[
+            ~azure.mgmt.machinelearningservices.models.BanditPolicy,
+            ~azure.mgmt.machinelearningservices.models.MedianStoppingPolicy,
+            ~azure.mgmt.machinelearningservices.models.TruncationSelectionPolicy]
+        :return: None
+        """
+        if self._sweep:
+            self._sweep.sampling_algorithm = sampling_algorithm
+        else:
+            self._sweep = ImageSweepSettings(sampling_algorithm=sampling_algorithm)
+
+        self._sweep.early_termination = early_termination or self._sweep.early_termination
+
+    def __eq__(self, other: object) -> bool:
+        """Compares two AutoMLImage objects for equality.
+
+        :param other: The other AutoMLImage object to compare to.
+        :type other: ~azure.ai.ml.automl.AutoMLImage
+        :return: True if the two AutoMLImage objects are equal; False otherwise.
+        :rtype: bool
+        """
+        if not isinstance(other, AutoMLImage):
+            return NotImplemented
+
+        return (
+            self.target_column_name == other.target_column_name
+            and self.training_data == other.training_data
+            and self.validation_data == other.validation_data
+            and self.validation_data_size == other.validation_data_size
+            and self._limits == other._limits
+            and self._sweep == other._sweep
+        )
+
+    def __ne__(self, other: object) -> bool:
+        """Compares two AutoMLImage objects for inequality.
+
+        :param other: The other AutoMLImage object to compare to.
+        :type other: ~azure.ai.ml.automl.AutoMLImage
+        :return: True if the two AutoMLImage objects are not equal; False otherwise.
+        :rtype: bool
+        """
+        return not self.__eq__(other)
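A quick illustration of the AutoMLImage surface defined above (set_data, set_limits, set_sweep), sketched against one of the concrete subclasses; the MLTable path, label column, and BanditPolicy values are placeholders, not part of this module:

    from azure.ai.ml import Input, automl
    from azure.ai.ml.constants import AssetTypes
    from azure.ai.ml.sweep import BanditPolicy

    job = automl.ImageClassificationJob(target_column_name="label")
    job.set_data(
        training_data=Input(type=AssetTypes.MLTABLE, path="./training-mltable-folder"),
        target_column_name="label",
        validation_data_size=0.2,
    )
    job.set_limits(max_trials=10, max_concurrent_trials=2, timeout_minutes=60)
    job.set_sweep(
        sampling_algorithm="Random",
        early_termination=BanditPolicy(evaluation_interval=2, slack_factor=0.2),
    )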
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image_classification_base.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image_classification_base.py
new file mode 100644
index 00000000..ef0c8a2d
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image_classification_base.py
@@ -0,0 +1,439 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+# pylint: disable=protected-access
+
+from typing import Any, Dict, List, Optional, Union
+
+from azure.ai.ml._restclient.v2023_04_01_preview.models import LearningRateScheduler, StochasticOptimizer
+from azure.ai.ml._utils.utils import camel_to_snake
+from azure.ai.ml.entities._job.automl.image.automl_image import AutoMLImage
+from azure.ai.ml.entities._job.automl.image.image_classification_search_space import ImageClassificationSearchSpace
+from azure.ai.ml.entities._job.automl.image.image_limit_settings import ImageLimitSettings
+from azure.ai.ml.entities._job.automl.image.image_model_settings import ImageModelSettingsClassification
+from azure.ai.ml.entities._job.automl.image.image_sweep_settings import ImageSweepSettings
+from azure.ai.ml.entities._job.automl.search_space import SearchSpace
+from azure.ai.ml.entities._job.automl.utils import cast_to_specific_search_space
+from azure.ai.ml.exceptions import ErrorCategory, ErrorTarget, ValidationException
+
+
+class AutoMLImageClassificationBase(AutoMLImage):
+    """Base class for AutoML Image Classification and Image Classification Multilabel tasks.
+    Please do not instantiate this class directly. Instantiate one of the child classes instead.
+
+    :keyword task_type: Type of task to run.
+    Possible values include: "ImageClassification", "ImageClassificationMultilabel".
+    :paramtype task_type: str
+    :keyword limits: Limits for AutoML image classification jobs. Defaults to None.
+    :paramtype limits: Optional[~azure.ai.ml.automl.ImageLimitSettings]
+    :keyword sweep: Sweep settings for AutoML image classification jobs. Defaults to None.
+    :paramtype sweep: Optional[~azure.ai.ml.automl.ImageSweepSettings]
+    :keyword training_parameters: Training parameters for AutoML image classification jobs. Defaults to None.
+    :paramtype training_parameters: Optional[~azure.ai.ml.automl.ImageModelSettingsClassification]
+    :keyword search_space: Search space for AutoML image classification jobs. Defaults to None.
+    :paramtype search_space: Optional[List[~azure.ai.ml.automl.ImageClassificationSearchSpace]]
+    :keyword kwargs: Other keyword arguments for the AutoMLImageClassificationBase class.
+    :paramtype kwargs: Dict[str, Any]
+    """
+
+    def __init__(
+        self,
+        *,
+        task_type: str,
+        limits: Optional[ImageLimitSettings] = None,
+        sweep: Optional[ImageSweepSettings] = None,
+        training_parameters: Optional[ImageModelSettingsClassification] = None,
+        search_space: Optional[List[ImageClassificationSearchSpace]] = None,
+        **kwargs: Any,
+    ) -> None:
+        self._training_parameters: Optional[ImageModelSettingsClassification] = None
+
+        super().__init__(task_type=task_type, limits=limits, sweep=sweep, **kwargs)
+        self.training_parameters = training_parameters  # Assigning training_parameters through setter method.
+        self._search_space = search_space
+
+    @property
+    def training_parameters(self) -> Optional[ImageModelSettingsClassification]:
+        """
+        :rtype: ~azure.ai.ml.automl.ImageModelSettingsClassification
+        :return: Training parameters for AutoML Image Classification and Image Classification Multilabel tasks.
+        """
+        return self._training_parameters
+
+    @training_parameters.setter
+    def training_parameters(self, value: Union[Dict, ImageModelSettingsClassification]) -> None:
+        """Setting Image training parameters for AutoML Image Classification and Image Classification Multilabel tasks.
+
+        :param value: Training parameters for AutoML Image Classification and Image Classification Multilabel tasks.
+        :type value: Union[Dict, ~azure.ai.ml.automl.ImageModelSettingsClassification]
+        :raises ~azure.ai.ml.exceptions.ValidationException: if value is not a dictionary or
+         ImageModelSettingsClassification.
+        :return: None
+        """
+        if value is None:
+            self._training_parameters = None
+        elif isinstance(value, ImageModelSettingsClassification):
+            self._training_parameters = value
+            # set_training_parameters convert parameter values from snake case str to enum.
+            # We need to add any future enum parameters in this call to support snake case str.
+            self.set_training_parameters(
+                optimizer=value.optimizer,
+                learning_rate_scheduler=value.learning_rate_scheduler,
+            )
+        else:
+            if not isinstance(value, dict):
+                msg = "Expected a dictionary for model settings."
+                raise ValidationException(
+                    message=msg,
+                    no_personal_data_message=msg,
+                    target=ErrorTarget.AUTOML,
+                    error_category=ErrorCategory.USER_ERROR,
+                )
+            self.set_training_parameters(**value)
+
+    @property
+    def search_space(self) -> Optional[List[ImageClassificationSearchSpace]]:
+        """
+        :rtype: List[~azure.ai.ml.automl.ImageClassificationSearchSpace]
+        :return: Search space for AutoML Image Classification and Image Classification Multilabel tasks.
+        """
+        return self._search_space
+
+    @search_space.setter
+    def search_space(self, value: Union[List[Dict], List[SearchSpace]]) -> None:
+        """Setting Image search space for AutoML Image Classification and Image Classification Multilabel tasks.
+
+        :param value: Search space for AutoML Image Classification and Image Classification Multilabel tasks.
+        :type value: Union[List[Dict], List[~azure.ai.ml.automl.ImageClassificationSearchSpace]]
+        :raises ~azure.ai.ml.exceptions.ValidationException: if value is not a list of dictionaries or
+            ImageClassificationSearchSpace.
+        """
+        if not isinstance(value, list):
+            msg = "Expected a list for search space."
+            raise ValidationException(
+                message=msg,
+                no_personal_data_message=msg,
+                target=ErrorTarget.AUTOML,
+                error_category=ErrorCategory.USER_ERROR,
+            )
+
+        all_dict_type = all(isinstance(item, dict) for item in value)
+        all_search_space_type = all(isinstance(item, SearchSpace) for item in value)
+
+        if all_search_space_type or all_dict_type:
+            self._search_space = [
+                cast_to_specific_search_space(item, ImageClassificationSearchSpace, self.task_type)  # type: ignore
+                for item in value
+            ]
+        else:
+            msg = "Expected all items in the list to be either dictionaries or ImageClassificationSearchSpace objects."
+            raise ValidationException(
+                message=msg,
+                no_personal_data_message=msg,
+                target=ErrorTarget.AUTOML,
+                error_category=ErrorCategory.USER_ERROR,
+            )
+
+    # pylint: disable=too-many-locals
+    def set_training_parameters(
+        self,
+        *,
+        advanced_settings: Optional[str] = None,
+        ams_gradient: Optional[bool] = None,
+        beta1: Optional[float] = None,
+        beta2: Optional[float] = None,
+        checkpoint_frequency: Optional[int] = None,
+        checkpoint_run_id: Optional[str] = None,
+        distributed: Optional[bool] = None,
+        early_stopping: Optional[bool] = None,
+        early_stopping_delay: Optional[int] = None,
+        early_stopping_patience: Optional[int] = None,
+        enable_onnx_normalization: Optional[bool] = None,
+        evaluation_frequency: Optional[int] = None,
+        gradient_accumulation_step: Optional[int] = None,
+        layers_to_freeze: Optional[int] = None,
+        learning_rate: Optional[float] = None,
+        learning_rate_scheduler: Optional[Union[str, LearningRateScheduler]] = None,
+        model_name: Optional[str] = None,
+        momentum: Optional[float] = None,
+        nesterov: Optional[bool] = None,
+        number_of_epochs: Optional[int] = None,
+        number_of_workers: Optional[int] = None,
+        optimizer: Optional[Union[str, StochasticOptimizer]] = None,
+        random_seed: Optional[int] = None,
+        step_lr_gamma: Optional[float] = None,
+        step_lr_step_size: Optional[int] = None,
+        training_batch_size: Optional[int] = None,
+        validation_batch_size: Optional[int] = None,
+        warmup_cosine_lr_cycles: Optional[float] = None,
+        warmup_cosine_lr_warmup_epochs: Optional[int] = None,
+        weight_decay: Optional[float] = None,
+        training_crop_size: Optional[int] = None,
+        validation_crop_size: Optional[int] = None,
+        validation_resize_size: Optional[int] = None,
+        weighted_loss: Optional[int] = None,
+    ) -> None:
+        """Setting Image training parameters for AutoML Image Classification and Image Classification Multilabel tasks.
+
+        :keyword advanced_settings: Settings for advanced scenarios.
+        :paramtype advanced_settings: str
+        :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+        :paramtype ams_gradient: bool
+        :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+         range [0, 1].
+        :paramtype beta1: float
+        :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+         range [0, 1].
+        :paramtype beta2: float
+        :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
+         integer.
+        :paramtype checkpoint_frequency: int
+        :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+         incremental training.
+        :paramtype checkpoint_run_id: str
+        :keyword distributed: Whether to use distributed training.
+        :paramtype distributed: bool
+        :keyword early_stopping: Enable early stopping logic during training.
+        :paramtype early_stopping: bool
+        :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+         before primary metric improvement
+         is tracked for early stopping. Must be a positive integer.
+        :paramtype early_stopping_delay: int
+        :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+         primary metric improvement before
+         the run is stopped. Must be a positive integer.
+        :paramtype early_stopping_patience: int
+        :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+        :paramtype enable_onnx_normalization: bool
+        :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+         Must be a positive integer.
+        :paramtype evaluation_frequency: int
+        :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+         "GradAccumulationStep" steps without
+         updating the model weights while accumulating the gradients of those steps, and then using
+         the accumulated gradients to compute the weight updates. Must be a positive integer.
+        :paramtype gradient_accumulation_step: int
+        :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+         integer.
+         For instance, passing 2 as value for 'seresnext' means
+         freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+         please
+         see: https://learn.microsoft.com/azure/machine-learning/reference-automl-images-hyperparameters#model-agnostic-hyperparameters.   # pylint: disable=line-too-long
+        :type layers_to_freeze: int
+        :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+        :paramtype learning_rate: float
+        :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+         'step'. Possible values include: "None", "WarmupCosine", "Step".
+        :type learning_rate_scheduler: str or
+         ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+        :keyword model_name: Name of the model to use for training.
+         For more information on the available models please visit the official documentation:
+         https://learn.microsoft.com/azure/machine-learning/how-to-auto-train-image-models.
+        :type model_name: str
+        :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+         1].
+        :paramtype momentum: float
+        :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+        :paramtype nesterov: bool
+        :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+        :paramtype number_of_epochs: int
+        :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+        :paramtype number_of_workers: int
+        :keyword optimizer: Type of optimizer. Possible values include: "None", "Sgd", "Adam", "Adamw".
+        :type optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+        :keyword random_seed: Random seed to be used when using deterministic training.
+        :paramtype random_seed: int
+        :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+         in the range [0, 1].
+        :paramtype step_lr_gamma: float
+        :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+         a positive integer.
+        :paramtype step_lr_step_size: int
+        :keyword training_batch_size: Training batch size. Must be a positive integer.
+        :paramtype training_batch_size: int
+        :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+        :paramtype validation_batch_size: int
+        :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+         'warmup_cosine'. Must be a float in the range [0, 1].
+        :paramtype warmup_cosine_lr_cycles: float
+        :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+         'warmup_cosine'. Must be a positive integer.
+        :paramtype warmup_cosine_lr_warmup_epochs: int
+        :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+         be a float in the range[0, 1].
+        :paramtype weight_decay: float
+        :keyword training_crop_size: Image crop size that is input to the neural network for the
+         training dataset. Must be a positive integer.
+        :paramtype training_crop_size: int
+        :keyword validation_crop_size: Image crop size that is input to the neural network for the
+         validation dataset. Must be a positive integer.
+        :paramtype validation_crop_size: int
+        :keyword validation_resize_size: Image size to which to resize before cropping for validation
+         dataset. Must be a positive integer.
+        :paramtype validation_resize_size: int
+        :keyword weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
+         1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights.
+         Must be 0, 1, or 2.
+        :paramtype weighted_loss: int
+        """
+        self._training_parameters = self._training_parameters or ImageModelSettingsClassification()
+
+        self._training_parameters.advanced_settings = (
+            advanced_settings if advanced_settings is not None else self._training_parameters.advanced_settings
+        )
+        self._training_parameters.ams_gradient = (
+            ams_gradient if ams_gradient is not None else self._training_parameters.ams_gradient
+        )
+        self._training_parameters.beta1 = beta1 if beta1 is not None else self._training_parameters.beta1
+        self._training_parameters.beta2 = beta2 if beta2 is not None else self._training_parameters.beta2
+        self._training_parameters.checkpoint_frequency = (
+            checkpoint_frequency if checkpoint_frequency is not None else self._training_parameters.checkpoint_frequency
+        )
+        self._training_parameters.checkpoint_run_id = (
+            checkpoint_run_id if checkpoint_run_id is not None else self._training_parameters.checkpoint_run_id
+        )
+        self._training_parameters.distributed = (
+            distributed if distributed is not None else self._training_parameters.distributed
+        )
+        self._training_parameters.early_stopping = (
+            early_stopping if early_stopping is not None else self._training_parameters.early_stopping
+        )
+        self._training_parameters.early_stopping_delay = (
+            early_stopping_delay if early_stopping_delay is not None else self._training_parameters.early_stopping_delay
+        )
+        self._training_parameters.early_stopping_patience = (
+            early_stopping_patience
+            if early_stopping_patience is not None
+            else self._training_parameters.early_stopping_patience
+        )
+        self._training_parameters.enable_onnx_normalization = (
+            enable_onnx_normalization
+            if enable_onnx_normalization is not None
+            else self._training_parameters.enable_onnx_normalization
+        )
+        self._training_parameters.evaluation_frequency = (
+            evaluation_frequency if evaluation_frequency is not None else self._training_parameters.evaluation_frequency
+        )
+        self._training_parameters.gradient_accumulation_step = (
+            gradient_accumulation_step
+            if gradient_accumulation_step is not None
+            else self._training_parameters.gradient_accumulation_step
+        )
+        self._training_parameters.layers_to_freeze = (
+            layers_to_freeze if layers_to_freeze is not None else self._training_parameters.layers_to_freeze
+        )
+        self._training_parameters.learning_rate = (
+            learning_rate if learning_rate is not None else self._training_parameters.learning_rate
+        )
+        self._training_parameters.learning_rate_scheduler = (
+            LearningRateScheduler[camel_to_snake(learning_rate_scheduler).upper()]
+            if learning_rate_scheduler is not None
+            else self._training_parameters.learning_rate_scheduler
+        )
+        self._training_parameters.model_name = (
+            model_name if model_name is not None else self._training_parameters.model_name
+        )
+        self._training_parameters.momentum = momentum if momentum is not None else self._training_parameters.momentum
+        self._training_parameters.nesterov = nesterov if nesterov is not None else self._training_parameters.nesterov
+        self._training_parameters.number_of_epochs = (
+            number_of_epochs if number_of_epochs is not None else self._training_parameters.number_of_epochs
+        )
+        self._training_parameters.number_of_workers = (
+            number_of_workers if number_of_workers is not None else self._training_parameters.number_of_workers
+        )
+        self._training_parameters.optimizer = (
+            StochasticOptimizer[camel_to_snake(optimizer).upper()]
+            if optimizer is not None
+            else self._training_parameters.optimizer
+        )
+        self._training_parameters.random_seed = (
+            random_seed if random_seed is not None else self._training_parameters.random_seed
+        )
+        self._training_parameters.step_lr_gamma = (
+            step_lr_gamma if step_lr_gamma is not None else self._training_parameters.step_lr_gamma
+        )
+        self._training_parameters.step_lr_step_size = (
+            step_lr_step_size if step_lr_step_size is not None else self._training_parameters.step_lr_step_size
+        )
+        self._training_parameters.training_batch_size = (
+            training_batch_size if training_batch_size is not None else self._training_parameters.training_batch_size
+        )
+        self._training_parameters.validation_batch_size = (
+            validation_batch_size
+            if validation_batch_size is not None
+            else self._training_parameters.validation_batch_size
+        )
+        self._training_parameters.warmup_cosine_lr_cycles = (
+            warmup_cosine_lr_cycles
+            if warmup_cosine_lr_cycles is not None
+            else self._training_parameters.warmup_cosine_lr_cycles
+        )
+        self._training_parameters.warmup_cosine_lr_warmup_epochs = (
+            warmup_cosine_lr_warmup_epochs
+            if warmup_cosine_lr_warmup_epochs is not None
+            else self._training_parameters.warmup_cosine_lr_warmup_epochs
+        )
+        self._training_parameters.weight_decay = (
+            weight_decay if weight_decay is not None else self._training_parameters.weight_decay
+        )
+        self._training_parameters.training_crop_size = (
+            training_crop_size if training_crop_size is not None else self._training_parameters.training_crop_size
+        )
+        self._training_parameters.validation_crop_size = (
+            validation_crop_size if validation_crop_size is not None else self._training_parameters.validation_crop_size
+        )
+        self._training_parameters.validation_resize_size = (
+            validation_resize_size
+            if validation_resize_size is not None
+            else self._training_parameters.validation_resize_size
+        )
+        self._training_parameters.weighted_loss = (
+            weighted_loss if weighted_loss is not None else self._training_parameters.weighted_loss
+        )
+
+    # pylint: enable=too-many-locals
+
+    def extend_search_space(
+        self,
+        value: Union[SearchSpace, List[SearchSpace]],
+    ) -> None:
+        """Add Search space for AutoML Image Classification and Image Classification Multilabel tasks.
+
+        :param value: specify either an instance of ImageClassificationSearchSpace or list of
+            ImageClassificationSearchSpace for searching through the parameter space
+        :type value: Union[ImageClassificationSearchSpace, List[ImageClassificationSearchSpace]]
+        """
+        self._search_space = self._search_space or []
+
+        if isinstance(value, list):
+            self._search_space.extend(
+                [
+                    cast_to_specific_search_space(item, ImageClassificationSearchSpace, self.task_type)  # type: ignore
+                    for item in value
+                ]
+            )
+        else:
+            self._search_space.append(
+                cast_to_specific_search_space(value, ImageClassificationSearchSpace, self.task_type)  # type: ignore
+            )
+
+    @classmethod
+    def _get_search_space_from_str(cls, search_space_str: str) -> Optional[List[ImageClassificationSearchSpace]]:
+        return (
+            [ImageClassificationSearchSpace._from_rest_object(entry) for entry in search_space_str if entry is not None]
+            if search_space_str is not None
+            else None
+        )
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, AutoMLImageClassificationBase):
+            return NotImplemented
+
+        if not super().__eq__(other):
+            return False
+
+        return self._training_parameters == other._training_parameters and self._search_space == other._search_space
+
+    def __ne__(self, other: object) -> bool:
+        return not self.__eq__(other)
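To illustrate how the setters above combine, a hedged sketch of fixing some training parameters and extending the search space on an image classification job; it assumes SearchSpace, Choice, and Uniform are available from the public azure.ai.ml.automl and azure.ai.ml.sweep namespaces, and the model names and ranges are placeholders:

    from azure.ai.ml import automl
    from azure.ai.ml.sweep import Choice, Uniform

    job = automl.ImageClassificationJob(target_column_name="label")

    # Fixed (non-swept) parameters; snake_case strings are converted to enums by
    # set_training_parameters, as shown in the implementation above.
    job.set_training_parameters(
        optimizer="sgd",
        learning_rate_scheduler="warmup_cosine",
        early_stopping=True,
    )

    # Per-trial ranges go through the search space; each entry is cast to
    # ImageClassificationSearchSpace by extend_search_space.
    job.extend_search_space(
        [
            automl.SearchSpace(
                model_name=Choice(["seresnext", "vitb16r224"]),
                learning_rate=Uniform(0.001, 0.01),
                number_of_epochs=Choice([15, 30]),
            )
        ]
    )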
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image_object_detection_base.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image_object_detection_base.py
new file mode 100644
index 00000000..db0c7bc6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/automl_image_object_detection_base.py
@@ -0,0 +1,524 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+# pylint: disable=protected-access
+
+from typing import Any, Dict, List, Optional, Union
+
+from azure.ai.ml._restclient.v2023_04_01_preview.models import (
+    LearningRateScheduler,
+    LogTrainingMetrics,
+    LogValidationLoss,
+    ModelSize,
+    StochasticOptimizer,
+    ValidationMetricType,
+)
+from azure.ai.ml._utils.utils import camel_to_snake
+from azure.ai.ml.entities._job.automl import SearchSpace
+from azure.ai.ml.entities._job.automl.image.automl_image import AutoMLImage
+from azure.ai.ml.entities._job.automl.image.image_limit_settings import ImageLimitSettings
+from azure.ai.ml.entities._job.automl.image.image_model_settings import ImageModelSettingsObjectDetection
+from azure.ai.ml.entities._job.automl.image.image_object_detection_search_space import ImageObjectDetectionSearchSpace
+from azure.ai.ml.entities._job.automl.image.image_sweep_settings import ImageSweepSettings
+from azure.ai.ml.entities._job.automl.utils import cast_to_specific_search_space
+from azure.ai.ml.exceptions import ErrorCategory, ErrorTarget, ValidationException
+
+
+class AutoMLImageObjectDetectionBase(AutoMLImage):
+    """Base class for AutoML Image Object Detection and Image Instance Segmentation tasks.
+
+    :keyword task_type: Type of task to run. Possible values include: "ImageObjectDetection",
+    "ImageInstanceSegmentation".
+    :paramtype task_type: str
+    :keyword limits: The resource limits for the job.
+    :paramtype limits: Optional[~azure.ai.ml.entities._job.automl.image.image_limit_settings.ImageLimitSettings]
+    :keyword sweep: The sweep settings for the job.
+    :paramtype sweep: Optional[~azure.ai.ml.entities._job.automl.image.image_sweep_settings.ImageSweepSettings]
+    :keyword training_parameters: The training parameters for the job.
+    :paramtype training_parameters: Optional[~azure.ai.ml.automl.ImageModelSettingsObjectDetection]
+    :keyword search_space: The search space for the job.
+    :paramtype search_space: Optional[List[~azure.ai.ml.automl.ImageObjectDetectionSearchSpace]]
+    """
+
+    def __init__(
+        self,
+        *,
+        task_type: str,
+        limits: Optional[ImageLimitSettings] = None,
+        sweep: Optional[ImageSweepSettings] = None,
+        training_parameters: Optional[ImageModelSettingsObjectDetection] = None,
+        search_space: Optional[List[ImageObjectDetectionSearchSpace]] = None,
+        **kwargs: Any,
+    ) -> None:
+        self._training_parameters: Optional[ImageModelSettingsObjectDetection] = None
+
+        super().__init__(task_type=task_type, limits=limits, sweep=sweep, **kwargs)
+
+        self.training_parameters = training_parameters  # Assigning training_parameters through setter method.
+
+        self._search_space = search_space
+
+    @property
+    def training_parameters(self) -> Optional[ImageModelSettingsObjectDetection]:
+        return self._training_parameters
+
+    @training_parameters.setter
+    def training_parameters(self, value: Union[Dict, ImageModelSettingsObjectDetection]) -> None:
+        if value is None:
+            self._training_parameters = None
+        elif isinstance(value, ImageModelSettingsObjectDetection):
+            self._training_parameters = value
+            # set_training_parameters convert parameter values from snake case str to enum.
+            # We need to add any future enum parameters in this call to support snake case str.
+            self.set_training_parameters(
+                optimizer=value.optimizer,
+                learning_rate_scheduler=value.learning_rate_scheduler,
+                model_size=value.model_size,
+                validation_metric_type=value.validation_metric_type,
+                log_training_metrics=value.log_training_metrics,
+                log_validation_loss=value.log_validation_loss,
+            )
+        else:
+            if not isinstance(value, dict):
+                msg = "Expected a dictionary for model settings."
+                raise ValidationException(
+                    message=msg,
+                    no_personal_data_message=msg,
+                    target=ErrorTarget.AUTOML,
+                    error_category=ErrorCategory.USER_ERROR,
+                )
+            self.set_training_parameters(**value)
+
+    @property
+    def search_space(self) -> Optional[List[ImageObjectDetectionSearchSpace]]:
+        return self._search_space
+
+    @search_space.setter
+    def search_space(self, value: Union[List[Dict], List[SearchSpace]]) -> None:
+        if not isinstance(value, list):
+            msg = "Expected a list for search space."
+            raise ValidationException(
+                message=msg,
+                no_personal_data_message=msg,
+                target=ErrorTarget.AUTOML,
+                error_category=ErrorCategory.USER_ERROR,
+            )
+
+        all_dict_type = all(isinstance(item, dict) for item in value)
+        all_search_space_type = all(isinstance(item, SearchSpace) for item in value)
+
+        if all_search_space_type or all_dict_type:
+            self._search_space = [
+                cast_to_specific_search_space(item, ImageObjectDetectionSearchSpace, self.task_type)  # type: ignore
+                for item in value
+            ]
+        else:
+            msg = "Expected all items in the list to be either dictionaries or SearchSpace objects."
+            raise ValidationException(
+                message=msg,
+                no_personal_data_message=msg,
+                target=ErrorTarget.AUTOML,
+                error_category=ErrorCategory.USER_ERROR,
+            )
+
+    # pylint: disable=too-many-locals
+    def set_training_parameters(
+        self,
+        *,
+        advanced_settings: Optional[str] = None,
+        ams_gradient: Optional[bool] = None,
+        beta1: Optional[float] = None,
+        beta2: Optional[float] = None,
+        checkpoint_frequency: Optional[int] = None,
+        checkpoint_run_id: Optional[str] = None,
+        distributed: Optional[bool] = None,
+        early_stopping: Optional[bool] = None,
+        early_stopping_delay: Optional[int] = None,
+        early_stopping_patience: Optional[int] = None,
+        enable_onnx_normalization: Optional[bool] = None,
+        evaluation_frequency: Optional[int] = None,
+        gradient_accumulation_step: Optional[int] = None,
+        layers_to_freeze: Optional[int] = None,
+        learning_rate: Optional[float] = None,
+        learning_rate_scheduler: Optional[Union[str, LearningRateScheduler]] = None,
+        model_name: Optional[str] = None,
+        momentum: Optional[float] = None,
+        nesterov: Optional[bool] = None,
+        number_of_epochs: Optional[int] = None,
+        number_of_workers: Optional[int] = None,
+        optimizer: Optional[Union[str, StochasticOptimizer]] = None,
+        random_seed: Optional[int] = None,
+        step_lr_gamma: Optional[float] = None,
+        step_lr_step_size: Optional[int] = None,
+        training_batch_size: Optional[int] = None,
+        validation_batch_size: Optional[int] = None,
+        warmup_cosine_lr_cycles: Optional[float] = None,
+        warmup_cosine_lr_warmup_epochs: Optional[int] = None,
+        weight_decay: Optional[float] = None,
+        box_detections_per_image: Optional[int] = None,
+        box_score_threshold: Optional[float] = None,
+        image_size: Optional[int] = None,
+        max_size: Optional[int] = None,
+        min_size: Optional[int] = None,
+        model_size: Optional[Union[str, ModelSize]] = None,
+        multi_scale: Optional[bool] = None,
+        nms_iou_threshold: Optional[float] = None,
+        tile_grid_size: Optional[str] = None,
+        tile_overlap_ratio: Optional[float] = None,
+        tile_predictions_nms_threshold: Optional[float] = None,
+        validation_iou_threshold: Optional[float] = None,
+        validation_metric_type: Optional[Union[str, ValidationMetricType]] = None,
+        log_training_metrics: Optional[Union[str, LogTrainingMetrics]] = None,
+        log_validation_loss: Optional[Union[str, LogValidationLoss]] = None,
+    ) -> None:
+        """Setting Image training parameters for for AutoML Image Object Detection and Image Instance Segmentation
+        tasks.
+
+        :keyword advanced_settings: Settings for advanced scenarios.
+        :paramtype advanced_settings: str
+        :keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+        :paramtype ams_gradient: bool
+        :keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+         range [0, 1].
+        :paramtype beta1: float
+        :keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+         range [0, 1].
+        :paramtype beta2: float
+        :keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
+         integer.
+        :paramtype checkpoint_frequency: int
+        :keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+         incremental training.
+        :paramtype checkpoint_run_id: str
+        :keyword distributed: Whether to use distributed training.
+        :paramtype distributed: bool
+        :keyword early_stopping: Enable early stopping logic during training.
+        :paramtype early_stopping: bool
+        :keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+         before primary metric improvement
+         is tracked for early stopping. Must be a positive integer.
+        :paramtype early_stopping_delay: int
+        :keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
+         primary metric improvement before
+         the run is stopped. Must be a positive integer.
+        :paramtype early_stopping_patience: int
+        :keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
+        :paramtype enable_onnx_normalization: bool
+        :keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+         Must be a positive integer.
+        :paramtype evaluation_frequency: int
+        :keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
+         "GradAccumulationStep" steps without
+         updating the model weights while accumulating the gradients of those steps, and then using
+         the accumulated gradients to compute the weight updates. Must be a positive integer.
+        :paramtype gradient_accumulation_step: int
+        :keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+         integer.
+         For instance, passing 2 as value for 'seresnext' means
+         freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+         please
+         see: https://learn.microsoft.com/azure/machine-learning/reference-automl-images-hyperparameters#model-agnostic-hyperparameters.   # pylint: disable=line-too-long
+        :type layers_to_freeze: int
+        :keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+        :paramtype learning_rate: float
+        :keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+         'step'. Possible values include: "None", "WarmupCosine", "Step".
+        :type learning_rate_scheduler: str or
+         ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+        :keyword model_name: Name of the model to use for training.
+         For more information on the available models please visit the official documentation:
+         https://learn.microsoft.com/azure/machine-learning/how-to-auto-train-image-models.
+        :type model_name: str
+        :keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+         1].
+        :paramtype momentum: float
+        :keyword nesterov: Enable nesterov when optimizer is 'sgd'.
+        :paramtype nesterov: bool
+        :keyword number_of_epochs: Number of training epochs. Must be a positive integer.
+        :paramtype number_of_epochs: int
+        :keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
+        :paramtype number_of_workers: int
+        :keyword optimizer: Type of optimizer. Possible values include: "None", "Sgd", "Adam", "Adamw".
+        :type optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+        :keyword random_seed: Random seed to be used when using deterministic training.
+        :paramtype random_seed: int
+        :keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+         in the range [0, 1].
+        :paramtype step_lr_gamma: float
+        :keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+         a positive integer.
+        :paramtype step_lr_step_size: int
+        :keyword training_batch_size: Training batch size. Must be a positive integer.
+        :paramtype training_batch_size: int
+        :keyword validation_batch_size: Validation batch size. Must be a positive integer.
+        :paramtype validation_batch_size: int
+        :keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+         'warmup_cosine'. Must be a float in the range [0, 1].
+        :paramtype warmup_cosine_lr_cycles: float
+        :keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+         'warmup_cosine'. Must be a positive integer.
+        :paramtype warmup_cosine_lr_warmup_epochs: int
+        :keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+         be a float in the range[0, 1].
+        :paramtype weight_decay: float
+        :keyword box_detections_per_image: Maximum number of detections per image, for all classes.
+         Must be a positive integer.
+         Note: This setting is not supported for the 'yolov5' algorithm.
+        :type box_detections_per_image: int
+        :keyword box_score_threshold: During inference, only return proposals with a classification
+         score greater than
+         BoxScoreThreshold. Must be a float in the range[0, 1].
+        :paramtype box_score_threshold: float
+        :keyword image_size: Image size for training and validation. Must be a positive integer.
+         Note: The training run may get into CUDA OOM if the size is too big.
+         Note: This setting is only supported for the 'yolov5' algorithm.
+        :type image_size: int
+        :keyword max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
+         Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+         Note: This setting is not supported for the 'yolov5' algorithm.
+        :type max_size: int
+        :keyword min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
+         Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+         Note: This setting is not supported for the 'yolov5' algorithm.
+        :type min_size: int
+        :keyword model_size: Model size. Must be 'small', 'medium', 'large', or 'extra_large'.
+         Note: training run may get into CUDA OOM if the model size is too big.
+         Note: This setting is only supported for the 'yolov5' algorithm.
+        :type model_size: str or ~azure.mgmt.machinelearningservices.models.ModelSize
+        :keyword multi_scale: Enable multi-scale image by varying image size by +/- 50%.
+         Note: training run may get into CUDA OOM if there is not sufficient GPU memory.
+         Note: This setting is only supported for the 'yolov5' algorithm.
+        :type multi_scale: bool
+        :keyword nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
+         float in the range [0, 1].
+        :paramtype nms_iou_threshold: float
+        :keyword tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must
+         not be
+         None to enable small object detection logic. A string containing two integers in mxn format.
+        :type tile_grid_size: str
+        :keyword tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be
+         float in the range [0, 1).
+        :paramtype tile_overlap_ratio: float
+        :keyword tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
+         predictions from tiles and image.
+         Used in validation/inference. Must be float in the range [0, 1].
+         NMS: Non-maximum suppression.
+        :type tile_predictions_nms_threshold: float
+        :keyword validation_iou_threshold: IOU threshold to use when computing validation metric. Must
+         be a float in the range [0, 1].
+        :paramtype validation_iou_threshold: float
+        :keyword validation_metric_type: Metric computation method to use for validation metrics. Must
+         be 'none', 'coco', 'voc', or 'coco_voc'.
+        :paramtype validation_metric_type: str or ~azure.mgmt.machinelearningservices.models.ValidationMetricType
+        :keyword log_training_metrics: Indicates whether to log training metrics. Must
+         be 'Enable' or 'Disable'.
+        :paramtype log_training_metrics: str or ~azure.mgmt.machinelearningservices.models.LogTrainingMetrics
+        :keyword log_validation_loss: Indicates whether to log validation loss. Must
+         be 'Enable' or 'Disable'.
+        :paramtype log_validation_loss: str or ~azure.mgmt.machinelearningservices.models.LogValidationLoss
+        """
+        self._training_parameters = self._training_parameters or ImageModelSettingsObjectDetection()
+
+        self._training_parameters.advanced_settings = (
+            advanced_settings if advanced_settings is not None else self._training_parameters.advanced_settings
+        )
+        self._training_parameters.ams_gradient = (
+            ams_gradient if ams_gradient is not None else self._training_parameters.ams_gradient
+        )
+        self._training_parameters.beta1 = beta1 if beta1 is not None else self._training_parameters.beta1
+        self._training_parameters.beta2 = beta2 if beta2 is not None else self._training_parameters.beta2
+        self._training_parameters.checkpoint_frequency = (
+            checkpoint_frequency if checkpoint_frequency is not None else self._training_parameters.checkpoint_frequency
+        )
+        self._training_parameters.checkpoint_run_id = (
+            checkpoint_run_id if checkpoint_run_id is not None else self._training_parameters.checkpoint_run_id
+        )
+        self._training_parameters.distributed = (
+            distributed if distributed is not None else self._training_parameters.distributed
+        )
+        self._training_parameters.early_stopping = (
+            early_stopping if early_stopping is not None else self._training_parameters.early_stopping
+        )
+        self._training_parameters.early_stopping_delay = (
+            early_stopping_delay if early_stopping_delay is not None else self._training_parameters.early_stopping_delay
+        )
+        self._training_parameters.early_stopping_patience = (
+            early_stopping_patience
+            if early_stopping_patience is not None
+            else self._training_parameters.early_stopping_patience
+        )
+        self._training_parameters.enable_onnx_normalization = (
+            enable_onnx_normalization
+            if enable_onnx_normalization is not None
+            else self._training_parameters.enable_onnx_normalization
+        )
+        self._training_parameters.evaluation_frequency = (
+            evaluation_frequency if evaluation_frequency is not None else self._training_parameters.evaluation_frequency
+        )
+        self._training_parameters.gradient_accumulation_step = (
+            gradient_accumulation_step
+            if gradient_accumulation_step is not None
+            else self._training_parameters.gradient_accumulation_step
+        )
+        self._training_parameters.layers_to_freeze = (
+            layers_to_freeze if layers_to_freeze is not None else self._training_parameters.layers_to_freeze
+        )
+        self._training_parameters.learning_rate = (
+            learning_rate if learning_rate is not None else self._training_parameters.learning_rate
+        )
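+        # Enum-backed settings passed as strings (scheduler, optimizer, model size, metric type, and the
+        # logging switches) are normalized with camel_to_snake before the REST enum lookup below.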
+        self._training_parameters.learning_rate_scheduler = (
+            LearningRateScheduler[camel_to_snake(learning_rate_scheduler)]
+            if learning_rate_scheduler is not None
+            else self._training_parameters.learning_rate_scheduler
+        )
+        self._training_parameters.model_name = (
+            model_name if model_name is not None else self._training_parameters.model_name
+        )
+        self._training_parameters.momentum = momentum if momentum is not None else self._training_parameters.momentum
+        self._training_parameters.nesterov = nesterov if nesterov is not None else self._training_parameters.nesterov
+        self._training_parameters.number_of_epochs = (
+            number_of_epochs if number_of_epochs is not None else self._training_parameters.number_of_epochs
+        )
+        self._training_parameters.number_of_workers = (
+            number_of_workers if number_of_workers is not None else self._training_parameters.number_of_workers
+        )
+        self._training_parameters.optimizer = (
+            StochasticOptimizer[camel_to_snake(optimizer)]
+            if optimizer is not None
+            else self._training_parameters.optimizer
+        )
+        self._training_parameters.random_seed = (
+            random_seed if random_seed is not None else self._training_parameters.random_seed
+        )
+        self._training_parameters.step_lr_gamma = (
+            step_lr_gamma if step_lr_gamma is not None else self._training_parameters.step_lr_gamma
+        )
+        self._training_parameters.step_lr_step_size = (
+            step_lr_step_size if step_lr_step_size is not None else self._training_parameters.step_lr_step_size
+        )
+        self._training_parameters.training_batch_size = (
+            training_batch_size if training_batch_size is not None else self._training_parameters.training_batch_size
+        )
+        self._training_parameters.validation_batch_size = (
+            validation_batch_size
+            if validation_batch_size is not None
+            else self._training_parameters.validation_batch_size
+        )
+        self._training_parameters.warmup_cosine_lr_cycles = (
+            warmup_cosine_lr_cycles
+            if warmup_cosine_lr_cycles is not None
+            else self._training_parameters.warmup_cosine_lr_cycles
+        )
+        self._training_parameters.warmup_cosine_lr_warmup_epochs = (
+            warmup_cosine_lr_warmup_epochs
+            if warmup_cosine_lr_warmup_epochs is not None
+            else self._training_parameters.warmup_cosine_lr_warmup_epochs
+        )
+        self._training_parameters.weight_decay = (
+            weight_decay if weight_decay is not None else self._training_parameters.weight_decay
+        )
+        self._training_parameters.box_detections_per_image = (
+            box_detections_per_image
+            if box_detections_per_image is not None
+            else self._training_parameters.box_detections_per_image
+        )
+        self._training_parameters.box_score_threshold = (
+            box_score_threshold if box_score_threshold is not None else self._training_parameters.box_score_threshold
+        )
+        self._training_parameters.image_size = (
+            image_size if image_size is not None else self._training_parameters.image_size
+        )
+        self._training_parameters.max_size = max_size if max_size is not None else self._training_parameters.max_size
+        self._training_parameters.min_size = min_size if min_size is not None else self._training_parameters.min_size
+        self._training_parameters.model_size = (
+            ModelSize[camel_to_snake(model_size)] if model_size is not None else self._training_parameters.model_size
+        )
+        self._training_parameters.multi_scale = (
+            multi_scale if multi_scale is not None else self._training_parameters.multi_scale
+        )
+        self._training_parameters.nms_iou_threshold = (
+            nms_iou_threshold if nms_iou_threshold is not None else self._training_parameters.nms_iou_threshold
+        )
+        self._training_parameters.tile_grid_size = (
+            tile_grid_size if tile_grid_size is not None else self._training_parameters.tile_grid_size
+        )
+        self._training_parameters.tile_overlap_ratio = (
+            tile_overlap_ratio if tile_overlap_ratio is not None else self._training_parameters.tile_overlap_ratio
+        )
+        self._training_parameters.tile_predictions_nms_threshold = (
+            tile_predictions_nms_threshold
+            if tile_predictions_nms_threshold is not None
+            else self._training_parameters.tile_predictions_nms_threshold
+        )
+        self._training_parameters.validation_iou_threshold = (
+            validation_iou_threshold
+            if validation_iou_threshold is not None
+            else self._training_parameters.validation_iou_threshold
+        )
+        self._training_parameters.validation_metric_type = (
+            ValidationMetricType[camel_to_snake(validation_metric_type)]
+            if validation_metric_type is not None
+            else self._training_parameters.validation_metric_type
+        )
+        self._training_parameters.log_training_metrics = (
+            LogTrainingMetrics[camel_to_snake(log_training_metrics)]
+            if log_training_metrics is not None
+            else self._training_parameters.log_training_metrics
+        )
+        self._training_parameters.log_validation_loss = (
+            LogValidationLoss[camel_to_snake(log_validation_loss)]
+            if log_validation_loss is not None
+            else self._training_parameters.log_validation_loss
+        )
+
+    # pylint: enable=too-many-locals
+
+    def extend_search_space(
+        self,
+        value: Union[SearchSpace, List[SearchSpace]],
+    ) -> None:
+        """Add search space for AutoML Image Object Detection and Image Instance Segmentation tasks.
+
+        :param value: A SearchSpace object or a list of SearchSpace objects with the hyperparameters to sweep over.
+        :type value: Union[SearchSpace, List[SearchSpace]]
+        """
+        self._search_space = self._search_space or []
+
+        if isinstance(value, list):
+            self._search_space.extend(
+                [
+                    cast_to_specific_search_space(item, ImageObjectDetectionSearchSpace, self.task_type)  # type: ignore
+                    for item in value
+                ]
+            )
+        else:
+            self._search_space.append(
+                cast_to_specific_search_space(value, ImageObjectDetectionSearchSpace, self.task_type)  # type: ignore
+            )
+
+    @classmethod
+    def _get_search_space_from_str(cls, search_space_str: str) -> Optional[List[ImageObjectDetectionSearchSpace]]:
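+        # Despite the annotation, this receives the REST search_space collection and converts each entry.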
+        return (
+            [
+                ImageObjectDetectionSearchSpace._from_rest_object(entry)
+                for entry in search_space_str
+                if entry is not None
+            ]
+            if search_space_str is not None
+            else None
+        )
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, AutoMLImageObjectDetectionBase):
+            return NotImplemented
+
+        if not super().__eq__(other):
+            return False
+
+        return self._training_parameters == other._training_parameters and self._search_space == other._search_space
+
+    def __ne__(self, other: object) -> bool:
+        return not self.__eq__(other)
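
For orientation, a minimal sketch of how these helpers might be used together (a sketch only, assuming the public azure.ai.ml.automl.image_object_detection factory, azure.ai.ml.automl.SearchSpace, and azure.ai.ml.sweep.Choice; the compute name and MLTable paths are hypothetical):

    from azure.ai.ml import Input, automl
    from azure.ai.ml.automl import SearchSpace
    from azure.ai.ml.constants import AssetTypes
    from azure.ai.ml.sweep import Choice

    job = automl.image_object_detection(
        compute="gpu-cluster",  # hypothetical compute target
        experiment_name="od-sample",
        training_data=Input(type=AssetTypes.MLTABLE, path="./train-mltable"),    # hypothetical path
        validation_data=Input(type=AssetTypes.MLTABLE, path="./valid-mltable"),  # hypothetical path
        target_column_name="label",
    )
    # set_training_parameters merges with existing settings; unspecified values are left untouched.
    job.set_training_parameters(learning_rate_scheduler="warmup_cosine", optimizer="sgd", early_stopping=True)
    # extend_search_space accepts a single SearchSpace or a list of them.
    job.extend_search_space(
        [
            SearchSpace(model_name=Choice(["yolov5"]), learning_rate=Choice([0.01, 0.001])),
            SearchSpace(model_name=Choice(["fasterrcnn_resnet50_fpn"]), min_size=Choice([600, 800])),
        ]
    )
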
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_job.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_job.py
new file mode 100644
index 00000000..a1b9dbc3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_job.py
@@ -0,0 +1,244 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+# pylint: disable=protected-access
+
+from typing import Any, Dict, Optional, Union
+
+from azure.ai.ml._restclient.v2023_04_01_preview.models import AutoMLJob as RestAutoMLJob
+from azure.ai.ml._restclient.v2023_04_01_preview.models import ClassificationPrimaryMetrics
+from azure.ai.ml._restclient.v2023_04_01_preview.models import ImageClassification as RestImageClassification
+from azure.ai.ml._restclient.v2023_04_01_preview.models import JobBase, TaskType
+from azure.ai.ml._utils.utils import camel_to_snake, is_data_binding_expression
+from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY
+from azure.ai.ml.constants._job.automl import AutoMLConstants
+from azure.ai.ml.entities._credentials import _BaseJobIdentityConfiguration
+from azure.ai.ml.entities._job._input_output_helpers import from_rest_data_outputs, to_rest_data_outputs
+from azure.ai.ml.entities._job.automl.image.automl_image_classification_base import AutoMLImageClassificationBase
+from azure.ai.ml.entities._job.automl.image.image_limit_settings import ImageLimitSettings
+from azure.ai.ml.entities._job.automl.image.image_model_settings import ImageModelSettingsClassification
+from azure.ai.ml.entities._job.automl.image.image_sweep_settings import ImageSweepSettings
+from azure.ai.ml.entities._util import load_from_dict
+
+
+class ImageClassificationJob(AutoMLImageClassificationBase):
+    """Configuration for AutoML multi-class Image Classification job.
+
+    :param primary_metric: The primary metric to use for optimization.
+    :type primary_metric: Optional[str, ~azure.ai.ml.automl.ClassificationPrimaryMetrics]
+    :param kwargs: Job-specific arguments.
+    :type kwargs: Dict[str, Any]
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/ml_samples_automl_image.py
+            :start-after: [START automl.automl_image_job.image_classification_job]
+            :end-before: [END automl.automl_image_job.image_classification_job]
+            :language: python
+            :dedent: 8
+            :caption: creating an automl image classification job
+    """
+
+    _DEFAULT_PRIMARY_METRIC = ClassificationPrimaryMetrics.ACCURACY
+
+    def __init__(
+        self,
+        *,
+        primary_metric: Optional[Union[str, ClassificationPrimaryMetrics]] = None,
+        **kwargs: Any,
+    ) -> None:
+
+        # Extract any super class init settings
+        limits = kwargs.pop("limits", None)
+        sweep = kwargs.pop("sweep", None)
+        training_parameters = kwargs.pop("training_parameters", None)
+        search_space = kwargs.pop("search_space", None)
+
+        super().__init__(
+            task_type=TaskType.IMAGE_CLASSIFICATION,
+            limits=limits,
+            sweep=sweep,
+            training_parameters=training_parameters,
+            search_space=search_space,
+            **kwargs,
+        )
+
+        self.primary_metric = primary_metric or ImageClassificationJob._DEFAULT_PRIMARY_METRIC
+
+    @property
+    def primary_metric(self) -> Optional[Union[str, ClassificationPrimaryMetrics]]:
+        return self._primary_metric
+
+    @primary_metric.setter
+    def primary_metric(self, value: Union[str, ClassificationPrimaryMetrics]) -> None:
+        if is_data_binding_expression(str(value), ["parent"]):
+            self._primary_metric = value
+            return
+        self._primary_metric = (
+            ImageClassificationJob._DEFAULT_PRIMARY_METRIC
+            if value is None
+            else ClassificationPrimaryMetrics[camel_to_snake(value).upper()]
+        )
+
+    def _to_rest_object(self) -> JobBase:
+        image_classification_task = RestImageClassification(
+            target_column_name=self.target_column_name,
+            training_data=self.training_data,
+            validation_data=self.validation_data,
+            validation_data_size=self.validation_data_size,
+            limit_settings=self._limits._to_rest_object() if self._limits else None,
+            sweep_settings=self._sweep._to_rest_object() if self._sweep else None,
+            model_settings=self._training_parameters._to_rest_object() if self._training_parameters else None,
+            search_space=(
+                [entry._to_rest_object() for entry in self._search_space if entry is not None]
+                if self._search_space is not None
+                else None
+            ),
+            primary_metric=self.primary_metric,
+            log_verbosity=self.log_verbosity,
+        )
+        # resolve data inputs in rest obj
+        self._resolve_data_inputs(image_classification_task)
+
+        properties = RestAutoMLJob(
+            display_name=self.display_name,
+            description=self.description,
+            experiment_name=self.experiment_name,
+            tags=self.tags,
+            compute_id=self.compute,
+            properties=self.properties,
+            environment_id=self.environment_id,
+            environment_variables=self.environment_variables,
+            services=self.services,
+            outputs=to_rest_data_outputs(self.outputs),
+            resources=self.resources,
+            task_details=image_classification_task,
+            identity=self.identity._to_job_rest_object() if self.identity else None,
+            queue_settings=self.queue_settings,
+        )
+
+        result = JobBase(properties=properties)
+        result.name = self.name
+        return result
+
+    @classmethod
+    def _from_rest_object(cls, obj: JobBase) -> "ImageClassificationJob":
+        properties: RestAutoMLJob = obj.properties
+        task_details: RestImageClassification = properties.task_details
+
+        job_args_dict = {
+            "id": obj.id,
+            "name": obj.name,
+            "description": properties.description,
+            "tags": properties.tags,
+            "properties": properties.properties,
+            "experiment_name": properties.experiment_name,
+            "services": properties.services,
+            "status": properties.status,
+            "creation_context": obj.system_data,
+            "display_name": properties.display_name,
+            "compute": properties.compute_id,
+            "outputs": from_rest_data_outputs(properties.outputs),
+            "resources": properties.resources,
+            "identity": (
+                _BaseJobIdentityConfiguration._from_rest_object(properties.identity) if properties.identity else None
+            ),
+            "queue_settings": properties.queue_settings,
+        }
+
+        image_classification_job = cls(
+            target_column_name=task_details.target_column_name,
+            training_data=task_details.training_data,
+            validation_data=task_details.validation_data,
+            validation_data_size=task_details.validation_data_size,
+            limits=(
+                ImageLimitSettings._from_rest_object(task_details.limit_settings)
+                if task_details.limit_settings
+                else None
+            ),
+            sweep=(
+                ImageSweepSettings._from_rest_object(task_details.sweep_settings)
+                if task_details.sweep_settings
+                else None
+            ),
+            training_parameters=(
+                ImageModelSettingsClassification._from_rest_object(task_details.model_settings)
+                if task_details.model_settings
+                else None
+            ),
+            search_space=cls._get_search_space_from_str(task_details.search_space),
+            primary_metric=task_details.primary_metric,
+            log_verbosity=task_details.log_verbosity,
+            **job_args_dict,
+        )
+
+        image_classification_job._restore_data_inputs()
+
+        return image_classification_job
+
+    @classmethod
+    def _load_from_dict(
+        cls,
+        data: Dict,
+        context: Dict,
+        additional_message: str,
+        **kwargs: Any,
+    ) -> "ImageClassificationJob":
+        from azure.ai.ml._schema.automl.image_vertical.image_classification import ImageClassificationSchema
+        from azure.ai.ml._schema.pipeline.automl_node import ImageClassificationMulticlassNodeSchema
+
+        inside_pipeline = kwargs.pop("inside_pipeline", False)
+        if inside_pipeline:
+            if context.get("inside_pipeline", None) is None:
+                context["inside_pipeline"] = True
+            loaded_data = load_from_dict(
+                ImageClassificationMulticlassNodeSchema,
+                data,
+                context,
+                additional_message,
+                **kwargs,
+            )
+        else:
+            loaded_data = load_from_dict(ImageClassificationSchema, data, context, additional_message, **kwargs)
+        job_instance = cls._create_instance_from_schema_dict(loaded_data)
+        return job_instance
+
+    @classmethod
+    def _create_instance_from_schema_dict(cls, loaded_data: Dict) -> "ImageClassificationJob":
+        loaded_data.pop(AutoMLConstants.TASK_TYPE_YAML, None)
+        data_settings = {
+            "training_data": loaded_data.pop("training_data"),
+            "target_column_name": loaded_data.pop("target_column_name"),
+            "validation_data": loaded_data.pop("validation_data", None),
+            "validation_data_size": loaded_data.pop("validation_data_size", None),
+        }
+        job = ImageClassificationJob(**loaded_data)
+        job.set_data(**data_settings)
+        return job
+
+    def _to_dict(self, inside_pipeline: bool = False) -> Dict:
+        from azure.ai.ml._schema.automl.image_vertical.image_classification import ImageClassificationSchema
+        from azure.ai.ml._schema.pipeline.automl_node import ImageClassificationMulticlassNodeSchema
+
+        schema_dict: dict = {}
+        if inside_pipeline:
+            schema_dict = ImageClassificationMulticlassNodeSchema(
+                context={BASE_PATH_CONTEXT_KEY: "./", "inside_pipeline": True}
+            ).dump(self)
+        else:
+            schema_dict = ImageClassificationSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)
+
+        return schema_dict
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, ImageClassificationJob):
+            return NotImplemented
+
+        if not super().__eq__(other):
+            return False
+
+        return self.primary_metric == other.primary_metric
+
+    def __ne__(self, other: object) -> bool:
+        return not self.__eq__(other)
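
A small illustration of the primary_metric handling above (a sketch, assuming ImageClassificationJob and ClassificationPrimaryMetrics are re-exported from azure.ai.ml.automl as in the public SDK; data inputs would normally be supplied via set_data or the automl.image_classification factory):

    from azure.ai.ml.automl import ClassificationPrimaryMetrics, ImageClassificationJob

    # With no explicit metric, the job falls back to _DEFAULT_PRIMARY_METRIC (accuracy).
    job = ImageClassificationJob()
    assert job.primary_metric == ClassificationPrimaryMetrics.ACCURACY

    # String values are normalized (camel_to_snake, then upper-cased) before the enum lookup,
    # so "accuracy" and "Accuracy" resolve to the same member.
    job.primary_metric = "Accuracy"
    assert job.primary_metric == ClassificationPrimaryMetrics.ACCURACY
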
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_multilabel_job.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_multilabel_job.py
new file mode 100644
index 00000000..541f41c7
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_multilabel_job.py
@@ -0,0 +1,252 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+# pylint: disable=protected-access
+
+from typing import Any, Dict, Optional, Union
+
+from azure.ai.ml._restclient.v2023_04_01_preview.models import AutoMLJob as RestAutoMLJob
+from azure.ai.ml._restclient.v2023_04_01_preview.models import ClassificationMultilabelPrimaryMetrics
+from azure.ai.ml._restclient.v2023_04_01_preview.models import (
+    ImageClassificationMultilabel as RestImageClassificationMultilabel,
+)
+from azure.ai.ml._restclient.v2023_04_01_preview.models import JobBase, TaskType
+from azure.ai.ml._utils.utils import camel_to_snake, is_data_binding_expression
+from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY
+from azure.ai.ml.constants._job.automl import AutoMLConstants
+from azure.ai.ml.entities._credentials import _BaseJobIdentityConfiguration
+from azure.ai.ml.entities._job._input_output_helpers import from_rest_data_outputs, to_rest_data_outputs
+from azure.ai.ml.entities._job.automl.image.automl_image_classification_base import AutoMLImageClassificationBase
+from azure.ai.ml.entities._job.automl.image.image_limit_settings import ImageLimitSettings
+from azure.ai.ml.entities._job.automl.image.image_model_settings import ImageModelSettingsClassification
+from azure.ai.ml.entities._job.automl.image.image_sweep_settings import ImageSweepSettings
+from azure.ai.ml.entities._util import load_from_dict
+
+
+class ImageClassificationMultilabelJob(AutoMLImageClassificationBase):
+    """Configuration for AutoML multi-label Image Classification job.
+
+    :param primary_metric: The primary metric to use for optimization.
+    :type primary_metric: Optional[str, ~azure.ai.ml.automl.ClassificationMultilabelPrimaryMetrics]
+    :param kwargs: Job-specific arguments.
+    :type kwargs: Dict[str, Any]
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/ml_samples_automl_image.py
+            :start-after: [START automl.automl_image_job.image_classification_multilabel_job]
+            :end-before: [END automl.automl_image_job.image_classification_multilabel_job]
+            :language: python
+            :dedent: 8
+            :caption: creating an automl image classification multilabel job
+    """
+
+    _DEFAULT_PRIMARY_METRIC = ClassificationMultilabelPrimaryMetrics.IOU
+
+    def __init__(
+        self,
+        *,
+        primary_metric: Optional[Union[str, ClassificationMultilabelPrimaryMetrics]] = None,
+        **kwargs: Any,
+    ) -> None:
+
+        # Extract any super class init settings
+        limits = kwargs.pop("limits", None)
+        sweep = kwargs.pop("sweep", None)
+        training_parameters = kwargs.pop("training_parameters", None)
+        search_space = kwargs.pop("search_space", None)
+
+        super().__init__(
+            task_type=TaskType.IMAGE_CLASSIFICATION_MULTILABEL,
+            limits=limits,
+            sweep=sweep,
+            training_parameters=training_parameters,
+            search_space=search_space,
+            **kwargs,
+        )
+
+        self.primary_metric = primary_metric or ImageClassificationMultilabelJob._DEFAULT_PRIMARY_METRIC
+
+    @property
+    def primary_metric(self) -> Union[str, ClassificationMultilabelPrimaryMetrics]:
+        return self._primary_metric
+
+    @primary_metric.setter
+    def primary_metric(self, value: Union[str, ClassificationMultilabelPrimaryMetrics]) -> None:
+        if is_data_binding_expression(str(value), ["parent"]):
+            self._primary_metric = value
+            return
+        self._primary_metric = (
+            ImageClassificationMultilabelJob._DEFAULT_PRIMARY_METRIC
+            if value is None
+            else ClassificationMultilabelPrimaryMetrics[camel_to_snake(value).upper()]
+        )
+
+    def _to_rest_object(self) -> JobBase:
+        image_classification_multilabel_task = RestImageClassificationMultilabel(
+            target_column_name=self.target_column_name,
+            training_data=self.training_data,
+            validation_data=self.validation_data,
+            validation_data_size=self.validation_data_size,
+            limit_settings=self._limits._to_rest_object() if self._limits else None,
+            sweep_settings=self._sweep._to_rest_object() if self._sweep else None,
+            model_settings=self._training_parameters._to_rest_object() if self._training_parameters else None,
+            search_space=(
+                [entry._to_rest_object() for entry in self._search_space if entry is not None]
+                if self._search_space is not None
+                else None
+            ),
+            primary_metric=self.primary_metric,
+            log_verbosity=self.log_verbosity,
+        )
+        # resolve data inputs in rest obj
+        self._resolve_data_inputs(image_classification_multilabel_task)
+
+        properties = RestAutoMLJob(
+            display_name=self.display_name,
+            description=self.description,
+            experiment_name=self.experiment_name,
+            tags=self.tags,
+            compute_id=self.compute,
+            properties=self.properties,
+            environment_id=self.environment_id,
+            environment_variables=self.environment_variables,
+            services=self.services,
+            outputs=to_rest_data_outputs(self.outputs),
+            resources=self.resources,
+            task_details=image_classification_multilabel_task,
+            identity=self.identity._to_job_rest_object() if self.identity else None,
+            queue_settings=self.queue_settings,
+        )
+
+        result = JobBase(properties=properties)
+        result.name = self.name
+        return result
+
+    @classmethod
+    def _from_rest_object(cls, obj: JobBase) -> "ImageClassificationMultilabelJob":
+        properties: RestAutoMLJob = obj.properties
+        task_details: RestImageClassificationMultilabel = properties.task_details
+
+        job_args_dict = {
+            "id": obj.id,
+            "name": obj.name,
+            "description": properties.description,
+            "tags": properties.tags,
+            "properties": properties.properties,
+            "experiment_name": properties.experiment_name,
+            "services": properties.services,
+            "status": properties.status,
+            "creation_context": obj.system_data,
+            "display_name": properties.display_name,
+            "compute": properties.compute_id,
+            "outputs": from_rest_data_outputs(properties.outputs),
+            "resources": properties.resources,
+            "identity": (
+                _BaseJobIdentityConfiguration._from_rest_object(properties.identity) if properties.identity else None
+            ),
+            "queue_settings": properties.queue_settings,
+        }
+
+        image_classification_multilabel_job = cls(
+            target_column_name=task_details.target_column_name,
+            training_data=task_details.training_data,
+            validation_data=task_details.validation_data,
+            validation_data_size=task_details.validation_data_size,
+            limits=(
+                ImageLimitSettings._from_rest_object(task_details.limit_settings)
+                if task_details.limit_settings
+                else None
+            ),
+            sweep=(
+                ImageSweepSettings._from_rest_object(task_details.sweep_settings)
+                if task_details.sweep_settings
+                else None
+            ),
+            training_parameters=(
+                ImageModelSettingsClassification._from_rest_object(task_details.model_settings)
+                if task_details.model_settings
+                else None
+            ),
+            search_space=cls._get_search_space_from_str(task_details.search_space),
+            primary_metric=task_details.primary_metric,
+            log_verbosity=task_details.log_verbosity,
+            **job_args_dict,
+        )
+
+        image_classification_multilabel_job._restore_data_inputs()
+
+        return image_classification_multilabel_job
+
+    @classmethod
+    def _load_from_dict(
+        cls,
+        data: Dict,
+        context: Dict,
+        additional_message: str,
+        **kwargs: Any,
+    ) -> "ImageClassificationMultilabelJob":
+        from azure.ai.ml._schema.automl.image_vertical.image_classification import ImageClassificationMultilabelSchema
+        from azure.ai.ml._schema.pipeline.automl_node import ImageClassificationMultilabelNodeSchema
+
+        inside_pipeline = kwargs.pop("inside_pipeline", False)
+        if inside_pipeline:
+            if context.get("inside_pipeline", None) is None:
+                context["inside_pipeline"] = True
+            loaded_data = load_from_dict(
+                ImageClassificationMultilabelNodeSchema,
+                data,
+                context,
+                additional_message,
+                **kwargs,
+            )
+        else:
+            loaded_data = load_from_dict(
+                ImageClassificationMultilabelSchema,
+                data,
+                context,
+                additional_message,
+                **kwargs,
+            )
+        job_instance = cls._create_instance_from_schema_dict(loaded_data)
+        return job_instance
+
+    @classmethod
+    def _create_instance_from_schema_dict(cls, loaded_data: Dict) -> "ImageClassificationMultilabelJob":
+        loaded_data.pop(AutoMLConstants.TASK_TYPE_YAML, None)
+        data_settings = {
+            "training_data": loaded_data.pop("training_data"),
+            "target_column_name": loaded_data.pop("target_column_name"),
+            "validation_data": loaded_data.pop("validation_data", None),
+            "validation_data_size": loaded_data.pop("validation_data_size", None),
+        }
+        job = ImageClassificationMultilabelJob(**loaded_data)
+        job.set_data(**data_settings)
+        return job
+
+    def _to_dict(self, inside_pipeline: bool = False) -> Dict:
+        from azure.ai.ml._schema.automl.image_vertical.image_classification import ImageClassificationMultilabelSchema
+        from azure.ai.ml._schema.pipeline.automl_node import ImageClassificationMultilabelNodeSchema
+
+        schema_dict: dict = {}
+        if inside_pipeline:
+            schema_dict = ImageClassificationMultilabelNodeSchema(
+                context={BASE_PATH_CONTEXT_KEY: "./", "inside_pipeline": True}
+            ).dump(self)
+        else:
+            schema_dict = ImageClassificationMultilabelSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)
+
+        return schema_dict
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, ImageClassificationMultilabelJob):
+            return NotImplemented
+
+        if not super().__eq__(other):
+            return False
+
+        return self.primary_metric == other.primary_metric
+
+    def __ne__(self, other: object) -> bool:
+        return not self.__eq__(other)
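
As a complementary sketch for the multi-label variant (assuming the azure.ai.ml.automl.image_classification_multilabel factory and azure.ai.ml.sweep.BanditPolicy; the compute name and MLTable paths are hypothetical):

    from azure.ai.ml import Input, automl
    from azure.ai.ml.constants import AssetTypes
    from azure.ai.ml.sweep import BanditPolicy

    job = automl.image_classification_multilabel(
        compute="gpu-cluster",  # hypothetical compute target
        experiment_name="multilabel-sample",
        training_data=Input(type=AssetTypes.MLTABLE, path="./train-mltable"),    # hypothetical path
        validation_data=Input(type=AssetTypes.MLTABLE, path="./valid-mltable"),  # hypothetical path
        target_column_name="label",
        primary_metric="iou",  # the class default shown above
    )
    # set_limits / set_sweep configure the trial budget and hyperparameter sweep for the job.
    job.set_limits(timeout_minutes=60, max_trials=10, max_concurrent_trials=2)
    job.set_sweep(
        sampling_algorithm="Random",
        early_termination=BanditPolicy(evaluation_interval=2, slack_factor=0.2, delay_evaluation=6),
    )
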
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_search_space.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_search_space.py
new file mode 100644
index 00000000..0691f243
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_classification_search_space.py
@@ -0,0 +1,437 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+# pylint: disable=R0902,too-many-locals
+
+from typing import Optional, Union
+
+from azure.ai.ml._restclient.v2023_04_01_preview.models import ImageModelDistributionSettingsClassification
+from azure.ai.ml.entities._job.automl.search_space import SearchSpace
+from azure.ai.ml.entities._job.automl.search_space_utils import _convert_from_rest_object, _convert_to_rest_object
+from azure.ai.ml.entities._job.sweep.search_space import SweepDistribution
+from azure.ai.ml.entities._mixins import RestTranslatableMixin
+
+
+class ImageClassificationSearchSpace(RestTranslatableMixin):
+    """Search space for AutoML Image Classification and Image Classification
+    Multilabel tasks.
+
+    :param ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+    :type ams_gradient: bool or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+        range [0, 1].
+    :type beta1: float or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+        range [0, 1].
+    :type beta2: float or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param distributed: Whether to use distributed training.
+    :type distributed: bool or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param early_stopping: Enable early stopping logic during training.
+    :type early_stopping: bool or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+        before primary metric improvement
+        is tracked for early stopping. Must be a positive integer.
+    :type early_stopping_delay: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param early_stopping_patience: Minimum number of epochs or validation evaluations with no
+        primary metric improvement before
+        the run is stopped. Must be a positive integer.
+    :type early_stopping_patience: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param enable_onnx_normalization: Enable normalization when exporting ONNX model.
+    :type enable_onnx_normalization: bool or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+        Must be a positive integer.
+    :type evaluation_frequency: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param gradient_accumulation_step: Gradient accumulation means running a configured number of
+        "GradAccumulationStep" steps without
+        updating the model weights while accumulating the gradients of those steps, and then using
+        the accumulated gradients to compute the weight updates. Must be a positive integer.
+    :type gradient_accumulation_step: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+        integer.
+        For instance, passing 2 as a value for 'seresnext' means
+        freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
+        please
+        see: https://learn.microsoft.com/azure/machine-learning/reference-automl-images-hyperparameters#model-agnostic-hyperparameters.    # pylint: disable=line-too-long
+    :type layers_to_freeze: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+    :type learning_rate: float or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+        'step'.
+    :type learning_rate_scheduler: str or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param model_name: Name of the model to use for training.
+        For more information on the available models please visit the official documentation:
+        https://learn.microsoft.com/azure/machine-learning/how-to-auto-train-image-models.
+    :type model_name: str or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+        1].
+    :type momentum: float or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param nesterov: Enable nesterov when optimizer is 'sgd'.
+    :type nesterov: bool or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param number_of_epochs: Number of training epochs. Must be a positive integer.
+    :type number_of_epochs: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param number_of_workers: Number of data loader workers. Must be a non-negative integer.
+    :type number_of_workers: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+    :type optimizer: str or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param random_seed: Random seed to be used when using deterministic training.
+    :type random_seed: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+        in the range [0, 1].
+    :type step_lr_gamma: float or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+        a positive integer.
+    :type step_lr_step_size: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param training_batch_size: Training batch size. Must be a positive integer.
+    :type training_batch_size: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param validation_batch_size: Validation batch size. Must be a positive integer.
+    :type validation_batch_size: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+        'warmup_cosine'. Must be a float in the range [0, 1].
+    :type warmup_cosine_lr_cycles: float or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+        'warmup_cosine'. Must be a positive integer.
+    :type warmup_cosine_lr_warmup_epochs: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+        be a float in the range [0, 1].
+    :type weight_decay: float or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param training_crop_size: Image crop size that is input to the neural network for the
+        training dataset. Must be a positive integer.
+    :type training_crop_size: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param validation_crop_size: Image crop size that is input to the neural network for the
+        validation dataset. Must be a positive integer.
+    :type validation_crop_size: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param validation_resize_size: Image size to which to resize before cropping for validation
+        dataset. Must be a positive integer.
+    :type validation_resize_size: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+    :param weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
+        1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be
+        0, 1, or 2.
+    :type weighted_loss: int or ~azure.ai.ml.entities._job.sweep.search_space.SweepDistribution
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/ml_samples_automl_image.py
+            :start-after: [START automl.automl_image_job.image_classification_search_space]
+            :end-before: [END automl.automl_image_job.image_classification_search_space]
+            :language: python
+            :dedent: 8
+            :caption: Defining an automl image classification search space
+    """
+
+    def __init__(
+        self,
+        *,
+        ams_gradient: Optional[Union[bool, SweepDistribution]] = None,
+        beta1: Optional[Union[float, SweepDistribution]] = None,
+        beta2: Optional[Union[float, SweepDistribution]] = None,
+        distributed: Optional[Union[bool, SweepDistribution]] = None,
+        early_stopping: Optional[Union[bool, SweepDistribution]] = None,
+        early_stopping_delay: Optional[Union[int, SweepDistribution]] = None,
+        early_stopping_patience: Optional[Union[int, SweepDistribution]] = None,
+        enable_onnx_normalization: Optional[Union[bool, SweepDistribution]] = None,
+        evaluation_frequency: Optional[Union[int, SweepDistribution]] = None,
+        gradient_accumulation_step: Optional[Union[int, SweepDistribution]] = None,
+        layers_to_freeze: Optional[Union[int, SweepDistribution]] = None,
+        learning_rate: Optional[Union[float, SweepDistribution]] = None,
+        learning_rate_scheduler: Optional[Union[str, SweepDistribution]] = None,
+        model_name: Optional[Union[str, SweepDistribution]] = None,
+        momentum: Optional[Union[float, SweepDistribution]] = None,
+        nesterov: Optional[Union[bool, SweepDistribution]] = None,
+        number_of_epochs: Optional[Union[int, SweepDistribution]] = None,
+        number_of_workers: Optional[Union[int, SweepDistribution]] = None,
+        optimizer: Optional[Union[str, SweepDistribution]] = None,
+        random_seed: Optional[Union[int, SweepDistribution]] = None,
+        step_lr_gamma: Optional[Union[float, SweepDistribution]] = None,
+        step_lr_step_size: Optional[Union[int, SweepDistribution]] = None,
+        training_batch_size: Optional[Union[int, SweepDistribution]] = None,
+        validation_batch_size: Optional[Union[int, SweepDistribution]] = None,
+        warmup_cosine_lr_cycles: Optional[Union[float, SweepDistribution]] = None,
+        warmup_cosine_lr_warmup_epochs: Optional[Union[int, SweepDistribution]] = None,
+        weight_decay: Optional[Union[float, SweepDistribution]] = None,
+        training_crop_size: Optional[Union[int, SweepDistribution]] = None,
+        validation_crop_size: Optional[Union[int, SweepDistribution]] = None,
+        validation_resize_size: Optional[Union[int, SweepDistribution]] = None,
+        weighted_loss: Optional[Union[int, SweepDistribution]] = None,
+    ) -> None:
+        self.ams_gradient = ams_gradient
+        self.beta1 = beta1
+        self.beta2 = beta2
+        self.distributed = distributed
+        self.early_stopping = early_stopping
+        self.early_stopping_delay = early_stopping_delay
+        self.early_stopping_patience = early_stopping_patience
+        self.enable_onnx_normalization = enable_onnx_normalization
+        self.evaluation_frequency = evaluation_frequency
+        self.gradient_accumulation_step = gradient_accumulation_step
+        self.layers_to_freeze = layers_to_freeze
+        self.learning_rate = learning_rate
+        self.learning_rate_scheduler = learning_rate_scheduler
+        self.model_name = model_name
+        self.momentum = momentum
+        self.nesterov = nesterov
+        self.number_of_epochs = number_of_epochs
+        self.number_of_workers = number_of_workers
+        self.optimizer = optimizer
+        self.random_seed = random_seed
+        self.step_lr_gamma = step_lr_gamma
+        self.step_lr_step_size = step_lr_step_size
+        self.training_batch_size = training_batch_size
+        self.validation_batch_size = validation_batch_size
+        self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
+        self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
+        self.weight_decay = weight_decay
+        self.training_crop_size = training_crop_size
+        self.validation_crop_size = validation_crop_size
+        self.validation_resize_size = validation_resize_size
+        self.weighted_loss = weighted_loss
+
+    def _to_rest_object(self) -> ImageModelDistributionSettingsClassification:
+        return ImageModelDistributionSettingsClassification(
+            ams_gradient=_convert_to_rest_object(self.ams_gradient) if self.ams_gradient is not None else None,
+            beta1=_convert_to_rest_object(self.beta1) if self.beta1 is not None else None,
+            beta2=_convert_to_rest_object(self.beta2) if self.beta2 is not None else None,
+            distributed=_convert_to_rest_object(self.distributed) if self.distributed is not None else None,
+            early_stopping=_convert_to_rest_object(self.early_stopping) if self.early_stopping is not None else None,
+            early_stopping_delay=(
+                _convert_to_rest_object(self.early_stopping_delay) if self.early_stopping_delay is not None else None
+            ),
+            early_stopping_patience=(
+                _convert_to_rest_object(self.early_stopping_patience)
+                if self.early_stopping_patience is not None
+                else None
+            ),
+            enable_onnx_normalization=(
+                _convert_to_rest_object(self.enable_onnx_normalization)
+                if self.enable_onnx_normalization is not None
+                else None
+            ),
+            evaluation_frequency=(
+                _convert_to_rest_object(self.evaluation_frequency) if self.evaluation_frequency is not None else None
+            ),
+            gradient_accumulation_step=(
+                _convert_to_rest_object(self.gradient_accumulation_step)
+                if self.gradient_accumulation_step is not None
+                else None
+            ),
+            layers_to_freeze=(
+                _convert_to_rest_object(self.layers_to_freeze) if self.layers_to_freeze is not None else None
+            ),
+            learning_rate=_convert_to_rest_object(self.learning_rate) if self.learning_rate is not None else None,
+            learning_rate_scheduler=(
+                _convert_to_rest_object(self.learning_rate_scheduler)
+                if self.learning_rate_scheduler is not None
+                else None
+            ),
+            model_name=_convert_to_rest_object(self.model_name) if self.model_name is not None else None,
+            momentum=_convert_to_rest_object(self.momentum) if self.momentum is not None else None,
+            nesterov=_convert_to_rest_object(self.nesterov) if self.nesterov is not None else None,
+            number_of_epochs=(
+                _convert_to_rest_object(self.number_of_epochs) if self.number_of_epochs is not None else None
+            ),
+            number_of_workers=(
+                _convert_to_rest_object(self.number_of_workers) if self.number_of_workers is not None else None
+            ),
+            optimizer=_convert_to_rest_object(self.optimizer) if self.optimizer is not None else None,
+            random_seed=_convert_to_rest_object(self.random_seed) if self.random_seed is not None else None,
+            step_lr_gamma=_convert_to_rest_object(self.step_lr_gamma) if self.step_lr_gamma is not None else None,
+            step_lr_step_size=(
+                _convert_to_rest_object(self.step_lr_step_size) if self.step_lr_step_size is not None else None
+            ),
+            training_batch_size=(
+                _convert_to_rest_object(self.training_batch_size) if self.training_batch_size is not None else None
+            ),
+            validation_batch_size=(
+                _convert_to_rest_object(self.validation_batch_size) if self.validation_batch_size is not None else None
+            ),
+            warmup_cosine_lr_cycles=(
+                _convert_to_rest_object(self.warmup_cosine_lr_cycles)
+                if self.warmup_cosine_lr_cycles is not None
+                else None
+            ),
+            warmup_cosine_lr_warmup_epochs=(
+                _convert_to_rest_object(self.warmup_cosine_lr_warmup_epochs)
+                if self.warmup_cosine_lr_warmup_epochs is not None
+                else None
+            ),
+            weight_decay=_convert_to_rest_object(self.weight_decay) if self.weight_decay is not None else None,
+            training_crop_size=(
+                _convert_to_rest_object(self.training_crop_size) if self.training_crop_size is not None else None
+            ),
+            validation_crop_size=(
+                _convert_to_rest_object(self.validation_crop_size) if self.validation_crop_size is not None else None
+            ),
+            validation_resize_size=(
+                _convert_to_rest_object(self.validation_resize_size)
+                if self.validation_resize_size is not None
+                else None
+            ),
+            weighted_loss=_convert_to_rest_object(self.weighted_loss) if self.weighted_loss is not None else None,
+        )
+
+    @classmethod
+    def _from_rest_object(cls, obj: ImageModelDistributionSettingsClassification) -> "ImageClassificationSearchSpace":
+        return cls(
+            ams_gradient=_convert_from_rest_object(obj.ams_gradient) if obj.ams_gradient is not None else None,
+            beta1=_convert_from_rest_object(obj.beta1) if obj.beta1 is not None else None,
+            beta2=_convert_from_rest_object(obj.beta2) if obj.beta2 is not None else None,
+            distributed=_convert_from_rest_object(obj.distributed) if obj.distributed is not None else None,
+            early_stopping=_convert_from_rest_object(obj.early_stopping) if obj.early_stopping is not None else None,
+            early_stopping_delay=(
+                _convert_from_rest_object(obj.early_stopping_delay) if obj.early_stopping_delay is not None else None
+            ),
+            early_stopping_patience=(
+                _convert_from_rest_object(obj.early_stopping_patience)
+                if obj.early_stopping_patience is not None
+                else None
+            ),
+            enable_onnx_normalization=(
+                _convert_from_rest_object(obj.enable_onnx_normalization)
+                if obj.enable_onnx_normalization is not None
+                else None
+            ),
+            evaluation_frequency=(
+                _convert_from_rest_object(obj.evaluation_frequency) if obj.evaluation_frequency is not None else None
+            ),
+            gradient_accumulation_step=(
+                _convert_from_rest_object(obj.gradient_accumulation_step)
+                if obj.gradient_accumulation_step is not None
+                else None
+            ),
+            layers_to_freeze=(
+                _convert_from_rest_object(obj.layers_to_freeze) if obj.layers_to_freeze is not None else None
+            ),
+            learning_rate=_convert_from_rest_object(obj.learning_rate) if obj.learning_rate is not None else None,
+            learning_rate_scheduler=(
+                _convert_from_rest_object(obj.learning_rate_scheduler)
+                if obj.learning_rate_scheduler is not None
+                else None
+            ),
+            model_name=_convert_from_rest_object(obj.model_name) if obj.model_name is not None else None,
+            momentum=_convert_from_rest_object(obj.momentum) if obj.momentum is not None else None,
+            nesterov=_convert_from_rest_object(obj.nesterov) if obj.nesterov is not None else None,
+            number_of_epochs=(
+                _convert_from_rest_object(obj.number_of_epochs) if obj.number_of_epochs is not None else None
+            ),
+            number_of_workers=(
+                _convert_from_rest_object(obj.number_of_workers) if obj.number_of_workers is not None else None
+            ),
+            optimizer=_convert_from_rest_object(obj.optimizer) if obj.optimizer is not None else None,
+            random_seed=_convert_from_rest_object(obj.random_seed) if obj.random_seed is not None else None,
+            step_lr_gamma=_convert_from_rest_object(obj.step_lr_gamma) if obj.step_lr_gamma is not None else None,
+            step_lr_step_size=(
+                _convert_from_rest_object(obj.step_lr_step_size) if obj.step_lr_step_size is not None else None
+            ),
+            training_batch_size=(
+                _convert_from_rest_object(obj.training_batch_size) if obj.training_batch_size is not None else None
+            ),
+            validation_batch_size=(
+                _convert_from_rest_object(obj.validation_batch_size) if obj.validation_batch_size is not None else None
+            ),
+            warmup_cosine_lr_cycles=(
+                _convert_from_rest_object(obj.warmup_cosine_lr_cycles)
+                if obj.warmup_cosine_lr_cycles is not None
+                else None
+            ),
+            warmup_cosine_lr_warmup_epochs=(
+                _convert_from_rest_object(obj.warmup_cosine_lr_warmup_epochs)
+                if obj.warmup_cosine_lr_warmup_epochs is not None
+                else None
+            ),
+            weight_decay=_convert_from_rest_object(obj.weight_decay) if obj.weight_decay is not None else None,
+            training_crop_size=(
+                _convert_from_rest_object(obj.training_crop_size) if obj.training_crop_size is not None else None
+            ),
+            validation_crop_size=(
+                _convert_from_rest_object(obj.validation_crop_size) if obj.validation_crop_size is not None else None
+            ),
+            validation_resize_size=(
+                _convert_from_rest_object(obj.validation_resize_size)
+                if obj.validation_resize_size is not None
+                else None
+            ),
+            weighted_loss=_convert_from_rest_object(obj.weighted_loss) if obj.weighted_loss is not None else None,
+        )
+
+    @classmethod
+    def _from_search_space_object(cls, obj: SearchSpace) -> "ImageClassificationSearchSpace":
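+        # Copy only the attributes that exist on the generic SearchSpace object; anything missing stays None.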
+        return cls(
+            ams_gradient=obj.ams_gradient if hasattr(obj, "ams_gradient") else None,
+            beta1=obj.beta1 if hasattr(obj, "beta1") else None,
+            beta2=obj.beta2 if hasattr(obj, "beta2") else None,
+            distributed=obj.distributed if hasattr(obj, "distributed") else None,
+            early_stopping=obj.early_stopping if hasattr(obj, "early_stopping") else None,
+            early_stopping_delay=obj.early_stopping_delay if hasattr(obj, "early_stopping_delay") else None,
+            early_stopping_patience=obj.early_stopping_patience if hasattr(obj, "early_stopping_patience") else None,
+            enable_onnx_normalization=(
+                obj.enable_onnx_normalization if hasattr(obj, "enable_onnx_normalization") else None
+            ),
+            evaluation_frequency=obj.evaluation_frequency if hasattr(obj, "evaluation_frequency") else None,
+            gradient_accumulation_step=(
+                obj.gradient_accumulation_step if hasattr(obj, "gradient_accumulation_step") else None
+            ),
+            layers_to_freeze=obj.layers_to_freeze if hasattr(obj, "layers_to_freeze") else None,
+            learning_rate=obj.learning_rate if hasattr(obj, "learning_rate") else None,
+            learning_rate_scheduler=obj.learning_rate_scheduler if hasattr(obj, "learning_rate_scheduler") else None,
+            model_name=obj.model_name if hasattr(obj, "model_name") else None,
+            momentum=obj.momentum if hasattr(obj, "momentum") else None,
+            nesterov=obj.nesterov if hasattr(obj, "nesterov") else None,
+            number_of_epochs=obj.number_of_epochs if hasattr(obj, "number_of_epochs") else None,
+            number_of_workers=obj.number_of_workers if hasattr(obj, "number_of_workers") else None,
+            optimizer=obj.optimizer if hasattr(obj, "optimizer") else None,
+            random_seed=obj.random_seed if hasattr(obj, "random_seed") else None,
+            step_lr_gamma=obj.step_lr_gamma if hasattr(obj, "step_lr_gamma") else None,
+            step_lr_step_size=obj.step_lr_step_size if hasattr(obj, "step_lr_step_size") else None,
+            training_batch_size=obj.training_batch_size if hasattr(obj, "training_batch_size") else None,
+            validation_batch_size=obj.validation_batch_size if hasattr(obj, "validation_batch_size") else None,
+            warmup_cosine_lr_cycles=obj.warmup_cosine_lr_cycles if hasattr(obj, "warmup_cosine_lr_cycles") else None,
+            warmup_cosine_lr_warmup_epochs=(
+                obj.warmup_cosine_lr_warmup_epochs if hasattr(obj, "warmup_cosine_lr_warmup_epochs") else None
+            ),
+            weight_decay=obj.weight_decay if hasattr(obj, "weight_decay") else None,
+            training_crop_size=obj.training_crop_size if hasattr(obj, "training_crop_size") else None,
+            validation_crop_size=obj.validation_crop_size if hasattr(obj, "validation_crop_size") else None,
+            validation_resize_size=obj.validation_resize_size if hasattr(obj, "validation_resize_size") else None,
+            weighted_loss=obj.weighted_loss if hasattr(obj, "weighted_loss") else None,
+        )
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, ImageClassificationSearchSpace):
+            return NotImplemented
+
+        return (
+            self.ams_gradient == other.ams_gradient
+            and self.beta1 == other.beta1
+            and self.beta2 == other.beta2
+            and self.distributed == other.distributed
+            and self.early_stopping == other.early_stopping
+            and self.early_stopping_delay == other.early_stopping_delay
+            and self.early_stopping_patience == other.early_stopping_patience
+            and self.enable_onnx_normalization == other.enable_onnx_normalization
+            and self.evaluation_frequency == other.evaluation_frequency
+            and self.gradient_accumulation_step == other.gradient_accumulation_step
+            and self.layers_to_freeze == other.layers_to_freeze
+            and self.learning_rate == other.learning_rate
+            and self.learning_rate_scheduler == other.learning_rate_scheduler
+            and self.model_name == other.model_name
+            and self.momentum == other.momentum
+            and self.nesterov == other.nesterov
+            and self.number_of_epochs == other.number_of_epochs
+            and self.number_of_workers == other.number_of_workers
+            and self.optimizer == other.optimizer
+            and self.random_seed == other.random_seed
+            and self.step_lr_gamma == other.step_lr_gamma
+            and self.step_lr_step_size == other.step_lr_step_size
+            and self.training_batch_size == other.training_batch_size
+            and self.validation_batch_size == other.validation_batch_size
+            and self.warmup_cosine_lr_cycles == other.warmup_cosine_lr_cycles
+            and self.warmup_cosine_lr_warmup_epochs == other.warmup_cosine_lr_warmup_epochs
+            and self.weight_decay == other.weight_decay
+            and self.training_crop_size == other.training_crop_size
+            and self.validation_crop_size == other.validation_crop_size
+            and self.validation_resize_size == other.validation_resize_size
+            and self.weighted_loss == other.weighted_loss
+        )
+
+    def __ne__(self, other: object) -> bool:
+        return not self.__eq__(other)
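
A short usage sketch for the search-space class defined above (illustrative only: the import paths
assume the public azure.ai.ml surface, and the model names and ranges are placeholder values):

    from azure.ai.ml.automl import ImageClassificationSearchSpace
    from azure.ai.ml.sweep import Choice, Uniform

    # Each field may hold either a fixed value or a sweep distribution; the
    # _to_rest_object()/_from_rest_object() pair above converts whichever fields are populated.
    search_space = ImageClassificationSearchSpace(
        model_name=Choice(["vitb16r224", "seresnext"]),
        learning_rate=Uniform(0.001, 0.01),
        number_of_epochs=Choice([15, 30]),
    )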
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_instance_segmentation_job.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_instance_segmentation_job.py
new file mode 100644
index 00000000..c97d3c11
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_instance_segmentation_job.py
@@ -0,0 +1,249 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+# pylint: disable=protected-access
+
+from typing import Any, Dict, Optional, Union
+
+from azure.ai.ml._restclient.v2023_04_01_preview.models import AutoMLJob as RestAutoMLJob
+from azure.ai.ml._restclient.v2023_04_01_preview.models import (
+    ImageInstanceSegmentation as RestImageInstanceSegmentation,
+)
+from azure.ai.ml._restclient.v2023_04_01_preview.models import InstanceSegmentationPrimaryMetrics, JobBase, TaskType
+from azure.ai.ml._utils.utils import camel_to_snake, is_data_binding_expression
+from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY
+from azure.ai.ml.constants._job.automl import AutoMLConstants
+from azure.ai.ml.entities._credentials import _BaseJobIdentityConfiguration
+from azure.ai.ml.entities._job._input_output_helpers import from_rest_data_outputs, to_rest_data_outputs
+from azure.ai.ml.entities._job.automl.image.automl_image_object_detection_base import AutoMLImageObjectDetectionBase
+from azure.ai.ml.entities._job.automl.image.image_limit_settings import ImageLimitSettings
+from azure.ai.ml.entities._job.automl.image.image_model_settings import ImageModelSettingsObjectDetection
+from azure.ai.ml.entities._job.automl.image.image_sweep_settings import ImageSweepSettings
+from azure.ai.ml.entities._util import load_from_dict
+
+
+class ImageInstanceSegmentationJob(AutoMLImageObjectDetectionBase):
+    """Configuration for AutoML Image Instance Segmentation job.
+
+    :keyword primary_metric: The primary metric to use for optimization.
+    :paramtype primary_metric: Optional[Union[str, ~azure.ai.ml.automl.InstanceSegmentationPrimaryMetrics]]
+    :keyword kwargs: Job-specific arguments.
+    :paramtype kwargs: Dict[str, Any]
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/ml_samples_automl_image.py
+            :start-after: [START automl.automl_image_job.image_instance_segmentation_job]
+            :end-before: [END automl.automl_image_job.image_instance_segmentation_job]
+            :language: python
+            :dedent: 8
+            :caption: Creating an automl image instance segmentation job.
+    """
+
+    _DEFAULT_PRIMARY_METRIC = InstanceSegmentationPrimaryMetrics.MEAN_AVERAGE_PRECISION
+
+    def __init__(
+        self,
+        *,
+        primary_metric: Optional[Union[str, InstanceSegmentationPrimaryMetrics]] = None,
+        **kwargs: Any,
+    ) -> None:
+        # Extract any super class init settings
+        limits = kwargs.pop("limits", None)
+        sweep = kwargs.pop("sweep", None)
+        training_parameters = kwargs.pop("training_parameters", None)
+        search_space = kwargs.pop("search_space", None)
+
+        super().__init__(
+            task_type=TaskType.IMAGE_INSTANCE_SEGMENTATION,
+            limits=limits,
+            sweep=sweep,
+            training_parameters=training_parameters,
+            search_space=search_space,
+            **kwargs,
+        )
+        self.primary_metric = primary_metric or ImageInstanceSegmentationJob._DEFAULT_PRIMARY_METRIC
+
+    @property
+    def primary_metric(self) -> Union[str, InstanceSegmentationPrimaryMetrics]:
+        return self._primary_metric
+
+    @primary_metric.setter
+    def primary_metric(self, value: Union[str, InstanceSegmentationPrimaryMetrics]) -> None:
+        if is_data_binding_expression(str(value), ["parent"]):
+            self._primary_metric = value
+            return
+        self._primary_metric = (
+            ImageInstanceSegmentationJob._DEFAULT_PRIMARY_METRIC
+            if value is None
+            else InstanceSegmentationPrimaryMetrics[camel_to_snake(value).upper()]
+        )
+
+    def _to_rest_object(self) -> JobBase:
+        image_instance_segmentation_task = RestImageInstanceSegmentation(
+            target_column_name=self.target_column_name,
+            training_data=self.training_data,
+            validation_data=self.validation_data,
+            validation_data_size=self.validation_data_size,
+            limit_settings=self._limits._to_rest_object() if self._limits else None,
+            sweep_settings=self._sweep._to_rest_object() if self._sweep else None,
+            model_settings=self._training_parameters._to_rest_object() if self._training_parameters else None,
+            search_space=(
+                [entry._to_rest_object() for entry in self._search_space if entry is not None]
+                if self._search_space is not None
+                else None
+            ),
+            primary_metric=self.primary_metric,
+            log_verbosity=self.log_verbosity,
+        )
+        # resolve data inputs in rest obj
+        self._resolve_data_inputs(image_instance_segmentation_task)
+
+        properties = RestAutoMLJob(
+            display_name=self.display_name,
+            description=self.description,
+            experiment_name=self.experiment_name,
+            tags=self.tags,
+            compute_id=self.compute,
+            properties=self.properties,
+            environment_id=self.environment_id,
+            environment_variables=self.environment_variables,
+            services=self.services,
+            outputs=to_rest_data_outputs(self.outputs),
+            resources=self.resources,
+            task_details=image_instance_segmentation_task,
+            identity=self.identity._to_job_rest_object() if self.identity else None,
+            queue_settings=self.queue_settings,
+        )
+
+        result = JobBase(properties=properties)
+        result.name = self.name
+        return result
+
+    @classmethod
+    def _from_rest_object(cls, obj: JobBase) -> "ImageInstanceSegmentationJob":
+        properties: RestAutoMLJob = obj.properties
+        task_details: RestImageInstanceSegmentation = properties.task_details
+
+        job_args_dict = {
+            "id": obj.id,
+            "name": obj.name,
+            "description": properties.description,
+            "tags": properties.tags,
+            "properties": properties.properties,
+            "experiment_name": properties.experiment_name,
+            "services": properties.services,
+            "status": properties.status,
+            "creation_context": obj.system_data,
+            "display_name": properties.display_name,
+            "compute": properties.compute_id,
+            "outputs": from_rest_data_outputs(properties.outputs),
+            "resources": properties.resources,
+            "identity": (
+                _BaseJobIdentityConfiguration._from_rest_object(properties.identity) if properties.identity else None
+            ),
+            "queue_settings": properties.queue_settings,
+        }
+
+        image_instance_segmentation_job = cls(
+            target_column_name=task_details.target_column_name,
+            training_data=task_details.training_data,
+            validation_data=task_details.validation_data,
+            validation_data_size=task_details.validation_data_size,
+            limits=(
+                ImageLimitSettings._from_rest_object(task_details.limit_settings)
+                if task_details.limit_settings
+                else None
+            ),
+            sweep=(
+                ImageSweepSettings._from_rest_object(task_details.sweep_settings)
+                if task_details.sweep_settings
+                else None
+            ),
+            training_parameters=(
+                ImageModelSettingsObjectDetection._from_rest_object(task_details.model_settings)
+                if task_details.model_settings
+                else None
+            ),
+            search_space=cls._get_search_space_from_str(task_details.search_space),
+            primary_metric=task_details.primary_metric,
+            log_verbosity=task_details.log_verbosity,
+            **job_args_dict,
+        )
+
+        image_instance_segmentation_job._restore_data_inputs()
+
+        return image_instance_segmentation_job
+
+    @classmethod
+    def _load_from_dict(
+        cls,
+        data: Dict,
+        context: Dict,
+        additional_message: str,
+        **kwargs: Any,
+    ) -> "ImageInstanceSegmentationJob":
+        from azure.ai.ml._schema.automl.image_vertical.image_object_detection import ImageInstanceSegmentationSchema
+        from azure.ai.ml._schema.pipeline.automl_node import ImageInstanceSegmentationNodeSchema
+
+        inside_pipeline = kwargs.pop("inside_pipeline", False)
+        if inside_pipeline:
+            if context.get("inside_pipeline", None) is None:
+                context["inside_pipeline"] = True
+            loaded_data = load_from_dict(
+                ImageInstanceSegmentationNodeSchema,
+                data,
+                context,
+                additional_message,
+                **kwargs,
+            )
+        else:
+            loaded_data = load_from_dict(
+                ImageInstanceSegmentationSchema,
+                data,
+                context,
+                additional_message,
+                **kwargs,
+            )
+        job_instance = cls._create_instance_from_schema_dict(loaded_data)
+        return job_instance
+
+    @classmethod
+    def _create_instance_from_schema_dict(cls, loaded_data: Dict) -> "ImageInstanceSegmentationJob":
+        loaded_data.pop(AutoMLConstants.TASK_TYPE_YAML, None)
+        data_settings = {
+            "training_data": loaded_data.pop("training_data"),
+            "target_column_name": loaded_data.pop("target_column_name"),
+            "validation_data": loaded_data.pop("validation_data", None),
+            "validation_data_size": loaded_data.pop("validation_data_size", None),
+        }
+        job = ImageInstanceSegmentationJob(**loaded_data)
+        job.set_data(**data_settings)
+        return job
+
+    def _to_dict(self, inside_pipeline: bool = False) -> Dict:
+        from azure.ai.ml._schema.automl.image_vertical.image_object_detection import ImageInstanceSegmentationSchema
+        from azure.ai.ml._schema.pipeline.automl_node import ImageInstanceSegmentationNodeSchema
+
+        schema_dict: dict = {}
+        if inside_pipeline:
+            schema_dict = ImageInstanceSegmentationNodeSchema(
+                context={BASE_PATH_CONTEXT_KEY: "./", "inside_pipeline": True}
+            ).dump(self)
+        else:
+            schema_dict = ImageInstanceSegmentationSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)
+
+        return schema_dict
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, ImageInstanceSegmentationJob):
+            return NotImplemented
+
+        if not super().__eq__(other):
+            return False
+
+        return self.primary_metric == other.primary_metric
+
+    def __ne__(self, other: object) -> bool:
+        return not self.__eq__(other)
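
A minimal construction sketch for the job class above, assuming the public automl factory helper
and using placeholder data paths and values:

    from azure.ai.ml import Input, automl
    from azure.ai.ml.constants import AssetTypes

    # Build the job through the factory helper, then attach limits (see ImageLimitSettings below).
    job = automl.image_instance_segmentation(
        training_data=Input(type=AssetTypes.MLTABLE, path="./data/training-mltable-folder"),
        target_column_name="label",
        primary_metric="mean_average_precision",
    )
    job.set_limits(max_trials=10, max_concurrent_trials=2, timeout_minutes=240)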
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_limit_settings.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_limit_settings.py
new file mode 100644
index 00000000..12ec8b57
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_limit_settings.py
@@ -0,0 +1,117 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing import Optional
+
+from azure.ai.ml._restclient.v2023_04_01_preview.models import ImageLimitSettings as RestImageLimitSettings
+from azure.ai.ml._utils.utils import from_iso_duration_format_mins, to_iso_duration_format_mins
+from azure.ai.ml.entities._mixins import RestTranslatableMixin
+
+
+class ImageLimitSettings(RestTranslatableMixin):
+    r"""Limit settings for AutoML Image Verticals.
+
+    ImageLimitSettings is a class that contains the following parameters: max_concurrent_trials, max_trials, and
+    timeout_minutes.
+
+    This is an optional configuration object for limit parameters such as timeouts.
+
+    .. note::
+
+        The number of concurrent runs is gated on the resources available in the specified compute target.
+        Ensure that the compute target has the available resources for the desired concurrency.
+
+    :keyword max_concurrent_trials: Maximum number of concurrent AutoML iterations. Defaults to None.
+    :paramtype max_concurrent_trials: typing.Optional[int]
+    :keyword max_trials: Maximum number of trials (child jobs). Defaults to None.
+    :paramtype max_trials: typing.Optional[int]
+    :keyword timeout_minutes: AutoML job timeout in minutes. Defaults to None.
+    :paramtype timeout_minutes: typing.Optional[int]
+    :raises ValueError: If max_concurrent_trials is not None and is not a positive integer.
+    :raises ValueError: If max_trials is not None and is not a positive integer.
+    :raises ValueError: If timeout_minutes is not None and is not a positive integer.
+    :return: ImageLimitSettings object.
+    :rtype: ImageLimitSettings
+
+    .. tip::
+        It's a good practice to match max_concurrent_trials count with the number of nodes in the cluster.
+        For example, if you have a cluster with 4 nodes, set max_concurrent_trials to 4.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/ml_samples_automl_image.py
+            :start-after: [START automl.automl_image_job.image_limit_settings]
+            :end-before: [END automl.automl_image_job.image_limit_settings]
+            :language: python
+            :dedent: 8
+            :caption: Defining the limit settings for an automl image job.
+    """
+
+    def __init__(
+        self,
+        *,
+        max_concurrent_trials: Optional[int] = None,
+        max_trials: Optional[int] = None,
+        timeout_minutes: Optional[int] = None,
+    ) -> None:
+        self.max_concurrent_trials = max_concurrent_trials
+        self.max_trials = max_trials
+        self.timeout_minutes = timeout_minutes
+
+    def _to_rest_object(self) -> RestImageLimitSettings:
+        """Convert ImageLimitSettings objects to a rest object.
+
+        :return: A rest object of ImageLimitSettings objects.
+        :rtype: RestImageLimitSettings
+        """
+        return RestImageLimitSettings(
+            max_concurrent_trials=self.max_concurrent_trials,
+            max_trials=self.max_trials,
+            timeout=to_iso_duration_format_mins(self.timeout_minutes),
+        )
+
+    @classmethod
+    def _from_rest_object(cls, obj: RestImageLimitSettings) -> "ImageLimitSettings":
+        """Convert the rest object to a dict containing items to init the ImageLimitSettings objects.
+
+        :param obj: Limit settings for the AutoML job in Rest format.
+        :type obj: RestImageLimitSettings
+        :return: Limit settings for an AutoML Image Vertical.
+        :rtype: ImageLimitSettings
+        """
+        return cls(
+            max_concurrent_trials=obj.max_concurrent_trials,
+            max_trials=obj.max_trials,
+            timeout_minutes=from_iso_duration_format_mins(obj.timeout),
+        )
+
+    def __eq__(self, other: object) -> bool:
+        """Check equality between two ImageLimitSettings objects.
+
+        This method checks instance equality and returns True if both
+        instances have the same attributes with the same values.
+
+        :param other: Any object
+        :type other: object
+        :return: True or False
+        :rtype: bool
+        """
+        if not isinstance(other, ImageLimitSettings):
+            return NotImplemented
+
+        return (
+            self.max_concurrent_trials == other.max_concurrent_trials
+            and self.max_trials == other.max_trials
+            and self.timeout_minutes == other.timeout_minutes
+        )
+
+    def __ne__(self, other: object) -> bool:
+        """Check inequality between two ImageLimitSettings objects.
+
+        :param other: Any object
+        :type other: object
+        :return: True or False
+        :rtype: bool
+        """
+        return not self.__eq__(other)
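
A minimal sketch of how these limit settings are typically created, assuming the class is reachable
from the public azure.ai.ml.automl namespace; the values are placeholders:

    from azure.ai.ml.automl import ImageLimitSettings

    # timeout_minutes is converted to an ISO-8601 duration by _to_rest_object().
    limits = ImageLimitSettings(max_concurrent_trials=4, max_trials=20, timeout_minutes=120)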
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_model_settings.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_model_settings.py
new file mode 100644
index 00000000..890f987a
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_model_settings.py
@@ -0,0 +1,876 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing import Any, Optional
+
+# pylint: disable=R0902,too-many-locals
+from azure.ai.ml._restclient.v2023_04_01_preview.models import (
+    ImageModelSettingsClassification as RestImageModelSettingsClassification,
+)
+from azure.ai.ml._restclient.v2023_04_01_preview.models import (
+    ImageModelSettingsObjectDetection as RestImageModelSettingsObjectDetection,
+)
+from azure.ai.ml._restclient.v2023_04_01_preview.models import (
+    LearningRateScheduler,
+    LogTrainingMetrics,
+    LogValidationLoss,
+    ModelSize,
+    StochasticOptimizer,
+    ValidationMetricType,
+)
+from azure.ai.ml.entities._mixins import RestTranslatableMixin
+
+
+class ImageModelDistributionSettings(RestTranslatableMixin):
+    """Model settings for all AutoML Image Verticals.
+    Please do not instantiate directly. Use the child classes instead.
+
+    :param advanced_settings: Settings for advanced scenarios.
+    :type advanced_settings: str
+    :param ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+    :type ams_gradient: bool
+    :param beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+     [0, 1].
+    :type beta1: float
+    :param beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+     [0, 1].
+    :type beta2: float
+    :param checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
+    :type checkpoint_frequency: int
+    :param checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+     incremental training.
+    :type checkpoint_run_id: str
+    :param distributed: Whether to use distributed training.
+    :type distributed: bool
+    :param early_stopping: Enable early stopping logic during training.
+    :type early_stopping: bool
+    :param early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+     primary metric improvement
+     is tracked for early stopping. Must be a positive integer.
+    :type early_stopping_delay: int
+    :param early_stopping_patience: Minimum number of epochs or validation evaluations with no
+     primary metric improvement before
+     the run is stopped. Must be a positive integer.
+    :type early_stopping_patience: int
+    :param enable_onnx_normalization: Enable normalization when exporting ONNX model.
+    :type enable_onnx_normalization: bool
+    :param evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+     be a positive integer.
+    :type evaluation_frequency: int
+    :param gradient_accumulation_step: Gradient accumulation means running a configured number of
+     "GradAccumulationStep" steps without
+     updating the model weights while accumulating the gradients of those steps, and then using
+     the accumulated gradients to compute the weight updates. Must be a positive integer.
+    :type gradient_accumulation_step: int
+    :param layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+     For instance, passing 2 as the value for 'seresnext' means
+     freezing layer0 and layer1. For a full list of supported models and details on layer freezing,
+     please see: https://learn.microsoft.com/azure/machine-learning/how-to-auto-train-image-models.
+    :type layers_to_freeze: int
+    :param learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+    :type learning_rate: float
+    :param learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+     'step'. Possible values include: "None", "WarmupCosine", "Step".
+    :type learning_rate_scheduler: str or
+     ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+    :param model_name: Name of the model to use for training.
+     For more information on the available models please visit the official documentation:
+     https://learn.microsoft.com/azure/machine-learning/how-to-auto-train-image-models.
+    :type model_name: str
+    :param momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+    :type momentum: float
+    :param nesterov: Enable nesterov when optimizer is 'sgd'.
+    :type nesterov: bool
+    :param number_of_epochs: Number of training epochs. Must be a positive integer.
+    :type number_of_epochs: int
+    :param number_of_workers: Number of data loader workers. Must be a non-negative integer.
+    :type number_of_workers: int
+    :param optimizer: Type of optimizer. Possible values include: "None", "Sgd", "Adam", "Adamw".
+    :type optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+    :param random_seed: Random seed to be used when using deterministic training.
+    :type random_seed: int
+    :param step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+     the range [0, 1].
+    :type step_lr_gamma: float
+    :param step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+     positive integer.
+    :type step_lr_step_size: int
+    :param training_batch_size: Training batch size. Must be a positive integer.
+    :type training_batch_size: int
+    :param validation_batch_size: Validation batch size. Must be a positive integer.
+    :type validation_batch_size: int
+    :param warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+     'warmup_cosine'. Must be a float in the range [0, 1].
+    :type warmup_cosine_lr_cycles: float
+    :param warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+     'warmup_cosine'. Must be a positive integer.
+    :type warmup_cosine_lr_warmup_epochs: int
+    :param weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+     a float in the range [0, 1].
+    :type weight_decay: float
+    """
+
+    def __init__(
+        self,
+        *,
+        advanced_settings: Optional[str] = None,
+        ams_gradient: Optional[bool] = None,
+        beta1: Optional[float] = None,
+        beta2: Optional[float] = None,
+        checkpoint_frequency: Optional[int] = None,
+        checkpoint_run_id: Optional[str] = None,
+        distributed: Optional[bool] = None,
+        early_stopping: Optional[bool] = None,
+        early_stopping_delay: Optional[int] = None,
+        early_stopping_patience: Optional[int] = None,
+        enable_onnx_normalization: Optional[bool] = None,
+        evaluation_frequency: Optional[int] = None,
+        gradient_accumulation_step: Optional[int] = None,
+        layers_to_freeze: Optional[int] = None,
+        learning_rate: Optional[float] = None,
+        learning_rate_scheduler: Optional[LearningRateScheduler] = None,
+        model_name: Optional[str] = None,
+        momentum: Optional[float] = None,
+        nesterov: Optional[bool] = None,
+        number_of_epochs: Optional[int] = None,
+        number_of_workers: Optional[int] = None,
+        optimizer: Optional[StochasticOptimizer] = None,
+        random_seed: Optional[int] = None,
+        step_lr_gamma: Optional[float] = None,
+        step_lr_step_size: Optional[int] = None,
+        training_batch_size: Optional[int] = None,
+        validation_batch_size: Optional[int] = None,
+        warmup_cosine_lr_cycles: Optional[float] = None,
+        warmup_cosine_lr_warmup_epochs: Optional[int] = None,
+        weight_decay: Optional[float] = None,
+    ):
+        self.advanced_settings = advanced_settings
+        self.ams_gradient = ams_gradient
+        self.beta1 = beta1
+        self.beta2 = beta2
+        self.checkpoint_frequency = checkpoint_frequency
+        self.checkpoint_run_id = checkpoint_run_id
+        self.distributed = distributed
+        self.early_stopping = early_stopping
+        self.early_stopping_delay = early_stopping_delay
+        self.early_stopping_patience = early_stopping_patience
+        self.enable_onnx_normalization = enable_onnx_normalization
+        self.evaluation_frequency = evaluation_frequency
+        self.gradient_accumulation_step = gradient_accumulation_step
+        self.layers_to_freeze = layers_to_freeze
+        self.learning_rate = learning_rate
+        self.learning_rate_scheduler = learning_rate_scheduler
+        self.model_name = model_name
+        self.momentum = momentum
+        self.nesterov = nesterov
+        self.number_of_epochs = number_of_epochs
+        self.number_of_workers = number_of_workers
+        self.optimizer = optimizer
+        self.random_seed = random_seed
+        self.step_lr_gamma = step_lr_gamma
+        self.step_lr_step_size = step_lr_step_size
+        self.training_batch_size = training_batch_size
+        self.validation_batch_size = validation_batch_size
+        self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
+        self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
+        self.weight_decay = weight_decay
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, ImageModelDistributionSettings):
+            return NotImplemented
+
+        return (
+            self.advanced_settings == other.advanced_settings
+            and self.ams_gradient == other.ams_gradient
+            and self.beta1 == other.beta1
+            and self.beta2 == other.beta2
+            and self.checkpoint_frequency == other.checkpoint_frequency
+            and self.checkpoint_run_id == other.checkpoint_run_id
+            and self.distributed == other.distributed
+            and self.early_stopping == other.early_stopping
+            and self.early_stopping_delay == other.early_stopping_delay
+            and self.early_stopping_patience == other.early_stopping_patience
+            and self.enable_onnx_normalization == other.enable_onnx_normalization
+            and self.evaluation_frequency == other.evaluation_frequency
+            and self.gradient_accumulation_step == other.gradient_accumulation_step
+            and self.layers_to_freeze == other.layers_to_freeze
+            and self.learning_rate == other.learning_rate
+            and self.learning_rate_scheduler == other.learning_rate_scheduler
+            and self.model_name == other.model_name
+            and self.momentum == other.momentum
+            and self.nesterov == other.nesterov
+            and self.number_of_epochs == other.number_of_epochs
+            and self.number_of_workers == other.number_of_workers
+            and self.optimizer == other.optimizer
+            and self.random_seed == other.random_seed
+            and self.step_lr_gamma == other.step_lr_gamma
+            and self.step_lr_step_size == other.step_lr_step_size
+            and self.training_batch_size == other.training_batch_size
+            and self.validation_batch_size == other.validation_batch_size
+            and self.warmup_cosine_lr_cycles == other.warmup_cosine_lr_cycles
+            and self.warmup_cosine_lr_warmup_epochs == other.warmup_cosine_lr_warmup_epochs
+            and self.weight_decay == other.weight_decay
+        )
+
+
+class ImageModelSettingsClassification(ImageModelDistributionSettings):
+    """Model settings for AutoML Image Classification tasks.
+
+    :param advanced_settings: Settings for advanced scenarios.
+    :type advanced_settings: str
+    :param ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+    :type ams_gradient: bool
+    :param beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+     [0, 1].
+    :type beta1: float
+    :param beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+     [0, 1].
+    :type beta2: float
+    :param checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
+    :type checkpoint_frequency: int
+    :param checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+     incremental training.
+    :type checkpoint_run_id: str
+    :param distributed: Whether to use distributed training.
+    :type distributed: bool
+    :param early_stopping: Enable early stopping logic during training.
+    :type early_stopping: bool
+    :param early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+     primary metric improvement
+     is tracked for early stopping. Must be a positive integer.
+    :type early_stopping_delay: int
+    :param early_stopping_patience: Minimum number of epochs or validation evaluations with no
+     primary metric improvement before
+     the run is stopped. Must be a positive integer.
+    :type early_stopping_patience: int
+    :param enable_onnx_normalization: Enable normalization when exporting ONNX model.
+    :type enable_onnx_normalization: bool
+    :param evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+     be a positive integer.
+    :type evaluation_frequency: int
+    :param gradient_accumulation_step: Gradient accumulation means running a configured number of
+     "GradAccumulationStep" steps without
+     updating the model weights while accumulating the gradients of those steps, and then using
+     the accumulated gradients to compute the weight updates. Must be a positive integer.
+    :type gradient_accumulation_step: int
+    :param layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+     For instance, passing 2 as the value for 'seresnext' means
+     freezing layer0 and layer1. For a full list of supported models and details on layer freezing,
+     please see: https://learn.microsoft.com/azure/machine-learning/how-to-auto-train-image-models.
+    :type layers_to_freeze: int
+    :param learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+    :type learning_rate: float
+    :param learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+     'step'. Possible values include: "None", "WarmupCosine", "Step".
+    :type learning_rate_scheduler: str or
+     ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+    :param model_name: Name of the model to use for training.
+     For more information on the available models please visit the official documentation:
+     https://learn.microsoft.com/azure/machine-learning/how-to-auto-train-image-models.
+    :type model_name: str
+    :param momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+    :type momentum: float
+    :param nesterov: Enable nesterov when optimizer is 'sgd'.
+    :type nesterov: bool
+    :param number_of_epochs: Number of training epochs. Must be a positive integer.
+    :type number_of_epochs: int
+    :param number_of_workers: Number of data loader workers. Must be a non-negative integer.
+    :type number_of_workers: int
+    :param optimizer: Type of optimizer. Possible values include: "None", "Sgd", "Adam", "Adamw".
+    :type optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+    :param random_seed: Random seed to be used when using deterministic training.
+    :type random_seed: int
+    :param step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+     the range [0, 1].
+    :type step_lr_gamma: float
+    :param step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+     positive integer.
+    :type step_lr_step_size: int
+    :param training_batch_size: Training batch size. Must be a positive integer.
+    :type training_batch_size: int
+    :param validation_batch_size: Validation batch size. Must be a positive integer.
+    :type validation_batch_size: int
+    :param warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+     'warmup_cosine'. Must be a float in the range [0, 1].
+    :type warmup_cosine_lr_cycles: float
+    :param warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+     'warmup_cosine'. Must be a positive integer.
+    :type warmup_cosine_lr_warmup_epochs: int
+    :param weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+     a float in the range [0, 1].
+    :type weight_decay: float
+    :param training_crop_size: Image crop size that is input to the neural network for the training
+     dataset. Must be a positive integer.
+    :type training_crop_size: int
+    :param validation_crop_size: Image crop size that is input to the neural network for the
+     validation dataset. Must be a positive integer.
+    :type validation_crop_size: int
+    :param validation_resize_size: Image size to which to resize before cropping for validation
+     dataset. Must be a positive integer.
+    :type validation_resize_size: int
+    :param weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
+     1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be
+     0, 1, or 2.
+    :type weighted_loss: int
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/ml_samples_automl_image.py
+            :start-after: [START automl.automl_image_job.image_classification_model_settings]
+            :end-before: [END automl.automl_image_job.image_classification_model_settings]
+            :language: python
+            :dedent: 8
+            :caption: Defining the automl image classification model settings.
+    """
+
+    def __init__(
+        self,
+        *,
+        advanced_settings: Optional[str] = None,
+        ams_gradient: Optional[bool] = None,
+        beta1: Optional[float] = None,
+        beta2: Optional[float] = None,
+        checkpoint_frequency: Optional[int] = None,
+        checkpoint_run_id: Optional[str] = None,
+        distributed: Optional[bool] = None,
+        early_stopping: Optional[bool] = None,
+        early_stopping_delay: Optional[int] = None,
+        early_stopping_patience: Optional[int] = None,
+        enable_onnx_normalization: Optional[bool] = None,
+        evaluation_frequency: Optional[int] = None,
+        gradient_accumulation_step: Optional[int] = None,
+        layers_to_freeze: Optional[int] = None,
+        learning_rate: Optional[float] = None,
+        learning_rate_scheduler: Optional[LearningRateScheduler] = None,
+        model_name: Optional[str] = None,
+        momentum: Optional[float] = None,
+        nesterov: Optional[bool] = None,
+        number_of_epochs: Optional[int] = None,
+        number_of_workers: Optional[int] = None,
+        optimizer: Optional[StochasticOptimizer] = None,
+        random_seed: Optional[int] = None,
+        step_lr_gamma: Optional[float] = None,
+        step_lr_step_size: Optional[int] = None,
+        training_batch_size: Optional[int] = None,
+        validation_batch_size: Optional[int] = None,
+        warmup_cosine_lr_cycles: Optional[float] = None,
+        warmup_cosine_lr_warmup_epochs: Optional[int] = None,
+        weight_decay: Optional[float] = None,
+        training_crop_size: Optional[int] = None,
+        validation_crop_size: Optional[int] = None,
+        validation_resize_size: Optional[int] = None,
+        weighted_loss: Optional[int] = None,
+        **kwargs: Any,
+    ):
+        super(ImageModelSettingsClassification, self).__init__(
+            advanced_settings=advanced_settings,
+            ams_gradient=ams_gradient,
+            beta1=beta1,
+            beta2=beta2,
+            checkpoint_frequency=checkpoint_frequency,
+            checkpoint_run_id=checkpoint_run_id,
+            distributed=distributed,
+            early_stopping=early_stopping,
+            early_stopping_delay=early_stopping_delay,
+            early_stopping_patience=early_stopping_patience,
+            enable_onnx_normalization=enable_onnx_normalization,
+            evaluation_frequency=evaluation_frequency,
+            gradient_accumulation_step=gradient_accumulation_step,
+            layers_to_freeze=layers_to_freeze,
+            learning_rate=learning_rate,
+            learning_rate_scheduler=learning_rate_scheduler,
+            model_name=model_name,
+            momentum=momentum,
+            nesterov=nesterov,
+            number_of_epochs=number_of_epochs,
+            number_of_workers=number_of_workers,
+            optimizer=optimizer,
+            random_seed=random_seed,
+            step_lr_gamma=step_lr_gamma,
+            step_lr_step_size=step_lr_step_size,
+            training_batch_size=training_batch_size,
+            validation_batch_size=validation_batch_size,
+            warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
+            warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
+            weight_decay=weight_decay,
+            **kwargs,
+        )
+        self.training_crop_size = training_crop_size
+        self.validation_crop_size = validation_crop_size
+        self.validation_resize_size = validation_resize_size
+        self.weighted_loss = weighted_loss
+
+    def _to_rest_object(self) -> RestImageModelSettingsClassification:
+        return RestImageModelSettingsClassification(
+            advanced_settings=self.advanced_settings,
+            ams_gradient=self.ams_gradient,
+            beta1=self.beta1,
+            beta2=self.beta2,
+            checkpoint_frequency=self.checkpoint_frequency,
+            checkpoint_run_id=self.checkpoint_run_id,
+            distributed=self.distributed,
+            early_stopping=self.early_stopping,
+            early_stopping_delay=self.early_stopping_delay,
+            early_stopping_patience=self.early_stopping_patience,
+            enable_onnx_normalization=self.enable_onnx_normalization,
+            evaluation_frequency=self.evaluation_frequency,
+            gradient_accumulation_step=self.gradient_accumulation_step,
+            layers_to_freeze=self.layers_to_freeze,
+            learning_rate=self.learning_rate,
+            learning_rate_scheduler=self.learning_rate_scheduler,
+            model_name=self.model_name,
+            momentum=self.momentum,
+            nesterov=self.nesterov,
+            number_of_epochs=self.number_of_epochs,
+            number_of_workers=self.number_of_workers,
+            optimizer=self.optimizer,
+            random_seed=self.random_seed,
+            step_lr_gamma=self.step_lr_gamma,
+            step_lr_step_size=self.step_lr_step_size,
+            training_batch_size=self.training_batch_size,
+            validation_batch_size=self.validation_batch_size,
+            warmup_cosine_lr_cycles=self.warmup_cosine_lr_cycles,
+            warmup_cosine_lr_warmup_epochs=self.warmup_cosine_lr_warmup_epochs,
+            weight_decay=self.weight_decay,
+            training_crop_size=self.training_crop_size,
+            validation_crop_size=self.validation_crop_size,
+            validation_resize_size=self.validation_resize_size,
+            weighted_loss=self.weighted_loss,
+        )
+
+    @classmethod
+    def _from_rest_object(cls, obj: RestImageModelSettingsClassification) -> "ImageModelSettingsClassification":
+        return cls(
+            advanced_settings=obj.advanced_settings,
+            ams_gradient=obj.ams_gradient,
+            beta1=obj.beta1,
+            beta2=obj.beta2,
+            checkpoint_frequency=obj.checkpoint_frequency,
+            checkpoint_run_id=obj.checkpoint_run_id,
+            distributed=obj.distributed,
+            early_stopping=obj.early_stopping,
+            early_stopping_delay=obj.early_stopping_delay,
+            early_stopping_patience=obj.early_stopping_patience,
+            enable_onnx_normalization=obj.enable_onnx_normalization,
+            evaluation_frequency=obj.evaluation_frequency,
+            gradient_accumulation_step=obj.gradient_accumulation_step,
+            layers_to_freeze=obj.layers_to_freeze,
+            learning_rate=obj.learning_rate,
+            learning_rate_scheduler=obj.learning_rate_scheduler,
+            model_name=obj.model_name,
+            momentum=obj.momentum,
+            nesterov=obj.nesterov,
+            number_of_epochs=obj.number_of_epochs,
+            number_of_workers=obj.number_of_workers,
+            optimizer=obj.optimizer,
+            random_seed=obj.random_seed,
+            step_lr_gamma=obj.step_lr_gamma,
+            step_lr_step_size=obj.step_lr_step_size,
+            training_batch_size=obj.training_batch_size,
+            validation_batch_size=obj.validation_batch_size,
+            warmup_cosine_lr_cycles=obj.warmup_cosine_lr_cycles,
+            warmup_cosine_lr_warmup_epochs=obj.warmup_cosine_lr_warmup_epochs,
+            weight_decay=obj.weight_decay,
+            training_crop_size=obj.training_crop_size,
+            validation_crop_size=obj.validation_crop_size,
+            validation_resize_size=obj.validation_resize_size,
+            weighted_loss=obj.weighted_loss,
+        )
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, ImageModelSettingsClassification):
+            return NotImplemented
+
+        return (
+            super().__eq__(other)
+            and self.training_crop_size == other.training_crop_size
+            and self.validation_crop_size == other.validation_crop_size
+            and self.validation_resize_size == other.validation_resize_size
+            and self.weighted_loss == other.weighted_loss
+        )
+
+    def __ne__(self, other: object) -> bool:
+        return not self.__eq__(other)
+
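
A minimal sketch of fixed (non-swept) classification training parameters, assuming the class is
exported from azure.ai.ml.automl; the model name and values are placeholders:

    from azure.ai.ml.automl import ImageModelSettingsClassification

    # These settings map one-to-one onto the REST payload via _to_rest_object().
    model_settings = ImageModelSettingsClassification(
        model_name="vitb16r224",
        number_of_epochs=15,
        training_crop_size=224,
        weighted_loss=1,
    )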
+
+class ImageModelSettingsObjectDetection(ImageModelDistributionSettings):
+    """Model settings for AutoML Image Object Detection Task.
+
+    :param advanced_settings: Settings for advanced scenarios.
+    :type advanced_settings: str
+    :param ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+    :type ams_gradient: bool
+    :param beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+     [0, 1].
+    :type beta1: float
+    :param beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
+     [0, 1].
+    :type beta2: float
+    :param checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
+    :type checkpoint_frequency: int
+    :param checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
+     incremental training.
+    :type checkpoint_run_id: str
+    :param distributed: Whether to use distributed training.
+    :type distributed: bool
+    :param early_stopping: Enable early stopping logic during training.
+    :type early_stopping: bool
+    :param early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
+     primary metric improvement
+     is tracked for early stopping. Must be a positive integer.
+    :type early_stopping_delay: int
+    :param early_stopping_patience: Minimum number of epochs or validation evaluations with no
+     primary metric improvement before
+     the run is stopped. Must be a positive integer.
+    :type early_stopping_patience: int
+    :param enable_onnx_normalization: Enable normalization when exporting ONNX model.
+    :type enable_onnx_normalization: bool
+    :param evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
+     be a positive integer.
+    :type evaluation_frequency: int
+    :param gradient_accumulation_step: Gradient accumulation means running a configured number of
+     "GradAccumulationStep" steps without
+     updating the model weights while accumulating the gradients of those steps, and then using
+     the accumulated gradients to compute the weight updates. Must be a positive integer.
+    :type gradient_accumulation_step: int
+    :param layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
+     For instance, passing 2 as the value for 'seresnext' means
+     freezing layer0 and layer1. For a full list of supported models and details on layer freezing,
+     please see: https://learn.microsoft.com/azure/machine-learning/how-to-auto-train-image-models.
+    :type layers_to_freeze: int
+    :param learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+    :type learning_rate: float
+    :param learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+     'step'. Possible values include: "None", "WarmupCosine", "Step".
+    :type learning_rate_scheduler: str or
+     ~azure.mgmt.machinelearningservices.models.LearningRateScheduler
+    :param model_name: Name of the model to use for training.
+     For more information on the available models please visit the official documentation:
+     https://learn.microsoft.com/azure/machine-learning/how-to-auto-train-image-models.
+    :type model_name: str
+    :param momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
+    :type momentum: float
+    :param nesterov: Enable nesterov when optimizer is 'sgd'.
+    :type nesterov: bool
+    :param number_of_epochs: Number of training epochs. Must be a positive integer.
+    :type number_of_epochs: int
+    :param number_of_workers: Number of data loader workers. Must be a non-negative integer.
+    :type number_of_workers: int
+    :param optimizer: Type of optimizer. Possible values include: "None", "Sgd", "Adam", "Adamw".
+    :type optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
+    :param random_seed: Random seed to be used when using deterministic training.
+    :type random_seed: int
+    :param step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
+     the range [0, 1].
+    :type step_lr_gamma: float
+    :param step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
+     positive integer.
+    :type step_lr_step_size: int
+    :param training_batch_size: Training batch size. Must be a positive integer.
+    :type training_batch_size: int
+    :param validation_batch_size: Validation batch size. Must be a positive integer.
+    :type validation_batch_size: int
+    :param warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+     'warmup_cosine'. Must be a float in the range [0, 1].
+    :type warmup_cosine_lr_cycles: float
+    :param warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+     'warmup_cosine'. Must be a positive integer.
+    :type warmup_cosine_lr_warmup_epochs: int
+    :param weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
+     a float in the range [0, 1].
+    :type weight_decay: float
+    :param box_detections_per_image: Maximum number of detections per image, for all classes. Must
+     be a positive integer.
+     Note: This settings is not supported for the 'yolov5' algorithm.
+    :type box_detections_per_image: int
+    :param box_score_threshold: During inference, only return proposals with a classification score
+     greater than
+     BoxScoreThreshold. Must be a float in the range[0, 1].
+    :type box_score_threshold: float
+    :param image_size: Image size for train and validation. Must be a positive integer.
+     Note: The training run may get into CUDA OOM if the size is too big.
+     Note: This settings is only supported for the 'yolov5' algorithm.
+    :type image_size: int
+    :param max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
+     Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+     Note: This settings is not supported for the 'yolov5' algorithm.
+    :type max_size: int
+    :param min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
+     Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
+     Note: This settings is not supported for the 'yolov5' algorithm.
+    :type min_size: int
+    :param model_size: Model size. Must be 'small', 'medium', 'large'.
+     Note: training run may get into CUDA OOM if the model size is too big.
+     Note: This settings is only supported for the 'yolov5' algorithm. Possible values include:
+     "None", "Small", "Medium", "Large", "ExtraLarge".
+    :type model_size: str or ~azure.mgmt.machinelearningservices.models.ModelSize
+    :param multi_scale: Enable multi-scale image by varying image size by +/- 50%.
+     Note: training run may get into CUDA OOM if no sufficient GPU memory.
+     Note: This settings is only supported for the 'yolov5' algorithm.
+    :type multi_scale: bool
+    :param nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be a
+     float in the range [0, 1].
+    :type nms_iou_threshold: float
+    :param tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must not
+     be
+     None to enable small object detection logic. A string containing two integers in mxn format.
+     Note: This settings is not supported for the 'yolov5' algorithm.
+    :type tile_grid_size: str
+    :param tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be float
+     in the range [0, 1).
+     Note: This settings is not supported for the 'yolov5' algorithm.
+    :type tile_overlap_ratio: float
+    :param tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
+     predictions from tiles and image.
+     Used in validation/ inference. Must be float in the range [0, 1].
+     Note: This settings is not supported for the 'yolov5' algorithm.
+    :type tile_predictions_nms_threshold: float
+    :param validation_iou_threshold: IOU threshold to use when computing validation metric. Must be
+     float in the range [0, 1].
+    :type validation_iou_threshold: float
+    :param validation_metric_type: Metric computation method to use for validation metrics. Possible
+     values include: "None", "Coco", "Voc", "CocoVoc".
+    :type validation_metric_type: str or
+     ~azure.mgmt.machinelearningservices.models.ValidationMetricType
+    :param log_training_metrics: Indicates whether or not to log training metrics.
+    :type log_training_metrics: str or
+     ~azure.mgmt.machinelearningservices.models.LogTrainingMetrics
+    :param log_validation_loss: Indicates whether or not to log validation loss.
+    :type log_validation_loss: str or
+     ~azure.mgmt.machinelearningservices.models.LogValidationLoss
+
+    .. literalinclude:: ../samples/ml_samples_automl_image.py
+            :start-after: [START automl.automl_image_job.image_object_detection_model_settings]
+            :end-before: [END automl.automl_image_job.image_object_detection_model_settings]
+            :language: python
+            :dedent: 8
+            :caption: Defining the automl image object detection or instance segmentation model settings.
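+
+    A minimal construction sketch is shown below; the hyperparameter values are illustrative only,
+    and the ``azure.ai.ml.automl`` import path assumes the usual public re-export of this class:
+
+    .. code-block:: python
+
+        from azure.ai.ml.automl import ImageModelSettingsObjectDetection
+
+        # Illustrative values; 'yolov5'-only settings such as image_size should be
+        # omitted when a different object detection algorithm is selected.
+        model_settings = ImageModelSettingsObjectDetection(
+            model_name="yolov5",
+            image_size=640,
+            box_score_threshold=0.4,
+            nms_iou_threshold=0.5,
+            number_of_epochs=30,
+        )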
+    """
+
+    def __init__(
+        self,
+        *,
+        advanced_settings: Optional[str] = None,
+        ams_gradient: Optional[bool] = None,
+        beta1: Optional[float] = None,
+        beta2: Optional[float] = None,
+        checkpoint_frequency: Optional[int] = None,
+        checkpoint_run_id: Optional[str] = None,
+        distributed: Optional[bool] = None,
+        early_stopping: Optional[bool] = None,
+        early_stopping_delay: Optional[int] = None,
+        early_stopping_patience: Optional[int] = None,
+        enable_onnx_normalization: Optional[bool] = None,
+        evaluation_frequency: Optional[int] = None,
+        gradient_accumulation_step: Optional[int] = None,
+        layers_to_freeze: Optional[int] = None,
+        learning_rate: Optional[float] = None,
+        learning_rate_scheduler: Optional[LearningRateScheduler] = None,
+        model_name: Optional[str] = None,
+        momentum: Optional[float] = None,
+        nesterov: Optional[bool] = None,
+        number_of_epochs: Optional[int] = None,
+        number_of_workers: Optional[int] = None,
+        optimizer: Optional[StochasticOptimizer] = None,
+        random_seed: Optional[int] = None,
+        step_lr_gamma: Optional[float] = None,
+        step_lr_step_size: Optional[int] = None,
+        training_batch_size: Optional[int] = None,
+        validation_batch_size: Optional[int] = None,
+        warmup_cosine_lr_cycles: Optional[float] = None,
+        warmup_cosine_lr_warmup_epochs: Optional[int] = None,
+        weight_decay: Optional[float] = None,
+        box_detections_per_image: Optional[int] = None,
+        box_score_threshold: Optional[float] = None,
+        image_size: Optional[int] = None,
+        max_size: Optional[int] = None,
+        min_size: Optional[int] = None,
+        model_size: Optional[ModelSize] = None,
+        multi_scale: Optional[bool] = None,
+        nms_iou_threshold: Optional[float] = None,
+        tile_grid_size: Optional[str] = None,
+        tile_overlap_ratio: Optional[float] = None,
+        tile_predictions_nms_threshold: Optional[float] = None,
+        validation_iou_threshold: Optional[float] = None,
+        validation_metric_type: Optional[ValidationMetricType] = None,
+        log_training_metrics: Optional[LogTrainingMetrics] = None,
+        log_validation_loss: Optional[LogValidationLoss] = None,
+        **kwargs: Any,
+    ):
+        super(ImageModelSettingsObjectDetection, self).__init__(
+            advanced_settings=advanced_settings,
+            ams_gradient=ams_gradient,
+            beta1=beta1,
+            beta2=beta2,
+            checkpoint_frequency=checkpoint_frequency,
+            checkpoint_run_id=checkpoint_run_id,
+            distributed=distributed,
+            early_stopping=early_stopping,
+            early_stopping_delay=early_stopping_delay,
+            early_stopping_patience=early_stopping_patience,
+            enable_onnx_normalization=enable_onnx_normalization,
+            evaluation_frequency=evaluation_frequency,
+            gradient_accumulation_step=gradient_accumulation_step,
+            layers_to_freeze=layers_to_freeze,
+            learning_rate=learning_rate,
+            learning_rate_scheduler=learning_rate_scheduler,
+            model_name=model_name,
+            momentum=momentum,
+            nesterov=nesterov,
+            number_of_epochs=number_of_epochs,
+            number_of_workers=number_of_workers,
+            optimizer=optimizer,
+            random_seed=random_seed,
+            step_lr_gamma=step_lr_gamma,
+            step_lr_step_size=step_lr_step_size,
+            training_batch_size=training_batch_size,
+            validation_batch_size=validation_batch_size,
+            warmup_cosine_lr_cycles=warmup_cosine_lr_cycles,
+            warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs,
+            weight_decay=weight_decay,
+            **kwargs,
+        )
+        self.box_detections_per_image = box_detections_per_image
+        self.box_score_threshold = box_score_threshold
+        self.image_size = image_size
+        self.max_size = max_size
+        self.min_size = min_size
+        self.model_size = model_size
+        self.multi_scale = multi_scale
+        self.nms_iou_threshold = nms_iou_threshold
+        self.tile_grid_size = tile_grid_size
+        self.tile_overlap_ratio = tile_overlap_ratio
+        self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
+        self.validation_iou_threshold = validation_iou_threshold
+        self.validation_metric_type = validation_metric_type
+        self.log_training_metrics = log_training_metrics
+        self.log_validation_loss = log_validation_loss
+
+    def _to_rest_object(self) -> RestImageModelSettingsObjectDetection:
+        return RestImageModelSettingsObjectDetection(
+            advanced_settings=self.advanced_settings,
+            ams_gradient=self.ams_gradient,
+            beta1=self.beta1,
+            beta2=self.beta2,
+            checkpoint_frequency=self.checkpoint_frequency,
+            checkpoint_run_id=self.checkpoint_run_id,
+            distributed=self.distributed,
+            early_stopping=self.early_stopping,
+            early_stopping_delay=self.early_stopping_delay,
+            early_stopping_patience=self.early_stopping_patience,
+            enable_onnx_normalization=self.enable_onnx_normalization,
+            evaluation_frequency=self.evaluation_frequency,
+            gradient_accumulation_step=self.gradient_accumulation_step,
+            layers_to_freeze=self.layers_to_freeze,
+            learning_rate=self.learning_rate,
+            learning_rate_scheduler=self.learning_rate_scheduler,
+            model_name=self.model_name,
+            momentum=self.momentum,
+            nesterov=self.nesterov,
+            number_of_epochs=self.number_of_epochs,
+            number_of_workers=self.number_of_workers,
+            optimizer=self.optimizer,
+            random_seed=self.random_seed,
+            step_lr_gamma=self.step_lr_gamma,
+            step_lr_step_size=self.step_lr_step_size,
+            training_batch_size=self.training_batch_size,
+            validation_batch_size=self.validation_batch_size,
+            warmup_cosine_lr_cycles=self.warmup_cosine_lr_cycles,
+            warmup_cosine_lr_warmup_epochs=self.warmup_cosine_lr_warmup_epochs,
+            weight_decay=self.weight_decay,
+            box_detections_per_image=self.box_detections_per_image,
+            box_score_threshold=self.box_score_threshold,
+            image_size=self.image_size,
+            max_size=self.max_size,
+            min_size=self.min_size,
+            model_size=self.model_size,
+            multi_scale=self.multi_scale,
+            nms_iou_threshold=self.nms_iou_threshold,
+            tile_grid_size=self.tile_grid_size,
+            tile_overlap_ratio=self.tile_overlap_ratio,
+            tile_predictions_nms_threshold=self.tile_predictions_nms_threshold,
+            validation_iou_threshold=self.validation_iou_threshold,
+            validation_metric_type=self.validation_metric_type,
+            log_training_metrics=self.log_training_metrics,
+            log_validation_loss=self.log_validation_loss,
+        )
+
+    @classmethod
+    def _from_rest_object(cls, obj: RestImageModelSettingsObjectDetection) -> "ImageModelSettingsObjectDetection":
+        return cls(
+            advanced_settings=obj.advanced_settings,
+            ams_gradient=obj.ams_gradient,
+            beta1=obj.beta1,
+            beta2=obj.beta2,
+            checkpoint_frequency=obj.checkpoint_frequency,
+            checkpoint_run_id=obj.checkpoint_run_id,
+            distributed=obj.distributed,
+            early_stopping=obj.early_stopping,
+            early_stopping_delay=obj.early_stopping_delay,
+            early_stopping_patience=obj.early_stopping_patience,
+            enable_onnx_normalization=obj.enable_onnx_normalization,
+            evaluation_frequency=obj.evaluation_frequency,
+            gradient_accumulation_step=obj.gradient_accumulation_step,
+            layers_to_freeze=obj.layers_to_freeze,
+            learning_rate=obj.learning_rate,
+            learning_rate_scheduler=obj.learning_rate_scheduler,
+            model_name=obj.model_name,
+            momentum=obj.momentum,
+            nesterov=obj.nesterov,
+            number_of_epochs=obj.number_of_epochs,
+            number_of_workers=obj.number_of_workers,
+            optimizer=obj.optimizer,
+            random_seed=obj.random_seed,
+            step_lr_gamma=obj.step_lr_gamma,
+            step_lr_step_size=obj.step_lr_step_size,
+            training_batch_size=obj.training_batch_size,
+            validation_batch_size=obj.validation_batch_size,
+            warmup_cosine_lr_cycles=obj.warmup_cosine_lr_cycles,
+            warmup_cosine_lr_warmup_epochs=obj.warmup_cosine_lr_warmup_epochs,
+            weight_decay=obj.weight_decay,
+            box_detections_per_image=obj.box_detections_per_image,
+            box_score_threshold=obj.box_score_threshold,
+            image_size=obj.image_size,
+            max_size=obj.max_size,
+            min_size=obj.min_size,
+            model_size=obj.model_size,
+            multi_scale=obj.multi_scale,
+            nms_iou_threshold=obj.nms_iou_threshold,
+            tile_grid_size=obj.tile_grid_size,
+            tile_overlap_ratio=obj.tile_overlap_ratio,
+            tile_predictions_nms_threshold=obj.tile_predictions_nms_threshold,
+            validation_iou_threshold=obj.validation_iou_threshold,
+            validation_metric_type=obj.validation_metric_type,
+            log_training_metrics=obj.log_training_metrics,
+            log_validation_loss=obj.log_validation_loss,
+        )
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, ImageModelSettingsObjectDetection):
+            return NotImplemented
+
+        return (
+            super().__eq__(other)
+            and self.box_detections_per_image == other.box_detections_per_image
+            and self.box_score_threshold == other.box_score_threshold
+            and self.image_size == other.image_size
+            and self.max_size == other.max_size
+            and self.min_size == other.min_size
+            and self.model_size == other.model_size
+            and self.multi_scale == other.multi_scale
+            and self.nms_iou_threshold == other.nms_iou_threshold
+            and self.tile_grid_size == other.tile_grid_size
+            and self.tile_overlap_ratio == other.tile_overlap_ratio
+            and self.tile_predictions_nms_threshold == other.tile_predictions_nms_threshold
+            and self.validation_iou_threshold == other.validation_iou_threshold
+            and self.validation_metric_type == other.validation_metric_type
+            and self.log_training_metrics == other.log_training_metrics
+            and self.log_validation_loss == other.log_validation_loss
+        )
+
+    def __ne__(self, other: object) -> bool:
+        return not self.__eq__(other)
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_object_detection_job.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_object_detection_job.py
new file mode 100644
index 00000000..f8d070d2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_object_detection_job.py
@@ -0,0 +1,240 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+# pylint: disable=protected-access
+
+from typing import Any, Dict, Optional, Union
+
+from azure.ai.ml._restclient.v2023_04_01_preview.models import AutoMLJob as RestAutoMLJob
+from azure.ai.ml._restclient.v2023_04_01_preview.models import ImageObjectDetection as RestImageObjectDetection
+from azure.ai.ml._restclient.v2023_04_01_preview.models import JobBase, ObjectDetectionPrimaryMetrics, TaskType
+from azure.ai.ml._utils.utils import camel_to_snake, is_data_binding_expression
+from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY
+from azure.ai.ml.constants._job.automl import AutoMLConstants
+from azure.ai.ml.entities._credentials import _BaseJobIdentityConfiguration
+from azure.ai.ml.entities._job._input_output_helpers import from_rest_data_outputs, to_rest_data_outputs
+from azure.ai.ml.entities._job.automl.image.automl_image_object_detection_base import AutoMLImageObjectDetectionBase
+from azure.ai.ml.entities._job.automl.image.image_limit_settings import ImageLimitSettings
+from azure.ai.ml.entities._job.automl.image.image_model_settings import ImageModelSettingsObjectDetection
+from azure.ai.ml.entities._job.automl.image.image_sweep_settings import ImageSweepSettings
+from azure.ai.ml.entities._util import load_from_dict
+
+
+class ImageObjectDetectionJob(AutoMLImageObjectDetectionBase):
+    """Configuration for AutoML Image Object Detection job.
+
+    :keyword primary_metric: The primary metric to use for optimization.
+    :paramtype primary_metric: Optional[Union[str, ~azure.ai.ml.ObjectDetectionPrimaryMetrics]]
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/ml_samples_automl_image.py
+            :start-after: [START automl.automl_image_job.image_object_detection_job]
+            :end-before: [END automl.automl_image_job.image_object_detection_job]
+            :language: python
+            :dedent: 8
+            :caption: Creating an automl image object detection job
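+
+    A minimal construction sketch; the column name, MLTable paths, and metric value below are
+    illustrative, and the ``azure.ai.ml.automl`` import path assumes the usual public re-export of
+    this class:
+
+    .. code-block:: python
+
+        from azure.ai.ml import Input
+        from azure.ai.ml.automl import ImageObjectDetectionJob
+
+        # Training and validation data point at MLTable folders; paths are placeholders.
+        job = ImageObjectDetectionJob(
+            primary_metric="mean_average_precision",
+            target_column_name="label",
+            training_data=Input(type="mltable", path="./train-mltable-folder"),
+            validation_data=Input(type="mltable", path="./valid-mltable-folder"),
+        )
+        # Submission would typically go through an authenticated MLClient,
+        # e.g. ml_client.jobs.create_or_update(job).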
+    """
+
+    _DEFAULT_PRIMARY_METRIC = ObjectDetectionPrimaryMetrics.MEAN_AVERAGE_PRECISION
+
+    def __init__(
+        self,
+        *,
+        primary_metric: Optional[Union[str, ObjectDetectionPrimaryMetrics]] = None,
+        **kwargs: Any,
+    ) -> None:
+
+        # Extract any super class init settings
+        limits = kwargs.pop("limits", None)
+        sweep = kwargs.pop("sweep", None)
+        training_parameters = kwargs.pop("training_parameters", None)
+        search_space = kwargs.pop("search_space", None)
+
+        super().__init__(
+            task_type=TaskType.IMAGE_OBJECT_DETECTION,
+            limits=limits,
+            sweep=sweep,
+            training_parameters=training_parameters,
+            search_space=search_space,
+            **kwargs,
+        )
+
+        self.primary_metric = primary_metric or ImageObjectDetectionJob._DEFAULT_PRIMARY_METRIC
+
+    @property
+    def primary_metric(self) -> Union[str, ObjectDetectionPrimaryMetrics]:
+        return self._primary_metric
+
+    @primary_metric.setter
+    def primary_metric(self, value: Union[str, ObjectDetectionPrimaryMetrics]) -> None:
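+        # Pipeline data binding expressions (values referencing "parent") are stored as-is;
+        # any other value is normalized to an ObjectDetectionPrimaryMetrics member,
+        # defaulting to MEAN_AVERAGE_PRECISION when None.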
+        if is_data_binding_expression(str(value), ["parent"]):
+            self._primary_metric = value
+            return
+        self._primary_metric = (
+            ImageObjectDetectionJob._DEFAULT_PRIMARY_METRIC
+            if value is None
+            else ObjectDetectionPrimaryMetrics[camel_to_snake(value).upper()]
+        )
+
+    def _to_rest_object(self) -> JobBase:
+        image_object_detection_task = RestImageObjectDetection(
+            target_column_name=self.target_column_name,
+            training_data=self.training_data,
+            validation_data=self.validation_data,
+            validation_data_size=self.validation_data_size,
+            limit_settings=self._limits._to_rest_object() if self._limits else None,
+            sweep_settings=self._sweep._to_rest_object() if self._sweep else None,
+            model_settings=self._training_parameters._to_rest_object() if self._training_parameters else None,
+            search_space=(
+                [entry._to_rest_object() for entry in self._search_space if entry is not None]
+                if self._search_space is not None
+                else None
+            ),
+            primary_metric=self.primary_metric,
+            log_verbosity=self.log_verbosity,
+        )
+        # resolve data inputs in rest object
+        self._resolve_data_inputs(image_object_detection_task)
+
+        properties = RestAutoMLJob(
+            display_name=self.display_name,
+            description=self.description,
+            experiment_name=self.experiment_name,
+            tags=self.tags,
+            compute_id=self.compute,
+            properties=self.properties,
+            environment_id=self.environment_id,
+            environment_variables=self.environment_variables,
+            services=self.services,
+            outputs=to_rest_data_outputs(self.outputs),
+            resources=self.resources,
+            task_details=image_object_detection_task,
+            identity=self.identity._to_job_rest_object() if self.identity else None,
+            queue_settings=self.queue_settings,
+        )
+
+        result = JobBase(properties=properties)
+        result.name = self.name
+        return result
+
+    @classmethod
+    def _from_rest_object(cls, obj: JobBase) -> "ImageObjectDetectionJob":
+        properties: RestAutoMLJob = obj.properties
+        task_details: RestImageObjectDetection = properties.task_details
+
+        job_args_dict = {
+            "id": obj.id,
+            "name": obj.name,
+            "description": properties.description,
+            "tags": properties.tags,
+            "properties": properties.properties,
+            "experiment_name": properties.experiment_name,
+            "services": properties.services,
+            "status": properties.status,
+            "creation_context": obj.system_data,
+            "display_name": properties.display_name,
+            "compute": properties.compute_id,
+            "outputs": from_rest_data_outputs(properties.outputs),
+            "resources": properties.resources,
+            "identity": (
+                _BaseJobIdentityConfiguration._from_rest_object(properties.identity) if properties.identity else None
+            ),
+            "queue_settings": properties.queue_settings,
+        }
+
+        image_object_detection_job = cls(
+            target_column_name=task_details.target_column_name,
+            training_data=task_details.training_data,
+            validation_data=task_details.validation_data,
+            validation_data_size=task_details.validation_data_size,
+            limits=(
+                ImageLimitSettings._from_rest_object(task_details.limit_settings)
+                if task_details.limit_settings
+                else None
+            ),
+            sweep=(
+                ImageSweepSettings._from_rest_object(task_details.sweep_settings)
+                if task_details.sweep_settings
+                else None
+            ),
+            training_parameters=(
+                ImageModelSettingsObjectDetection._from_rest_object(task_details.model_settings)
+                if task_details.model_settings
+                else None
+            ),
+            search_space=cls._get_search_space_from_str(task_details.search_space),
+            primary_metric=task_details.primary_metric,
+            log_verbosity=task_details.log_verbosity,
+            **job_args_dict,
+        )
+
+        image_object_detection_job._restore_data_inputs()
+
+        return image_object_detection_job
+
+    @classmethod
+    def _load_from_dict(
+        cls,
+        data: Dict,
+        context: Dict,
+        additional_message: str,
+        **kwargs: Any,
+    ) -> "ImageObjectDetectionJob":
+        from azure.ai.ml._schema.automl.image_vertical.image_object_detection import ImageObjectDetectionSchema
+        from azure.ai.ml._schema.pipeline.automl_node import ImageObjectDetectionNodeSchema
+
+        if kwargs.pop("inside_pipeline", False):
+            if context.get("inside_pipeline", None) is None:
+                context["inside_pipeline"] = True
+            loaded_data = load_from_dict(
+                ImageObjectDetectionNodeSchema,
+                data,
+                context,
+                additional_message,
+                **kwargs,
+            )
+        else:
+            loaded_data = load_from_dict(ImageObjectDetectionSchema, data, context, additional_message, **kwargs)
+        job_instance = cls._create_instance_from_schema_dict(loaded_data)
+        return job_instance
+
+    @classmethod
+    def _create_instance_from_schema_dict(cls, loaded_data: Dict) -> "ImageObjectDetectionJob":
+        loaded_data.pop(AutoMLConstants.TASK_TYPE_YAML, None)
+        data_settings = {
+            "training_data": loaded_data.pop("training_data"),
+            "target_column_name": loaded_data.pop("target_column_name"),
+            "validation_data": loaded_data.pop("validation_data", None),
+            "validation_data_size": loaded_data.pop("validation_data_size", None),
+        }
+        job = ImageObjectDetectionJob(**loaded_data)
+        job.set_data(**data_settings)
+        return job
+
+    def _to_dict(self, inside_pipeline: bool = False) -> Dict:
+        from azure.ai.ml._schema.automl.image_vertical.image_object_detection import ImageObjectDetectionSchema
+        from azure.ai.ml._schema.pipeline.automl_node import ImageObjectDetectionNodeSchema
+
+        schema_dict: dict = {}
+        if inside_pipeline:
+            schema_dict = ImageObjectDetectionNodeSchema(
+                context={BASE_PATH_CONTEXT_KEY: "./", "inside_pipeline": True}
+            ).dump(self)
+        else:
+            schema_dict = ImageObjectDetectionSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)
+
+        return schema_dict
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, ImageObjectDetectionJob):
+            return NotImplemented
+
+        if not super().__eq__(other):
+            return False
+
+        return self.primary_metric == other.primary_metric
+
+    def __ne__(self, other: object) -> bool:
+        return not self.__eq__(other)
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_object_detection_search_space.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_object_detection_search_space.py
new file mode 100644
index 00000000..a9004d1e
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_object_detection_search_space.py
@@ -0,0 +1,899 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+# pylint: disable=R0902,too-many-locals
+
+from typing import Optional, Union
+
+from azure.ai.ml._restclient.v2023_04_01_preview.models import ImageModelDistributionSettingsObjectDetection
+from azure.ai.ml.entities._job.automl.search_space import SearchSpace
+from azure.ai.ml.entities._job.automl.search_space_utils import _convert_from_rest_object, _convert_to_rest_object
+from azure.ai.ml.entities._mixins import RestTranslatableMixin
+from azure.ai.ml.sweep import (
+    Choice,
+    LogNormal,
+    LogUniform,
+    Normal,
+    QLogNormal,
+    QLogUniform,
+    QNormal,
+    QUniform,
+    Randint,
+    Uniform,
+)
+
+
+class ImageObjectDetectionSearchSpace(RestTranslatableMixin):
+    """Search space for AutoML Image Object Detection and Image Instance Segmentation tasks.
+
+    :param ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
+    :type ams_gradient: bool or ~azure.ai.ml.entities.SweepDistribution
+    :param beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
+        range [0, 1].
+    :type beta1: float or ~azure.ai.ml.entities.SweepDistribution
+    :param beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
+        range [0, 1].
+    :type beta2: float or ~azure.ai.ml.entities.SweepDistribution
+    :param distributed: Whether to use distributed training.
+    :type distributed: bool or ~azure.ai.ml.entities.SweepDistribution
+    :param early_stopping: Enable early stopping logic during training.
+    :type early_stopping: bool or ~azure.ai.ml.entities.SweepDistribution
+    :param early_stopping_delay: Minimum number of epochs or validation evaluations to wait
+        before primary metric improvement is tracked for early stopping. Must be a positive integer.
+    :type early_stopping_delay: int or ~azure.ai.ml.entities.SweepDistribution
+    :param early_stopping_patience: Minimum number of epochs or validation evaluations with no
+        primary metric improvement before the run is stopped. Must be a positive integer.
+    :type early_stopping_patience: int or ~azure.ai.ml.entities.SweepDistribution
+    :param enable_onnx_normalization: Enable normalization when exporting ONNX model.
+    :type enable_onnx_normalization: bool or ~azure.ai.ml.entities.SweepDistribution
+    :param evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
+        Must be a positive integer.
+    :type evaluation_frequency: int or ~azure.ai.ml.entities.SweepDistribution
+    :param gradient_accumulation_step: Gradient accumulation means running a configured number of
+        "GradAccumulationStep" steps without updating the model weights while accumulating the gradients of those steps,
+        and then using the accumulated gradients to compute the weight updates. Must be a positive integer.
+    :type gradient_accumulation_step: int or ~azure.ai.ml.entities.SweepDistribution
+    :param layers_to_freeze: Number of layers to freeze for the model. Must be a positive
+        integer. For instance, passing 2 as value for 'seresnext' means freezing layer0 and layer1.
+        For a full list of models supported and details on layer freeze, please
+        see: https://learn.microsoft.com/azure/machine-learning/reference-automl-images-hyperparameters#model-agnostic-hyperparameters.    # pylint: disable=line-too-long
+    :type layers_to_freeze: int or ~azure.ai.ml.entities.SweepDistribution
+    :param learning_rate: Initial learning rate. Must be a float in the range [0, 1].
+    :type learning_rate: float or ~azure.ai.ml.entities.SweepDistribution
+    :param learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
+        'step'.
+    :type learning_rate_scheduler: str or ~azure.ai.ml.entities.SweepDistribution
+    :param model_name: Name of the model to use for training.
+        For more information on the available models please visit the official documentation:
+        https://learn.microsoft.com/azure/machine-learning/how-to-auto-train-image-models.
+    :type model_name: str or ~azure.ai.ml.entities.SweepDistribution
+    :param momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
+        1].
+    :type momentum: float or ~azure.ai.ml.entities.SweepDistribution
+    :param nesterov: Enable nesterov when optimizer is 'sgd'.
+    :type nesterov: bool or ~azure.ai.ml.entities.SweepDistribution
+    :param number_of_epochs: Number of training epochs. Must be a positive integer.
+    :type number_of_epochs: int or ~azure.ai.ml.entities.SweepDistribution
+    :param number_of_workers: Number of data loader workers. Must be a non-negative integer.
+    :type number_of_workers: int or ~azure.ai.ml.entities.SweepDistribution
+    :param optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
+    :type optimizer: str or ~azure.ai.ml.entities.SweepDistribution
+    :param random_seed: Random seed to be used when using deterministic training.
+    :type random_seed: int or ~azure.ai.ml.entities.SweepDistribution
+    :param step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
+        in the range [0, 1].
+    :type step_lr_gamma: float or ~azure.ai.ml.entities.SweepDistribution
+    :param step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
+        a positive integer.
+    :type step_lr_step_size: int or ~azure.ai.ml.entities.SweepDistribution
+    :param training_batch_size: Training batch size. Must be a positive integer.
+    :type training_batch_size: int or ~azure.ai.ml.entities.SweepDistribution
+    :param validation_batch_size: Validation batch size. Must be a positive integer.
+    :type validation_batch_size: int or ~azure.ai.ml.entities.SweepDistribution
+    :param warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
+        'warmup_cosine'. Must be a float in the range [0, 1].
+    :type warmup_cosine_lr_cycles: float or ~azure.ai.ml.entities.SweepDistribution
+    :param warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
+        'warmup_cosine'. Must be a positive integer.
+    :type warmup_cosine_lr_warmup_epochs: int or ~azure.ai.ml.entities.SweepDistribution
+    :param weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
+        be a float in the range [0, 1].
+    :type weight_decay: float or ~azure.ai.ml.entities.SweepDistribution
+    :param box_detections_per_image: Maximum number of detections per image, for all classes.
+        Must be a positive integer. Note: This setting is not supported for the 'yolov5' algorithm.
+    :type box_detections_per_image: int or ~azure.ai.ml.entities.SweepDistribution
+    :param box_score_threshold: During inference, only return proposals with a classification
+        score greater than BoxScoreThreshold. Must be a float in the range [0, 1].
+    :type box_score_threshold: float or ~azure.ai.ml.entities.SweepDistribution
+    :param image_size: Image size for train and validation. Must be a positive integer.
+        Note: The training run may get into CUDA OOM if the size is too big.
+        Note: This setting is only supported for the 'yolov5' algorithm.
+    :type image_size: int or ~azure.ai.ml.entities.SweepDistribution
+    :param max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
+        Must be a positive integer. Note: The training run may get into CUDA OOM if the size is too big.
+        Note: This setting is not supported for the 'yolov5' algorithm.
+    :type max_size: int or ~azure.ai.ml.entities.SweepDistribution
+    :param min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
+        Must be a positive integer. Note: The training run may get into CUDA OOM if the size is too big.
+        Note: This setting is not supported for the 'yolov5' algorithm.
+    :type min_size: int or ~azure.ai.ml.entities.SweepDistribution
+    :param model_size: Model size. Must be 'small', 'medium', 'large', or 'extra_large'.
+        Note: The training run may get into CUDA OOM if the model size is too big.
+        Note: This setting is only supported for the 'yolov5' algorithm.
+    :type model_size: str or ~azure.ai.ml.entities.SweepDistribution
+    :param multi_scale: Enable multi-scale image by varying image size by +/- 50%.
+        Note: The training run may get into CUDA OOM if there is insufficient GPU memory.
+        Note: This setting is only supported for the 'yolov5' algorithm.
+    :type multi_scale: bool or ~azure.ai.ml.entities.SweepDistribution
+    :param nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
+        a float in the range [0, 1].
+    :type nms_iou_threshold: float or ~azure.ai.ml.entities.SweepDistribution
+    :param tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must
+        not be None to enable small object detection logic. A string containing two integers in mxn format.
+    :type tile_grid_size: str or ~azure.ai.ml.entities.SweepDistribution
+    :param tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be
+        a float in the range [0, 1).
+    :type tile_overlap_ratio: float or ~azure.ai.ml.entities.SweepDistribution
+    :param tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
+        predictions from tiles and image. Used in validation/inference. Must be a float in the range [0, 1].
+        NMS: Non-maximum suppression.
+    :type tile_predictions_nms_threshold: float or ~azure.ai.ml.entities.SweepDistribution
+    :param validation_iou_threshold: IOU threshold to use when computing validation metric. Must
+        be a float in the range [0, 1].
+    :type validation_iou_threshold: float or ~azure.ai.ml.entities.SweepDistribution
+    :param validation_metric_type: Metric computation method to use for validation metrics. Must
+        be 'none', 'coco', 'voc', or 'coco_voc'.
+    :type validation_metric_type: str or ~azure.ai.ml.entities.SweepDistribution
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/ml_samples_automl_image.py
+            :start-after: [START automl.automl_image_job.image_object_detection_search_space]
+            :end-before: [END automl.automl_image_job.image_object_detection_search_space]
+            :language: python
+            :dedent: 8
+            :caption: Defining an automl image object detection or instance segmentation search space
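+
+    A minimal sketch using sweep distributions; the model names and ranges below are illustrative
+    rather than recommended values:
+
+    .. code-block:: python
+
+        from azure.ai.ml.sweep import Choice, Uniform
+
+        # Sweep over the learning rate and a couple of model choices; any constructor
+        # parameter documented above accepts either a fixed value or a distribution.
+        search_space = ImageObjectDetectionSearchSpace(
+            model_name=Choice(["yolov5", "fasterrcnn_resnet50_fpn"]),
+            learning_rate=Uniform(0.0001, 0.01),
+            number_of_epochs=Choice([15, 30]),
+        )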
+    """
+
+    def __init__(
+        self,
+        *,
+        ams_gradient: Optional[
+            Union[
+                bool,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        beta1: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        beta2: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        distributed: Optional[
+            Union[
+                bool,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        early_stopping: Optional[
+            Union[
+                bool,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        early_stopping_delay: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        early_stopping_patience: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        enable_onnx_normalization: Optional[
+            Union[
+                bool,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        evaluation_frequency: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        gradient_accumulation_step: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        layers_to_freeze: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        learning_rate: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        learning_rate_scheduler: Optional[
+            Union[
+                str, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        model_name: Optional[
+            Union[
+                str, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        momentum: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        nesterov: Optional[
+            Union[
+                bool,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        number_of_epochs: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        number_of_workers: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        optimizer: Optional[
+            Union[
+                str, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        random_seed: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        step_lr_gamma: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        step_lr_step_size: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        training_batch_size: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        validation_batch_size: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        warmup_cosine_lr_cycles: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        warmup_cosine_lr_warmup_epochs: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        weight_decay: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        box_detections_per_image: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        box_score_threshold: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        image_size: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        max_size: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        min_size: Optional[
+            Union[
+                int, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        model_size: Optional[
+            Union[
+                str, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        multi_scale: Optional[
+            Union[
+                bool,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        nms_iou_threshold: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        tile_grid_size: Optional[
+            Union[
+                str, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+        tile_overlap_ratio: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        tile_predictions_nms_threshold: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        validation_iou_threshold: Optional[
+            Union[
+                float,
+                Choice,
+                LogNormal,
+                LogUniform,
+                Normal,
+                QLogNormal,
+                QLogUniform,
+                QNormal,
+                QUniform,
+                Randint,
+                Uniform,
+            ]
+        ] = None,
+        validation_metric_type: Optional[
+            Union[
+                str, Choice, LogNormal, LogUniform, Normal, QLogNormal, QLogUniform, QNormal, QUniform, Randint, Uniform
+            ]
+        ] = None,
+    ) -> None:
+        self.ams_gradient = ams_gradient
+        self.beta1 = beta1
+        self.beta2 = beta2
+        self.distributed = distributed
+        self.early_stopping = early_stopping
+        self.early_stopping_delay = early_stopping_delay
+        self.early_stopping_patience = early_stopping_patience
+        self.enable_onnx_normalization = enable_onnx_normalization
+        self.evaluation_frequency = evaluation_frequency
+        self.gradient_accumulation_step = gradient_accumulation_step
+        self.layers_to_freeze = layers_to_freeze
+        self.learning_rate = learning_rate
+        self.learning_rate_scheduler = learning_rate_scheduler
+        self.model_name = model_name
+        self.momentum = momentum
+        self.nesterov = nesterov
+        self.number_of_epochs = number_of_epochs
+        self.number_of_workers = number_of_workers
+        self.optimizer = optimizer
+        self.random_seed = random_seed
+        self.step_lr_gamma = step_lr_gamma
+        self.step_lr_step_size = step_lr_step_size
+        self.training_batch_size = training_batch_size
+        self.validation_batch_size = validation_batch_size
+        self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
+        self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
+        self.weight_decay = weight_decay
+        self.box_detections_per_image = box_detections_per_image
+        self.box_score_threshold = box_score_threshold
+        self.image_size = image_size
+        self.max_size = max_size
+        self.min_size = min_size
+        self.model_size = model_size
+        self.multi_scale = multi_scale
+        self.nms_iou_threshold = nms_iou_threshold
+        self.tile_grid_size = tile_grid_size
+        self.tile_overlap_ratio = tile_overlap_ratio
+        self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
+        self.validation_iou_threshold = validation_iou_threshold
+        self.validation_metric_type = validation_metric_type
+
+    def _to_rest_object(self) -> ImageModelDistributionSettingsObjectDetection:
+        return ImageModelDistributionSettingsObjectDetection(
+            ams_gradient=_convert_to_rest_object(self.ams_gradient) if self.ams_gradient is not None else None,
+            beta1=_convert_to_rest_object(self.beta1) if self.beta1 is not None else None,
+            beta2=_convert_to_rest_object(self.beta2) if self.beta2 is not None else None,
+            distributed=_convert_to_rest_object(self.distributed) if self.distributed is not None else None,
+            early_stopping=_convert_to_rest_object(self.early_stopping) if self.early_stopping is not None else None,
+            early_stopping_delay=(
+                _convert_to_rest_object(self.early_stopping_delay) if self.early_stopping_delay is not None else None
+            ),
+            early_stopping_patience=(
+                _convert_to_rest_object(self.early_stopping_patience)
+                if self.early_stopping_patience is not None
+                else None
+            ),
+            enable_onnx_normalization=(
+                _convert_to_rest_object(self.enable_onnx_normalization)
+                if self.enable_onnx_normalization is not None
+                else None
+            ),
+            evaluation_frequency=(
+                _convert_to_rest_object(self.evaluation_frequency) if self.evaluation_frequency is not None else None
+            ),
+            gradient_accumulation_step=(
+                _convert_to_rest_object(self.gradient_accumulation_step)
+                if self.gradient_accumulation_step is not None
+                else None
+            ),
+            layers_to_freeze=(
+                _convert_to_rest_object(self.layers_to_freeze) if self.layers_to_freeze is not None else None
+            ),
+            learning_rate=_convert_to_rest_object(self.learning_rate) if self.learning_rate is not None else None,
+            learning_rate_scheduler=(
+                _convert_to_rest_object(self.learning_rate_scheduler)
+                if self.learning_rate_scheduler is not None
+                else None
+            ),
+            model_name=_convert_to_rest_object(self.model_name) if self.model_name is not None else None,
+            momentum=_convert_to_rest_object(self.momentum) if self.momentum is not None else None,
+            nesterov=_convert_to_rest_object(self.nesterov) if self.nesterov is not None else None,
+            number_of_epochs=(
+                _convert_to_rest_object(self.number_of_epochs) if self.number_of_epochs is not None else None
+            ),
+            number_of_workers=(
+                _convert_to_rest_object(self.number_of_workers) if self.number_of_workers is not None else None
+            ),
+            optimizer=_convert_to_rest_object(self.optimizer) if self.optimizer is not None else None,
+            random_seed=_convert_to_rest_object(self.random_seed) if self.random_seed is not None else None,
+            step_lr_gamma=_convert_to_rest_object(self.step_lr_gamma) if self.step_lr_gamma is not None else None,
+            step_lr_step_size=(
+                _convert_to_rest_object(self.step_lr_step_size) if self.step_lr_step_size is not None else None
+            ),
+            training_batch_size=(
+                _convert_to_rest_object(self.training_batch_size) if self.training_batch_size is not None else None
+            ),
+            validation_batch_size=(
+                _convert_to_rest_object(self.validation_batch_size) if self.validation_batch_size is not None else None
+            ),
+            warmup_cosine_lr_cycles=(
+                _convert_to_rest_object(self.warmup_cosine_lr_cycles)
+                if self.warmup_cosine_lr_cycles is not None
+                else None
+            ),
+            warmup_cosine_lr_warmup_epochs=(
+                _convert_to_rest_object(self.warmup_cosine_lr_warmup_epochs)
+                if self.warmup_cosine_lr_warmup_epochs is not None
+                else None
+            ),
+            weight_decay=_convert_to_rest_object(self.weight_decay) if self.weight_decay is not None else None,
+            box_detections_per_image=(
+                _convert_to_rest_object(self.box_detections_per_image)
+                if self.box_detections_per_image is not None
+                else None
+            ),
+            box_score_threshold=(
+                _convert_to_rest_object(self.box_score_threshold) if self.box_score_threshold is not None else None
+            ),
+            image_size=_convert_to_rest_object(self.image_size) if self.image_size is not None else None,
+            max_size=_convert_to_rest_object(self.max_size) if self.max_size is not None else None,
+            min_size=_convert_to_rest_object(self.min_size) if self.min_size is not None else None,
+            model_size=_convert_to_rest_object(self.model_size) if self.model_size is not None else None,
+            multi_scale=_convert_to_rest_object(self.multi_scale) if self.multi_scale is not None else None,
+            nms_iou_threshold=(
+                _convert_to_rest_object(self.nms_iou_threshold) if self.nms_iou_threshold is not None else None
+            ),
+            tile_grid_size=_convert_to_rest_object(self.tile_grid_size) if self.tile_grid_size is not None else None,
+            tile_overlap_ratio=(
+                _convert_to_rest_object(self.tile_overlap_ratio) if self.tile_overlap_ratio is not None else None
+            ),
+            tile_predictions_nms_threshold=(
+                _convert_to_rest_object(self.tile_predictions_nms_threshold)
+                if self.tile_predictions_nms_threshold is not None
+                else None
+            ),
+            validation_iou_threshold=(
+                _convert_to_rest_object(self.validation_iou_threshold)
+                if self.validation_iou_threshold is not None
+                else None
+            ),
+            validation_metric_type=(
+                _convert_to_rest_object(self.validation_metric_type)
+                if self.validation_metric_type is not None
+                else None
+            ),
+        )
+
+    @classmethod
+    def _from_rest_object(cls, obj: ImageModelDistributionSettingsObjectDetection) -> "ImageObjectDetectionSearchSpace":
+        return cls(
+            ams_gradient=_convert_from_rest_object(obj.ams_gradient) if obj.ams_gradient is not None else None,
+            beta1=_convert_from_rest_object(obj.beta1) if obj.beta1 is not None else None,
+            beta2=_convert_from_rest_object(obj.beta2) if obj.beta2 is not None else None,
+            distributed=_convert_from_rest_object(obj.distributed) if obj.distributed is not None else None,
+            early_stopping=_convert_from_rest_object(obj.early_stopping) if obj.early_stopping is not None else None,
+            early_stopping_delay=(
+                _convert_from_rest_object(obj.early_stopping_delay) if obj.early_stopping_delay is not None else None
+            ),
+            early_stopping_patience=(
+                _convert_from_rest_object(obj.early_stopping_patience)
+                if obj.early_stopping_patience is not None
+                else None
+            ),
+            enable_onnx_normalization=(
+                _convert_from_rest_object(obj.enable_onnx_normalization)
+                if obj.enable_onnx_normalization is not None
+                else None
+            ),
+            evaluation_frequency=(
+                _convert_from_rest_object(obj.evaluation_frequency) if obj.evaluation_frequency is not None else None
+            ),
+            gradient_accumulation_step=(
+                _convert_from_rest_object(obj.gradient_accumulation_step)
+                if obj.gradient_accumulation_step is not None
+                else None
+            ),
+            layers_to_freeze=(
+                _convert_from_rest_object(obj.layers_to_freeze) if obj.layers_to_freeze is not None else None
+            ),
+            learning_rate=_convert_from_rest_object(obj.learning_rate) if obj.learning_rate is not None else None,
+            learning_rate_scheduler=(
+                _convert_from_rest_object(obj.learning_rate_scheduler)
+                if obj.learning_rate_scheduler is not None
+                else None
+            ),
+            model_name=_convert_from_rest_object(obj.model_name) if obj.model_name is not None else None,
+            momentum=_convert_from_rest_object(obj.momentum) if obj.momentum is not None else None,
+            nesterov=_convert_from_rest_object(obj.nesterov) if obj.nesterov is not None else None,
+            number_of_epochs=(
+                _convert_from_rest_object(obj.number_of_epochs) if obj.number_of_epochs is not None else None
+            ),
+            number_of_workers=(
+                _convert_from_rest_object(obj.number_of_workers) if obj.number_of_workers is not None else None
+            ),
+            optimizer=_convert_from_rest_object(obj.optimizer) if obj.optimizer is not None else None,
+            random_seed=_convert_from_rest_object(obj.random_seed) if obj.random_seed is not None else None,
+            step_lr_gamma=_convert_from_rest_object(obj.step_lr_gamma) if obj.step_lr_gamma is not None else None,
+            step_lr_step_size=(
+                _convert_from_rest_object(obj.step_lr_step_size) if obj.step_lr_step_size is not None else None
+            ),
+            training_batch_size=(
+                _convert_from_rest_object(obj.training_batch_size) if obj.training_batch_size is not None else None
+            ),
+            validation_batch_size=(
+                _convert_from_rest_object(obj.validation_batch_size) if obj.validation_batch_size is not None else None
+            ),
+            warmup_cosine_lr_cycles=(
+                _convert_from_rest_object(obj.warmup_cosine_lr_cycles)
+                if obj.warmup_cosine_lr_cycles is not None
+                else None
+            ),
+            warmup_cosine_lr_warmup_epochs=(
+                _convert_from_rest_object(obj.warmup_cosine_lr_warmup_epochs)
+                if obj.warmup_cosine_lr_warmup_epochs is not None
+                else None
+            ),
+            weight_decay=_convert_from_rest_object(obj.weight_decay) if obj.weight_decay is not None else None,
+            box_detections_per_image=(
+                _convert_from_rest_object(obj.box_detections_per_image)
+                if obj.box_detections_per_image is not None
+                else None
+            ),
+            box_score_threshold=(
+                _convert_from_rest_object(obj.box_score_threshold) if obj.box_score_threshold is not None else None
+            ),
+            image_size=_convert_from_rest_object(obj.image_size) if obj.image_size is not None else None,
+            max_size=_convert_from_rest_object(obj.max_size) if obj.max_size is not None else None,
+            min_size=_convert_from_rest_object(obj.min_size) if obj.min_size is not None else None,
+            model_size=_convert_from_rest_object(obj.model_size) if obj.model_size is not None else None,
+            multi_scale=_convert_from_rest_object(obj.multi_scale) if obj.multi_scale is not None else None,
+            nms_iou_threshold=(
+                _convert_from_rest_object(obj.nms_iou_threshold) if obj.nms_iou_threshold is not None else None
+            ),
+            tile_grid_size=_convert_from_rest_object(obj.tile_grid_size) if obj.tile_grid_size is not None else None,
+            tile_overlap_ratio=(
+                _convert_from_rest_object(obj.tile_overlap_ratio) if obj.tile_overlap_ratio is not None else None
+            ),
+            tile_predictions_nms_threshold=(
+                _convert_from_rest_object(obj.tile_predictions_nms_threshold)
+                if obj.tile_predictions_nms_threshold is not None
+                else None
+            ),
+            validation_iou_threshold=(
+                _convert_from_rest_object(obj.validation_iou_threshold)
+                if obj.validation_iou_threshold is not None
+                else None
+            ),
+            validation_metric_type=(
+                _convert_from_rest_object(obj.validation_metric_type)
+                if obj.validation_metric_type is not None
+                else None
+            ),
+        )
+
+    @classmethod
+    def _from_search_space_object(cls, obj: SearchSpace) -> "ImageObjectDetectionSearchSpace":
+        return cls(
+            ams_gradient=obj.ams_gradient if hasattr(obj, "ams_gradient") else None,
+            beta1=obj.beta1 if hasattr(obj, "beta1") else None,
+            beta2=obj.beta2 if hasattr(obj, "beta2") else None,
+            distributed=obj.distributed if hasattr(obj, "distributed") else None,
+            early_stopping=obj.early_stopping if hasattr(obj, "early_stopping") else None,
+            early_stopping_delay=obj.early_stopping_delay if hasattr(obj, "early_stopping_delay") else None,
+            early_stopping_patience=obj.early_stopping_patience if hasattr(obj, "early_stopping_patience") else None,
+            enable_onnx_normalization=(
+                obj.enable_onnx_normalization if hasattr(obj, "enable_onnx_normalization") else None
+            ),
+            evaluation_frequency=obj.evaluation_frequency if hasattr(obj, "evaluation_frequency") else None,
+            gradient_accumulation_step=(
+                obj.gradient_accumulation_step if hasattr(obj, "gradient_accumulation_step") else None
+            ),
+            layers_to_freeze=obj.layers_to_freeze if hasattr(obj, "layers_to_freeze") else None,
+            learning_rate=obj.learning_rate if hasattr(obj, "learning_rate") else None,
+            learning_rate_scheduler=obj.learning_rate_scheduler if hasattr(obj, "learning_rate_scheduler") else None,
+            model_name=obj.model_name if hasattr(obj, "model_name") else None,
+            momentum=obj.momentum if hasattr(obj, "momentum") else None,
+            nesterov=obj.nesterov if hasattr(obj, "nesterov") else None,
+            number_of_epochs=obj.number_of_epochs if hasattr(obj, "number_of_epochs") else None,
+            number_of_workers=obj.number_of_workers if hasattr(obj, "number_of_workers") else None,
+            optimizer=obj.optimizer if hasattr(obj, "optimizer") else None,
+            random_seed=obj.random_seed if hasattr(obj, "random_seed") else None,
+            step_lr_gamma=obj.step_lr_gamma if hasattr(obj, "step_lr_gamma") else None,
+            step_lr_step_size=obj.step_lr_step_size if hasattr(obj, "step_lr_step_size") else None,
+            training_batch_size=obj.training_batch_size if hasattr(obj, "training_batch_size") else None,
+            validation_batch_size=obj.validation_batch_size if hasattr(obj, "validation_batch_size") else None,
+            warmup_cosine_lr_cycles=obj.warmup_cosine_lr_cycles if hasattr(obj, "warmup_cosine_lr_cycles") else None,
+            warmup_cosine_lr_warmup_epochs=(
+                obj.warmup_cosine_lr_warmup_epochs if hasattr(obj, "warmup_cosine_lr_warmup_epochs") else None
+            ),
+            weight_decay=obj.weight_decay if hasattr(obj, "weight_decay") else None,
+            box_detections_per_image=obj.box_detections_per_image if hasattr(obj, "box_detections_per_image") else None,
+            box_score_threshold=obj.box_score_threshold if hasattr(obj, "box_score_threshold") else None,
+            image_size=obj.image_size if hasattr(obj, "image_size") else None,
+            max_size=obj.max_size if hasattr(obj, "max_size") else None,
+            min_size=obj.min_size if hasattr(obj, "min_size") else None,
+            model_size=obj.model_size if hasattr(obj, "model_size") else None,
+            multi_scale=obj.multi_scale if hasattr(obj, "multi_scale") else None,
+            nms_iou_threshold=obj.nms_iou_threshold if hasattr(obj, "nms_iou_threshold") else None,
+            tile_grid_size=obj.tile_grid_size if hasattr(obj, "tile_grid_size") else None,
+            tile_overlap_ratio=obj.tile_overlap_ratio if hasattr(obj, "tile_overlap_ratio") else None,
+            tile_predictions_nms_threshold=(
+                obj.tile_predictions_nms_threshold if hasattr(obj, "tile_predictions_nms_threshold") else None
+            ),
+            validation_iou_threshold=obj.validation_iou_threshold if hasattr(obj, "validation_iou_threshold") else None,
+            validation_metric_type=obj.validation_metric_type if hasattr(obj, "validation_metric_type") else None,
+        )
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, ImageObjectDetectionSearchSpace):
+            return NotImplemented
+
+        return (
+            self.ams_gradient == other.ams_gradient
+            and self.beta1 == other.beta1
+            and self.beta2 == other.beta2
+            and self.distributed == other.distributed
+            and self.early_stopping == other.early_stopping
+            and self.early_stopping_delay == other.early_stopping_delay
+            and self.early_stopping_patience == other.early_stopping_patience
+            and self.enable_onnx_normalization == other.enable_onnx_normalization
+            and self.evaluation_frequency == other.evaluation_frequency
+            and self.gradient_accumulation_step == other.gradient_accumulation_step
+            and self.layers_to_freeze == other.layers_to_freeze
+            and self.learning_rate == other.learning_rate
+            and self.learning_rate_scheduler == other.learning_rate_scheduler
+            and self.model_name == other.model_name
+            and self.momentum == other.momentum
+            and self.nesterov == other.nesterov
+            and self.number_of_epochs == other.number_of_epochs
+            and self.number_of_workers == other.number_of_workers
+            and self.optimizer == other.optimizer
+            and self.random_seed == other.random_seed
+            and self.step_lr_gamma == other.step_lr_gamma
+            and self.step_lr_step_size == other.step_lr_step_size
+            and self.training_batch_size == other.training_batch_size
+            and self.validation_batch_size == other.validation_batch_size
+            and self.warmup_cosine_lr_cycles == other.warmup_cosine_lr_cycles
+            and self.warmup_cosine_lr_warmup_epochs == other.warmup_cosine_lr_warmup_epochs
+            and self.weight_decay == other.weight_decay
+            and self.box_detections_per_image == other.box_detections_per_image
+            and self.box_score_threshold == other.box_score_threshold
+            and self.image_size == other.image_size
+            and self.max_size == other.max_size
+            and self.min_size == other.min_size
+            and self.model_size == other.model_size
+            and self.multi_scale == other.multi_scale
+            and self.nms_iou_threshold == other.nms_iou_threshold
+            and self.tile_grid_size == other.tile_grid_size
+            and self.tile_overlap_ratio == other.tile_overlap_ratio
+            and self.tile_predictions_nms_threshold == other.tile_predictions_nms_threshold
+            and self.validation_iou_threshold == other.validation_iou_threshold
+            and self.validation_metric_type == other.validation_metric_type
+        )
+
+    def __ne__(self, other: object) -> bool:
+        return not self.__eq__(other)
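+
+# Illustrative sketch (editorial example, not part of this module): the public workflow
+# assumed here is that users build a free-form SearchSpace with sweep distributions and
+# attach it to an AutoML image object detection job; the SDK then maps it onto this typed
+# class through ImageObjectDetectionSearchSpace._from_search_space_object above. The
+# automl.image_object_detection factory, Input, SearchSpace, and extend_search_space are
+# taken from the public azure.ai.ml API; paths and values below are placeholders.
+#
+#     from azure.ai.ml import automl, Input
+#     from azure.ai.ml.automl import SearchSpace
+#     from azure.ai.ml.sweep import Choice, Uniform
+#
+#     job = automl.image_object_detection(
+#         training_data=Input(type="mltable", path="./train"),
+#         validation_data=Input(type="mltable", path="./valid"),
+#         target_column_name="label",
+#     )
+#     job.extend_search_space(
+#         SearchSpace(
+#             model_name=Choice(["yolov5", "fasterrcnn_resnet50_fpn"]),
+#             learning_rate=Uniform(0.0001, 0.01),
+#             number_of_epochs=Choice([15, 30]),
+#         )
+#     )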
diff --git a/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_sweep_settings.py b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_sweep_settings.py
new file mode 100644
index 00000000..b5e9ffaf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/ai/ml/entities/_job/automl/image/image_sweep_settings.py
@@ -0,0 +1,86 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+# pylint: disable=protected-access
+
+from typing import Optional, Union
+
+from azure.ai.ml._restclient.v2023_04_01_preview.models import ImageSweepSettings as RestImageSweepSettings
+from azure.ai.ml._restclient.v2023_04_01_preview.models import SamplingAlgorithmType
+from azure.ai.ml.entities._job.sweep.early_termination_policy import (
+    BanditPolicy,
+    EarlyTerminationPolicy,
+    MedianStoppingPolicy,
+    TruncationSelectionPolicy,
+)
+from azure.ai.ml.entities._mixins import RestTranslatableMixin
+
+
+class ImageSweepSettings(RestTranslatableMixin):
+    """Sweep settings for all AutoML Image Verticals.
+
+    :keyword sampling_algorithm: Required. Type of hyperparameter sampling
+        algorithm. Possible values include: "Grid", "Random", "Bayesian".
+    :paramtype sampling_algorithm: Union[
+        str,
+        ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType.GRID,
+        ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType.BAYESIAN,
+        ~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType.RANDOM
+    ]
+    :keyword early_termination: Type of early termination policy.
+    :paramtype early_termination: Union[
+        ~azure.mgmt.machinelearningservices.models.BanditPolicy,
+        ~azure.mgmt.machinelearningservices.models.MedianStoppingPolicy,
+        ~azure.mgmt.machinelearningservices.models.TruncationSelectionPolicy
+    ]
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/ml_samples_automl_image.py
+            :start-after: [START automl.automl_image_job.image_sweep_settings]
+            :end-before: [END automl.automl_image_job.image_sweep_settings]
+            :language: python
+            :dedent: 8
+            :caption: Defining the sweep settings for an automl image job.
+    """
+
+    def __init__(
+        self,
+        *,
+        sampling_algorithm: Union[str, SamplingAlgorithmType],
+        early_termination: Optional[
+            Union[EarlyTerminationPolicy, BanditPolicy, MedianStoppingPolicy, TruncationSelectionPolicy]
+        ] = None,
+    ):
+        self.sampling_algorithm = sampling_algorithm
+        self.early_termination = early_termination
+
+    def _to_rest_object(self) -> RestImageSweepSettings:
+        return RestImageSweepSettings(
+            sampling_algorithm=self.sampling_algorithm,
+            early_termination=self.early_termination._to_rest_object() if self.early_termination else None,
+        )
+
+    @classmethod
+    def _from_rest_object(cls, obj: RestImageSweepSettings) -> "ImageSweepSettings":
+        return cls(
+            sampling_algorithm=obj.sampling_algorithm,
+            early_termination=(
+                EarlyTerminationPolicy._from_rest_object(obj.early_termination) if obj.early_termination else None
+            ),
+        )
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, ImageSweepSettings):
+            return NotImplemented
+
+        return self.sampling_algorithm == other.sampling_algorithm and self.early_termination == other.early_termination
+
+    def __ne__(self, other: object) -> bool:
+        return not self.__eq__(other)
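+
+# Illustrative sketch (editorial example, not part of this module): an AutoML image job is
+# assumed to expose set_limits/set_sweep from the public azure.ai.ml API; sweep settings
+# pair a sampling algorithm with an optional early-termination policy, mirroring the class
+# above, and only take effect together with a search space and a trial limit.
+#
+#     from azure.ai.ml.sweep import BanditPolicy
+#
+#     job.set_limits(max_trials=10, max_concurrent_trials=2)
+#     job.set_sweep(
+#         sampling_algorithm="Random",
+#         early_termination=BanditPolicy(evaluation_interval=2, slack_factor=0.2),
+#     )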