"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
# @generated-id: 1d1ee09f1913

from __future__ import annotations
from mistralai.types import (
    BaseModel,
    Nullable,
    OptionalNullable,
    UNSET,
    UNSET_SENTINEL,
)
from pydantic import model_serializer
from typing import Any, Dict, List, Optional, Union
from typing_extensions import NotRequired, TypeAliasType, TypedDict


FIMCompletionStreamRequestStopTypedDict = TypeAliasType(
    "FIMCompletionStreamRequestStopTypedDict", Union[str, List[str]]
)
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


FIMCompletionStreamRequestStop = TypeAliasType(
    "FIMCompletionStreamRequestStop", Union[str, List[str]]
)
r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""


class FIMCompletionStreamRequestTypedDict(TypedDict):
    model: str
    r"""ID of the model with FIM to use."""
    prompt: str
    r"""The text/code to complete."""
    temperature: NotRequired[Nullable[float]]
    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""
    top_p: NotRequired[Nullable[float]]
    r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""
    max_tokens: NotRequired[Nullable[int]]
    r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""
    stream: NotRequired[bool]
    stop: NotRequired[Nullable[FIMCompletionStreamRequestStopTypedDict]]
    r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""
    random_seed: NotRequired[Nullable[int]]
    r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""
    metadata: NotRequired[Nullable[Dict[str, Any]]]
    suffix: NotRequired[Nullable[str]]
    r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
    min_tokens: NotRequired[Nullable[int]]
    r"""The minimum number of tokens to generate in the completion."""


class FIMCompletionStreamRequest(BaseModel):
    model: str
    r"""ID of the model with FIM to use."""

    prompt: str
    r"""The text/code to complete."""

    temperature: OptionalNullable[float] = UNSET
    r"""What sampling temperature to use, we recommend between 0.0 and 0.7. Higher values like 0.7 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. The default value varies depending on the model you are targeting. Call the `/models` endpoint to retrieve the appropriate value."""

    top_p: OptionalNullable[float] = UNSET
    r"""Nucleus sampling, where the model considers the results of the tokens with `top_p` probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both."""

    max_tokens: OptionalNullable[int] = UNSET
    r"""The maximum number of tokens to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length."""

    stream: Optional[bool] = True

    stop: OptionalNullable[FIMCompletionStreamRequestStop] = UNSET
    r"""Stop generation if this token is detected. Or if one of these tokens is detected when providing an array"""

    random_seed: OptionalNullable[int] = UNSET
    r"""The seed to use for random sampling. If set, different calls will generate deterministic results."""

    metadata: OptionalNullable[Dict[str, Any]] = UNSET

    suffix: OptionalNullable[str] = UNSET
    r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""

    min_tokens: OptionalNullable[int] = UNSET
    r"""The minimum number of tokens to generate in the completion."""

    @model_serializer(mode="wrap")
    def serialize_model(self, handler):
        # Fields that may be omitted from the payload entirely, and fields
        # that may be serialized as an explicit JSON null.
        optional_fields = {
            "temperature",
            "top_p",
            "max_tokens",
            "stream",
            "stop",
            "random_seed",
            "metadata",
            "suffix",
            "min_tokens",
        }
        nullable_fields = {
            "temperature",
            "top_p",
            "max_tokens",
            "stop",
            "random_seed",
            "metadata",
            "suffix",
            "min_tokens",
        }
        serialized = handler(self)
        m = {}

        for n, f in type(self).model_fields.items():
            k = f.alias or n
            val = serialized.get(k, serialized.get(n))
            # A nullable field should only serialize as an explicit null when
            # the caller set it to None, not when it was left at its default.
            is_nullable_and_explicitly_set = (
                k in nullable_fields
                and (self.__pydantic_fields_set__.intersection({n}))  # pylint: disable=no-member
            )

            # Skip fields left at the UNSET sentinel; keep every other value
            # unless it is an optional field that is None without having been
            # explicitly set.
            if val != UNSET_SENTINEL:
                if (
                    val is not None
                    or k not in optional_fields
                    or is_nullable_and_explicitly_set
                ):
                    m[k] = val

        return m
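

# A minimal usage sketch of the model and its custom serializer (illustrative
# only; "codestral-latest" is an assumed model name and no client call is
# shown):
#
#     req = FIMCompletionStreamRequest(
#         model="codestral-latest",
#         prompt="def fibonacci(n):",
#         suffix="    return result",
#         stop=["\n\n"],
#         random_seed=None,  # explicitly set, so it serializes as null
#     )
#     payload = req.model_dump(by_alias=True)
#     # Fields left at UNSET (e.g. temperature, top_p) are omitted from the
#     # payload, stream keeps its default of True, and random_seed appears
#     # as None because it was set explicitly.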
