Skip to content

openai

Classes

OpenAIClient

OpenAIClient(client: OWUIClientBase)

Bases: ResourceBase

Client for the OpenAI-compatible endpoints.

This resource handles configuration of OpenAI providers, as well as proxying requests for chat completions, embeddings, and speech generation to the configured providers.

Source code in src/owui_client/client_base.py
def __init__(self, client: OWUIClientBase):
    """Initialize the resource with the shared base client.

    Args:
        client: The `OWUIClientBase` instance this resource delegates to.
            NOTE(review): presumably it supplies the `_request`/`_get_url`
            helpers used by the endpoint methods — confirm against
            `ResourceBase`.
    """
    self._client = client

Functions

get_config
get_config() -> dict

Get the current OpenAI API configuration.

Returns:

Name Type Description
dict dict

Configuration object containing ENABLE_OPENAI_API, OPENAI_API_BASE_URLS, OPENAI_API_KEYS, and OPENAI_API_CONFIGS.

Source code in src/owui_client/routers/openai.py
async def get_config(self) -> dict:
    """
    Fetch the OpenAI API configuration currently in effect.

    Returns:
        dict: Configuration object with the `ENABLE_OPENAI_API`,
            `OPENAI_API_BASE_URLS`, `OPENAI_API_KEYS`, and
            `OPENAI_API_CONFIGS` fields.
    """
    # Build the absolute URL explicitly so the client's base_url path is bypassed.
    endpoint = self._get_url("openai/config")
    return await self._request("GET", endpoint)
update_config
update_config(form_data: OpenAIConfigForm) -> dict

Update the OpenAI API configuration.

Parameters:

Name Type Description Default
form_data `OpenAIConfigForm`

The new configuration.

required

Returns:

Name Type Description
dict dict

The updated configuration object.

Source code in src/owui_client/routers/openai.py
async def update_config(self, form_data: OpenAIConfigForm) -> dict:
    """
    Apply a new OpenAI API configuration.

    Args:
        form_data (`OpenAIConfigForm`): The configuration to persist.

    Returns:
        dict: The configuration object after the update.
    """
    body = form_data.model_dump()
    endpoint = self._get_url("openai/config/update")
    return await self._request("POST", endpoint, json=body)
get_models
get_models(url_idx: Optional[int] = None) -> dict

Get available OpenAI models.

Parameters:

Name Type Description Default
url_idx Optional[int]

The index of the specific provider to fetch models from. If None, fetches and merges models from all enabled providers.

None

Returns:

Name Type Description
dict dict

A dictionary containing a data key with a list of model objects.

Source code in src/owui_client/routers/openai.py
async def get_models(self, url_idx: Optional[int] = None) -> dict:
    """
    List the OpenAI models that are available.

    Args:
        url_idx (Optional[int]): Index of a single provider to query.
            When None, models from every enabled provider are fetched
            and merged.

    Returns:
        dict: A dictionary whose `data` key holds a list of model objects.
    """
    suffix = "" if url_idx is None else f"/{url_idx}"
    return await self._request("GET", self._get_url(f"openai/models{suffix}"))
verify_connection
verify_connection(
    form_data: ConnectionVerificationForm,
) -> dict

Verify connectivity to a specific OpenAI-compatible provider.

Parameters:

Name Type Description Default
form_data `ConnectionVerificationForm`

The connection details to verify.

required

Returns:

Name Type Description
dict dict

The response from the provider (typically the models list) if successful.

Source code in src/owui_client/routers/openai.py
async def verify_connection(self, form_data: ConnectionVerificationForm) -> dict:
    """
    Check that a specific OpenAI-compatible provider is reachable.

    Args:
        form_data (`ConnectionVerificationForm`): The connection details to verify.

    Returns:
        dict: The provider's response (typically the models list) on success.
    """
    body = form_data.model_dump()
    endpoint = self._get_url("openai/verify")
    return await self._request("POST", endpoint, json=body)
speech
speech(payload: dict) -> bytes

Generate speech from text (TTS).

Parameters:

Name Type Description Default
payload dict

OpenAI-compatible speech payload (e.g. {"model": "tts-1", "input": "Hello", "voice": "alloy"}).

required

Returns:

Name Type Description
bytes bytes

The audio file content (MP3).

Source code in src/owui_client/routers/openai.py
async def speech(self, payload: dict) -> bytes:
    """
    Synthesize speech from text (TTS).

    Args:
        payload (dict): OpenAI-compatible speech payload
            (e.g. `{"model": "tts-1", "input": "Hello", "voice": "alloy"}`).

    Returns:
        bytes: The generated audio content (MP3).
    """
    endpoint = self._get_url("openai/audio/speech")
    # model=bytes tells the transport layer to hand back raw MP3 bytes
    # instead of parsing JSON.
    return await self._request("POST", endpoint, json=payload, model=bytes)
chat_completions
chat_completions(payload: dict) -> dict

Generate a chat completion.

Proxies the request to the appropriate OpenAI-compatible provider based on the model ID.

Parameters:

Name Type Description Default
payload dict

OpenAI-compatible chat completion payload (e.g. {"model": "gpt-3.5-turbo", "messages": [...]}).

required

Returns:

Name Type Description
dict dict

The chat completion response object.

Source code in src/owui_client/routers/openai.py
async def chat_completions(self, payload: dict) -> dict:
    """
    Generate a chat completion.

    The request is forwarded to the OpenAI-compatible provider that serves
    the model named in the payload.

    Args:
        payload (dict): OpenAI-compatible chat completion payload
            (e.g. `{"model": "gpt-3.5-turbo", "messages": [...]}`).

    Returns:
        dict: The chat completion response object.
    """
    endpoint = self._get_url("openai/chat/completions")
    return await self._request("POST", endpoint, json=payload)
embeddings
embeddings(payload: dict) -> dict

Generate embeddings for the input text.

Parameters:

Name Type Description Default
payload dict

OpenAI-compatible embeddings payload (e.g. {"model": "text-embedding-3-small", "input": "text"}).

required

Returns:

Name Type Description
dict dict

The embeddings response object.

Source code in src/owui_client/routers/openai.py
async def embeddings(self, payload: dict) -> dict:
    """
    Create embeddings for the given input.

    Args:
        payload (dict): OpenAI-compatible embeddings payload
            (e.g. `{"model": "text-embedding-3-small", "input": "text"}`).

    Returns:
        dict: The embeddings response object.
    """
    endpoint = self._get_url("openai/embeddings")
    return await self._request("POST", endpoint, json=payload)
proxy
proxy(
    method: str, path: str, payload: Optional[dict] = None
) -> dict

Deprecated: Proxy arbitrary requests to the first OpenAI provider.

This endpoint is deprecated and may not work as expected with multiple providers.

Parameters:

Name Type Description Default
method str

HTTP method (e.g. "GET", "POST").

required
path str

The path to append to the base URL (e.g. "models").

required
payload Optional[dict]

JSON payload for the request.

None

Returns:

Name Type Description
dict dict

The JSON response from the provider.

Source code in src/owui_client/routers/openai.py
async def proxy(self, method: str, path: str, payload: Optional[dict] = None) -> dict:
    """
    Deprecated: Proxy arbitrary requests to the first OpenAI provider.

    This endpoint is deprecated and may not work as expected with multiple
    providers; prefer the dedicated methods (`chat_completions`,
    `embeddings`, `speech`, ...).

    Args:
        method (str): HTTP method (e.g. "GET", "POST").
        path (str): The path to append to the base URL (e.g. "models").
        payload (Optional[dict]): JSON payload for the request.

    Returns:
        dict: The JSON response from the provider.

    Warns:
        DeprecationWarning: Always, to match the documented deprecation.
    """
    # The docstring declared this deprecated but nothing signalled it at
    # runtime; emit a DeprecationWarning so callers can migrate before the
    # endpoint is removed. stacklevel=2 points the warning at the caller.
    import warnings

    warnings.warn(
        "OpenAIClient.proxy is deprecated; use the dedicated endpoint "
        "methods instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return await self._request(
        method, self._get_url(f"openai/{path}"), json=payload
    )