diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 288087c17b..c5fe8ab6d6 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "2.24.0"
+ ".": "2.25.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 476a5b7658..e639782c85 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 148
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-6bfe886b5ded0fe3bf37ca672698814e16e0836a093ceef65dac37ae44d1ad6b.yml
-openapi_spec_hash: 6b1344a59044318e824c8d1af96033c7
-config_hash: 7f49c38fa3abe9b7038ffe62262c4912
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b879fff3f51e71e4f1ce17f03efc017a46d888a1bfd88eb655a6210a86f02acf.yml
+openapi_spec_hash: cbf649cc2c944fb3f77450ec752ab1e9
+config_hash: 9c56fcc7ff64785b5cd448d9a754b4b3
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 91da3fc859..52381619cc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,26 @@
# Changelog
+## 2.25.0 (2026-03-04)
+
+Full Changelog: [v2.24.0...v2.25.0](https://github.com/openai/openai-python/compare/v2.24.0...v2.25.0)
+
+### Features
+
+* **api:** remove prompt_cache_key param from responses, phase field from message types ([44fb382](https://github.com/openai/openai-python/commit/44fb382698872d98d5f72c880b47846c7b594f4f))
+
+
+### Bug Fixes
+
+* **api:** internal schema fixes ([0c0f970](https://github.com/openai/openai-python/commit/0c0f970cbd164131bf06f7ab38f170bbcb323683))
+* **api:** manual updates ([9fc323f](https://github.com/openai/openai-python/commit/9fc323f4da6cfca9de194e12c1486a3cd1bfa4b5))
+* **api:** readd phase ([1b27b5a](https://github.com/openai/openai-python/commit/1b27b5a834f5cb75f80c597259d0df0352ba83bd))
+
+
+### Chores
+
+* **internal:** codegen related update ([b1de941](https://github.com/openai/openai-python/commit/b1de9419a68fd6fb97a63f415fb3d1e5851582cb))
+* **internal:** reduce warnings ([7cdbd06](https://github.com/openai/openai-python/commit/7cdbd06d3ca41af64d616b4b4bb61226cc38b662))
+
## 2.24.0 (2026-02-24)
Full Changelog: [v2.23.0...v2.24.0](https://github.com/openai/openai-python/compare/v2.23.0...v2.24.0)
diff --git a/api.md b/api.md
index da521d3418..a7981f7185 100644
--- a/api.md
+++ b/api.md
@@ -309,6 +309,7 @@ Types:
from openai.types.fine_tuning.checkpoints import (
PermissionCreateResponse,
PermissionRetrieveResponse,
+ PermissionListResponse,
PermissionDeleteResponse,
)
```
@@ -317,6 +318,7 @@ Methods:
- client.fine_tuning.checkpoints.permissions.create(fine_tuned_model_checkpoint, \*\*params) -> SyncPage[PermissionCreateResponse]
- client.fine_tuning.checkpoints.permissions.retrieve(fine_tuned_model_checkpoint, \*\*params) -> PermissionRetrieveResponse
+- client.fine_tuning.checkpoints.permissions.list(fine_tuned_model_checkpoint, \*\*params) -> SyncConversationCursorPage[PermissionListResponse]
- client.fine_tuning.checkpoints.permissions.delete(permission_id, \*, fine_tuned_model_checkpoint) -> PermissionDeleteResponse
## Alpha
diff --git a/pyproject.toml b/pyproject.toml
index 49ab3668e8..f7aae6cbdb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "openai"
-version = "2.24.0"
+version = "2.25.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/openai/_client.py b/src/openai/_client.py
index 0399bbf742..aadf3601f2 100644
--- a/src/openai/_client.py
+++ b/src/openai/_client.py
@@ -180,6 +180,9 @@ def __init__(
@cached_property
def completions(self) -> Completions:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import Completions
return Completions(self)
@@ -192,18 +195,25 @@ def chat(self) -> Chat:
@cached_property
def embeddings(self) -> Embeddings:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import Embeddings
return Embeddings(self)
@cached_property
def files(self) -> Files:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import Files
return Files(self)
@cached_property
def images(self) -> Images:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import Images
return Images(self)
@@ -216,12 +226,16 @@ def audio(self) -> Audio:
@cached_property
def moderations(self) -> Moderations:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import Moderations
return Moderations(self)
@cached_property
def models(self) -> Models:
+ """List and describe the various models available in the API."""
from .resources.models import Models
return Models(self)
@@ -252,12 +266,14 @@ def beta(self) -> Beta:
@cached_property
def batches(self) -> Batches:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import Batches
return Batches(self)
@cached_property
def uploads(self) -> Uploads:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import Uploads
return Uploads(self)
@@ -276,12 +292,14 @@ def realtime(self) -> Realtime:
@cached_property
def conversations(self) -> Conversations:
+ """Manage conversations and conversation items."""
from .resources.conversations import Conversations
return Conversations(self)
@cached_property
def evals(self) -> Evals:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import Evals
return Evals(self)
@@ -537,6 +555,9 @@ def __init__(
@cached_property
def completions(self) -> AsyncCompletions:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import AsyncCompletions
return AsyncCompletions(self)
@@ -549,18 +570,25 @@ def chat(self) -> AsyncChat:
@cached_property
def embeddings(self) -> AsyncEmbeddings:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import AsyncEmbeddings
return AsyncEmbeddings(self)
@cached_property
def files(self) -> AsyncFiles:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import AsyncFiles
return AsyncFiles(self)
@cached_property
def images(self) -> AsyncImages:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import AsyncImages
return AsyncImages(self)
@@ -573,12 +601,16 @@ def audio(self) -> AsyncAudio:
@cached_property
def moderations(self) -> AsyncModerations:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import AsyncModerations
return AsyncModerations(self)
@cached_property
def models(self) -> AsyncModels:
+ """List and describe the various models available in the API."""
from .resources.models import AsyncModels
return AsyncModels(self)
@@ -609,12 +641,14 @@ def beta(self) -> AsyncBeta:
@cached_property
def batches(self) -> AsyncBatches:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import AsyncBatches
return AsyncBatches(self)
@cached_property
def uploads(self) -> AsyncUploads:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import AsyncUploads
return AsyncUploads(self)
@@ -633,12 +667,14 @@ def realtime(self) -> AsyncRealtime:
@cached_property
def conversations(self) -> AsyncConversations:
+ """Manage conversations and conversation items."""
from .resources.conversations import AsyncConversations
return AsyncConversations(self)
@cached_property
def evals(self) -> AsyncEvals:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import AsyncEvals
return AsyncEvals(self)
@@ -805,6 +841,9 @@ def __init__(self, client: OpenAI) -> None:
@cached_property
def completions(self) -> completions.CompletionsWithRawResponse:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import CompletionsWithRawResponse
return CompletionsWithRawResponse(self._client.completions)
@@ -817,18 +856,25 @@ def chat(self) -> chat.ChatWithRawResponse:
@cached_property
def embeddings(self) -> embeddings.EmbeddingsWithRawResponse:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import EmbeddingsWithRawResponse
return EmbeddingsWithRawResponse(self._client.embeddings)
@cached_property
def files(self) -> files.FilesWithRawResponse:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import FilesWithRawResponse
return FilesWithRawResponse(self._client.files)
@cached_property
def images(self) -> images.ImagesWithRawResponse:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import ImagesWithRawResponse
return ImagesWithRawResponse(self._client.images)
@@ -841,12 +887,16 @@ def audio(self) -> audio.AudioWithRawResponse:
@cached_property
def moderations(self) -> moderations.ModerationsWithRawResponse:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import ModerationsWithRawResponse
return ModerationsWithRawResponse(self._client.moderations)
@cached_property
def models(self) -> models.ModelsWithRawResponse:
+ """List and describe the various models available in the API."""
from .resources.models import ModelsWithRawResponse
return ModelsWithRawResponse(self._client.models)
@@ -871,12 +921,14 @@ def beta(self) -> beta.BetaWithRawResponse:
@cached_property
def batches(self) -> batches.BatchesWithRawResponse:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import BatchesWithRawResponse
return BatchesWithRawResponse(self._client.batches)
@cached_property
def uploads(self) -> uploads.UploadsWithRawResponse:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import UploadsWithRawResponse
return UploadsWithRawResponse(self._client.uploads)
@@ -895,12 +947,14 @@ def realtime(self) -> realtime.RealtimeWithRawResponse:
@cached_property
def conversations(self) -> conversations.ConversationsWithRawResponse:
+ """Manage conversations and conversation items."""
from .resources.conversations import ConversationsWithRawResponse
return ConversationsWithRawResponse(self._client.conversations)
@cached_property
def evals(self) -> evals.EvalsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import EvalsWithRawResponse
return EvalsWithRawResponse(self._client.evals)
@@ -932,6 +986,9 @@ def __init__(self, client: AsyncOpenAI) -> None:
@cached_property
def completions(self) -> completions.AsyncCompletionsWithRawResponse:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import AsyncCompletionsWithRawResponse
return AsyncCompletionsWithRawResponse(self._client.completions)
@@ -944,18 +1001,25 @@ def chat(self) -> chat.AsyncChatWithRawResponse:
@cached_property
def embeddings(self) -> embeddings.AsyncEmbeddingsWithRawResponse:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import AsyncEmbeddingsWithRawResponse
return AsyncEmbeddingsWithRawResponse(self._client.embeddings)
@cached_property
def files(self) -> files.AsyncFilesWithRawResponse:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import AsyncFilesWithRawResponse
return AsyncFilesWithRawResponse(self._client.files)
@cached_property
def images(self) -> images.AsyncImagesWithRawResponse:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import AsyncImagesWithRawResponse
return AsyncImagesWithRawResponse(self._client.images)
@@ -968,12 +1032,16 @@ def audio(self) -> audio.AsyncAudioWithRawResponse:
@cached_property
def moderations(self) -> moderations.AsyncModerationsWithRawResponse:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import AsyncModerationsWithRawResponse
return AsyncModerationsWithRawResponse(self._client.moderations)
@cached_property
def models(self) -> models.AsyncModelsWithRawResponse:
+ """List and describe the various models available in the API."""
from .resources.models import AsyncModelsWithRawResponse
return AsyncModelsWithRawResponse(self._client.models)
@@ -998,12 +1066,14 @@ def beta(self) -> beta.AsyncBetaWithRawResponse:
@cached_property
def batches(self) -> batches.AsyncBatchesWithRawResponse:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import AsyncBatchesWithRawResponse
return AsyncBatchesWithRawResponse(self._client.batches)
@cached_property
def uploads(self) -> uploads.AsyncUploadsWithRawResponse:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import AsyncUploadsWithRawResponse
return AsyncUploadsWithRawResponse(self._client.uploads)
@@ -1022,12 +1092,14 @@ def realtime(self) -> realtime.AsyncRealtimeWithRawResponse:
@cached_property
def conversations(self) -> conversations.AsyncConversationsWithRawResponse:
+ """Manage conversations and conversation items."""
from .resources.conversations import AsyncConversationsWithRawResponse
return AsyncConversationsWithRawResponse(self._client.conversations)
@cached_property
def evals(self) -> evals.AsyncEvalsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import AsyncEvalsWithRawResponse
return AsyncEvalsWithRawResponse(self._client.evals)
@@ -1059,6 +1131,9 @@ def __init__(self, client: OpenAI) -> None:
@cached_property
def completions(self) -> completions.CompletionsWithStreamingResponse:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import CompletionsWithStreamingResponse
return CompletionsWithStreamingResponse(self._client.completions)
@@ -1071,18 +1146,25 @@ def chat(self) -> chat.ChatWithStreamingResponse:
@cached_property
def embeddings(self) -> embeddings.EmbeddingsWithStreamingResponse:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import EmbeddingsWithStreamingResponse
return EmbeddingsWithStreamingResponse(self._client.embeddings)
@cached_property
def files(self) -> files.FilesWithStreamingResponse:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import FilesWithStreamingResponse
return FilesWithStreamingResponse(self._client.files)
@cached_property
def images(self) -> images.ImagesWithStreamingResponse:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import ImagesWithStreamingResponse
return ImagesWithStreamingResponse(self._client.images)
@@ -1095,12 +1177,16 @@ def audio(self) -> audio.AudioWithStreamingResponse:
@cached_property
def moderations(self) -> moderations.ModerationsWithStreamingResponse:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import ModerationsWithStreamingResponse
return ModerationsWithStreamingResponse(self._client.moderations)
@cached_property
def models(self) -> models.ModelsWithStreamingResponse:
+ """List and describe the various models available in the API."""
from .resources.models import ModelsWithStreamingResponse
return ModelsWithStreamingResponse(self._client.models)
@@ -1125,12 +1211,14 @@ def beta(self) -> beta.BetaWithStreamingResponse:
@cached_property
def batches(self) -> batches.BatchesWithStreamingResponse:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import BatchesWithStreamingResponse
return BatchesWithStreamingResponse(self._client.batches)
@cached_property
def uploads(self) -> uploads.UploadsWithStreamingResponse:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import UploadsWithStreamingResponse
return UploadsWithStreamingResponse(self._client.uploads)
@@ -1149,12 +1237,14 @@ def realtime(self) -> realtime.RealtimeWithStreamingResponse:
@cached_property
def conversations(self) -> conversations.ConversationsWithStreamingResponse:
+ """Manage conversations and conversation items."""
from .resources.conversations import ConversationsWithStreamingResponse
return ConversationsWithStreamingResponse(self._client.conversations)
@cached_property
def evals(self) -> evals.EvalsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import EvalsWithStreamingResponse
return EvalsWithStreamingResponse(self._client.evals)
@@ -1186,6 +1276,9 @@ def __init__(self, client: AsyncOpenAI) -> None:
@cached_property
def completions(self) -> completions.AsyncCompletionsWithStreamingResponse:
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
from .resources.completions import AsyncCompletionsWithStreamingResponse
return AsyncCompletionsWithStreamingResponse(self._client.completions)
@@ -1198,18 +1291,25 @@ def chat(self) -> chat.AsyncChatWithStreamingResponse:
@cached_property
def embeddings(self) -> embeddings.AsyncEmbeddingsWithStreamingResponse:
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
from .resources.embeddings import AsyncEmbeddingsWithStreamingResponse
return AsyncEmbeddingsWithStreamingResponse(self._client.embeddings)
@cached_property
def files(self) -> files.AsyncFilesWithStreamingResponse:
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
from .resources.files import AsyncFilesWithStreamingResponse
return AsyncFilesWithStreamingResponse(self._client.files)
@cached_property
def images(self) -> images.AsyncImagesWithStreamingResponse:
+ """Given a prompt and/or an input image, the model will generate a new image."""
from .resources.images import AsyncImagesWithStreamingResponse
return AsyncImagesWithStreamingResponse(self._client.images)
@@ -1222,12 +1322,16 @@ def audio(self) -> audio.AsyncAudioWithStreamingResponse:
@cached_property
def moderations(self) -> moderations.AsyncModerationsWithStreamingResponse:
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
from .resources.moderations import AsyncModerationsWithStreamingResponse
return AsyncModerationsWithStreamingResponse(self._client.moderations)
@cached_property
def models(self) -> models.AsyncModelsWithStreamingResponse:
+ """List and describe the various models available in the API."""
from .resources.models import AsyncModelsWithStreamingResponse
return AsyncModelsWithStreamingResponse(self._client.models)
@@ -1252,12 +1356,14 @@ def beta(self) -> beta.AsyncBetaWithStreamingResponse:
@cached_property
def batches(self) -> batches.AsyncBatchesWithStreamingResponse:
+ """Create large batches of API requests to run asynchronously."""
from .resources.batches import AsyncBatchesWithStreamingResponse
return AsyncBatchesWithStreamingResponse(self._client.batches)
@cached_property
def uploads(self) -> uploads.AsyncUploadsWithStreamingResponse:
+ """Use Uploads to upload large files in multiple parts."""
from .resources.uploads import AsyncUploadsWithStreamingResponse
return AsyncUploadsWithStreamingResponse(self._client.uploads)
@@ -1276,12 +1382,14 @@ def realtime(self) -> realtime.AsyncRealtimeWithStreamingResponse:
@cached_property
def conversations(self) -> conversations.AsyncConversationsWithStreamingResponse:
+ """Manage conversations and conversation items."""
from .resources.conversations import AsyncConversationsWithStreamingResponse
return AsyncConversationsWithStreamingResponse(self._client.conversations)
@cached_property
def evals(self) -> evals.AsyncEvalsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
from .resources.evals import AsyncEvalsWithStreamingResponse
return AsyncEvalsWithStreamingResponse(self._client.evals)
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 08cf29390a..417b40c283 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "openai"
-__version__ = "2.24.0" # x-release-please-version
+__version__ = "2.25.0" # x-release-please-version
diff --git a/src/openai/resources/audio/audio.py b/src/openai/resources/audio/audio.py
index 383b7073bf..040a058df6 100644
--- a/src/openai/resources/audio/audio.py
+++ b/src/openai/resources/audio/audio.py
@@ -35,14 +35,17 @@
class Audio(SyncAPIResource):
@cached_property
def transcriptions(self) -> Transcriptions:
+ """Turn audio into text or text into audio."""
return Transcriptions(self._client)
@cached_property
def translations(self) -> Translations:
+ """Turn audio into text or text into audio."""
return Translations(self._client)
@cached_property
def speech(self) -> Speech:
+ """Turn audio into text or text into audio."""
return Speech(self._client)
@cached_property
@@ -68,14 +71,17 @@ def with_streaming_response(self) -> AudioWithStreamingResponse:
class AsyncAudio(AsyncAPIResource):
@cached_property
def transcriptions(self) -> AsyncTranscriptions:
+ """Turn audio into text or text into audio."""
return AsyncTranscriptions(self._client)
@cached_property
def translations(self) -> AsyncTranslations:
+ """Turn audio into text or text into audio."""
return AsyncTranslations(self._client)
@cached_property
def speech(self) -> AsyncSpeech:
+ """Turn audio into text or text into audio."""
return AsyncSpeech(self._client)
@cached_property
@@ -104,14 +110,17 @@ def __init__(self, audio: Audio) -> None:
@cached_property
def transcriptions(self) -> TranscriptionsWithRawResponse:
+ """Turn audio into text or text into audio."""
return TranscriptionsWithRawResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> TranslationsWithRawResponse:
+ """Turn audio into text or text into audio."""
return TranslationsWithRawResponse(self._audio.translations)
@cached_property
def speech(self) -> SpeechWithRawResponse:
+ """Turn audio into text or text into audio."""
return SpeechWithRawResponse(self._audio.speech)
@@ -121,14 +130,17 @@ def __init__(self, audio: AsyncAudio) -> None:
@cached_property
def transcriptions(self) -> AsyncTranscriptionsWithRawResponse:
+ """Turn audio into text or text into audio."""
return AsyncTranscriptionsWithRawResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> AsyncTranslationsWithRawResponse:
+ """Turn audio into text or text into audio."""
return AsyncTranslationsWithRawResponse(self._audio.translations)
@cached_property
def speech(self) -> AsyncSpeechWithRawResponse:
+ """Turn audio into text or text into audio."""
return AsyncSpeechWithRawResponse(self._audio.speech)
@@ -138,14 +150,17 @@ def __init__(self, audio: Audio) -> None:
@cached_property
def transcriptions(self) -> TranscriptionsWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return TranscriptionsWithStreamingResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> TranslationsWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return TranslationsWithStreamingResponse(self._audio.translations)
@cached_property
def speech(self) -> SpeechWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return SpeechWithStreamingResponse(self._audio.speech)
@@ -155,12 +170,15 @@ def __init__(self, audio: AsyncAudio) -> None:
@cached_property
def transcriptions(self) -> AsyncTranscriptionsWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return AsyncTranscriptionsWithStreamingResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> AsyncTranslationsWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return AsyncTranslationsWithStreamingResponse(self._audio.translations)
@cached_property
def speech(self) -> AsyncSpeechWithStreamingResponse:
+ """Turn audio into text or text into audio."""
return AsyncSpeechWithStreamingResponse(self._audio.speech)
diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py
index 96a32f9268..f937321baa 100644
--- a/src/openai/resources/audio/speech.py
+++ b/src/openai/resources/audio/speech.py
@@ -26,6 +26,8 @@
class Speech(SyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> SpeechWithRawResponse:
"""
@@ -125,6 +127,8 @@ def create(
class AsyncSpeech(AsyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> AsyncSpeechWithRawResponse:
"""
diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py
index bc6e9f22de..25e6e0cb5e 100644
--- a/src/openai/resources/audio/transcriptions.py
+++ b/src/openai/resources/audio/transcriptions.py
@@ -42,6 +42,8 @@
class Transcriptions(SyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> TranscriptionsWithRawResponse:
"""
@@ -497,6 +499,8 @@ def create(
class AsyncTranscriptions(AsyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> AsyncTranscriptionsWithRawResponse:
"""
diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py
index 310f901fb3..0751a65586 100644
--- a/src/openai/resources/audio/translations.py
+++ b/src/openai/resources/audio/translations.py
@@ -27,6 +27,8 @@
class Translations(SyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> TranslationsWithRawResponse:
"""
@@ -170,6 +172,8 @@ def create(
class AsyncTranslations(AsyncAPIResource):
+ """Turn audio into text or text into audio."""
+
@cached_property
def with_raw_response(self) -> AsyncTranslationsWithRawResponse:
"""
diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py
index bc856bf5aa..005a32870e 100644
--- a/src/openai/resources/batches.py
+++ b/src/openai/resources/batches.py
@@ -23,6 +23,8 @@
class Batches(SyncAPIResource):
+ """Create large batches of API requests to run asynchronously."""
+
@cached_property
def with_raw_response(self) -> BatchesWithRawResponse:
"""
@@ -247,6 +249,8 @@ def cancel(
class AsyncBatches(AsyncAPIResource):
+ """Create large batches of API requests to run asynchronously."""
+
@cached_property
def with_raw_response(self) -> AsyncBatchesWithRawResponse:
"""
diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py
index 8c69700059..bf22122553 100644
--- a/src/openai/resources/beta/assistants.py
+++ b/src/openai/resources/beta/assistants.py
@@ -33,6 +33,8 @@
class Assistants(SyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> AssistantsWithRawResponse:
"""
@@ -507,6 +509,8 @@ def delete(
class AsyncAssistants(AsyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> AsyncAssistantsWithRawResponse:
"""
diff --git a/src/openai/resources/beta/beta.py b/src/openai/resources/beta/beta.py
index 5ee3639db1..388a1c5d1f 100644
--- a/src/openai/resources/beta/beta.py
+++ b/src/openai/resources/beta/beta.py
@@ -52,10 +52,12 @@ def chatkit(self) -> ChatKit:
@cached_property
def assistants(self) -> Assistants:
+ """Build Assistants that can call models and use tools."""
return Assistants(self._client)
@cached_property
def threads(self) -> Threads:
+ """Build Assistants that can call models and use tools."""
return Threads(self._client)
@cached_property
@@ -93,10 +95,12 @@ def chatkit(self) -> AsyncChatKit:
@cached_property
def assistants(self) -> AsyncAssistants:
+ """Build Assistants that can call models and use tools."""
return AsyncAssistants(self._client)
@cached_property
def threads(self) -> AsyncThreads:
+ """Build Assistants that can call models and use tools."""
return AsyncThreads(self._client)
@cached_property
@@ -129,10 +133,12 @@ def chatkit(self) -> ChatKitWithRawResponse:
@cached_property
def assistants(self) -> AssistantsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AssistantsWithRawResponse(self._beta.assistants)
@cached_property
def threads(self) -> ThreadsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return ThreadsWithRawResponse(self._beta.threads)
@@ -146,10 +152,12 @@ def chatkit(self) -> AsyncChatKitWithRawResponse:
@cached_property
def assistants(self) -> AsyncAssistantsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncAssistantsWithRawResponse(self._beta.assistants)
@cached_property
def threads(self) -> AsyncThreadsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncThreadsWithRawResponse(self._beta.threads)
@@ -163,10 +171,12 @@ def chatkit(self) -> ChatKitWithStreamingResponse:
@cached_property
def assistants(self) -> AssistantsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AssistantsWithStreamingResponse(self._beta.assistants)
@cached_property
def threads(self) -> ThreadsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return ThreadsWithStreamingResponse(self._beta.threads)
@@ -180,8 +190,10 @@ def chatkit(self) -> AsyncChatKitWithStreamingResponse:
@cached_property
def assistants(self) -> AsyncAssistantsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncAssistantsWithStreamingResponse(self._beta.assistants)
@cached_property
def threads(self) -> AsyncThreadsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncThreadsWithStreamingResponse(self._beta.threads)
diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py
index d94ecca9a2..e783310933 100644
--- a/src/openai/resources/beta/threads/messages.py
+++ b/src/openai/resources/beta/threads/messages.py
@@ -29,6 +29,8 @@
class Messages(SyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> MessagesWithRawResponse:
"""
@@ -312,6 +314,8 @@ def delete(
class AsyncMessages(AsyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> AsyncMessagesWithRawResponse:
"""
diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py
index 90845a2f62..20862185d2 100644
--- a/src/openai/resources/beta/threads/runs/runs.py
+++ b/src/openai/resources/beta/threads/runs/runs.py
@@ -59,8 +59,11 @@
class Runs(SyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def steps(self) -> Steps:
+ """Build Assistants that can call models and use tools."""
return Steps(self._client)
@cached_property
@@ -1518,8 +1521,11 @@ def submit_tool_outputs_stream(
class AsyncRuns(AsyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def steps(self) -> AsyncSteps:
+ """Build Assistants that can call models and use tools."""
return AsyncSteps(self._client)
@cached_property
@@ -3015,6 +3021,7 @@ def __init__(self, runs: Runs) -> None:
@cached_property
def steps(self) -> StepsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return StepsWithRawResponse(self._runs.steps)
@@ -3055,6 +3062,7 @@ def __init__(self, runs: AsyncRuns) -> None:
@cached_property
def steps(self) -> AsyncStepsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncStepsWithRawResponse(self._runs.steps)
@@ -3095,6 +3103,7 @@ def __init__(self, runs: Runs) -> None:
@cached_property
def steps(self) -> StepsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return StepsWithStreamingResponse(self._runs.steps)
@@ -3135,4 +3144,5 @@ def __init__(self, runs: AsyncRuns) -> None:
@cached_property
def steps(self) -> AsyncStepsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncStepsWithStreamingResponse(self._runs.steps)
diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py
index 254a94435c..dea5df69bc 100644
--- a/src/openai/resources/beta/threads/runs/steps.py
+++ b/src/openai/resources/beta/threads/runs/steps.py
@@ -24,6 +24,8 @@
class Steps(SyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> StepsWithRawResponse:
"""
@@ -180,6 +182,8 @@ def list(
class AsyncSteps(AsyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def with_raw_response(self) -> AsyncStepsWithRawResponse:
"""
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index a804fda159..0a93baf452 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -60,12 +60,16 @@
class Threads(SyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def runs(self) -> Runs:
+ """Build Assistants that can call models and use tools."""
return Runs(self._client)
@cached_property
def messages(self) -> Messages:
+ """Build Assistants that can call models and use tools."""
return Messages(self._client)
@cached_property
@@ -922,12 +926,16 @@ def create_and_run_stream(
class AsyncThreads(AsyncAPIResource):
+ """Build Assistants that can call models and use tools."""
+
@cached_property
def runs(self) -> AsyncRuns:
+ """Build Assistants that can call models and use tools."""
return AsyncRuns(self._client)
@cached_property
def messages(self) -> AsyncMessages:
+ """Build Assistants that can call models and use tools."""
return AsyncMessages(self._client)
@cached_property
@@ -1819,10 +1827,12 @@ def __init__(self, threads: Threads) -> None:
@cached_property
def runs(self) -> RunsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return RunsWithRawResponse(self._threads.runs)
@cached_property
def messages(self) -> MessagesWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return MessagesWithRawResponse(self._threads.messages)
@@ -1858,10 +1868,12 @@ def __init__(self, threads: AsyncThreads) -> None:
@cached_property
def runs(self) -> AsyncRunsWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncRunsWithRawResponse(self._threads.runs)
@cached_property
def messages(self) -> AsyncMessagesWithRawResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncMessagesWithRawResponse(self._threads.messages)
@@ -1897,10 +1909,12 @@ def __init__(self, threads: Threads) -> None:
@cached_property
def runs(self) -> RunsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return RunsWithStreamingResponse(self._threads.runs)
@cached_property
def messages(self) -> MessagesWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return MessagesWithStreamingResponse(self._threads.messages)
@@ -1936,8 +1950,10 @@ def __init__(self, threads: AsyncThreads) -> None:
@cached_property
def runs(self) -> AsyncRunsWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncRunsWithStreamingResponse(self._threads.runs)
@cached_property
def messages(self) -> AsyncMessagesWithStreamingResponse:
+ """Build Assistants that can call models and use tools."""
return AsyncMessagesWithStreamingResponse(self._threads.messages)
diff --git a/src/openai/resources/chat/chat.py b/src/openai/resources/chat/chat.py
index 14f9224b41..2c921e7480 100644
--- a/src/openai/resources/chat/chat.py
+++ b/src/openai/resources/chat/chat.py
@@ -19,6 +19,9 @@
class Chat(SyncAPIResource):
@cached_property
def completions(self) -> Completions:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return Completions(self._client)
@cached_property
@@ -44,6 +47,9 @@ def with_streaming_response(self) -> ChatWithStreamingResponse:
class AsyncChat(AsyncAPIResource):
@cached_property
def completions(self) -> AsyncCompletions:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncCompletions(self._client)
@cached_property
@@ -72,6 +78,9 @@ def __init__(self, chat: Chat) -> None:
@cached_property
def completions(self) -> CompletionsWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return CompletionsWithRawResponse(self._chat.completions)
@@ -81,6 +90,9 @@ def __init__(self, chat: AsyncChat) -> None:
@cached_property
def completions(self) -> AsyncCompletionsWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncCompletionsWithRawResponse(self._chat.completions)
@@ -90,6 +102,9 @@ def __init__(self, chat: Chat) -> None:
@cached_property
def completions(self) -> CompletionsWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return CompletionsWithStreamingResponse(self._chat.completions)
@@ -99,4 +114,7 @@ def __init__(self, chat: AsyncChat) -> None:
@cached_property
def completions(self) -> AsyncCompletionsWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncCompletionsWithStreamingResponse(self._chat.completions)
diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py
index 5d56d05d87..a705c1f658 100644
--- a/src/openai/resources/chat/completions/completions.py
+++ b/src/openai/resources/chat/completions/completions.py
@@ -58,8 +58,15 @@
class Completions(SyncAPIResource):
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+
@cached_property
def messages(self) -> Messages:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return Messages(self._client)
@cached_property
@@ -1554,8 +1561,15 @@ def stream(
class AsyncCompletions(AsyncAPIResource):
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+
@cached_property
def messages(self) -> AsyncMessages:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncMessages(self._client)
@cached_property
@@ -3075,6 +3089,9 @@ def __init__(self, completions: Completions) -> None:
@cached_property
def messages(self) -> MessagesWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return MessagesWithRawResponse(self._completions.messages)
@@ -3103,6 +3120,9 @@ def __init__(self, completions: AsyncCompletions) -> None:
@cached_property
def messages(self) -> AsyncMessagesWithRawResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncMessagesWithRawResponse(self._completions.messages)
@@ -3131,6 +3151,9 @@ def __init__(self, completions: Completions) -> None:
@cached_property
def messages(self) -> MessagesWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return MessagesWithStreamingResponse(self._completions.messages)
@@ -3159,6 +3182,9 @@ def __init__(self, completions: AsyncCompletions) -> None:
@cached_property
def messages(self) -> AsyncMessagesWithStreamingResponse:
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
return AsyncMessagesWithStreamingResponse(self._completions.messages)
diff --git a/src/openai/resources/chat/completions/messages.py b/src/openai/resources/chat/completions/messages.py
index 3d6dc79cd6..b1c6a08d51 100644
--- a/src/openai/resources/chat/completions/messages.py
+++ b/src/openai/resources/chat/completions/messages.py
@@ -21,6 +21,10 @@
class Messages(SyncAPIResource):
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+
@cached_property
def with_raw_response(self) -> MessagesWithRawResponse:
"""
@@ -99,6 +103,10 @@ def list(
class AsyncMessages(AsyncAPIResource):
+ """
+ Given a list of messages comprising a conversation, the model will return a response.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncMessagesWithRawResponse:
"""
diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py
index 4b6e29395b..4c9e266787 100644
--- a/src/openai/resources/completions.py
+++ b/src/openai/resources/completions.py
@@ -25,6 +25,10 @@
class Completions(SyncAPIResource):
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
+
@cached_property
def with_raw_response(self) -> CompletionsWithRawResponse:
"""
@@ -584,6 +588,10 @@ def create(
class AsyncCompletions(AsyncAPIResource):
+ """
+ Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
"""
diff --git a/src/openai/resources/conversations/conversations.py b/src/openai/resources/conversations/conversations.py
index da037a4e22..f2c54e4d04 100644
--- a/src/openai/resources/conversations/conversations.py
+++ b/src/openai/resources/conversations/conversations.py
@@ -31,8 +31,11 @@
class Conversations(SyncAPIResource):
+ """Manage conversations and conversation items."""
+
@cached_property
def items(self) -> Items:
+ """Manage conversations and conversation items."""
return Items(self._client)
@cached_property
@@ -214,8 +217,11 @@ def delete(
class AsyncConversations(AsyncAPIResource):
+ """Manage conversations and conversation items."""
+
@cached_property
def items(self) -> AsyncItems:
+ """Manage conversations and conversation items."""
return AsyncItems(self._client)
@cached_property
@@ -417,6 +423,7 @@ def __init__(self, conversations: Conversations) -> None:
@cached_property
def items(self) -> ItemsWithRawResponse:
+ """Manage conversations and conversation items."""
return ItemsWithRawResponse(self._conversations.items)
@@ -439,6 +446,7 @@ def __init__(self, conversations: AsyncConversations) -> None:
@cached_property
def items(self) -> AsyncItemsWithRawResponse:
+ """Manage conversations and conversation items."""
return AsyncItemsWithRawResponse(self._conversations.items)
@@ -461,6 +469,7 @@ def __init__(self, conversations: Conversations) -> None:
@cached_property
def items(self) -> ItemsWithStreamingResponse:
+ """Manage conversations and conversation items."""
return ItemsWithStreamingResponse(self._conversations.items)
@@ -483,4 +492,5 @@ def __init__(self, conversations: AsyncConversations) -> None:
@cached_property
def items(self) -> AsyncItemsWithStreamingResponse:
+ """Manage conversations and conversation items."""
return AsyncItemsWithStreamingResponse(self._conversations.items)
diff --git a/src/openai/resources/conversations/items.py b/src/openai/resources/conversations/items.py
index 3dba144849..1f8e101f7f 100644
--- a/src/openai/resources/conversations/items.py
+++ b/src/openai/resources/conversations/items.py
@@ -26,6 +26,8 @@
class Items(SyncAPIResource):
+ """Manage conversations and conversation items."""
+
@cached_property
def with_raw_response(self) -> ItemsWithRawResponse:
"""
@@ -256,6 +258,8 @@ def delete(
class AsyncItems(AsyncAPIResource):
+ """Manage conversations and conversation items."""
+
@cached_property
def with_raw_response(self) -> AsyncItemsWithRawResponse:
"""
diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py
index 5dc3dfa9b3..86eb949a40 100644
--- a/src/openai/resources/embeddings.py
+++ b/src/openai/resources/embeddings.py
@@ -25,6 +25,10 @@
class Embeddings(SyncAPIResource):
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
+
@cached_property
def with_raw_response(self) -> EmbeddingsWithRawResponse:
"""
@@ -144,6 +148,10 @@ def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse:
class AsyncEmbeddings(AsyncAPIResource):
+ """
+ Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncEmbeddingsWithRawResponse:
"""
diff --git a/src/openai/resources/evals/evals.py b/src/openai/resources/evals/evals.py
index 40c4a3e9a3..f0fe28fe8c 100644
--- a/src/openai/resources/evals/evals.py
+++ b/src/openai/resources/evals/evals.py
@@ -35,8 +35,11 @@
class Evals(SyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def runs(self) -> Runs:
+ """Manage and run evals in the OpenAI platform."""
return Runs(self._client)
@cached_property
@@ -299,8 +302,11 @@ def delete(
class AsyncEvals(AsyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def runs(self) -> AsyncRuns:
+ """Manage and run evals in the OpenAI platform."""
return AsyncRuns(self._client)
@cached_property
@@ -584,6 +590,7 @@ def __init__(self, evals: Evals) -> None:
@cached_property
def runs(self) -> RunsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
return RunsWithRawResponse(self._evals.runs)
@@ -609,6 +616,7 @@ def __init__(self, evals: AsyncEvals) -> None:
@cached_property
def runs(self) -> AsyncRunsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
return AsyncRunsWithRawResponse(self._evals.runs)
@@ -634,6 +642,7 @@ def __init__(self, evals: Evals) -> None:
@cached_property
def runs(self) -> RunsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
return RunsWithStreamingResponse(self._evals.runs)
@@ -659,4 +668,5 @@ def __init__(self, evals: AsyncEvals) -> None:
@cached_property
def runs(self) -> AsyncRunsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
return AsyncRunsWithStreamingResponse(self._evals.runs)
diff --git a/src/openai/resources/evals/runs/output_items.py b/src/openai/resources/evals/runs/output_items.py
index c2dee72122..c2e6647715 100644
--- a/src/openai/resources/evals/runs/output_items.py
+++ b/src/openai/resources/evals/runs/output_items.py
@@ -22,6 +22,8 @@
class OutputItems(SyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def with_raw_response(self) -> OutputItemsWithRawResponse:
"""
@@ -145,6 +147,8 @@ def list(
class AsyncOutputItems(AsyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def with_raw_response(self) -> AsyncOutputItemsWithRawResponse:
"""
diff --git a/src/openai/resources/evals/runs/runs.py b/src/openai/resources/evals/runs/runs.py
index b747b198f8..49eecd768f 100644
--- a/src/openai/resources/evals/runs/runs.py
+++ b/src/openai/resources/evals/runs/runs.py
@@ -35,8 +35,11 @@
class Runs(SyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def output_items(self) -> OutputItems:
+ """Manage and run evals in the OpenAI platform."""
return OutputItems(self._client)
@cached_property
@@ -285,8 +288,11 @@ def cancel(
class AsyncRuns(AsyncAPIResource):
+ """Manage and run evals in the OpenAI platform."""
+
@cached_property
def output_items(self) -> AsyncOutputItems:
+ """Manage and run evals in the OpenAI platform."""
return AsyncOutputItems(self._client)
@cached_property
@@ -556,6 +562,7 @@ def __init__(self, runs: Runs) -> None:
@cached_property
def output_items(self) -> OutputItemsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
return OutputItemsWithRawResponse(self._runs.output_items)
@@ -581,6 +588,7 @@ def __init__(self, runs: AsyncRuns) -> None:
@cached_property
def output_items(self) -> AsyncOutputItemsWithRawResponse:
+ """Manage and run evals in the OpenAI platform."""
return AsyncOutputItemsWithRawResponse(self._runs.output_items)
@@ -606,6 +614,7 @@ def __init__(self, runs: Runs) -> None:
@cached_property
def output_items(self) -> OutputItemsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
return OutputItemsWithStreamingResponse(self._runs.output_items)
@@ -631,4 +640,5 @@ def __init__(self, runs: AsyncRuns) -> None:
@cached_property
def output_items(self) -> AsyncOutputItemsWithStreamingResponse:
+ """Manage and run evals in the OpenAI platform."""
return AsyncOutputItemsWithStreamingResponse(self._runs.output_items)
diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py
index 964d6505e7..7341b326dc 100644
--- a/src/openai/resources/files.py
+++ b/src/openai/resources/files.py
@@ -33,6 +33,10 @@
class Files(SyncAPIResource):
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
+
@cached_property
def with_raw_response(self) -> FilesWithRawResponse:
"""
@@ -354,6 +358,10 @@ def wait_for_processing(
class AsyncFiles(AsyncAPIResource):
+ """
+ Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncFilesWithRawResponse:
"""
diff --git a/src/openai/resources/fine_tuning/alpha/alpha.py b/src/openai/resources/fine_tuning/alpha/alpha.py
index 54c05fab69..183208d0ab 100644
--- a/src/openai/resources/fine_tuning/alpha/alpha.py
+++ b/src/openai/resources/fine_tuning/alpha/alpha.py
@@ -19,6 +19,7 @@
class Alpha(SyncAPIResource):
@cached_property
def graders(self) -> Graders:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return Graders(self._client)
@cached_property
@@ -44,6 +45,7 @@ def with_streaming_response(self) -> AlphaWithStreamingResponse:
class AsyncAlpha(AsyncAPIResource):
@cached_property
def graders(self) -> AsyncGraders:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncGraders(self._client)
@cached_property
@@ -72,6 +74,7 @@ def __init__(self, alpha: Alpha) -> None:
@cached_property
def graders(self) -> GradersWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return GradersWithRawResponse(self._alpha.graders)
@@ -81,6 +84,7 @@ def __init__(self, alpha: AsyncAlpha) -> None:
@cached_property
def graders(self) -> AsyncGradersWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncGradersWithRawResponse(self._alpha.graders)
@@ -90,6 +94,7 @@ def __init__(self, alpha: Alpha) -> None:
@cached_property
def graders(self) -> GradersWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return GradersWithStreamingResponse(self._alpha.graders)
@@ -99,4 +104,5 @@ def __init__(self, alpha: AsyncAlpha) -> None:
@cached_property
def graders(self) -> AsyncGradersWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncGradersWithStreamingResponse(self._alpha.graders)
diff --git a/src/openai/resources/fine_tuning/alpha/graders.py b/src/openai/resources/fine_tuning/alpha/graders.py
index e7a9b925ea..e5d5dea5de 100644
--- a/src/openai/resources/fine_tuning/alpha/graders.py
+++ b/src/openai/resources/fine_tuning/alpha/graders.py
@@ -19,6 +19,8 @@
class Graders(SyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> GradersWithRawResponse:
"""
@@ -127,6 +129,8 @@ def validate(
class AsyncGraders(AsyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> AsyncGradersWithRawResponse:
"""
diff --git a/src/openai/resources/fine_tuning/checkpoints/checkpoints.py b/src/openai/resources/fine_tuning/checkpoints/checkpoints.py
index f59976a264..9c2ed6f576 100644
--- a/src/openai/resources/fine_tuning/checkpoints/checkpoints.py
+++ b/src/openai/resources/fine_tuning/checkpoints/checkpoints.py
@@ -19,6 +19,7 @@
class Checkpoints(SyncAPIResource):
@cached_property
def permissions(self) -> Permissions:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return Permissions(self._client)
@cached_property
@@ -44,6 +45,7 @@ def with_streaming_response(self) -> CheckpointsWithStreamingResponse:
class AsyncCheckpoints(AsyncAPIResource):
@cached_property
def permissions(self) -> AsyncPermissions:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncPermissions(self._client)
@cached_property
@@ -72,6 +74,7 @@ def __init__(self, checkpoints: Checkpoints) -> None:
@cached_property
def permissions(self) -> PermissionsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return PermissionsWithRawResponse(self._checkpoints.permissions)
@@ -81,6 +84,7 @@ def __init__(self, checkpoints: AsyncCheckpoints) -> None:
@cached_property
def permissions(self) -> AsyncPermissionsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncPermissionsWithRawResponse(self._checkpoints.permissions)
@@ -90,6 +94,7 @@ def __init__(self, checkpoints: Checkpoints) -> None:
@cached_property
def permissions(self) -> PermissionsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return PermissionsWithStreamingResponse(self._checkpoints.permissions)
@@ -99,4 +104,5 @@ def __init__(self, checkpoints: AsyncCheckpoints) -> None:
@cached_property
def permissions(self) -> AsyncPermissionsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncPermissionsWithStreamingResponse(self._checkpoints.permissions)
diff --git a/src/openai/resources/fine_tuning/checkpoints/permissions.py b/src/openai/resources/fine_tuning/checkpoints/permissions.py
index e7f55b82d9..35e06feee0 100644
--- a/src/openai/resources/fine_tuning/checkpoints/permissions.py
+++ b/src/openai/resources/fine_tuning/checkpoints/permissions.py
@@ -2,6 +2,7 @@
from __future__ import annotations
+import typing_extensions
from typing_extensions import Literal
import httpx
@@ -12,9 +13,14 @@
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
-from ....pagination import SyncPage, AsyncPage
+from ....pagination import SyncPage, AsyncPage, SyncConversationCursorPage, AsyncConversationCursorPage
from ...._base_client import AsyncPaginator, make_request_options
-from ....types.fine_tuning.checkpoints import permission_create_params, permission_retrieve_params
+from ....types.fine_tuning.checkpoints import (
+ permission_list_params,
+ permission_create_params,
+ permission_retrieve_params,
+)
+from ....types.fine_tuning.checkpoints.permission_list_response import PermissionListResponse
from ....types.fine_tuning.checkpoints.permission_create_response import PermissionCreateResponse
from ....types.fine_tuning.checkpoints.permission_delete_response import PermissionDeleteResponse
from ....types.fine_tuning.checkpoints.permission_retrieve_response import PermissionRetrieveResponse
@@ -23,6 +29,8 @@
class Permissions(SyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> PermissionsWithRawResponse:
"""
@@ -86,6 +94,7 @@ def create(
method="post",
)
+ @typing_extensions.deprecated("Retrieve is deprecated. Please swap to the paginated list method instead.")
def retrieve(
self,
fine_tuned_model_checkpoint: str,
@@ -148,6 +157,69 @@ def retrieve(
cast_to=PermissionRetrieveResponse,
)
+ def list(
+ self,
+ fine_tuned_model_checkpoint: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["ascending", "descending"] | Omit = omit,
+ project_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SyncConversationCursorPage[PermissionListResponse]:
+ """
+ **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
+
+ Organization owners can use this endpoint to view all permissions for a
+ fine-tuned model checkpoint.
+
+ Args:
+ after: Identifier for the last permission ID from the previous pagination request.
+
+ limit: Number of permissions to retrieve.
+
+ order: The order in which to retrieve permissions.
+
+ project_id: The ID of the project to get permissions for.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not fine_tuned_model_checkpoint:
+ raise ValueError(
+ f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
+ )
+ return self._get_api_list(
+ f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ page=SyncConversationCursorPage[PermissionListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ "project_id": project_id,
+ },
+ permission_list_params.PermissionListParams,
+ ),
+ ),
+ model=PermissionListResponse,
+ )
+
def delete(
self,
permission_id: str,
@@ -191,6 +263,8 @@ def delete(
class AsyncPermissions(AsyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> AsyncPermissionsWithRawResponse:
"""
@@ -254,6 +328,7 @@ def create(
method="post",
)
+ @typing_extensions.deprecated("Retrieve is deprecated. Please swap to the paginated list method instead.")
async def retrieve(
self,
fine_tuned_model_checkpoint: str,
@@ -316,6 +391,69 @@ async def retrieve(
cast_to=PermissionRetrieveResponse,
)
+ def list(
+ self,
+ fine_tuned_model_checkpoint: str,
+ *,
+ after: str | Omit = omit,
+ limit: int | Omit = omit,
+ order: Literal["ascending", "descending"] | Omit = omit,
+ project_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> AsyncPaginator[PermissionListResponse, AsyncConversationCursorPage[PermissionListResponse]]:
+ """
+ **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
+
+ Organization owners can use this endpoint to view all permissions for a
+ fine-tuned model checkpoint.
+
+ Args:
+ after: Identifier for the last permission ID from the previous pagination request.
+
+ limit: Number of permissions to retrieve.
+
+ order: The order in which to retrieve permissions.
+
+ project_id: The ID of the project to get permissions for.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not fine_tuned_model_checkpoint:
+ raise ValueError(
+ f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
+ )
+ return self._get_api_list(
+ f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+ page=AsyncConversationCursorPage[PermissionListResponse],
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "after": after,
+ "limit": limit,
+ "order": order,
+ "project_id": project_id,
+ },
+ permission_list_params.PermissionListParams,
+ ),
+ ),
+ model=PermissionListResponse,
+ )
+
async def delete(
self,
permission_id: str,
@@ -365,8 +503,13 @@ def __init__(self, permissions: Permissions) -> None:
self.create = _legacy_response.to_raw_response_wrapper(
permissions.create,
)
- self.retrieve = _legacy_response.to_raw_response_wrapper(
- permissions.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.to_raw_response_wrapper(
+                permissions.retrieve,  # pyright: ignore[reportDeprecated]
+ )
+ )
+ self.list = _legacy_response.to_raw_response_wrapper(
+ permissions.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
permissions.delete,
@@ -380,8 +523,13 @@ def __init__(self, permissions: AsyncPermissions) -> None:
self.create = _legacy_response.async_to_raw_response_wrapper(
permissions.create,
)
- self.retrieve = _legacy_response.async_to_raw_response_wrapper(
- permissions.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ _legacy_response.async_to_raw_response_wrapper(
+                permissions.retrieve,  # pyright: ignore[reportDeprecated]
+ )
+ )
+ self.list = _legacy_response.async_to_raw_response_wrapper(
+ permissions.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
permissions.delete,
@@ -395,8 +543,13 @@ def __init__(self, permissions: Permissions) -> None:
self.create = to_streamed_response_wrapper(
permissions.create,
)
- self.retrieve = to_streamed_response_wrapper(
- permissions.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ to_streamed_response_wrapper(
+ permissions.retrieve, # pyright: ignore[reportDeprecated]
+ )
+ )
+ self.list = to_streamed_response_wrapper(
+ permissions.list,
)
self.delete = to_streamed_response_wrapper(
permissions.delete,
@@ -410,8 +563,13 @@ def __init__(self, permissions: AsyncPermissions) -> None:
self.create = async_to_streamed_response_wrapper(
permissions.create,
)
- self.retrieve = async_to_streamed_response_wrapper(
- permissions.retrieve,
+ self.retrieve = ( # pyright: ignore[reportDeprecated]
+ async_to_streamed_response_wrapper(
+ permissions.retrieve, # pyright: ignore[reportDeprecated]
+ )
+ )
+ self.list = async_to_streamed_response_wrapper(
+ permissions.list,
)
self.delete = async_to_streamed_response_wrapper(
permissions.delete,
diff --git a/src/openai/resources/fine_tuning/fine_tuning.py b/src/openai/resources/fine_tuning/fine_tuning.py
index 25ae3e8cf4..60f1f44bdc 100644
--- a/src/openai/resources/fine_tuning/fine_tuning.py
+++ b/src/openai/resources/fine_tuning/fine_tuning.py
@@ -35,6 +35,7 @@
class FineTuning(SyncAPIResource):
@cached_property
def jobs(self) -> Jobs:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return Jobs(self._client)
@cached_property
@@ -68,6 +69,7 @@ def with_streaming_response(self) -> FineTuningWithStreamingResponse:
class AsyncFineTuning(AsyncAPIResource):
@cached_property
def jobs(self) -> AsyncJobs:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncJobs(self._client)
@cached_property
@@ -104,6 +106,7 @@ def __init__(self, fine_tuning: FineTuning) -> None:
@cached_property
def jobs(self) -> JobsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return JobsWithRawResponse(self._fine_tuning.jobs)
@cached_property
@@ -121,6 +124,7 @@ def __init__(self, fine_tuning: AsyncFineTuning) -> None:
@cached_property
def jobs(self) -> AsyncJobsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncJobsWithRawResponse(self._fine_tuning.jobs)
@cached_property
@@ -138,6 +142,7 @@ def __init__(self, fine_tuning: FineTuning) -> None:
@cached_property
def jobs(self) -> JobsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return JobsWithStreamingResponse(self._fine_tuning.jobs)
@cached_property
@@ -155,6 +160,7 @@ def __init__(self, fine_tuning: AsyncFineTuning) -> None:
@cached_property
def jobs(self) -> AsyncJobsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncJobsWithStreamingResponse(self._fine_tuning.jobs)
@cached_property
diff --git a/src/openai/resources/fine_tuning/jobs/checkpoints.py b/src/openai/resources/fine_tuning/jobs/checkpoints.py
index f65856f0c6..6f14a0994e 100644
--- a/src/openai/resources/fine_tuning/jobs/checkpoints.py
+++ b/src/openai/resources/fine_tuning/jobs/checkpoints.py
@@ -22,6 +22,8 @@
class Checkpoints(SyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> CheckpointsWithRawResponse:
"""
@@ -93,6 +95,8 @@ def list(
class AsyncCheckpoints(AsyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def with_raw_response(self) -> AsyncCheckpointsWithRawResponse:
"""
diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py
index b292e057cf..e38baa5539 100644
--- a/src/openai/resources/fine_tuning/jobs/jobs.py
+++ b/src/openai/resources/fine_tuning/jobs/jobs.py
@@ -35,8 +35,11 @@
class Jobs(SyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def checkpoints(self) -> Checkpoints:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return Checkpoints(self._client)
@cached_property
@@ -415,8 +418,11 @@ def resume(
class AsyncJobs(AsyncAPIResource):
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
+
@cached_property
def checkpoints(self) -> AsyncCheckpoints:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncCheckpoints(self._client)
@cached_property
@@ -822,6 +828,7 @@ def __init__(self, jobs: Jobs) -> None:
@cached_property
def checkpoints(self) -> CheckpointsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return CheckpointsWithRawResponse(self._jobs.checkpoints)
@@ -853,6 +860,7 @@ def __init__(self, jobs: AsyncJobs) -> None:
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithRawResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncCheckpointsWithRawResponse(self._jobs.checkpoints)
@@ -884,6 +892,7 @@ def __init__(self, jobs: Jobs) -> None:
@cached_property
def checkpoints(self) -> CheckpointsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return CheckpointsWithStreamingResponse(self._jobs.checkpoints)
@@ -915,4 +924,5 @@ def __init__(self, jobs: AsyncJobs) -> None:
@cached_property
def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
+ """Manage fine-tuning jobs to tailor a model to your specific training data."""
return AsyncCheckpointsWithStreamingResponse(self._jobs.checkpoints)
diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py
index 647eb1ca24..6959c2aeff 100644
--- a/src/openai/resources/images.py
+++ b/src/openai/resources/images.py
@@ -25,6 +25,8 @@
class Images(SyncAPIResource):
+ """Given a prompt and/or an input image, the model will generate a new image."""
+
@cached_property
def with_raw_response(self) -> ImagesWithRawResponse:
"""
@@ -915,6 +917,8 @@ def generate(
class AsyncImages(AsyncAPIResource):
+ """Given a prompt and/or an input image, the model will generate a new image."""
+
@cached_property
def with_raw_response(self) -> AsyncImagesWithRawResponse:
"""
diff --git a/src/openai/resources/models.py b/src/openai/resources/models.py
index a8f7691055..508393263f 100644
--- a/src/openai/resources/models.py
+++ b/src/openai/resources/models.py
@@ -21,6 +21,8 @@
class Models(SyncAPIResource):
+ """List and describe the various models available in the API."""
+
@cached_property
def with_raw_response(self) -> ModelsWithRawResponse:
"""
@@ -134,6 +136,8 @@ def delete(
class AsyncModels(AsyncAPIResource):
+ """List and describe the various models available in the API."""
+
@cached_property
def with_raw_response(self) -> AsyncModelsWithRawResponse:
"""
diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py
index 5f378f71e7..0b9a2d23c7 100644
--- a/src/openai/resources/moderations.py
+++ b/src/openai/resources/moderations.py
@@ -22,6 +22,10 @@
class Moderations(SyncAPIResource):
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
+
@cached_property
def with_raw_response(self) -> ModerationsWithRawResponse:
"""
@@ -92,6 +96,10 @@ def create(
class AsyncModerations(AsyncAPIResource):
+ """
+ Given text and/or image inputs, classifies if those inputs are potentially harmful.
+ """
+
@cached_property
def with_raw_response(self) -> AsyncModerationsWithRawResponse:
"""
diff --git a/src/openai/resources/uploads/parts.py b/src/openai/resources/uploads/parts.py
index 73eabd4083..034547f308 100644
--- a/src/openai/resources/uploads/parts.py
+++ b/src/openai/resources/uploads/parts.py
@@ -20,6 +20,8 @@
class Parts(SyncAPIResource):
+ """Use Uploads to upload large files in multiple parts."""
+
@cached_property
def with_raw_response(self) -> PartsWithRawResponse:
"""
@@ -95,6 +97,8 @@ def create(
class AsyncParts(AsyncAPIResource):
+ """Use Uploads to upload large files in multiple parts."""
+
@cached_property
def with_raw_response(self) -> AsyncPartsWithRawResponse:
"""
diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py
index 2873b913ba..f5e5e6f664 100644
--- a/src/openai/resources/uploads/uploads.py
+++ b/src/openai/resources/uploads/uploads.py
@@ -41,8 +41,11 @@
class Uploads(SyncAPIResource):
+ """Use Uploads to upload large files in multiple parts."""
+
@cached_property
def parts(self) -> Parts:
+ """Use Uploads to upload large files in multiple parts."""
return Parts(self._client)
@cached_property
@@ -343,8 +346,11 @@ def complete(
class AsyncUploads(AsyncAPIResource):
+ """Use Uploads to upload large files in multiple parts."""
+
@cached_property
def parts(self) -> AsyncParts:
+ """Use Uploads to upload large files in multiple parts."""
return AsyncParts(self._client)
@cached_property
@@ -671,6 +677,7 @@ def __init__(self, uploads: Uploads) -> None:
@cached_property
def parts(self) -> PartsWithRawResponse:
+ """Use Uploads to upload large files in multiple parts."""
return PartsWithRawResponse(self._uploads.parts)
@@ -690,6 +697,7 @@ def __init__(self, uploads: AsyncUploads) -> None:
@cached_property
def parts(self) -> AsyncPartsWithRawResponse:
+ """Use Uploads to upload large files in multiple parts."""
return AsyncPartsWithRawResponse(self._uploads.parts)
@@ -709,6 +717,7 @@ def __init__(self, uploads: Uploads) -> None:
@cached_property
def parts(self) -> PartsWithStreamingResponse:
+ """Use Uploads to upload large files in multiple parts."""
return PartsWithStreamingResponse(self._uploads.parts)
@@ -728,4 +737,5 @@ def __init__(self, uploads: AsyncUploads) -> None:
@cached_property
def parts(self) -> AsyncPartsWithStreamingResponse:
+ """Use Uploads to upload large files in multiple parts."""
return AsyncPartsWithStreamingResponse(self._uploads.parts)
diff --git a/src/openai/types/fine_tuning/checkpoints/__init__.py b/src/openai/types/fine_tuning/checkpoints/__init__.py
index 2947b33145..5447b4d818 100644
--- a/src/openai/types/fine_tuning/checkpoints/__init__.py
+++ b/src/openai/types/fine_tuning/checkpoints/__init__.py
@@ -2,7 +2,9 @@
from __future__ import annotations
+from .permission_list_params import PermissionListParams as PermissionListParams
from .permission_create_params import PermissionCreateParams as PermissionCreateParams
+from .permission_list_response import PermissionListResponse as PermissionListResponse
from .permission_create_response import PermissionCreateResponse as PermissionCreateResponse
from .permission_delete_response import PermissionDeleteResponse as PermissionDeleteResponse
from .permission_retrieve_params import PermissionRetrieveParams as PermissionRetrieveParams
diff --git a/src/openai/types/fine_tuning/checkpoints/permission_list_params.py b/src/openai/types/fine_tuning/checkpoints/permission_list_params.py
new file mode 100644
index 0000000000..1f389920aa
--- /dev/null
+++ b/src/openai/types/fine_tuning/checkpoints/permission_list_params.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, TypedDict
+
+__all__ = ["PermissionListParams"]
+
+
+class PermissionListParams(TypedDict, total=False):
+ after: str
+ """Identifier for the last permission ID from the previous pagination request."""
+
+ limit: int
+ """Number of permissions to retrieve."""
+
+ order: Literal["ascending", "descending"]
+ """The order in which to retrieve permissions."""
+
+ project_id: str
+ """The ID of the project to get permissions for."""
diff --git a/src/openai/types/fine_tuning/checkpoints/permission_list_response.py b/src/openai/types/fine_tuning/checkpoints/permission_list_response.py
new file mode 100644
index 0000000000..26e913e0c2
--- /dev/null
+++ b/src/openai/types/fine_tuning/checkpoints/permission_list_response.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ...._models import BaseModel
+
+__all__ = ["PermissionListResponse"]
+
+
+class PermissionListResponse(BaseModel):
+ """
+ The `checkpoint.permission` object represents a permission for a fine-tuned model checkpoint.
+ """
+
+ id: str
+ """The permission identifier, which can be referenced in the API endpoints."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the permission was created."""
+
+ object: Literal["checkpoint.permission"]
+ """The object type, which is always "checkpoint.permission"."""
+
+ project_id: str
+ """The project identifier that the permission is for."""
diff --git a/src/openai/types/responses/easy_input_message.py b/src/openai/types/responses/easy_input_message.py
index 6f4d782734..7a45f2bce9 100644
--- a/src/openai/types/responses/easy_input_message.py
+++ b/src/openai/types/responses/easy_input_message.py
@@ -31,7 +31,11 @@ class EasyInputMessage(BaseModel):
"""
phase: Optional[Literal["commentary", "final_answer"]] = None
- """The phase of an assistant message.
+ """
+ Labels an `assistant` message as intermediate commentary (`commentary`) or the
+ final answer (`final_answer`). For models like `gpt-5.3-codex` and beyond, when
+ sending follow-up requests, preserve and resend phase on all assistant messages
+ — dropping it can degrade performance. Not used for user messages.
Use `commentary` for an intermediate assistant message and `final_answer` for
the final assistant message. For follow-up requests with models like
diff --git a/src/openai/types/responses/easy_input_message_param.py b/src/openai/types/responses/easy_input_message_param.py
index f7eb42ba71..7048e2034b 100644
--- a/src/openai/types/responses/easy_input_message_param.py
+++ b/src/openai/types/responses/easy_input_message_param.py
@@ -32,7 +32,11 @@ class EasyInputMessageParam(TypedDict, total=False):
"""
phase: Optional[Literal["commentary", "final_answer"]]
- """The phase of an assistant message.
+ """
+ Labels an `assistant` message as intermediate commentary (`commentary`) or the
+ final answer (`final_answer`). For models like `gpt-5.3-codex` and beyond, when
+ sending follow-up requests, preserve and resend phase on all assistant messages
+ — dropping it can degrade performance. Not used for user messages.
Use `commentary` for an intermediate assistant message and `final_answer` for
the final assistant message. For follow-up requests with models like
diff --git a/src/openai/types/responses/response_output_message.py b/src/openai/types/responses/response_output_message.py
index a8720e1c57..62f0ed437f 100644
--- a/src/openai/types/responses/response_output_message.py
+++ b/src/openai/types/responses/response_output_message.py
@@ -36,7 +36,11 @@ class ResponseOutputMessage(BaseModel):
"""The type of the output message. Always `message`."""
phase: Optional[Literal["commentary", "final_answer"]] = None
- """The phase of an assistant message.
+ """
+ Labels an `assistant` message as intermediate commentary (`commentary`) or the
+ final answer (`final_answer`). For models like `gpt-5.3-codex` and beyond, when
+ sending follow-up requests, preserve and resend phase on all assistant messages
+ — dropping it can degrade performance. Not used for user messages.
Use `commentary` for an intermediate assistant message and `final_answer` for
the final assistant message. For follow-up requests with models like
diff --git a/src/openai/types/responses/response_output_message_param.py b/src/openai/types/responses/response_output_message_param.py
index 5d488d8c6b..20c1384739 100644
--- a/src/openai/types/responses/response_output_message_param.py
+++ b/src/openai/types/responses/response_output_message_param.py
@@ -36,7 +36,11 @@ class ResponseOutputMessageParam(TypedDict, total=False):
"""The type of the output message. Always `message`."""
phase: Optional[Literal["commentary", "final_answer"]]
- """The phase of an assistant message.
+ """
+ Labels an `assistant` message as intermediate commentary (`commentary`) or the
+ final answer (`final_answer`). For models like `gpt-5.3-codex` and beyond, when
+ sending follow-up requests, preserve and resend phase on all assistant messages
+ — dropping it can degrade performance. Not used for user messages.
Use `commentary` for an intermediate assistant message and `final_answer` for
the final assistant message. For follow-up requests with models like
diff --git a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
index 9420e3a34c..a3118fc838 100644
--- a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
+++ b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
@@ -9,13 +9,16 @@
from openai import OpenAI, AsyncOpenAI
from tests.utils import assert_matches_type
-from openai.pagination import SyncPage, AsyncPage
+from openai.pagination import SyncPage, AsyncPage, SyncConversationCursorPage, AsyncConversationCursorPage
from openai.types.fine_tuning.checkpoints import (
+ PermissionListResponse,
PermissionCreateResponse,
PermissionDeleteResponse,
PermissionRetrieveResponse,
)
+# pyright: reportDeprecated=false
+
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -68,52 +71,110 @@ def test_path_params_create(self, client: OpenAI) -> None:
@parametrize
def test_method_retrieve(self, client: OpenAI) -> None:
- permission = client.fine_tuning.checkpoints.permissions.retrieve(
- fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
+ with pytest.warns(DeprecationWarning):
+ permission = client.fine_tuning.checkpoints.permissions.retrieve(
+ fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+
assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
def test_method_retrieve_with_all_params(self, client: OpenAI) -> None:
- permission = client.fine_tuning.checkpoints.permissions.retrieve(
+ with pytest.warns(DeprecationWarning):
+ permission = client.fine_tuning.checkpoints.permissions.retrieve(
+ fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ after="after",
+ limit=0,
+ order="ascending",
+ project_id="project_id",
+ )
+
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: OpenAI) -> None:
+ with pytest.warns(DeprecationWarning):
+ response = client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
+ fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ permission = response.parse()
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: OpenAI) -> None:
+ with pytest.warns(DeprecationWarning):
+ with client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve(
+ fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ permission = response.parse()
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: OpenAI) -> None:
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(
+ ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
+ ):
+ client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
+ fine_tuned_model_checkpoint="",
+ )
+
+ @parametrize
+ def test_method_list(self, client: OpenAI) -> None:
+ permission = client.fine_tuning.checkpoints.permissions.list(
+ fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+ assert_matches_type(SyncConversationCursorPage[PermissionListResponse], permission, path=["response"])
+
+ @parametrize
+ def test_method_list_with_all_params(self, client: OpenAI) -> None:
+ permission = client.fine_tuning.checkpoints.permissions.list(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
after="after",
limit=0,
order="ascending",
project_id="project_id",
)
- assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+ assert_matches_type(SyncConversationCursorPage[PermissionListResponse], permission, path=["response"])
@parametrize
- def test_raw_response_retrieve(self, client: OpenAI) -> None:
- response = client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
+ def test_raw_response_list(self, client: OpenAI) -> None:
+ response = client.fine_tuning.checkpoints.permissions.with_raw_response.list(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
- assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+ assert_matches_type(SyncConversationCursorPage[PermissionListResponse], permission, path=["response"])
@parametrize
- def test_streaming_response_retrieve(self, client: OpenAI) -> None:
- with client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve(
+ def test_streaming_response_list(self, client: OpenAI) -> None:
+ with client.fine_tuning.checkpoints.permissions.with_streaming_response.list(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
- assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+ assert_matches_type(SyncConversationCursorPage[PermissionListResponse], permission, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
- def test_path_params_retrieve(self, client: OpenAI) -> None:
+ def test_path_params_list(self, client: OpenAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
):
- client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
+ client.fine_tuning.checkpoints.permissions.with_raw_response.list(
fine_tuned_model_checkpoint="",
)
@@ -219,52 +280,110 @@ async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
- permission = await async_client.fine_tuning.checkpoints.permissions.retrieve(
- fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
- )
+ with pytest.warns(DeprecationWarning):
+ permission = await async_client.fine_tuning.checkpoints.permissions.retrieve(
+ fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+
assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
@parametrize
async def test_method_retrieve_with_all_params(self, async_client: AsyncOpenAI) -> None:
- permission = await async_client.fine_tuning.checkpoints.permissions.retrieve(
+ with pytest.warns(DeprecationWarning):
+ permission = await async_client.fine_tuning.checkpoints.permissions.retrieve(
+ fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ after="after",
+ limit=0,
+ order="ascending",
+ project_id="project_id",
+ )
+
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.warns(DeprecationWarning):
+ response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
+ fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ permission = response.parse()
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.warns(DeprecationWarning):
+ async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve(
+ fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ permission = await response.parse()
+ assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ with pytest.warns(DeprecationWarning):
+ with pytest.raises(
+ ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
+ ):
+ await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
+ fine_tuned_model_checkpoint="",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncOpenAI) -> None:
+ permission = await async_client.fine_tuning.checkpoints.permissions.list(
+ fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
+ )
+ assert_matches_type(AsyncConversationCursorPage[PermissionListResponse], permission, path=["response"])
+
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
+ permission = await async_client.fine_tuning.checkpoints.permissions.list(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
after="after",
limit=0,
order="ascending",
project_id="project_id",
)
- assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+ assert_matches_type(AsyncConversationCursorPage[PermissionListResponse], permission, path=["response"])
@parametrize
- async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
+ async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
+ response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.list(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = response.parse()
- assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+ assert_matches_type(AsyncConversationCursorPage[PermissionListResponse], permission, path=["response"])
@parametrize
- async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
- async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.retrieve(
+ async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
+ async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.list(
fine_tuned_model_checkpoint="ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
permission = await response.parse()
- assert_matches_type(PermissionRetrieveResponse, permission, path=["response"])
+ assert_matches_type(AsyncConversationCursorPage[PermissionListResponse], permission, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
- async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
+ async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(
ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
):
- await async_client.fine_tuning.checkpoints.permissions.with_raw_response.retrieve(
+ await async_client.fine_tuning.checkpoints.permissions.with_raw_response.list(
fine_tuned_model_checkpoint="",
)