{ "openapi": "3.2.0", "info": { "title": "Azure AI Foundry Models Service", "license": { "name": "MIT", "url": "https://github.com/openai/openai-openapi/blob/master/LICENSE" }, "version": "v1" }, "tags": [ { "name": "Batch" }, { "name": "Chat" }, { "name": "Completions" }, { "name": "Containers" }, { "name": "Conversations" }, { "name": "Evals" }, { "name": "Files" }, { "name": "Embeddings" }, { "name": "Fine-tuning" }, { "name": "Models" }, { "name": "Realtime" }, { "name": "Responses" }, { "name": "Threads" }, { "name": "Vector Stores" } ], "paths": { "/batches": { "post": { "operationId": "createBatch", "summary": "Creates and executes a batch from an uploaded file of requests", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "accept", "in": "header", "required": true, "schema": { "type": "string", "enum": [ "application/json" ] } } ], "responses": { "201": { "description": "The request has succeeded and a new resource has been created as a result.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "id", "object", "endpoint", "completion_window", "status", "created_at" ], "properties": { "id": { "type": "string" }, "object": { "type": "string", "enum": [ "batch" ], "description": "The object type, which is always `batch`.", "x-stainless-const": true }, "endpoint": { "type": "string", "description": "The OpenAI API endpoint used by the batch." }, "model": { "type": "string", "description": "Model ID used to process the batch, like `gpt-5-2025-08-07`. 
OpenAI\n offers a wide range of models with different capabilities, performance\n characteristics, and price points. Refer to the [model\n guide](https://platform.openai.com/docs/models) to browse and compare available models." }, "errors": { "$ref": "#/components/schemas/OpenAI.BatchErrors" }, "completion_window": { "type": "string", "description": "The time frame within which the batch should be processed." }, "status": { "type": "string", "enum": [ "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled" ], "description": "The current status of the batch." }, "output_file_id": { "type": "string", "description": "The ID of the file containing the outputs of successfully executed requests." }, "error_file_id": { "type": "string", "description": "The ID of the file containing the outputs of requests with errors." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was created." }, "in_progress_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started processing." }, "expires_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch will expire." }, "finalizing_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started finalizing." }, "completed_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was completed." }, "failed_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch failed." }, "expired_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch expired." 
}, "cancelling_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started cancelling." }, "cancelled_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was cancelled." }, "request_counts": { "$ref": "#/components/schemas/OpenAI.BatchRequestCounts" }, "usage": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.BatchUsage" } ], "description": "Represents token usage details including input tokens, output tokens, a\n breakdown of output tokens, and the total tokens used. Only populated on\n batches created after September 7, 2025." }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "input_file_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Batch" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "type": "object", "properties": { "input_file_id": { "type": "string", "description": "The ID of an uploaded file that contains requests for the new batch.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour input file must be formatted as a [JSONL file](/docs/api-reference/batch/requestInput),\nand must be uploaded with the purpose `batch`." 
}, "endpoint": { "type": "string", "enum": [ "/v1/chat/completions", "/v1/embeddings" ], "description": "The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions` is supported." }, "completion_window": { "type": "string", "enum": [ "24h" ], "description": "The time frame within which the batch should be processed. Currently only `24h` is supported." } }, "required": [ "endpoint", "completion_window" ], "unevaluatedProperties": { "type": "string" } } } } } }, "get": { "operationId": "listBatches", "summary": "List your organization's batches.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "accept", "in": "header", "required": true, "schema": { "type": "string", "enum": [ "application/json" ] } }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. 
Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListBatchesResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Batch" ] } }, "/batches/{batch_id}": { "get": { "operationId": "retrieveBatch", "summary": "Retrieves a batch.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "accept", "in": "header", "required": true, "schema": { "type": "string", "enum": [ "application/json" ] } }, { "name": "batch_id", "in": "path", "required": true, "description": "The ID of the batch to retrieve.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { 
"application/json": { "schema": { "type": "object", "required": [ "id", "object", "endpoint", "completion_window", "status", "created_at" ], "properties": { "id": { "type": "string" }, "object": { "type": "string", "enum": [ "batch" ], "description": "The object type, which is always `batch`.", "x-stainless-const": true }, "endpoint": { "type": "string", "description": "The OpenAI API endpoint used by the batch." }, "model": { "type": "string", "description": "Model ID used to process the batch, like `gpt-5-2025-08-07`. OpenAI\n offers a wide range of models with different capabilities, performance\n characteristics, and price points. Refer to the [model\n guide](https://platform.openai.com/docs/models) to browse and compare available models." }, "errors": { "$ref": "#/components/schemas/OpenAI.BatchErrors" }, "completion_window": { "type": "string", "description": "The time frame within which the batch should be processed." }, "status": { "type": "string", "enum": [ "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled" ], "description": "The current status of the batch." }, "output_file_id": { "type": "string", "description": "The ID of the file containing the outputs of successfully executed requests." }, "error_file_id": { "type": "string", "description": "The ID of the file containing the outputs of requests with errors." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was created." }, "in_progress_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started processing." }, "expires_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch will expire." }, "finalizing_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started finalizing." 
}, "completed_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was completed." }, "failed_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch failed." }, "expired_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch expired." }, "cancelling_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started cancelling." }, "cancelled_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was cancelled." }, "request_counts": { "$ref": "#/components/schemas/OpenAI.BatchRequestCounts" }, "usage": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.BatchUsage" } ], "description": "Represents token usage details including input tokens, output tokens, a\n breakdown of output tokens, and the total tokens used. Only populated on\n batches created after September 7, 2025." 
}, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "input_file_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Batch" ] } }, "/batches/{batch_id}/cancel": { "post": { "operationId": "cancelBatch", "summary": "Cancels an in-progress batch.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "accept", "in": "header", "required": true, "schema": { "type": "string", "enum": [ "application/json" ] } }, { "name": "batch_id", "in": "path", "required": true, "description": "The ID of the batch to cancel.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "id", "object", "endpoint", "completion_window", "status", "created_at" ], "properties": { "id": { "type": "string" }, "object": { "type": "string", "enum": [ "batch" ], "description": "The object type, which is always `batch`.", 
"x-stainless-const": true }, "endpoint": { "type": "string", "description": "The OpenAI API endpoint used by the batch." }, "model": { "type": "string", "description": "Model ID used to process the batch, like `gpt-5-2025-08-07`. OpenAI\n offers a wide range of models with different capabilities, performance\n characteristics, and price points. Refer to the [model\n guide](https://platform.openai.com/docs/models) to browse and compare available models." }, "errors": { "$ref": "#/components/schemas/OpenAI.BatchErrors" }, "completion_window": { "type": "string", "description": "The time frame within which the batch should be processed." }, "status": { "type": "string", "enum": [ "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled" ], "description": "The current status of the batch." }, "output_file_id": { "type": "string", "description": "The ID of the file containing the outputs of successfully executed requests." }, "error_file_id": { "type": "string", "description": "The ID of the file containing the outputs of requests with errors." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was created." }, "in_progress_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started processing." }, "expires_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch will expire." }, "finalizing_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started finalizing." }, "completed_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was completed." }, "failed_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch failed." 
}, "expired_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch expired." }, "cancelling_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started cancelling." }, "cancelled_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was cancelled." }, "request_counts": { "$ref": "#/components/schemas/OpenAI.BatchRequestCounts" }, "usage": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.BatchUsage" } ], "description": "Represents token usage details including input tokens, output tokens, a\n breakdown of output tokens, and the total tokens used. Only populated on\n batches created after September 7, 2025." }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "input_file_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Batch" ] } }, "/chat/completions": { "post": { "operationId": "createChatCompletion", "description": "Creates a chat completion.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": 
"v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "anyOf": [ { "type": "object", "required": [ "id", "choices", "created", "model", "object" ], "properties": { "id": { "type": "string", "description": "A unique identifier for the chat completion." }, "choices": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CreateChatCompletionResponseChoices" }, "description": "A list of chat completion choices. Can be more than one if `n` is greater than 1." }, "created": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) of when the chat completion was created." }, "model": { "type": "string", "description": "The model used for the chat completion." }, "system_fingerprint": { "type": "string", "description": "This fingerprint represents the backend configuration that the model runs with.\n Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.", "deprecated": true }, "object": { "type": "string", "enum": [ "chat.completion" ], "description": "The object type, which is always `chat.completion`.", "x-stainless-const": true }, "usage": { "$ref": "#/components/schemas/OpenAI.CompletionUsage" }, "prompt_filter_results": { "type": "array", "items": { "type": "object", "properties": { "prompt_index": { "type": "integer", "format": "int32", "description": "The index of the input prompt that this content filter result corresponds to." }, "content_filter_results": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterResultForPrompt" } ], "description": "The content filter results associated with the indexed input prompt." 
} }, "required": [ "prompt_index", "content_filter_results" ] } } } }, { "type": "object", "required": [ "id", "choices", "created", "model", "object" ], "properties": { "id": { "type": "string", "description": "A unique identifier for the chat completion. Each chunk has the same ID." }, "choices": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CreateChatCompletionStreamResponseChoices" }, "description": "A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the\n last chunk if you set `stream_options: {\"include_usage\": true}`." }, "created": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp." }, "model": { "type": "string", "description": "The model to generate the completion." }, "system_fingerprint": { "type": "string", "description": "This fingerprint represents the backend configuration that the model runs with.\n Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.", "deprecated": true }, "object": { "type": "string", "enum": [ "chat.completion.chunk" ], "description": "The object type, which is always `chat.completion.chunk`.", "x-stainless-const": true }, "usage": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.CompletionUsage" } ], "description": "An optional field that will only be present when you set\n `stream_options: {\"include_usage\": true}` in your request. When present, it\n contains a null value **except for the last chunk** which contains the\n token usage statistics for the entire request.\n*NOTE:** If the stream is interrupted or cancelled, you may not\n receive the final usage chunk which contains the total token usage for\n the request." 
}, "delta": { "$ref": "#/components/schemas/OpenAI.ChatCompletionStreamResponseDelta" }, "content_filter_results": { "$ref": "#/components/schemas/AzureContentFilterResultForChoice" } } } ] } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Chat" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "type": "object", "properties": { "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "top_logprobs": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "top_p": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "user": { "type": "string", "description": "A unique identifier representing your end-user, which can help to\nmonitor and detect abuse.", "deprecated": true }, "safety_identifier": { "type": "string", "description": "A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.\n The IDs should be a string that uniquely identifies each user. We recommend hashing their username or email address, in order to avoid sending us any identifying information. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers)." 
}, "prompt_cache_key": { "type": "string", "description": "Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching)." }, "prompt_cache_retention": { "anyOf": [ { "type": "string", "enum": [ "in-memory", "24h" ] }, { "type": "null" } ] }, "messages": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" }, "minItems": 1, "description": "A list of messages comprising the conversation so far. Depending on the\nmodel you use, different message types (modalities) are supported,\nlike text, images, and audio." }, "model": { "type": "string", "description": "Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI\n offers a wide range of models with different capabilities, performance\n characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)\n to browse and compare available models." }, "modalities": { "$ref": "#/components/schemas/OpenAI.ResponseModalities" }, "verbosity": { "$ref": "#/components/schemas/OpenAI.Verbosity" }, "reasoning_effort": { "$ref": "#/components/schemas/OpenAI.ReasoningEffort" }, "max_completion_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "description": "An upper bound for the number of tokens that can be generated for a\ncompletion, including visible output tokens and reasoning tokens." }, "frequency_penalty": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "minimum": -2, "maximum": 2, "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on\n their existing frequency in the text so far, decreasing the model's\n likelihood to repeat the same line verbatim." }, "presence_penalty": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "minimum": -2, "maximum": 2, "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on\n whether they appear in the text so far, increasing the model's likelihood\n to talk about new topics." }, "response_format": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.CreateChatCompletionRequestResponseFormat" } ], "description": "An object specifying the format that the model must output.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables\nStructured Outputs which ensures the model will match your supplied JSON\nschema.\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures\nthe message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model\nto produce JSON yourself via a system or user message. Without this, the\nmodel may generate an unending stream of whitespace until the generation\nreaches the token limit, resulting in a long-running and seemingly \"stuck\"\nrequest. Also note that the message content may be partially cut off if\n`finish_reason=\"length\"`, which indicates the generation exceeded\n`max_tokens` or the conversation exceeded the max context length." }, "audio": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateChatCompletionRequestAudio" }, { "type": "null" } ], "description": "Parameters for audio output. Required when audio output is requested with\n`modalities: [\"audio\"]`." }, "store": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ], "description": "Whether or not to store the output of this chat completion request for\nuse in model distillation or evals products." }, "stream": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ], "description": "If set to true, the model response data will be streamed to the client\nas it is generated using server-sent events." 
}, "stop": { "$ref": "#/components/schemas/OpenAI.StopConfiguration" }, "logit_bias": { "anyOf": [ { "type": "object", "unevaluatedProperties": { "type": "integer" } }, { "type": "null" } ], "description": "Modify the likelihood of specified tokens appearing in the completion.\n Accepts a JSON object that maps tokens (specified by their token ID in the\n tokenizer) to an associated bias value from -100 to 100. Mathematically,\n the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should\n decrease or increase likelihood of selection; values like -100 or 100\n should result in a ban or exclusive selection of the relevant token.", "x-oaiTypeLabel": "map" }, "logprobs": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ], "description": "Whether to return log probabilities of the output tokens or not. If true,\n returns the log probabilities of each output token returned in the\n `content` of `message`." }, "max_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "description": "The maximum number of tokens that can be generated in the chat completion.\nThis value can be used to control costs for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is\nnot compatible with o1 series models.", "deprecated": true }, "n": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 1, "maximum": 128, "description": "How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.", "default": 1 }, "prediction": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.PredictionContent" } ], "description": "Configuration for a predicted output, which can greatly improve\nresponse times when large parts of the model response are known\nahead of time. 
This is most common when you are regenerating a\nfile with only minor changes to most of the content." }, "seed": { "anyOf": [ { "type": "integer", "format": "int64" }, { "type": "null" } ], "description": "This feature is in Beta.\n If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.", "deprecated": true, "x-oaiMeta": { "beta": true } }, "stream_options": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionStreamOptions" }, { "type": "null" } ] }, "tools": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionTool" }, { "$ref": "#/components/schemas/OpenAI.CustomToolChatCompletions" } ] }, "description": "A list of tools the model may call. You can provide either\n [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools) or\n [function tools](https://platform.openai.com/docs/guides/function-calling)." }, "tool_choice": { "$ref": "#/components/schemas/OpenAI.ChatCompletionToolChoiceOption" }, "parallel_tool_calls": { "$ref": "#/components/schemas/OpenAI.ParallelToolCalls" }, "function_call": { "anyOf": [ { "type": "string", "enum": [ "none", "auto" ] }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionFunctionCallOption" } ], "description": "Deprecated in favor of `tool_choice`.\n Controls which (if any) function is called by the model.\n `none` means the model will not call a function and instead generates a\n message.\n `auto` means the model can pick between generating a message or calling a\n function.\n Specifying a particular function via `{\"name\": \"my_function\"}` forces the\n model to call that function.\n `none` is the default when no functions are present. 
`auto` is the default\n if functions are present.", "deprecated": true }, "functions": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionFunctions" }, "minItems": 1, "maxItems": 128, "description": "Deprecated in favor of `tools`.\n A list of functions the model may generate JSON inputs for.", "deprecated": true }, "user_security_context": { "$ref": "#/components/schemas/AzureUserSecurityContext" } }, "required": [ "messages", "model" ] } } } }, "x-ms-examples": { "Create a chat completion": { "$ref": "./examples/chat_completions.yaml" } } } }, "/completions": { "post": { "operationId": "createCompletion", "description": "Creates a completion.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "id", "choices", "created", "model", "object" ], "properties": { "id": { "type": "string", "description": "A unique identifier for the completion." }, "choices": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CreateCompletionResponseChoices" }, "description": "The list of completion choices the model generated for the input prompt." }, "created": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) of when the completion was created." }, "model": { "type": "string", "description": "The model used for completion." 
}, "system_fingerprint": { "type": "string", "description": "This fingerprint represents the backend configuration that the model runs with.\n Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism." }, "object": { "type": "string", "enum": [ "text_completion" ], "description": "The object type, which is always \"text_completion\"", "x-stainless-const": true }, "usage": { "$ref": "#/components/schemas/OpenAI.CompletionUsage" }, "prompt_filter_results": { "type": "array", "items": { "$ref": "#/components/schemas/AzureContentFilterResultForPrompt" } } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Completions" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "type": "object", "properties": { "model": { "type": "string", "description": "ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them.", "x-oaiTypeLabel": "string" }, "best_of": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 0, "maximum": 20, "description": "Generates `best_of` completions server-side and returns the \"best\" (the one with the highest log probability per token). 
Results cannot be streamed.\n When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.", "default": 1 }, "echo": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ], "description": "Echo back the prompt in addition to the completion" }, "frequency_penalty": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "minimum": -2, "maximum": 2, "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.\n [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)" }, "logit_bias": { "anyOf": [ { "type": "object", "unevaluatedProperties": { "type": "integer" } }, { "type": "null" } ], "description": "Modify the likelihood of specified tokens appearing in the completion.\n Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. 
The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated.", "x-oaiTypeLabel": "map" }, "logprobs": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 0, "maximum": 5, "description": "Include the log probabilities on the `logprobs` most likely output tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.\n The maximum value for `logprobs` is 5." }, "max_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 0, "description": "The maximum number of [tokens](/tokenizer) that can be generated in the completion.\n The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.", "default": 16 }, "n": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 1, "maximum": 128, "description": "How many completions to generate for each prompt.\n**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.", "default": 1 }, "presence_penalty": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "minimum": -2, "maximum": 2, "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.\n [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)" }, "seed": { "anyOf": [ { "type": "integer", "format": "int64" }, { "type": "null" } ], "description": "If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\n Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend." }, "stop": { "$ref": "#/components/schemas/OpenAI.StopConfiguration" }, "stream": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ], "description": "Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions)." }, "stream_options": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionStreamOptions" }, { "type": "null" } ] }, "suffix": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "description": "The suffix that comes after a completion of inserted text.\n This parameter is only supported for `gpt-3.5-turbo-instruct`." }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "minimum": 0, "maximum": 2, "description": "What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\n We generally recommend altering this or `top_p` but not both.", "default": 1 }, "top_p": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "minimum": 0, "maximum": 1, "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n We generally recommend altering this or `temperature` but not both.", "default": 1 }, "user": { "type": "string", "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids)." }, "prompt": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "type": "string" } }, { "type": "null" } ], "default": "<|endoftext|>" } }, "required": [ "model" ] } } } }, "x-ms-examples": { "Create a completion": { "$ref": "./examples/completions.yaml" } } } }, "/containers": { "get": { "operationId": "listContainers", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and `desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "description": "A cursor for use in pagination. `before` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include before=obj_foo in order to fetch the previous page of the list.", "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerListResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Containers" ] }, "post": { "operationId": "createContainer", "parameters": [ { "name": "api-version", "in": "query", 
"required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Containers" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateContainerBody" } } } } } }, "/containers/{container_id}": { "get": { "operationId": "retrieveContainer", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "description": "The ID of the container to retrieve.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A 
request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Containers" ] }, "delete": { "operationId": "deleteContainer", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "description": "The ID of the container to delete.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { 
"type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Containers" ] } }, "/containers/{container_id}/files": { "get": { "operationId": "listContainerFiles", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "description": "The ID of the container to list files from.", "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "description": "A cursor for use in pagination. 
`before` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include before=obj_foo in order to fetch the previous page of the list.", "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerFileListResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Containers" ] }, "post": { "operationId": "createContainerFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "description": "The ID of the container to create a file in.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { 
"application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerFileResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Containers" ], "requestBody": { "required": true, "content": { "multipart/form-data": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateContainerFileBody" } } } } } }, "/containers/{container_id}/files/{file_id}": { "get": { "operationId": "retrieveContainerFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "description": "The ID of the container.", "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file to retrieve.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerFileResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, 
"description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Containers" ] }, "delete": { "operationId": "deleteContainerFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "description": "The ID of the container.", "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file to delete.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Containers" ] } }, "/containers/{container_id}/files/{file_id}/content": 
{ "get": { "operationId": "retrieveContainerFileContent", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "description": "The ID of the container.", "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file to retrieve content from.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/octet-stream": { "schema": { "contentMediaType": "application/octet-stream" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Containers" ] } }, "/conversations": { "post": { "operationId": "createConversation", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The 
request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ConversationResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Conversations" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateConversationBody" } } } } } }, "/conversations/{conversation_id}": { "get": { "operationId": "retrieveConversation", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "conversation_id", "in": "path", "required": true, "description": "The ID of the conversation to retrieve.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ConversationResource" } } } }, "default": { "description": "An unexpected error response.", 
"headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Conversations" ] }, "post": { "operationId": "updateConversation", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "conversation_id", "in": "path", "required": true, "description": "The ID of the conversation to update.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ConversationResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Conversations" ], 
"requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.UpdateConversationBody" } } } } }, "delete": { "operationId": "deleteConversation", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "conversation_id", "in": "path", "required": true, "description": "The ID of the conversation to delete.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.DeletedConversationResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Conversations" ] } }, "/conversations/{conversation_id}/items": { "get": { "operationId": "listConversationItems", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, 
{ "name": "conversation_id", "in": "path", "required": true, "description": "The ID of the conversation to list items for.", "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "The order to return the input items in. Default is `desc`.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "An item ID to list items after, used in pagination.", "schema": { "type": "string" }, "explode": false }, { "name": "include", "in": "query", "required": false, "description": "Specify additional output data to include in the model response.", "schema": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.IncludeEnum" } }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ConversationItemList" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ 
"Conversations" ] }, "post": { "operationId": "createConversationItems", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "conversation_id", "in": "path", "required": true, "description": "The ID of the conversation to add the item to.", "schema": { "type": "string" } }, { "name": "include", "in": "query", "required": false, "description": "Additional fields to include in the response.", "schema": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.IncludeEnum" } }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ConversationItemList" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Conversations" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateConversationItemsParametersBody" } } } } } }, "/conversations/{conversation_id}/items/{item_id}": { "get": { "operationId": "retrieveConversationItem", 
"parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "conversation_id", "in": "path", "required": true, "description": "The ID of the conversation that contains the item.", "schema": { "type": "string" } }, { "name": "item_id", "in": "path", "required": true, "description": "The ID of the item to retrieve.", "schema": { "type": "string" } }, { "name": "include", "in": "query", "required": false, "description": "Additional fields to include in the response.", "schema": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.IncludeEnum" } }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ConversationItem" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Conversations" ] }, "delete": { "operationId": "deleteConversationItem", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this 
request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "conversation_id", "in": "path", "required": true, "description": "The ID of the conversation that contains the item.", "schema": { "type": "string" } }, { "name": "item_id", "in": "path", "required": true, "description": "The ID of the item to delete.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ConversationResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Conversations" ] } }, "/embeddings": { "post": { "operationId": "createEmbedding", "summary": "Creates an embedding vector representing the input text.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting 
purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateEmbeddingResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Embeddings" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateEmbeddingRequest" } } } }, "x-ms-examples": { "Create an embedding request": { "$ref": "./examples/embeddings.yaml" } } } }, "/evals": { "get": { "operationId": "listEvals", "summary": "List evaluations for a project.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "after", "in": "query", "required": false, "description": "Identifier for the last eval from the previous pagination request.", "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of evals to be returned in a single pagination response.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order for evals by timestamp. 
Use `asc` for ascending order or\n`desc` for descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ], "default": "asc" }, "explode": false }, { "name": "order_by", "in": "query", "required": false, "description": "Evals can be ordered by creation time or last updated time. Use\n`created_at` for creation time or `updated_at` for last updated\ntime.", "schema": { "type": "string", "enum": [ "created_at", "updated_at" ], "default": "created_at" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalList" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Evals" ] }, "post": { "operationId": "createEval", "description": "Create the structure of an evaluation that can be used to test a model's\nperformance.\n\nAn evaluation is a set of testing criteria and a datasource. After\ncreating an evaluation, you can run it on different models and model\nparameters. 
We support several types of graders and datasources.\n\nFor more information, see the [Evals guide](/docs/guides/evals).", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.Eval" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Evals" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "type": "object", "properties": { "statusCode": { "type": "number", "enum": [ 201 ] }, "name": { "type": "string", "description": "The name of the evaluation." 
}, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "data_source_config": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateEvalCustomDataSourceConfig" }, { "$ref": "#/components/schemas/OpenAI.CreateEvalLogsDataSourceConfig" }, { "$ref": "#/components/schemas/OpenAI.CreateEvalStoredCompletionsDataSourceConfig" } ], "description": "The configuration for the data source used for the evaluation runs. Dictates the schema of the data used in the evaluation." }, "testing_criteria": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateEvalLabelModelGrader" }, { "$ref": "#/components/schemas/OpenAI.EvalGraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.EvalGraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.EvalGraderPython" }, { "$ref": "#/components/schemas/OpenAI.EvalGraderScoreModel" }, { "$ref": "#/components/schemas/EvalGraderEndpoint" } ] }, "description": "A list of graders for all eval runs in this group. Graders can reference variables in the data source using double curly braces notation, like `{{item.variable_name}}`. To reference the model's output, use the `sample` namespace (ie, `{{sample.output_text}}`)." 
} }, "required": [ "statusCode", "data_source_config", "testing_criteria" ] } } } } } }, "/evals/{eval_id}": { "get": { "operationId": "getEval", "summary": "Retrieve an evaluation by its ID.", "description": "Retrieves an evaluation by its ID.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.Eval" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Evals" ] }, "post": { "operationId": "updateEval", "description": "Update select, mutable properties of a specified evaluation.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "eval_id", "in": 
"path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.Eval" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Evals" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "type": "object", "properties": { "name": { "type": "string" }, "metadata": { "$ref": "#/components/schemas/OpenAI.Metadata" } } } } } } }, "delete": { "operationId": "deleteEval", "description": "Delete a specified evaluation.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "object", 
"deleted", "eval_id" ], "properties": { "object": { "type": "string", "enum": [ "eval.deleted" ] }, "deleted": { "type": "boolean" }, "eval_id": { "type": "string" } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Evals" ] } }, "/evals/{eval_id}/runs": { "get": { "operationId": "getEvalRuns", "summary": "", "description": "Retrieve a list of runs for a specified evaluation.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "after", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "schema": { "type": "string", "enum": [ "asc", "desc" ], "default": "asc" }, "explode": false }, { "name": "status", "in": "query", "required": false, "schema": { "type": "string", "enum": [ "queued", "in_progress", "completed", "canceled", "failed" ] }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { 
"required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRunList" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Evals" ] }, "post": { "operationId": "createEvalRun", "description": "Create a new evaluation run, beginning the grading process.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "201": { "description": "The request has succeeded and a new resource has been created as a result.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRun" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", 
"properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Evals" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateEvalRunRequest" } } } } } }, "/evals/{eval_id}/runs/{run_id}": { "get": { "operationId": "getEvalRun", "description": "Retrieve a specific evaluation run by its ID.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRun" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ 
"Evals" ] }, "post": { "operationId": "cancelEvalRun", "description": "Cancel a specific evaluation run by its ID.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRun" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Evals" ] }, "delete": { "operationId": "deleteEvalRun", "description": "Delete a specific evaluation run by its ID.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": 
"string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "object", "deleted", "eval_run_id" ], "properties": { "object": { "type": "string", "enum": [ "eval_run.deleted" ] }, "deleted": { "type": "boolean" }, "eval_run_id": { "type": "string" } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Evals" ] } }, "/evals/{eval_id}/runs/{run_id}/output_items": { "get": { "operationId": "getEvalRunOutputItems", "description": "Get a list of output items for a specified evaluation run.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "after", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", 
"required": false, "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "status", "in": "query", "required": false, "schema": { "type": "string", "enum": [ "fail", "pass" ] }, "explode": false }, { "name": "order", "in": "query", "required": false, "schema": { "type": "string", "enum": [ "asc", "desc" ], "default": "asc" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRunOutputItemList" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Evals" ] } }, "/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}": { "get": { "operationId": "getEvalRunOutputItem", "description": "Retrieve a specific output item from an evaluation run by its ID.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } 
}, { "name": "output_item_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRunOutputItem" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Evals" ] } }, "/files": { "post": { "operationId": "createFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "id", "bytes", "created_at", "filename", "object", "purpose", "status" ], "properties": { "id": { "type": "string", "description": "The file identifier, which can be referenced in the API endpoints." }, "bytes": { "type": "integer", "description": "The size of the file, in bytes." 
}, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the file was created." }, "expires_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the file will expire." }, "filename": { "type": "string", "description": "The name of the file." }, "object": { "type": "string", "enum": [ "file" ], "description": "The object type, which is always `file`.", "x-stainless-const": true }, "status_details": { "type": "string", "description": "Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`.", "deprecated": true }, "purpose": { "type": "string", "enum": [ "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "evals" ], "description": "The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune` and `fine-tune-results`." 
}, "status": { "type": "string", "enum": [ "uploaded", "pending", "running", "processed", "error", "deleting", "deleted" ] } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Files" ], "requestBody": { "required": true, "content": { "multipart/form-data": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateFileRequest" } } } }, "x-ms-examples": { "Create a file request": { "$ref": "./examples/files.yaml" } } }, "get": { "operationId": "listFiles", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "purpose", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "schema": { "type": "integer" }, "explode": false }, { "name": "order", "in": "query", "required": false, "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { 
"application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListFilesResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Files" ] } }, "/files/{file_id}": { "get": { "operationId": "retrieveFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file to use for this request.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "id", "bytes", "created_at", "filename", "object", "purpose", "status" ], "properties": { "id": { "type": "string", "description": "The file identifier, which can be referenced in the API endpoints." }, "bytes": { "type": "integer", "description": "The size of the file, in bytes." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the file was created." 
}, "expires_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the file will expire." }, "filename": { "type": "string", "description": "The name of the file." }, "object": { "type": "string", "enum": [ "file" ], "description": "The object type, which is always `file`.", "x-stainless-const": true }, "status_details": { "type": "string", "description": "Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`.", "deprecated": true }, "purpose": { "type": "string", "enum": [ "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "evals" ], "description": "The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune` and `fine-tune-results`." }, "status": { "type": "string", "enum": [ "uploaded", "pending", "running", "processed", "error", "deleting", "deleted" ] } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Files" ] }, "delete": { "operationId": "deleteFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "file_id", "in": "path", "required": true, 
"description": "The ID of the file to use for this request.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.DeleteFileResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Files" ] } }, "/files/{file_id}/content": { "get": { "operationId": "downloadFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file to use for this request.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/octet-stream": { "schema": { "contentMediaType": "application/octet-stream" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": 
false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Files" ] } }, "/fine_tuning/alpha/graders/run": { "post": { "operationId": "runGrader", "summary": "Run a grader.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RunGraderResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RunGraderRequest" } } } } } }, "/fine_tuning/alpha/graders/validate": { "post": { "operationId": "validateGrader", "summary": "Validate a grader.", "parameters": [ { "name": "api-version", "in": 
"query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ValidateGraderResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "type": "object", "properties": { "grader": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.GraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.GraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.GraderPython" }, { "$ref": "#/components/schemas/OpenAI.GraderScoreModel" }, { "$ref": "#/components/schemas/OpenAI.GraderMulti" }, { "$ref": "#/components/schemas/GraderEndpoint" } ] } } } } } } } }, "/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions": { "get": { "operationId": "listFineTuningCheckpointPermissions", "summary": "List checkpoint permissions", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", 
"default": "v1" } }, { "name": "fine_tuned_model_checkpoint", "in": "path", "required": true, "description": "The ID of the fine-tuned model checkpoint to get permissions for.", "schema": { "type": "string" } }, { "name": "project_id", "in": "query", "required": false, "description": "The ID of the project to get permissions for.", "schema": { "type": "string" }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "Identifier for the last permission ID from the previous pagination request.", "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "description": "Number of permissions to retrieve.", "schema": { "type": "integer", "format": "int32", "default": 10 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "The order in which to retrieve permissions.", "schema": { "type": "string", "enum": [ "ascending", "descending" ], "default": "descending" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListFineTuningCheckpointPermissionResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ] }, "post": { 
"operationId": "createFineTuningCheckpointPermission", "summary": "Create checkpoint permissions", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuned_model_checkpoint", "in": "path", "required": true, "description": "The ID of the fine-tuned model checkpoint to create a permission for.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListFineTuningCheckpointPermissionResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateFineTuningCheckpointPermissionRequest" } } } } } }, "/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}": { "delete": { "operationId": "deleteFineTuningCheckpointPermission", "summary": "Delete checkpoint permission", "parameters": [ { "name": "api-version", "in": "query", 
"required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuned_model_checkpoint", "in": "path", "required": true, "description": "The ID of the fine-tuned model checkpoint to delete a permission for.", "schema": { "type": "string" } }, { "name": "permission_id", "in": "path", "required": true, "description": "The ID of the fine-tuned model checkpoint permission to delete.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.DeleteFineTuningCheckpointPermissionResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs": { "post": { "operationId": "createFineTuningJob", "summary": "Creates a fine-tuning job which begins the process of creating a new model from a given dataset.\n\nResponse includes details of the enqueued job including job status and the name of the fine-tuned models once complete.\n\n[Learn more about fine-tuning](/docs/guides/fine-tuning)", "parameters": [ { "name": "api-version", "in": 
"query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateFineTuningJobRequest" } } } } }, "get": { "operationId": "listPaginatedFineTuningJobs", "summary": "List your organization's fine-tuning jobs", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "after", "in": "query", "required": false, "description": "Identifier for the last job from the previous pagination request.", "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, 
"description": "Number of fine-tuning jobs to retrieve.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListPaginatedFineTuningJobsResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}": { "get": { "operationId": "retrieveFineTuningJob", "summary": "Get info about a fine-tuning job.\n\n[Learn more about fine-tuning](/docs/guides/fine-tuning)", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { 
"application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/cancel": { "post": { "operationId": "cancelFineTuningJob", "summary": "Immediately cancel a fine-tune job.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job to cancel.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } 
] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints": { "get": { "operationId": "listFineTuningJobCheckpoints", "summary": "List the checkpoints for a fine-tuning job.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job to get checkpoints for.", "schema": { "type": "string" } }, { "name": "after", "in": "query", "required": false, "description": "Identifier for the last checkpoint ID from the previous pagination request.", "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "description": "Number of checkpoints to retrieve.", "schema": { "type": "integer", "format": "int32", "default": 10 }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListFineTuningJobCheckpointsResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, 
"message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints/{fine_tuning_checkpoint_id}/copy": { "post": { "operationId": "FineTuning_CopyCheckpoint", "description": "Creates a copy of a fine-tuning checkpoint at the given destination account and region.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-copy-ft-checkpoints", "in": "header", "required": true, "description": "Enables access to checkpoint copy operations for models, an AOAI preview feature.\nThis feature requires the 'aoai-copy-ft-checkpoints' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "accept", "in": "header", "required": true, "schema": { "type": "string", "enum": [ "application/json" ] } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "fine_tuning_checkpoint_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CopyModelResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting 
purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CopyModelRequest" } } } } }, "get": { "operationId": "FineTuning_GetCheckpoint", "description": "Gets the status of a fine-tuning checkpoint copy.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-copy-ft-checkpoints", "in": "header", "required": true, "description": "Enables access to checkpoint copy operations for models, an AOAI preview feature.\nThis feature requires the 'aoai-copy-ft-checkpoints' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "accept", "in": "header", "required": true, "schema": { "type": "string", "enum": [ "application/json" ] } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "fine_tuning_checkpoint_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": 
"#/components/schemas/CopyModelResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/events": { "get": { "operationId": "listFineTuningEvents", "summary": "Get status updates for a fine-tuning job.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job to get events for.", "schema": { "type": "string" } }, { "name": "after", "in": "query", "required": false, "description": "Identifier for the last event from the previous pagination request.", "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "description": "Number of events to retrieve.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListFineTuningJobEventsResponse" } 
} } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/pause": { "post": { "operationId": "pauseFineTuningJob", "summary": "Pause a fine-tune job.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job to pause.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] 
}, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/resume": { "post": { "operationId": "resumeFineTuningJob", "summary": "Resume a paused fine-tune job.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job to resume.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Fine-tuning" ] } }, "/models": { "get": { "operationId": "listModels", "summary": "Lists the currently available models, and provides basic information about each one such as the\nowner and availability.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry 
Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListModelsResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Models" ] } }, "/models/{model}": { "get": { "operationId": "retrieveModel", "summary": "Retrieves a model instance, providing basic information about the model such as the owner and\npermissioning.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "model", "in": "path", "required": true, "description": "The ID of the model to use for this request.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { 
"application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.Model" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Models" ] }, "delete": { "operationId": "deleteModel", "summary": "Deletes a model instance.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "model", "in": "path", "required": true, "description": "The ID of the model to delete.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.DeleteModelResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" 
} ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Models" ] } }, "/realtime/calls": { "post": { "operationId": "createRealtimeCall", "summary": "Create a new Realtime API call over WebRTC and receive the SDP answer needed to complete the peer connection.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "201": { "description": "The request has succeeded and a new resource has been created as a result.", "headers": { "location": { "required": false, "description": "Relative URL containing the call ID for subsequent control requests.", "schema": { "type": "string" } } }, "content": { "application/sdp": { "schema": { "type": "string" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Realtime" ], "requestBody": { "required": true, "content": { "multipart/form-data": { "schema": { "$ref": "#/components/schemas/OpenAI.RealtimeCallCreateRequest" }, "encoding": { "session": { "contentType": "application/json" } } } } } } }, "/realtime/calls/{call_id}/accept": { "post": { "operationId": "acceptRealtimeCall", "summary": "Accept an incoming SIP call and configure the realtime session that will 
handle it.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "call_id", "in": "path", "required": true, "description": "The identifier for the call provided in the realtime.call.incoming webhook.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded." }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Realtime" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestGA" } } }, "description": "Session configuration to apply before the caller is bridged to the model." 
} } }, "/realtime/calls/{call_id}/hangup": { "post": { "operationId": "hangupRealtimeCall", "summary": "End an active Realtime API call, whether it was initiated over SIP or WebRTC.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "call_id", "in": "path", "required": true, "description": "The identifier for the call.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded." }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Realtime" ] } }, "/realtime/calls/{call_id}/refer": { "post": { "operationId": "referRealtimeCall", "summary": "Transfer an active SIP call to a new destination using the SIP REFER verb.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "call_id", "in": "path", "required": true, "description": "The identifier for the call provided in the realtime.call.incoming webhook.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request 
has succeeded." }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Realtime" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RealtimeCallReferRequest" } } }, "description": "Destination URI for the REFER request." } } }, "/realtime/calls/{call_id}/reject": { "post": { "operationId": "rejectRealtimeCall", "summary": "Decline an incoming SIP call by returning a SIP status code to the caller.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "call_id", "in": "path", "required": true, "description": "The identifier for the call provided in the realtime.call.incoming webhook.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded." 
}, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Realtime" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RealtimeCallRejectRequest" } } }, "description": "Provide an optional SIP status code. When omitted the API responds with 603 Decline." } } }, "/realtime/client_secrets": { "post": { "operationId": "createRealtimeClientSecret", "summary": "Create a Realtime client secret with an associated session configuration.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RealtimeCreateClientSecretResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { 
"anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Realtime" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RealtimeCreateClientSecretRequest" } } }, "description": "Create a client secret with the given session configuration." } } }, "/realtime/sessions": { "post": { "operationId": "createRealtimeSession", "summary": "Create an ephemeral API token for use in client-side applications with the Realtime API.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Realtime" ], "requestBody": { "required": 
true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequest" } } }, "description": "Create an ephemeral API key with the given session configuration." } } }, "/realtime/transcription_sessions": { "post": { "operationId": "createRealtimeTranscriptionSession", "summary": "Create an ephemeral API token for use in client-side applications with the Realtime API specifically for realtime transcriptions.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RealtimeTranscriptionSessionCreateResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Realtime" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RealtimeTranscriptionSessionCreateRequest" } } }, "description": "Create an ephemeral API key with the given session configuration." 
} } }, "/responses": { "post": { "operationId": "createResponse", "description": "Creates a model response.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "id", "object", "created_at", "error", "incomplete_details", "output", "instructions", "parallel_tool_calls", "content_filters" ], "properties": { "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "top_logprobs": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "top_p": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "user": { "type": "string", "description": "This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use `prompt_cache_key` instead to maintain caching optimizations.\n A stable identifier for your end-users.\n Used to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).", "deprecated": true }, "safety_identifier": { "type": "string", "description": "A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.\n The IDs should be a string that uniquely identifies each user. 
We recommend hashing their username or email address, in order to avoid sending us any identifying information. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers)." }, "prompt_cache_key": { "type": "string", "description": "Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching)." }, "prompt_cache_retention": { "anyOf": [ { "type": "string", "enum": [ "in-memory", "24h" ] }, { "type": "null" } ] }, "previous_response_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "model": { "type": "string", "description": "Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI\n offers a wide range of models with different capabilities, performance\n characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)\n to browse and compare available models." }, "reasoning": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Reasoning" }, { "type": "null" } ] }, "background": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ] }, "max_output_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "max_tool_calls": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "text": { "$ref": "#/components/schemas/OpenAI.ResponseTextParam" }, "tools": { "$ref": "#/components/schemas/OpenAI.ToolsArray" }, "tool_choice": { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" }, "prompt": { "$ref": "#/components/schemas/OpenAI.Prompt" }, "truncation": { "anyOf": [ { "type": "string", "enum": [ "auto", "disabled" ] }, { "type": "null" } ], "default": "disabled" }, "id": { "type": "string", "description": "Unique identifier for this Response." 
}, "object": { "type": "string", "enum": [ "response" ], "description": "The object type of this resource - always set to `response`.", "x-stainless-const": true }, "status": { "type": "string", "enum": [ "completed", "failed", "in_progress", "cancelled", "queued", "incomplete" ], "description": "The status of the response generation. One of `completed`, `failed`,\n `in_progress`, `cancelled`, `queued`, or `incomplete`." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) of when this Response was created." }, "completed_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ] }, "error": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseError" }, { "type": "null" } ] }, "incomplete_details": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseIncompleteDetails" }, { "type": "null" } ] }, "output": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.OutputItem" }, "description": "An array of content items generated by the model.\n - The length and order of items in the `output` array is dependent\n on the model's response.\n - Rather than accessing the first item in the `output` array and\n assuming it's an `assistant` message with the content generated by\n the model, you might consider using the `output_text` property where\n supported in SDKs."
}, "instructions": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.InputItem" } }, { "type": "null" } ] }, "output_text": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "x-stainless-skip": true }, "usage": { "$ref": "#/components/schemas/OpenAI.ResponseUsage" }, "parallel_tool_calls": { "type": "boolean", "description": "Whether to allow the model to run tool calls in parallel.", "default": true }, "conversation": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationReference" }, { "type": "null" } ] }, "content_filters": { "type": "array", "items": { "$ref": "#/components/schemas/AzureContentFilterForResponsesAPI" }, "description": "The content filter results from RAI." } } } }, "text/event-stream": { "itemSchema": { "type": "object", "properties": { "event": { "type": "string" }, "data": { "type": "string" } }, "required": [ "event" ], "oneOf": [ { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseAudioDeltaEvent" } }, "event": { "const": "response.audio.delta" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseAudioTranscriptDeltaEvent" } }, "event": { "const": "response.audio.transcript.delta" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseCodeInterpreterCallCodeDeltaEvent" } }, "event": { "const": "response.code_interpreter_call_code.delta" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseCodeInterpreterCallInProgressEvent" } }, "event": { "const": "response.code_interpreter_call.in_progress" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": 
"#/components/schemas/OpenAI.ResponseCodeInterpreterCallInterpretingEvent" } }, "event": { "const": "response.code_interpreter_call.interpreting" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseContentPartAddedEvent" } }, "event": { "const": "response.content_part.added" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseCreatedEvent" } }, "event": { "const": "response.created" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseErrorEvent" } }, "event": { "const": "error" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseFileSearchCallInProgressEvent" } }, "event": { "const": "response.file_search_call.in_progress" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseFileSearchCallSearchingEvent" } }, "event": { "const": "response.file_search_call.searching" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseFunctionCallArgumentsDeltaEvent" } }, "event": { "const": "response.function_call_arguments.delta" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseInProgressEvent" } }, "event": { "const": "response.in_progress" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseFailedEvent" } }, "event": { "const": "response.failed" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": 
"#/components/schemas/OpenAI.ResponseIncompleteEvent" } }, "event": { "const": "response.incomplete" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseOutputItemAddedEvent" } }, "event": { "const": "response.output_item.added" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseReasoningSummaryPartAddedEvent" } }, "event": { "const": "response.reasoning_summary_part.added" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseReasoningSummaryTextDeltaEvent" } }, "event": { "const": "response.reasoning_summary_text.delta" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseReasoningTextDeltaEvent" } }, "event": { "const": "response.reasoning_text.delta" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseRefusalDeltaEvent" } }, "event": { "const": "response.refusal.delta" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseTextDeltaEvent" } }, "event": { "const": "response.output_text.delta" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseWebSearchCallInProgressEvent" } }, "event": { "const": "response.web_search_call.in_progress" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseWebSearchCallSearchingEvent" } }, "event": { "const": "response.web_search_call.searching" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": 
"#/components/schemas/OpenAI.ResponseImageGenCallGeneratingEvent" } }, "event": { "const": "response.image_generation_call.generating" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseImageGenCallInProgressEvent" } }, "event": { "const": "response.image_generation_call.in_progress" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseImageGenCallPartialImageEvent" } }, "event": { "const": "response.image_generation_call.partial_image" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseMCPCallArgumentsDeltaEvent" } }, "event": { "const": "response.mcp_call_arguments.delta" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseMCPCallFailedEvent" } }, "event": { "const": "response.mcp_call.failed" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseMCPCallInProgressEvent" } }, "event": { "const": "response.mcp_call.in_progress" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseMCPListToolsFailedEvent" } }, "event": { "const": "response.mcp_list_tools.failed" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseMCPListToolsInProgressEvent" } }, "event": { "const": "response.mcp_list_tools.in_progress" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseOutputTextAnnotationAddedEvent" } }, "event": { "const": "response.output_text.annotation.added" } } }, { "properties": { "data": { 
"contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseQueuedEvent" } }, "event": { "const": "response.queued" } } }, { "properties": { "data": { "contentMediaType": "application/json", "contentSchema": { "$ref": "#/components/schemas/OpenAI.ResponseCustomToolCallInputDeltaEvent" } }, "event": { "const": "response.custom_tool_call_input.delta" } } }, { "properties": { "data": { "contentMediaType": "application/json" } }, "x-ms-sse-terminal-event": true } ] } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Responses" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateResponse" } } } }, "x-ms-examples": { "Create a response request": { "$ref": "./examples/responses.yaml" } } } }, "/responses/{response_id}": { "get": { "operationId": "getResponse", "description": "Retrieves a model response with the given ID.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "response_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "include[]", "in": "query", "required": false, "description": "Additional fields to include in the response. See the include parameter for Response creation above for more information.", "schema": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.IncludeEnum" }, "default": [] } }, { "name": "stream", "in": "query", "required": false, "description": "If set to true, the model response data will be streamed to the client as it is generated using server-sent events.", "schema": { "type": "boolean" }, "explode": false }, { "name": "starting_after", "in": "query", "required": false, "description": "The sequence number of the event after which to start streaming.", "schema": { "type": "integer", "format": "int32" }, "explode": false }, { "name": "include_obfuscation", "in": "query", "required": false, "description": "When true, stream obfuscation will be enabled. Stream obfuscation adds random characters to an `obfuscation` field on streaming delta events to normalize payload sizes as a mitigation to certain side-channel attacks. These obfuscation fields are included by default, but add a small amount of overhead to the data stream. You can set `include_obfuscation` to false to optimize for bandwidth if you trust the network links between your application and the OpenAI API.", "schema": { "type": "boolean", "default": true }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "id", "object", "created_at", "error", "incomplete_details", "output", "instructions", "parallel_tool_calls", "content_filters" ], "properties": { "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "top_logprobs": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "top_p": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "user": { "type": "string", "description": "This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use `prompt_cache_key` instead to maintain caching optimizations.\n A stable identifier for your end-users.\n Used to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).", "deprecated": true }, "safety_identifier": { "type": "string", "description": "A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.\n The IDs should be a string that uniquely identifies each user. We recommend hashing their username or email address, in order to avoid sending us any identifying information. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers)."
}, "prompt_cache_key": { "type": "string", "description": "Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching)." }, "prompt_cache_retention": { "anyOf": [ { "type": "string", "enum": [ "in-memory", "24h" ] }, { "type": "null" } ] }, "previous_response_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "model": { "type": "string", "description": "Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI\n offers a wide range of models with different capabilities, performance\n characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)\n to browse and compare available models." }, "reasoning": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Reasoning" }, { "type": "null" } ] }, "background": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ] }, "max_output_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "max_tool_calls": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "text": { "$ref": "#/components/schemas/OpenAI.ResponseTextParam" }, "tools": { "$ref": "#/components/schemas/OpenAI.ToolsArray" }, "tool_choice": { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" }, "prompt": { "$ref": "#/components/schemas/OpenAI.Prompt" }, "truncation": { "anyOf": [ { "type": "string", "enum": [ "auto", "disabled" ] }, { "type": "null" } ], "default": "disabled" }, "id": { "type": "string", "description": "Unique identifier for this Response." }, "object": { "type": "string", "enum": [ "response" ], "description": "The object type of this resource - always set to `response`.", "x-stainless-const": true }, "status": { "type": "string", "enum": [ "completed", "failed", "in_progress", "cancelled", "queued", "incomplete" ], "description": "The status of the response generation. 
One of `completed`, `failed`,\n `in_progress`, `cancelled`, `queued`, or `incomplete`." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) of when this Response was created." }, "completed_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ] }, "error": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseError" }, { "type": "null" } ] }, "incomplete_details": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseIncompleteDetails" }, { "type": "null" } ] }, "output": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.OutputItem" }, "description": "An array of content items generated by the model.\n - The length and order of items in the `output` array is dependent\n on the model's response.\n - Rather than accessing the first item in the `output` array and\n assuming it's an `assistant` message with the content generated by\n the model, you might consider using the `output_text` property where\n supported in SDKs." }, "instructions": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.InputItem" } }, { "type": "null" } ] }, "output_text": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "x-stainless-skip": true }, "usage": { "$ref": "#/components/schemas/OpenAI.ResponseUsage" }, "parallel_tool_calls": { "type": "boolean", "description": "Whether to allow the model to run tool calls in parallel.", "default": true }, "conversation": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationReference" }, { "type": "null" } ] }, "content_filters": { "type": "array", "items": { "$ref": "#/components/schemas/AzureContentFilterForResponsesAPI" }, "description": "The content filter results from RAI." 
} } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Responses" ] }, "delete": { "operationId": "deleteResponse", "description": "Deletes a response by ID.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "response_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "object", "id", "deleted" ], "properties": { "object": { "type": "string", "enum": [ "response.deleted" ] }, "id": { "type": "string" }, "deleted": { "type": "boolean", "enum": [ true ] } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, 
"param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Responses" ] } }, "/responses/{response_id}/cancel": { "post": { "operationId": "cancelResponse", "description": "Cancels a model response with the given ID. Only responses created with the background parameter set to true can be cancelled.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "response_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "id", "object", "created_at", "error", "incomplete_details", "output", "instructions", "parallel_tool_calls", "content_filters" ], "properties": { "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "top_logprobs": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "top_p": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "user": { "type": "string", "description": "This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use `prompt_cache_key` instead to maintain caching optimizations.\n A stable identifier for your end-users.\n Used to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and prevent abuse. 
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).", "deprecated": true }, "safety_identifier": { "type": "string", "description": "A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.\n The IDs should be a string that uniquely identifies each user. We recommend hashing their username or email address, in order to avoid sending us any identifying information. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers)." }, "prompt_cache_key": { "type": "string", "description": "Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching)." }, "prompt_cache_retention": { "anyOf": [ { "type": "string", "enum": [ "in-memory", "24h" ] }, { "type": "null" } ] }, "previous_response_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "model": { "type": "string", "description": "Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI\n offers a wide range of models with different capabilities, performance\n characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)\n to browse and compare available models." 
}, "reasoning": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Reasoning" }, { "type": "null" } ] }, "background": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ] }, "max_output_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "max_tool_calls": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "text": { "$ref": "#/components/schemas/OpenAI.ResponseTextParam" }, "tools": { "$ref": "#/components/schemas/OpenAI.ToolsArray" }, "tool_choice": { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" }, "prompt": { "$ref": "#/components/schemas/OpenAI.Prompt" }, "truncation": { "anyOf": [ { "type": "string", "enum": [ "auto", "disabled" ] }, { "type": "null" } ], "default": "disabled" }, "id": { "type": "string", "description": "Unique identifier for this Response." }, "object": { "type": "string", "enum": [ "response" ], "description": "The object type of this resource - always set to `response`.", "x-stainless-const": true }, "status": { "type": "string", "enum": [ "completed", "failed", "in_progress", "cancelled", "queued", "incomplete" ], "description": "The status of the response generation. One of `completed`, `failed`,\n `in_progress`, `cancelled`, `queued`, or `incomplete`." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) of when this Response was created." 
}, "completed_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ] }, "error": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseError" }, { "type": "null" } ] }, "incomplete_details": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseIncompleteDetails" }, { "type": "null" } ] }, "output": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.OutputItem" }, "description": "An array of content items generated by the model.\n - The length and order of items in the `output` array is dependent\n on the model's response.\n - Rather than accessing the first item in the `output` array and\n assuming it's an `assistant` message with the content generated by\n the model, you might consider using the `output_text` property where\n supported in SDKs." }, "instructions": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.InputItem" } }, { "type": "null" } ] }, "output_text": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "x-stainless-skip": true }, "usage": { "$ref": "#/components/schemas/OpenAI.ResponseUsage" }, "parallel_tool_calls": { "type": "boolean", "description": "Whether to allow the model to run tool calls in parallel.", "default": true }, "conversation": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationReference" }, { "type": "null" } ] }, "content_filters": { "type": "array", "items": { "$ref": "#/components/schemas/AzureContentFilterForResponsesAPI" }, "description": "The content filter results from RAI." 
} } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Responses" ] } }, "/responses/{response_id}/input_items": { "get": { "operationId": "listInputItems", "description": "Returns a list of input items for a given response.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "response_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. 
`after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "description": "A cursor for use in pagination. `before` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include before=obj_foo in order to fetch the previous page of the list.", "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ResponseItemList" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Responses" ] } }, "/threads": { "post": { "operationId": "createThread", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], 
"responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ThreadObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateThreadRequest" } } } } } }, "/threads/runs": { "post": { "operationId": "createThreadAndRun", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RunObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { 
"type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateThreadAndRunRequest" } } } } } }, "/threads/{thread_id}": { "delete": { "operationId": "deleteThread", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.DeleteThreadResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ] }, "get": { "operationId": "retrieveThread", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if 
not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ThreadObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ] }, "post": { "operationId": "modifyThread", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ThreadObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { 
"apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ModifyThreadRequest" } } } } } }, "/threads/{thread_id}/messages": { "get": { "operationId": "listMessages", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "schema": { "type": "integer", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "schema": { "type": "string", "enum": [ "asc", "desc" ], "default": "desc" }, "explode": false }, { "name": "after", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false }, { "name": "run_id", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { 
"schema": { "$ref": "#/components/schemas/OpenAI.ListMessagesResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ] }, "post": { "operationId": "createMessage", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.MessageObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", 
"param", "type" ] } } } } }, "tags": [ "Threads" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateMessageRequest" } } } } } }, "/threads/{thread_id}/messages/{message_id}": { "delete": { "operationId": "deleteMessage", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "message_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.DeleteMessageResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ] }, "get": { "operationId": "retrieveMessage", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": 
"#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "message_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.MessageObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ] }, "post": { "operationId": "modifyMessage", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "message_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": 
"#/components/schemas/OpenAI.MessageObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ModifyMessageRequest" } } } } } }, "/threads/{thread_id}/runs": { "post": { "operationId": "createRun", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RunObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", 
"type" ] } } } } }, "tags": [ "Threads" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateRunRequest" } } } } }, "get": { "operationId": "listRuns", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "schema": { "type": "integer", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "schema": { "type": "string", "enum": [ "asc", "desc" ], "default": "desc" }, "explode": false }, { "name": "after", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListRunsResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ] } }, "/threads/{thread_id}/runs/{run_id}": { "get": { "operationId": "retrieveRun", "parameters": [ { "name": 
"api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RunObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ] }, "post": { "operationId": "modifyRun", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RunObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { 
"apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ModifyRunRequest" } } } } } }, "/threads/{thread_id}/runs/{run_id}/cancel": { "post": { "operationId": "cancelRun", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RunObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ 
"Threads" ] } }, "/threads/{thread_id}/runs/{run_id}/steps": { "get": { "operationId": "listRunSteps", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "schema": { "type": "integer", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "schema": { "type": "string", "enum": [ "asc", "desc" ], "default": "desc" }, "explode": false }, { "name": "after", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListRunStepsResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ] } }, "/threads/{thread_id}/runs/{run_id}/steps/{step_id}": { "get": { "operationId": "getRunStep", "parameters": [ { "name": "api-version", "in": "query", 
"required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "step_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RunStepObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ] } }, "/threads/{thread_id}/runs/{run_id}/submit_tool_outputs": { "post": { "operationId": "submitToolOutputsToRun", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "thread_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": 
"#/components/schemas/OpenAI.RunObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Threads" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.SubmitToolOutputsRunRequest" } } } } } }, "/vector_stores": { "get": { "operationId": "listVectorStores", "summary": "Returns a list of vector stores.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. 
`after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "description": "A cursor for use in pagination. `before` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include before=obj_foo in order to fetch the previous page of the list.", "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListVectorStoresResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ] }, "post": { "operationId": "createVectorStore", "summary": "Creates a vector store.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": 
"#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateVectorStoreRequest" } } } }, "x-ms-examples": { "Create a vector store request": { "$ref": "./examples/vector_stores.yaml" } } } }, "/vector_stores/{vector_store_id}": { "get": { "operationId": "getVectorStore", "summary": "Retrieves a vector store.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store to retrieve.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request 
ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ] }, "post": { "operationId": "modifyVectorStore", "summary": "Modifies a vector store.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store to modify.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": 
"string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.UpdateVectorStoreRequest" } } } } }, "delete": { "operationId": "deleteVectorStore", "summary": "Delete a vector store.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store to delete.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.DeleteVectorStoreResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ] } }, "/vector_stores/{vector_store_id}/file_batches": { "post": { 
"operationId": "createVectorStoreFileBatch", "summary": "Create a vector store file batch.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store for which to create a file batch.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileBatchObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateVectorStoreFileBatchRequest" } } } } } }, "/vector_stores/{vector_store_id}/file_batches/{batch_id}": { "get": { "operationId": "getVectorStoreFileBatch", "summary": "Retrieves a vector store file batch.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for 
this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the file batch belongs to.", "schema": { "type": "string" } }, { "name": "batch_id", "in": "path", "required": true, "description": "The ID of the file batch being retrieved.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileBatchObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ] } }, "/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel": { "post": { "operationId": "cancelVectorStoreFileBatch", "summary": "Cancel a vector store file batch. 
This attempts to cancel the processing of files in this batch as soon as possible.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the file batch belongs to.", "schema": { "type": "string" } }, { "name": "batch_id", "in": "path", "required": true, "description": "The ID of the file batch to cancel.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileBatchObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ] } }, "/vector_stores/{vector_store_id}/file_batches/{batch_id}/files": { "get": { "operationId": "listFilesInVectorStoreBatch", "summary": "Returns a list of vector store files in a batch.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this 
request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the file batch belongs to.", "schema": { "type": "string" } }, { "name": "batch_id", "in": "path", "required": true, "description": "The ID of the file batch that the files belong to.", "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "description": "A cursor for use in pagination. `before` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include before=obj_foo in order to fetch the previous page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "filter", "in": "query", "required": false, "description": "Filter by file status. 
One of `in_progress`, `completed`, `failed`, `cancelled`.", "schema": { "type": "string", "enum": [ "in_progress", "completed", "failed", "cancelled" ] }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListVectorStoreFilesResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ] } }, "/vector_stores/{vector_store_id}/files": { "get": { "operationId": "listVectorStoreFiles", "summary": "Returns a list of vector store files.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the files belong to.", "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. 
Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and`desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "description": "A cursor for use in pagination. `before` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include before=obj_foo in order to fetch the previous page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "filter", "in": "query", "required": false, "description": "Filter by file status. 
One of `in_progress`, `completed`, `failed`, `cancelled`.", "schema": { "type": "string", "enum": [ "in_progress", "completed", "failed", "cancelled" ] }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListVectorStoreFilesResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ] }, "post": { "operationId": "createVectorStoreFile", "summary": "Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store for which to create a File.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", 
"schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateVectorStoreFileRequest" } } } } } }, "/vector_stores/{vector_store_id}/files/{file_id}": { "get": { "operationId": "getVectorStoreFile", "summary": "Retrieves a vector store file.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the file belongs to.", "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file being retrieved.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": 
"#/components/schemas/OpenAI.VectorStoreFileObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ] }, "post": { "operationId": "updateVectorStoreFileAttributes", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { 
"type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.UpdateVectorStoreFileAttributesRequest" } } } } }, "delete": { "operationId": "deleteVectorStoreFile", "summary": "Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the [delete file](/docs/api-reference/files/delete) endpoint.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the file belongs to.", "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file to delete.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.DeleteVectorStoreFileResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { 
"type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ] } }, "/vector_stores/{vector_store_id}/files/{file_id}/content": { "get": { "operationId": "retrieveVectorStoreFileContent", "summary": "Retrieve vector store file content", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store to search.", "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file to retrieve content for.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreSearchResultsPage" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ] } }, "/vector_stores/{vector_store_id}/search": { "post": { "operationId": "searchVectorStore", "summary": 
"Search vector store", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store to search.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreSearchResultsPage" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "properties": { "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string" }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "type": { "type": "string" }, "inner_error": {} }, "required": [ "code", "message", "param", "type" ] } } } } }, "tags": [ "Vector Stores" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreSearchRequest" } } } } } } }, "security": [ { "ApiKeyAuth": [] }, { "ApiKeyAuth_": [] }, { "OAuth2Auth": [ "https://cognitiveservices.azure.com/.default" ] } ], "components": { "schemas": { "AudioSegment": { "type": "object", "required": [ "id", "start", "end", "text", "temperature", "avg_logprob", "compression_ratio", "no_speech_prob", "tokens", "seek" ], "properties": { "id": { "type": "integer", "format": "int32", "description": "The 
0-based index of this segment within a translation." }, "start": { "type": "number", "format": "float", "description": "The time at which this segment started relative to the beginning of the translated audio." }, "end": { "type": "number", "format": "float", "description": "The time at which this segment ended relative to the beginning of the translated audio." }, "text": { "type": "string", "description": "The translated text that was part of this audio segment." }, "temperature": { "type": "number", "format": "float", "description": "The temperature score associated with this audio segment." }, "avg_logprob": { "type": "number", "format": "float", "description": "The average log probability associated with this audio segment." }, "compression_ratio": { "type": "number", "format": "float", "description": "The compression ratio of this audio segment." }, "no_speech_prob": { "type": "number", "format": "float", "description": "The probability of no speech detection within this audio segment." }, "tokens": { "type": "array", "items": { "type": "integer", "format": "int32" }, "description": "The token IDs matching the translated text in this audio segment." }, "seek": { "type": "integer", "format": "int32", "description": "The seek position associated with the processing of this audio segment.\nSeek positions are expressed as hundredths of seconds.\nThe model may process several segments from a single seek position, so while the seek position will never represent\na later time than the segment's start, the segment's start may represent a significantly later time than the\nsegment's associated seek position." } } }, "AudioTaskLabel": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "transcribe", "translate" ] } ], "description": "Defines the possible descriptors for available audio operation responses." 
}, "AudioTranslationSegment": { "type": "object", "required": [ "id", "start", "end", "text", "temperature", "avg_logprob", "compression_ratio", "no_speech_prob", "tokens", "seek" ], "properties": { "id": { "type": "integer", "format": "int32", "description": "The 0-based index of this segment within a translation." }, "start": { "type": "number", "format": "float", "description": "The time at which this segment started relative to the beginning of the translated audio." }, "end": { "type": "number", "format": "float", "description": "The time at which this segment ended relative to the beginning of the translated audio." }, "text": { "type": "string", "description": "The translated text that was part of this audio segment." }, "temperature": { "type": "number", "format": "float", "description": "The temperature score associated with this audio segment." }, "avg_logprob": { "type": "number", "format": "float", "description": "The average log probability associated with this audio segment." }, "compression_ratio": { "type": "number", "format": "float", "description": "The compression ratio of this audio segment." }, "no_speech_prob": { "type": "number", "format": "float", "description": "The probability of no speech detection within this audio segment." }, "tokens": { "type": "array", "items": { "type": "integer", "format": "int32" }, "description": "The token IDs matching the translated text in this audio segment." }, "seek": { "type": "integer", "format": "int32", "description": "The seek position associated with the processing of this audio segment.\nSeek positions are expressed as hundredths of seconds.\nThe model may process several segments from a single seek position, so while the seek position will never represent\na later time than the segment's start, the segment's start may represent a significantly later time than the\nsegment's associated seek position." 
} }, "description": "Extended information about a single segment of translated audio data.\nSegments generally represent roughly 5-10 seconds of speech. Segment boundaries typically occur between words but not\nnecessarily sentences." }, "AzureAIFoundryModelsApiVersion": { "type": "string", "enum": [ "v1", "preview" ] }, "AzureAudioTranscriptionResponse": { "type": "object", "required": [ "text" ], "properties": { "text": { "type": "string", "description": "The transcribed text for the provided audio data." }, "task": { "allOf": [ { "$ref": "#/components/schemas/AudioTaskLabel" } ], "description": "The label that describes which operation type generated the accompanying response data." }, "language": { "type": "string", "description": "The spoken language that was detected in the transcribed audio data.\nThis is expressed as a two-letter ISO-639-1 language code like 'en' or 'fr'." }, "duration": { "type": "number", "format": "float", "description": "The total duration of the audio processed to produce accompanying transcription information." }, "segments": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.TranscriptionSegment" }, "description": "A collection of information about the timing, probabilities, and other detail of each processed audio segment." }, "words": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.TranscriptionWord" }, "description": "A collection of information about the timing of each processed word." } }, "description": "Result information for an operation that transcribed spoken audio into written text." }, "AzureAudioTranslationResponse": { "type": "object", "required": [ "text" ], "properties": { "text": { "type": "string", "description": "The translated text for the provided audio data." }, "task": { "allOf": [ { "$ref": "#/components/schemas/AudioTaskLabel" } ], "description": "The label that describes which operation type generated the accompanying response data." 
}, "language": { "type": "string", "description": "The spoken language that was detected in the translated audio data.\nThis is expressed as a two-letter ISO-639-1 language code like 'en' or 'fr'." }, "duration": { "type": "number", "format": "float", "description": "The total duration of the audio processed to produce accompanying translation information." }, "segments": { "type": "array", "items": { "$ref": "#/components/schemas/AudioTranslationSegment" }, "description": "A collection of information about the timing, probabilities, and other detail of each processed audio segment." } }, "description": "Result information for an operation that translated spoken audio into written text." }, "AzureCompletionsSamplingParams": { "type": "object", "properties": { "max_tokens": { "type": "integer", "format": "int32", "description": "The maximum number of tokens in the generated output." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.CreateEvalCompletionsRunDataSourceSamplingParams" } ], "description": "Sampling parameters for controlling the behavior of completions." }, "AzureContentFilterBlocklistIdResult": { "type": "object", "required": [ "id", "filtered" ], "properties": { "id": { "type": "string", "description": "The ID of the custom blocklist associated with the filtered status." }, "filtered": { "type": "boolean", "description": "Whether the associated blocklist resulted in the content being filtered." } }, "description": "A content filter result item that associates an existing custom blocklist ID with a value indicating whether or not\nthe corresponding blocklist resulted in content being filtered." }, "AzureContentFilterBlocklistResult": { "type": "object", "required": [ "filtered" ], "properties": { "filtered": { "type": "boolean", "description": "A value indicating whether any of the detailed blocklists resulted in a filtering action." 
}, "details": { "type": "array", "items": { "type": "object", "properties": { "filtered": { "type": "boolean", "description": "A value indicating whether the blocklist produced a filtering action." }, "id": { "type": "string", "description": "The ID of the custom blocklist evaluated." } }, "required": [ "filtered", "id" ] }, "description": "The pairs of individual blocklist IDs and whether they resulted in a filtering action." } }, "description": "A collection of true/false filtering results for configured custom blocklists." }, "AzureContentFilterCompletionTextSpan": { "type": "object", "required": [ "completion_start_offset", "completion_end_offset" ], "properties": { "completion_start_offset": { "type": "integer", "format": "int32", "description": "Offset of the UTF32 code point which begins the span." }, "completion_end_offset": { "type": "integer", "format": "int32", "description": "Offset of the first UTF32 code point which is excluded from the span. This field is always equal to completion_start_offset for empty spans. This field is always larger than completion_start_offset for non-empty spans." } }, "description": "A representation of a span of completion text as used by Azure OpenAI content filter results." }, "AzureContentFilterCompletionTextSpanDetectionResult": { "type": "object", "required": [ "filtered", "detected", "details" ], "properties": { "filtered": { "type": "boolean", "description": "Whether the content detection resulted in a content filtering action." }, "detected": { "type": "boolean", "description": "Whether the labeled content category was detected in the content." }, "details": { "type": "array", "items": { "$ref": "#/components/schemas/AzureContentFilterCompletionTextSpan" }, "description": "Detailed information about the detected completion text spans." 
} } }, "AzureContentFilterCustomTopicIdResult": { "type": "object", "required": [ "id", "detected" ], "properties": { "id": { "type": "string", "description": "The ID of the custom topic associated with the detected status." }, "detected": { "type": "boolean", "description": "Whether the associated custom topic resulted in the content being detected." } }, "description": "A content filter result item that associates an existing custom topic ID with a value indicating whether or not\nthe corresponding topic resulted in content being detected." }, "AzureContentFilterCustomTopicResult": { "type": "object", "required": [ "filtered" ], "properties": { "filtered": { "type": "boolean", "description": "A value indicating whether any of the detailed topics resulted in a filtering action." }, "details": { "type": "array", "items": { "type": "object", "properties": { "detected": { "type": "boolean", "description": "A value indicating whether the topic is detected." }, "id": { "type": "string", "description": "The ID of the custom topic evaluated." } }, "required": [ "detected", "id" ] }, "description": "The pairs of individual topic IDs and whether they are detected." } }, "description": "A collection of true/false filtering results for configured custom topics." }, "AzureContentFilterDetectionResult": { "type": "object", "required": [ "filtered", "detected" ], "properties": { "filtered": { "type": "boolean", "description": "Whether the content detection resulted in a content filtering action." }, "detected": { "type": "boolean", "description": "Whether the labeled content category was detected in the content." } }, "description": "A labeled content filter result item that indicates whether the content was detected and whether the content was\nfiltered." 
}, "AzureContentFilterForResponsesAPI": { "type": "object", "required": [ "blocked", "source_type", "content_filter_results", "content_filter_offsets" ], "properties": { "blocked": { "type": "boolean", "description": "Indicate if the response is blocked." }, "source_type": { "type": "string", "description": "The name of the source type of the message." }, "content_filter_results": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterResultsForResponsesAPI" } ], "description": "A content filter result for a single response item produced by a generative AI system." }, "content_filter_offsets": { "$ref": "#/components/schemas/AzureContentFilterResultOffsets" } } }, "AzureContentFilterHarmExtensions": { "type": "object", "properties": { "pii_sub_categories": { "type": "array", "items": { "$ref": "#/components/schemas/AzurePiiSubCategory" }, "description": "Configuration for PIIHarmSubCategory(s)." } }, "description": "Extensions for harm categories, providing additional configuration options." }, "AzureContentFilterImagePromptResults": { "type": "object", "required": [ "jailbreak" ], "properties": { "profanity": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that identifies whether crude, vulgar, or otherwise objectionable language is present in the\ncontent." }, "custom_blocklists": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterBlocklistResult" } ], "description": "A collection of binary filtering outcomes for configured custom blocklists." }, "custom_topics": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterCustomTopicResult" } ], "description": "A collection of binary filtering outcomes for configured custom topics." 
}, "jailbreak": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that describes user prompt injection attacks, where malicious users deliberately exploit\nsystem vulnerabilities to elicit unauthorized behavior from the LLM. This could lead to inappropriate content\ngeneration or violations of system-imposed restrictions." } }, "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterImageResponseResults" } ], "description": "A content filter result for an image generation operation's input request content." }, "AzureContentFilterImageResponseResults": { "type": "object", "properties": { "sexual": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to anatomical organs and genitals, romantic relationships, acts\nportrayed in erotic or affectionate terms, pregnancy, physical sexual acts, including those portrayed as an\nassault or a forced sexual violent act against one's will, prostitution, pornography, and abuse." }, "violence": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to physical actions intended to hurt, injure, damage, or kill\nsomeone or something; describes weapons, guns and related entities, such as manufacturers, associations,\nlegislation, and so on." }, "hate": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that can refer to any content that attacks or uses pejorative or discriminatory\nlanguage with reference to a person or identity group based on certain differentiating attributes of these groups\nincluding but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation,\nreligion, immigration status, ability status, personal appearance, and body size." 
}, "self_harm": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that describes language related to physical actions intended to purposely hurt, injure,\ndamage one's body or kill oneself." } }, "description": "A content filter result for an image generation operation's output response content." }, "AzureContentFilterPersonallyIdentifiableInformationResult": { "type": "object", "properties": { "redacted_text": { "type": "string", "description": "The redacted text with PII information removed or masked." }, "sub_categories": { "type": "array", "items": { "$ref": "#/components/schemas/AzurePiiSubCategoryResult" }, "description": "Detailed results for individual PIIHarmSubCategory(s)." } }, "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A content filter detection result for Personally Identifiable Information that includes harm extensions." }, "AzureContentFilterResultForChoice": { "type": "object", "properties": { "sexual": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to anatomical organs and genitals, romantic relationships, acts\nportrayed in erotic or affectionate terms, pregnancy, physical sexual acts, including those portrayed as an\nassault or a forced sexual violent act against one's will, prostitution, pornography, and abuse." 
}, "hate": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that can refer to any content that attacks or uses pejorative or discriminatory\nlanguage with reference to a person or identity group based on certain differentiating attributes of these groups\nincluding but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation,\nreligion, immigration status, ability status, personal appearance, and body size." }, "violence": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to physical actions intended to hurt, injure, damage, or kill\nsomeone or something; describes weapons, guns and related entities, such as manufacturers, associations,\nlegislation, and so on." }, "self_harm": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that describes language related to physical actions intended to purposely hurt, injure,\ndamage one's body or kill oneself." }, "profanity": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that identifies whether crude, vulgar, or otherwise objectionable language is present in the\ncontent." }, "custom_blocklists": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterBlocklistResult" } ], "description": "A collection of binary filtering outcomes for configured custom blocklists." }, "custom_topics": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterCustomTopicResult" } ], "description": "A collection of binary filtering outcomes for configured custom topics." }, "error": { "type": "object", "properties": { "code": { "type": "integer", "format": "int32", "description": "A distinct, machine-readable code associated with the error." 
}, "message": { "type": "string", "description": "A human-readable message associated with the error." } }, "required": [ "code", "message" ], "description": "If present, details about an error that prevented content filtering from completing its evaluation." }, "protected_material_text": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that describes a match against text protected under copyright or other status." }, "protected_material_code": { "type": "object", "properties": { "filtered": { "type": "boolean", "description": "Whether the content detection resulted in a content filtering action." }, "detected": { "type": "boolean", "description": "Whether the labeled content category was detected in the content." }, "citation": { "type": "object", "properties": { "license": { "type": "string", "description": "The name or identifier of the license associated with the detection." }, "URL": { "type": "string", "format": "uri", "description": "The URL associated with the license." } }, "description": "If available, the citation details describing the associated license and its location." } }, "required": [ "filtered", "detected" ], "description": "A detection result that describes a match against licensed code or other protected source material." }, "ungrounded_material": { "$ref": "#/components/schemas/AzureContentFilterCompletionTextSpanDetectionResult" }, "personally_identifiable_information": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterPersonallyIdentifiableInformationResult" } ], "description": "A detection result that describes matches against Personal Identifiable Information with configurable subcategories." } }, "description": "A content filter result for a single response item produced by a generative AI system." 
}, "AzureContentFilterResultForPrompt": { "type": "object", "properties": { "prompt_index": { "type": "integer", "format": "int32", "description": "The index of the input prompt associated with the accompanying content filter result categories." }, "content_filter_results": { "type": "object", "properties": { "sexual": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to anatomical organs and genitals, romantic relationships, acts\nportrayed in erotic or affectionate terms, pregnancy, physical sexual acts, including those portrayed as an\nassault or a forced sexual violent act against one's will, prostitution, pornography, and abuse." }, "hate": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that can refer to any content that attacks or uses pejorative or discriminatory\nlanguage with reference to a person or identity group based on certain differentiating attributes of these groups\nincluding but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation,\nreligion, immigration status, ability status, personal appearance, and body size." }, "violence": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to physical actions intended to hurt, injure, damage, or kill\nsomeone or something; describes weapons, guns and related entities, such as manufactures, associations,\nlegislation, and so on." }, "self_harm": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that describes language related to physical actions intended to purposely hurt, injure,\ndamage one's body or kill oneself." 
}, "profanity": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that identifies whether crude, vulgar, or otherwise objection language is present in the\ncontent." }, "custom_blocklists": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterBlocklistResult" } ], "description": "A collection of binary filtering outcomes for configured custom blocklists." }, "custom_topics": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterCustomTopicResult" } ], "description": "A collection of binary filtering outcomes for configured custom topics." }, "error": { "type": "object", "properties": { "code": { "type": "integer", "format": "int32", "description": "A distinct, machine-readable code associated with the error." }, "message": { "type": "string", "description": "A human-readable message associated with the error." } }, "required": [ "code", "message" ], "description": "If present, details about an error that prevented content filtering from completing its evaluation." }, "jailbreak": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that describes user prompt injection attacks, where malicious users deliberately exploit\nsystem vulnerabilities to elicit unauthorized behavior from the LLM. This could lead to inappropriate content\ngeneration or violations of system-imposed restrictions." }, "indirect_attack": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that describes attacks on systems powered by Generative AI models that can happen every time\nan application processes information that wasn’t directly authored by either the developer of the application or\nthe user." } }, "required": [ "jailbreak", "indirect_attack" ], "description": "The content filter category details for the result." 
} }, "description": "A content filter result associated with a single input prompt item into a generative AI system." }, "AzureContentFilterResultOffsets": { "type": "object", "required": [ "start_offset", "end_offset", "check_offset" ], "properties": { "start_offset": { "type": "integer", "format": "int32" }, "end_offset": { "type": "integer", "format": "int32" }, "check_offset": { "type": "integer", "format": "int32" } } }, "AzureContentFilterResultsForResponsesAPI": { "type": "object", "required": [ "jailbreak", "task_adherence" ], "properties": { "sexual": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to anatomical organs and genitals, romantic relationships, acts\nportrayed in erotic or affectionate terms, pregnancy, physical sexual acts, including those portrayed as an\nassault or a forced sexual violent act against one's will, prostitution, pornography, and abuse." }, "hate": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that can refer to any content that attacks or uses pejorative or discriminatory\nlanguage with reference to a person or identity group based on certain differentiating attributes of these groups\nincluding but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation,\nreligion, immigration status, ability status, personal appearance, and body size." }, "violence": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to physical actions intended to hurt, injure, damage, or kill\nsomeone or something; describes weapons, guns and related entities, such as manufactures, associations,\nlegislation, and so on." 
}, "self_harm": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that describes language related to physical actions intended to purposely hurt, injure,\ndamage one's body or kill oneself." }, "profanity": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that identifies whether crude, vulgar, or otherwise objection language is present in the\ncontent." }, "custom_blocklists": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterBlocklistResult" } ], "description": "A collection of binary filtering outcomes for configured custom blocklists." }, "custom_topics": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterCustomTopicResult" } ], "description": "A collection of binary filtering outcomes for configured custom topics." }, "error": { "type": "object", "properties": { "code": { "type": "integer", "format": "int32", "description": "A distinct, machine-readable code associated with the error." }, "message": { "type": "string", "description": "A human-readable message associated with the error." } }, "required": [ "code", "message" ], "description": "If present, details about an error that prevented content filtering from completing its evaluation." }, "jailbreak": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that describes user prompt injection attacks, where malicious users deliberately exploit\nsystem vulnerabilities to elicit unauthorized behavior from the LLM. This could lead to inappropriate content\ngeneration or violations of system-imposed restrictions." }, "task_adherence": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that indicates if the execution flow still sticks the plan." 
}, "protected_material_text": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that describes a match against text protected under copyright or other status." }, "protected_material_code": { "type": "object", "properties": { "filtered": { "type": "boolean", "description": "Whether the content detection resulted in a content filtering action." }, "detected": { "type": "boolean", "description": "Whether the labeled content category was detected in the content." }, "citation": { "type": "object", "properties": { "license": { "type": "string", "description": "The name or identifier of the license associated with the detection." }, "URL": { "type": "string", "format": "uri", "description": "The URL associated with the license." } }, "description": "If available, the citation details describing the associated license and its location." } }, "required": [ "filtered", "detected" ], "description": "A detection result that describes a match against licensed code or other protected source material." }, "ungrounded_material": { "$ref": "#/components/schemas/AzureContentFilterCompletionTextSpanDetectionResult" }, "personally_identifiable_information": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterPersonallyIdentifiableInformationResult" } ], "description": "A detection result that describes matches against Personal Identifiable Information with configurable subcategories." }, "indirect_attack": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that describes attacks on systems powered by Generative AI models that can happen every time\nan application processes information that wasn’t directly authored by either the developer of the application or\nthe user." 
} } }, "AzureContentFilterSeverityResult": { "type": "object", "required": [ "filtered", "severity" ], "properties": { "filtered": { "type": "boolean", "description": "Whether the content severity resulted in a content filtering action." }, "severity": { "type": "string", "enum": [ "safe", "low", "medium", "high" ], "description": "The labeled severity of the content." } }, "description": "A labeled content filter result item that indicates whether the content was filtered and what the qualitative\nseverity level of the content was, as evaluated against content filter configuration for the category." }, "AzureFileExpiryAnchor": { "type": "string", "enum": [ "created_at" ] }, "AzureFineTuneReinforcementMethod": { "type": "object", "required": [ "grader" ], "properties": { "grader": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.GraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.GraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.GraderScoreModel" }, { "$ref": "#/components/schemas/OpenAI.GraderMulti" }, { "$ref": "#/components/schemas/GraderEndpoint" } ] }, "response_format": { "allOf": [ { "$ref": "#/components/schemas/ResponseFormatJSONSchemaRequest" } ], "description": "Response format to be used while sampling during RFT training" }, "hyperparameters": { "$ref": "#/components/schemas/OpenAI.FineTuneReinforcementHyperparameters" } } }, "AzurePiiSubCategory": { "type": "object", "required": [ "sub_category", "detect", "redact", "filter" ], "properties": { "sub_category": { "type": "string", "description": "The PIIHarmSubCategory being configured." }, "detect": { "type": "boolean", "description": "Whether detection is enabled for this subcategory." }, "redact": { "type": "boolean", "description": "Whether content containing this subcategory should be redacted." }, "filter": { "type": "boolean", "description": "Whether content containing this subcategory should be blocked." 
} }, "description": "Configuration for individual PIIHarmSubCategory(s) within the harm extensions framework." }, "AzurePiiSubCategoryResult": { "type": "object", "required": [ "sub_category", "filtered", "detected", "redacted" ], "properties": { "sub_category": { "type": "string", "description": "The PIIHarmSubCategory that was evaluated." }, "filtered": { "type": "boolean", "description": "Whether the content detection resulted in a content filtering action for this subcategory." }, "detected": { "type": "boolean", "description": "Whether the labeled content subcategory was detected in the content." }, "redacted": { "type": "boolean", "description": "Whether the content was redacted for this subcategory." } }, "description": "Result details for individual PIIHarmSubCategory(s)." }, "AzureResponsesSamplingParams": { "type": "object", "properties": { "max_tokens": { "type": "integer", "format": "int32", "description": "The maximum number of tokens in the generated output." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.CreateEvalResponsesRunDataSourceSamplingParams" } ], "description": "Sampling parameters for controlling the behavior of responses." }, "AzureUserSecurityContext": { "type": "object", "properties": { "application_name": { "type": "string", "description": "The name of the application. Sensitive personal information should not be included in this field." }, "end_user_id": { "type": "string", "description": "This identifier is the Microsoft Entra ID (formerly Azure Active Directory) user object ID used to authenticate end-users within the generative AI application. Sensitive personal information should not be included in this field." }, "end_user_tenant_id": { "type": "string", "description": "The Microsoft 365 tenant ID the end user belongs to. It's required when the generative AI application is multitenant." }, "source_ip": { "type": "string", "description": "Captures the original client's IP address." 
} }, "description": "User security context contains several parameters that describe the application itself, and the end user that interacts with the application. These fields assist your security operations teams to investigate and mitigate security incidents by providing a comprehensive approach to protecting your AI applications. [Learn more](https://aka.ms/TP4AI/Documentation/EndUserContext) about protecting AI applications using Microsoft Defender for Cloud." }, "CopiedAccountDetails": { "type": "object", "required": [ "destinationResourceId", "region", "status" ], "properties": { "destinationResourceId": { "type": "string", "description": "The ID of the destination resource where the model was copied to." }, "region": { "type": "string", "description": "The region where the model was copied to." }, "status": { "type": "string", "enum": [ "Completed", "Failed", "InProgress" ], "description": "The status of the copy operation." } } }, "CopyModelRequest": { "type": "object", "required": [ "destinationResourceId", "region" ], "properties": { "destinationResourceId": { "type": "string", "description": "The ID of the destination Resource to copy." }, "region": { "type": "string", "description": "The region to copy the model to." } } }, "CopyModelResponse": { "type": "object", "required": [ "checkpointedModelName", "fineTuningJobId", "copiedAccountDetails" ], "properties": { "checkpointedModelName": { "type": "string", "description": "The ID of the copied model." }, "fineTuningJobId": { "type": "string", "description": "The ID of the fine-tuning job that the checkpoint was copied from." }, "copiedAccountDetails": { "type": "array", "items": { "$ref": "#/components/schemas/CopiedAccountDetails" }, "description": "The ID of the destination resource id where it was copied" } } }, "CreateVideoBody": { "type": "object", "required": [ "model", "prompt" ], "properties": { "model": { "type": "string", "description": "The name of the deployment to use for this request." 
}, "prompt": { "type": "string", "minLength": 1, "description": "Text prompt that describes the video to generate." }, "seconds": { "allOf": [ { "$ref": "#/components/schemas/VideoSeconds" } ], "description": "Clip duration in seconds. Defaults to 4 seconds.", "default": "4" }, "size": { "allOf": [ { "$ref": "#/components/schemas/VideoSize" } ], "description": "Output resolution formatted as width x height. Defaults to 720x1280.", "default": "720x1280" } } }, "CreateVideoBodyWithInputReference": { "type": "object", "required": [ "model", "prompt", "input_reference" ], "properties": { "model": { "type": "object", "description": "The name of the deployment to use for this request." }, "prompt": { "type": "object", "description": "Text prompt that describes the video to generate." }, "seconds": { "type": "object", "description": "Clip duration in seconds. Defaults to 4 seconds." }, "size": { "type": "object", "description": "Output resolution formatted as width x height. Defaults to 720x1280." }, "input_reference": { "type": "object", "description": "Optional image reference that guides generation.", "x-oaiTypeLabel": "file" } }, "description": "The properties of a video generation job request with media files." }, "CreateVideoRemixBody": { "type": "object", "required": [ "prompt" ], "properties": { "prompt": { "type": "string", "minLength": 1, "description": "Updated text prompt that directs the remix generation." } }, "description": "Parameters for remixing an existing generated video." }, "DeletedVideoResource": { "type": "object", "required": [ "object", "deleted", "id" ], "properties": { "object": { "type": "string", "description": "The object type that signals the deletion response.", "default": "video.deleted" }, "deleted": { "type": "boolean", "description": "Indicates that the video resource was deleted.", "default": true }, "id": { "type": "string", "description": "Identifier of the deleted video." 
} }, "description": "Confirmation payload returned after deleting a video." }, "Error": { "type": "object", "required": [ "code", "message" ], "properties": { "code": { "type": "string" }, "message": { "type": "string" } } }, "EvalGraderEndpoint": { "type": "object", "required": [ "type", "name", "url" ], "properties": { "type": { "type": "string", "enum": [ "endpoint" ], "default": "endpoint" }, "name": { "type": "string", "description": "The name of the grader" }, "url": { "type": "string", "pattern": "^https://", "description": "The HTTPS URL of the endpoint to call for grading" }, "headers": { "anyOf": [ { "type": "object", "unevaluatedProperties": { "type": "string" } }, { "type": "null" } ], "description": "Optional HTTP headers to include in requests to the endpoint" }, "rate_limit": { "anyOf": [ { "type": "integer", "format": "int32" }, { "type": "null" } ], "description": "Optional rate limit for requests per second to the endpoint\nMust be a positive integer" }, "pass_threshold": { "anyOf": [ { "type": "number", "format": "decimal" }, { "type": "null" } ], "description": "Optional threshold score above which the grade is considered passing\nIf not specified, all scores are considered valid" } } }, "GraderEndpoint": { "type": "object", "required": [ "type", "name", "url" ], "properties": { "type": { "type": "string", "enum": [ "endpoint" ], "default": "endpoint" }, "name": { "type": "string", "description": "The name of the grader" }, "url": { "type": "string", "pattern": "^https://", "description": "The HTTPS URL of the endpoint to call for grading" }, "headers": { "anyOf": [ { "type": "object", "unevaluatedProperties": { "type": "string" } }, { "type": "null" } ], "description": "Optional HTTP headers to include in requests to the endpoint" }, "rate_limit": { "anyOf": [ { "type": "integer", "format": "int32" }, { "type": "null" } ], "description": "Optional rate limit for requests per second to the endpoint\nMust be a positive integer" }, 
"pass_threshold": { "anyOf": [ { "type": "number", "format": "decimal" }, { "type": "null" } ], "description": "Optional threshold score above which the grade is considered passing\nIf not specified, all scores are considered valid" } }, "description": "Endpoint grader configuration for external HTTP endpoint evaluation" }, "OpenAI.Annotation": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.AnnotationType" } }, "discriminator": { "propertyName": "type", "mapping": { "file_citation": "#/components/schemas/OpenAI.FileCitationBody", "url_citation": "#/components/schemas/OpenAI.UrlCitationBody", "container_file_citation": "#/components/schemas/OpenAI.ContainerFileCitationBody", "file_path": "#/components/schemas/OpenAI.FilePath" } }, "description": "An annotation that applies to a span of output text." }, "OpenAI.AnnotationType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "file_citation", "url_citation", "container_file_citation", "file_path" ] } ] }, "OpenAI.ApplyPatchCallOutputStatus": { "type": "string", "enum": [ "completed", "failed" ] }, "OpenAI.ApplyPatchCallStatus": { "type": "string", "enum": [ "in_progress", "completed" ] }, "OpenAI.ApplyPatchCreateFileOperation": { "type": "object", "required": [ "type", "path", "diff" ], "properties": { "type": { "type": "string", "enum": [ "create_file" ], "description": "Create a new file with the provided diff.", "x-stainless-const": true, "default": "create_file" }, "path": { "type": "string", "description": "Path of the file to create." }, "diff": { "type": "string", "description": "Diff to apply." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchFileOperation" } ], "description": "Instruction describing how to create a file via the apply_patch tool.", "title": "Apply patch create file operation" }, "OpenAI.ApplyPatchDeleteFileOperation": { "type": "object", "required": [ "type", "path" ], "properties": { "type": { "type": "string", "enum": [ "delete_file" ], "description": "Delete the specified file.", "x-stainless-const": true, "default": "delete_file" }, "path": { "type": "string", "description": "Path of the file to delete." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchFileOperation" } ], "description": "Instruction describing how to delete a file via the apply_patch tool.", "title": "Apply patch delete file operation" }, "OpenAI.ApplyPatchFileOperation": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ApplyPatchFileOperationType" } }, "discriminator": { "propertyName": "type", "mapping": { "create_file": "#/components/schemas/OpenAI.ApplyPatchCreateFileOperation", "delete_file": "#/components/schemas/OpenAI.ApplyPatchDeleteFileOperation", "update_file": "#/components/schemas/OpenAI.ApplyPatchUpdateFileOperation" } }, "description": "One of the create_file, delete_file, or update_file operations applied via apply_patch.", "title": "Apply patch operation" }, "OpenAI.ApplyPatchFileOperationType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "create_file", "delete_file", "update_file" ] } ] }, "OpenAI.ApplyPatchToolParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "apply_patch" ], "description": "The type of the tool. 
Always `apply_patch`.", "x-stainless-const": true, "default": "apply_patch" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "Allows the assistant to create, delete, or update files using unified diffs.", "title": "Apply patch tool" }, "OpenAI.ApplyPatchUpdateFileOperation": { "type": "object", "required": [ "type", "path", "diff" ], "properties": { "type": { "type": "string", "enum": [ "update_file" ], "description": "Update an existing file with the provided diff.", "x-stainless-const": true, "default": "update_file" }, "path": { "type": "string", "description": "Path of the file to update." }, "diff": { "type": "string", "description": "Diff to apply." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchFileOperation" } ], "description": "Instruction describing how to update a file via the apply_patch tool.", "title": "Apply patch update file operation" }, "OpenAI.ApproximateLocation": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "approximate" ], "description": "The type of location approximation. 
Always `approximate`.", "x-stainless-const": true, "default": "approximate" }, "country": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "region": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "city": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "timezone": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } } }, "OpenAI.AssistantTool": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.AssistantToolType" } }, "discriminator": { "propertyName": "type", "mapping": { "code_interpreter": "#/components/schemas/OpenAI.AssistantToolsCode", "file_search": "#/components/schemas/OpenAI.AssistantToolsFileSearch", "function": "#/components/schemas/OpenAI.AssistantToolsFunction" } } }, "OpenAI.AssistantToolType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "code_interpreter", "file_search", "function" ] } ] }, "OpenAI.AssistantToolsCode": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "code_interpreter" ], "description": "The type of tool being defined: `code_interpreter`", "x-stainless-const": true } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.AssistantTool" } ], "title": "Code interpreter tool" }, "OpenAI.AssistantToolsFileSearch": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "file_search" ], "description": "The type of tool being defined: `file_search`", "x-stainless-const": true }, "file_search": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.AssistantToolsFileSearchFileSearch" } ], "description": "Overrides for the file search tool." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.AssistantTool" } ], "title": "FileSearch tool" }, "OpenAI.AssistantToolsFileSearchFileSearch": { "type": "object", "properties": { "max_num_results": { "type": "integer", "minimum": 1, "maximum": 50 }, "ranking_options": { "$ref": "#/components/schemas/OpenAI.FileSearchRankingOptions" } } }, "OpenAI.AssistantToolsFileSearchTypeOnly": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "file_search" ], "description": "The type of tool being defined: `file_search`", "x-stainless-const": true } }, "title": "AssistantToolsFileSearchTypeOnly" }, "OpenAI.AssistantToolsFunction": { "type": "object", "required": [ "type", "function" ], "properties": { "type": { "type": "string", "enum": [ "function" ], "description": "The type of tool being defined: `function`", "x-stainless-const": true }, "function": { "$ref": "#/components/schemas/OpenAI.FunctionObject" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.AssistantTool" } ], "title": "Function tool" }, "OpenAI.AssistantsApiResponseFormatOption": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "$ref": "#/components/schemas/OpenAI.ResponseFormatText" }, { "$ref": "#/components/schemas/OpenAI.ResponseFormatJsonObject" }, { "$ref": "#/components/schemas/OpenAI.ResponseFormatJsonSchema" } ], "description": "Specifies the format that the model must output. Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o), [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. 
Learn more in the [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON.\n*Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length." }, "OpenAI.AssistantsApiToolChoiceOption": { "anyOf": [ { "type": "string", "enum": [ "none", "auto", "required" ] }, { "$ref": "#/components/schemas/OpenAI.AssistantsNamedToolChoice" } ], "description": "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tools and instead generates a message.\n`auto` is the default value and means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools before responding to the user.\nSpecifying a particular tool like `{\"type\": \"file_search\"}` or `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool." }, "OpenAI.AssistantsNamedToolChoice": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "function", "code_interpreter", "file_search" ], "description": "The type of the tool. If type is `function`, the function name must be set" }, "function": { "$ref": "#/components/schemas/OpenAI.AssistantsNamedToolChoiceFunction" } }, "description": "Specifies a tool the model should use. Use to force the model to call a specific tool." 
}, "OpenAI.AssistantsNamedToolChoiceFunction": { "type": "object", "required": [ "name" ], "properties": { "name": { "type": "string" } } }, "OpenAI.AudioTranscription": { "type": "object", "properties": { "model": { "type": "string", "description": "The model to use for transcription. Current options are `whisper-1`, `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need diarization with speaker labels." }, "language": { "type": "string", "description": "The language of the input audio. Supplying the input language in\n [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`) format\n will improve accuracy and latency." }, "prompt": { "type": "string", "description": "An optional text to guide the model's style or continue a previous audio\n segment.\n For `whisper-1`, the [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).\n For `gpt-4o-transcribe` models (excluding `gpt-4o-transcribe-diarize`), the prompt is a free text string, for example \"expect words related to technology\"." } } }, "OpenAI.AutoChunkingStrategyRequestParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "auto" ], "description": "Always `auto`.", "x-stainless-const": true } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyRequestParam" } ], "description": "The default strategy. 
This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.", "title": "Auto Chunking Strategy" }, "OpenAI.Batch": { "type": "object", "required": [ "id", "object", "endpoint", "completion_window", "status", "created_at" ], "properties": { "id": { "type": "string" }, "object": { "type": "string", "enum": [ "batch" ], "description": "The object type, which is always `batch`.", "x-stainless-const": true }, "endpoint": { "type": "string", "description": "The OpenAI API endpoint used by the batch." }, "model": { "type": "string", "description": "Model ID used to process the batch, like `gpt-5-2025-08-07`. OpenAI\n offers a wide range of models with different capabilities, performance\n characteristics, and price points. Refer to the [model\n guide](https://platform.openai.com/docs/models) to browse and compare available models." }, "errors": { "$ref": "#/components/schemas/OpenAI.BatchErrors" }, "completion_window": { "type": "string", "description": "The time frame within which the batch should be processed." }, "status": { "type": "string", "enum": [ "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled" ], "description": "The current status of the batch." }, "output_file_id": { "type": "string", "description": "The ID of the file containing the outputs of successfully executed requests." }, "error_file_id": { "type": "string", "description": "The ID of the file containing the outputs of requests with errors." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was created." }, "in_progress_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started processing." }, "expires_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch will expire." 
}, "finalizing_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started finalizing." }, "completed_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was completed." }, "failed_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch failed." }, "expired_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch expired." }, "cancelling_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch started cancelling." }, "cancelled_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the batch was cancelled." }, "request_counts": { "$ref": "#/components/schemas/OpenAI.BatchRequestCounts" }, "usage": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.BatchUsage" } ], "description": "Represents token usage details including input tokens, output tokens, a\n breakdown of output tokens, and the total tokens used. Only populated on\n batches created after September 7, 2025." 
}, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "input_file_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "x-oaiMeta": { "name": "The batch object", "example": "{\n \"id\": \"batch_abc123\",\n \"object\": \"batch\",\n \"endpoint\": \"/v1/completions\",\n \"model\": \"gpt-5-2025-08-07\",\n \"errors\": null,\n \"input_file_id\": \"file-abc123\",\n \"completion_window\": \"24h\",\n \"status\": \"completed\",\n \"output_file_id\": \"file-cvaTdG\",\n \"error_file_id\": \"file-HOWS94\",\n \"created_at\": 1711471533,\n \"in_progress_at\": 1711471538,\n \"expires_at\": 1711557933,\n \"finalizing_at\": 1711493133,\n \"completed_at\": 1711493163,\n \"failed_at\": null,\n \"expired_at\": null,\n \"cancelling_at\": null,\n \"cancelled_at\": null,\n \"request_counts\": {\n \"total\": 100,\n \"completed\": 95,\n \"failed\": 5\n },\n \"usage\": {\n \"input_tokens\": 1500,\n \"input_tokens_details\": {\n \"cached_tokens\": 1024\n },\n \"output_tokens\": 500,\n \"output_tokens_details\": {\n \"reasoning_tokens\": 300\n },\n \"total_tokens\": 2000\n },\n \"metadata\": {\n \"customer_id\": \"user_123456789\",\n \"batch_description\": \"Nightly eval job\",\n }\n}\n" } }, "OpenAI.BatchError": { "type": "object", "properties": { "code": { "type": "string", "description": "An error code identifying the error type." }, "message": { "type": "string", "description": "A human-readable message providing more details about the error." 
}, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "line": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] } } }, "OpenAI.BatchErrors": { "type": "object", "properties": { "object": { "type": "string" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.BatchError" } } } }, "OpenAI.BatchRequestCounts": { "type": "object", "required": [ "total", "completed", "failed" ], "properties": { "total": { "type": "integer", "description": "Total number of requests in the batch." }, "completed": { "type": "integer", "description": "Number of requests that have been completed successfully." }, "failed": { "type": "integer", "description": "Number of requests that have failed." } }, "description": "The request counts for different statuses within the batch." }, "OpenAI.BatchUsage": { "type": "object", "required": [ "input_tokens", "input_tokens_details", "output_tokens", "output_tokens_details", "total_tokens" ], "properties": { "input_tokens": { "type": "integer" }, "input_tokens_details": { "$ref": "#/components/schemas/OpenAI.BatchUsageInputTokensDetails" }, "output_tokens": { "type": "integer" }, "output_tokens_details": { "$ref": "#/components/schemas/OpenAI.BatchUsageOutputTokensDetails" }, "total_tokens": { "type": "integer" } } }, "OpenAI.BatchUsageInputTokensDetails": { "type": "object", "required": [ "cached_tokens" ], "properties": { "cached_tokens": { "type": "integer" } } }, "OpenAI.BatchUsageOutputTokensDetails": { "type": "object", "required": [ "reasoning_tokens" ], "properties": { "reasoning_tokens": { "type": "integer" } } }, "OpenAI.ChatCompletionAllowedTools": { "type": "object", "required": [ "mode", "tools" ], "properties": { "mode": { "type": "string", "enum": [ "auto", "required" ], "description": "Constrains the tools available to the model to a pre-defined set.\n `auto` allows the model to pick from among the allowed tools and generate a\n message.\n `required` requires the model to call one or more 
of the allowed tools." }, "tools": { "type": "array", "items": { "type": "object", "unevaluatedProperties": {} }, "description": "A list of tool definitions that the model should be allowed to call.\n For the Chat Completions API, the list of tool definitions might look like:\n ```json\n [\n { \"type\": \"function\", \"function\": { \"name\": \"get_weather\" } },\n { \"type\": \"function\", \"function\": { \"name\": \"get_time\" } }\n ]\n ```" } }, "description": "Constrains the tools available to the model to a pre-defined set.", "title": "Allowed tools" }, "OpenAI.ChatCompletionAllowedToolsChoice": { "type": "object", "required": [ "type", "allowed_tools" ], "properties": { "type": { "type": "string", "enum": [ "allowed_tools" ], "description": "Allowed tool configuration type. Always `allowed_tools`.", "x-stainless-const": true }, "allowed_tools": { "$ref": "#/components/schemas/OpenAI.ChatCompletionAllowedTools" } }, "description": "Constrains the tools available to the model to a pre-defined set.", "title": "Allowed tools" }, "OpenAI.ChatCompletionFunctionCallOption": { "type": "object", "required": [ "name" ], "properties": { "name": { "type": "string", "description": "The name of the function to call." } }, "description": "Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function.", "x-stainless-variantName": "function_call_option" }, "OpenAI.ChatCompletionFunctions": { "type": "object", "required": [ "name" ], "properties": { "description": { "type": "string", "description": "A description of what the function does, used by the model to choose when and how to call the function." }, "name": { "type": "string", "description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64." 
}, "parameters": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.FunctionParameters" } ], "description": "The parameters the functions accepts, described as a JSON Schema object.\nSee the [JSON Schema reference](https://json-schema.org/understanding-json-schema/)\nfor documentation about the format.\n\nOmitting `parameters` defines a function with an empty parameter list." } }, "deprecated": true }, "OpenAI.ChatCompletionMessageCustomToolCall": { "type": "object", "required": [ "id", "type", "custom" ], "properties": { "id": { "type": "string", "description": "The ID of the tool call." }, "type": { "type": "string", "enum": [ "custom" ], "description": "The type of the tool. Always `custom`.", "x-stainless-const": true }, "custom": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageCustomToolCallCustom" } ], "description": "The custom tool that the model called." } }, "description": "A call to a custom tool created by the model.", "title": "Custom tool call" }, "OpenAI.ChatCompletionMessageCustomToolCallCustom": { "type": "object", "required": [ "name", "input" ], "properties": { "name": { "type": "string" }, "input": { "type": "string" } } }, "OpenAI.ChatCompletionMessageToolCall": { "type": "object", "required": [ "id", "type", "function" ], "properties": { "id": { "type": "string", "description": "The ID of the tool call." }, "type": { "type": "string", "enum": [ "function" ], "description": "The type of the tool. Currently, only `function` is supported.", "x-stainless-const": true }, "function": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageToolCallFunction" } ], "description": "The function that the model called." 
} }, "description": "A call to a function tool created by the model.", "title": "Function tool call" }, "OpenAI.ChatCompletionMessageToolCallChunk": { "type": "object", "required": [ "index" ], "properties": { "index": { "type": "integer" }, "id": { "type": "string", "description": "The ID of the tool call." }, "type": { "type": "string", "enum": [ "function" ], "description": "The type of the tool. Currently, only `function` is supported.", "x-stainless-const": true }, "function": { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageToolCallChunkFunction" } } }, "OpenAI.ChatCompletionMessageToolCallChunkFunction": { "type": "object", "properties": { "name": { "type": "string" }, "arguments": { "type": "string" } } }, "OpenAI.ChatCompletionMessageToolCallFunction": { "type": "object", "required": [ "name", "arguments" ], "properties": { "name": { "type": "string" }, "arguments": { "type": "string" } } }, "OpenAI.ChatCompletionMessageToolCalls": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageToolCall" }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageCustomToolCall" } ] }, "description": "The tool calls generated by the model, such as function calls." }, "OpenAI.ChatCompletionMessageToolCallsItem": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageToolCall" }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageCustomToolCall" } ] }, "description": "The tool calls generated by the model, such as function calls." }, "OpenAI.ChatCompletionNamedToolChoice": { "type": "object", "required": [ "type", "function" ], "properties": { "type": { "type": "string", "enum": [ "function" ], "description": "For function calling, the type is always `function`.", "x-stainless-const": true }, "function": { "$ref": "#/components/schemas/OpenAI.ChatCompletionNamedToolChoiceFunction" } }, "description": "Specifies a tool the model should use. 
Use to force the model to call a specific function.", "title": "Function tool choice" }, "OpenAI.ChatCompletionNamedToolChoiceCustom": { "type": "object", "required": [ "type", "custom" ], "properties": { "type": { "type": "string", "enum": [ "custom" ], "description": "For custom tool calling, the type is always `custom`.", "x-stainless-const": true }, "custom": { "$ref": "#/components/schemas/OpenAI.ChatCompletionNamedToolChoiceCustomCustom" } }, "description": "Specifies a tool the model should use. Use to force the model to call a specific custom tool.", "title": "Custom tool choice" }, "OpenAI.ChatCompletionNamedToolChoiceCustomCustom": { "type": "object", "required": [ "name" ], "properties": { "name": { "type": "string" } } }, "OpenAI.ChatCompletionNamedToolChoiceFunction": { "type": "object", "required": [ "name" ], "properties": { "name": { "type": "string" } } }, "OpenAI.ChatCompletionRequestAssistantMessage": { "type": "object", "required": [ "role" ], "properties": { "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestAssistantMessageContentPart" } }, { "type": "null" } ] }, "refusal": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "role": { "type": "string", "enum": [ "assistant" ], "description": "The role of the messages author, in this case `assistant`.", "x-stainless-const": true }, "name": { "type": "string", "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." }, "audio": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestAssistantMessageAudio" }, { "type": "null" } ], "description": "Data about a previous audio response from the model." 
}, "tool_calls": { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageToolCalls" }, "function_call": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestAssistantMessageFunctionCall" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ], "description": "Messages sent by the model in response to user messages.", "title": "Assistant message", "x-stainless-soft-required": [ "content" ] }, "OpenAI.ChatCompletionRequestAssistantMessageAudio": { "type": "object", "required": [ "id" ], "properties": { "id": { "type": "string" } } }, "OpenAI.ChatCompletionRequestAssistantMessageContentPart": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestAssistantMessageContentPartType" } }, "discriminator": { "propertyName": "type", "mapping": { "refusal": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartRefusal", "text": "#/components/schemas/OpenAI.ChatCompletionRequestAssistantMessageContentPartChatCompletionRequestMessageContentPartText" } } }, "OpenAI.ChatCompletionRequestAssistantMessageContentPartChatCompletionRequestMessageContentPartText": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "The type of the content part.", "x-stainless-const": true }, "text": { "type": "string", "description": "The text content." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestAssistantMessageContentPart" } ], "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).", "title": "Text content part", "x-stainless-naming": { "go": { "variant_constructor": "TextContentPart" } } }, "OpenAI.ChatCompletionRequestAssistantMessageContentPartType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "text", "refusal" ] } ] }, "OpenAI.ChatCompletionRequestAssistantMessageFunctionCall": { "type": "object", "required": [ "arguments", "name" ], "properties": { "arguments": { "type": "string" }, "name": { "type": "string" } } }, "OpenAI.ChatCompletionRequestDeveloperMessage": { "type": "object", "required": [ "content", "role" ], "properties": { "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartText" } } ], "description": "The contents of the developer message." }, "role": { "type": "string", "enum": [ "developer" ], "description": "The role of the messages author, in this case `developer`.", "x-stainless-const": true }, "name": { "type": "string", "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ], "description": "Developer-provided instructions that the model should follow, regardless of\nmessages sent by the user. 
With o1 models and newer, `developer` messages\nreplace the previous `system` messages.", "title": "Developer message", "x-stainless-naming": { "go": { "variant_constructor": "DeveloperMessage" } } }, "OpenAI.ChatCompletionRequestFunctionMessage": { "type": "object", "required": [ "role", "content", "name" ], "properties": { "role": { "type": "string", "enum": [ "function" ], "description": "The role of the messages author, in this case `function`.", "x-stainless-const": true }, "content": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "name": { "type": "string", "description": "The name of the function to call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ], "title": "Function message", "deprecated": true }, "OpenAI.ChatCompletionRequestMessage": { "type": "object", "required": [ "role" ], "properties": { "role": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageType" } }, "discriminator": { "propertyName": "role", "mapping": { "assistant": "#/components/schemas/OpenAI.ChatCompletionRequestAssistantMessage", "developer": "#/components/schemas/OpenAI.ChatCompletionRequestDeveloperMessage", "function": "#/components/schemas/OpenAI.ChatCompletionRequestFunctionMessage", "system": "#/components/schemas/OpenAI.ChatCompletionRequestSystemMessage", "user": "#/components/schemas/OpenAI.ChatCompletionRequestUserMessage", "tool": "#/components/schemas/OpenAI.ChatCompletionRequestToolMessage" } } }, "OpenAI.ChatCompletionRequestMessageContentPartAudio": { "type": "object", "required": [ "type", "input_audio" ], "properties": { "type": { "type": "string", "enum": [ "input_audio" ], "description": "The type of the content part. 
Always `input_audio`.", "x-stainless-const": true }, "input_audio": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudio" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestUserMessageContentPart" } ], "description": "", "title": "Audio content part", "x-stainless-naming": { "go": { "variant_constructor": "InputAudioContentPart" } } }, "OpenAI.ChatCompletionRequestMessageContentPartAudioInputAudio": { "type": "object", "required": [ "data", "format" ], "properties": { "data": { "type": "string" }, "format": { "type": "string", "enum": [ "wav", "mp3" ] } } }, "OpenAI.ChatCompletionRequestMessageContentPartFile": { "type": "object", "required": [ "type", "file" ], "properties": { "type": { "type": "string", "enum": [ "file" ], "description": "The type of the content part. Always `file`.", "x-stainless-const": true }, "file": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartFileFile" } ], "x-stainless-naming": { "java": { "type_name": "FileObject" }, "kotlin": { "type_name": "FileObject" } } } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestUserMessageContentPart" } ], "description": "Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text generation.", "title": "File content part", "x-stainless-naming": { "go": { "variant_constructor": "FileContentPart" } } }, "OpenAI.ChatCompletionRequestMessageContentPartFileFile": { "type": "object", "properties": { "filename": { "type": "string" }, "file_data": { "type": "string" }, "file_id": { "type": "string" } } }, "OpenAI.ChatCompletionRequestMessageContentPartImage": { "type": "object", "required": [ "type", "image_url" ], "properties": { "type": { "type": "string", "enum": [ "image_url" ], "description": "The type of the content part.", "x-stainless-const": true }, "image_url": { "$ref": 
"#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartImageImageUrl" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestUserMessageContentPart" } ], "description": "", "title": "Image content part", "x-stainless-naming": { "go": { "variant_constructor": "ImageContentPart" } } }, "OpenAI.ChatCompletionRequestMessageContentPartImageImageUrl": { "type": "object", "required": [ "url" ], "properties": { "url": { "type": "string", "format": "uri" }, "detail": { "type": "string", "enum": [ "auto", "low", "high" ], "default": "auto" } } }, "OpenAI.ChatCompletionRequestMessageContentPartRefusal": { "type": "object", "required": [ "type", "refusal" ], "properties": { "type": { "type": "string", "enum": [ "refusal" ], "description": "The type of the content part.", "x-stainless-const": true }, "refusal": { "type": "string", "description": "The refusal message generated by the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestAssistantMessageContentPart" } ], "title": "Refusal content part" }, "OpenAI.ChatCompletionRequestMessageContentPartText": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "The type of the content part.", "x-stainless-const": true }, "text": { "type": "string", "description": "The text content." 
} }, "description": "", "title": "Text content part", "x-stainless-naming": { "go": { "variant_constructor": "TextContentPart" } } }, "OpenAI.ChatCompletionRequestMessageType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "developer", "system", "user", "assistant", "tool", "function" ] } ] }, "OpenAI.ChatCompletionRequestSystemMessage": { "type": "object", "required": [ "content", "role" ], "properties": { "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestSystemMessageContentPart" } } ], "description": "The contents of the system message." }, "role": { "type": "string", "enum": [ "system" ], "description": "The role of the messages author, in this case `system`.", "x-stainless-const": true }, "name": { "type": "string", "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ], "description": "Developer-provided instructions that the model should follow, regardless of\nmessages sent by the user. 
With o1 models and newer, use `developer` messages\nfor this purpose instead.", "title": "System message", "x-stainless-naming": { "go": { "variant_constructor": "SystemMessage" } } }, "OpenAI.ChatCompletionRequestSystemMessageContentPart": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartText" }, "OpenAI.ChatCompletionRequestToolMessage": { "type": "object", "required": [ "role", "content", "tool_call_id" ], "properties": { "role": { "type": "string", "enum": [ "tool" ], "description": "The role of the messages author, in this case `tool`.", "x-stainless-const": true }, "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestToolMessageContentPart" } } ], "description": "The contents of the tool message." }, "tool_call_id": { "type": "string", "description": "Tool call that this message is responding to." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ], "title": "Tool message", "x-stainless-naming": { "go": { "variant_constructor": "ToolMessage" } } }, "OpenAI.ChatCompletionRequestToolMessageContentPart": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartText" }, "OpenAI.ChatCompletionRequestUserMessage": { "type": "object", "required": [ "content", "role" ], "properties": { "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestUserMessageContentPart" } } ], "description": "The contents of the user message." }, "role": { "type": "string", "enum": [ "user" ], "description": "The role of the messages author, in this case `user`.", "x-stainless-const": true }, "name": { "type": "string", "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ], "description": "Messages sent by an end user, containing prompts or additional context\ninformation.", "title": "User message", "x-stainless-naming": { "go": { "variant_constructor": "UserMessage" } } }, "OpenAI.ChatCompletionRequestUserMessageContentPart": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestUserMessageContentPartType" } }, "discriminator": { "propertyName": "type", "mapping": { "image_url": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartImage", "input_audio": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartAudio", "file": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartFile", "text": "#/components/schemas/OpenAI.ChatCompletionRequestUserMessageContentPartChatCompletionRequestMessageContentPartText" } } }, "OpenAI.ChatCompletionRequestUserMessageContentPartChatCompletionRequestMessageContentPartText": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "The type of the content part.", "x-stainless-const": true }, "text": { "type": "string", "description": "The text content." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestUserMessageContentPart" } ], "description": "Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).", "title": "Text content part", "x-stainless-naming": { "go": { "variant_constructor": "TextContentPart" } } }, "OpenAI.ChatCompletionRequestUserMessageContentPartType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "text", "image_url", "input_audio", "file" ] } ] }, "OpenAI.ChatCompletionResponseMessage": { "type": "object", "required": [ "content", "refusal", "role" ], "properties": { "content": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "refusal": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "tool_calls": { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageToolCallsItem" }, "annotations": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionResponseMessageAnnotations" }, "description": "Annotations for the message, when applicable, as when using the\n [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat)." }, "role": { "type": "string", "enum": [ "assistant" ], "description": "The role of the author of this message.", "x-stainless-const": true }, "function_call": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionResponseMessageFunctionCall" } ], "description": "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.", "deprecated": true }, "audio": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionResponseMessageAudio" }, { "type": "null" } ] }, "reasoning_content": { "type": "string", "description": "An Azure-specific extension property containing generated reasoning content from supported models." } }, "description": "If the audio output modality is requested, this object contains data\nabout the audio response from the model." 
}, "OpenAI.ChatCompletionResponseMessageAnnotations": { "type": "object", "required": [ "type", "url_citation" ], "properties": { "type": { "type": "string", "enum": [ "url_citation" ], "x-stainless-const": true }, "url_citation": { "$ref": "#/components/schemas/OpenAI.ChatCompletionResponseMessageAnnotationsUrlCitation" } } }, "OpenAI.ChatCompletionResponseMessageAnnotationsUrlCitation": { "type": "object", "required": [ "end_index", "start_index", "url", "title" ], "properties": { "end_index": { "type": "integer" }, "start_index": { "type": "integer" }, "url": { "type": "string" }, "title": { "type": "string" } } }, "OpenAI.ChatCompletionResponseMessageAudio": { "type": "object", "required": [ "id", "expires_at", "data", "transcript" ], "properties": { "id": { "type": "string" }, "expires_at": { "type": "integer", "format": "unixtime" }, "data": { "type": "string" }, "transcript": { "type": "string" } } }, "OpenAI.ChatCompletionResponseMessageFunctionCall": { "type": "object", "required": [ "arguments", "name" ], "properties": { "arguments": { "type": "string" }, "name": { "type": "string" } } }, "OpenAI.ChatCompletionStreamOptions": { "type": "object", "properties": { "include_usage": { "type": "boolean", "description": "If set, an additional chunk will be streamed before the `data: [DONE]`\n message. The `usage` field on this chunk shows the token usage statistics\n for the entire request, and the `choices` field will always be an empty\n array.\n All other chunks will also include a `usage` field, but with a null\n value. **NOTE:** If the stream is interrupted, you may not receive the\n final usage chunk which contains the total token usage for the request." }, "include_obfuscation": { "type": "boolean", "description": "When true, stream obfuscation will be enabled. 
Stream obfuscation adds\n random characters to an `obfuscation` field on streaming delta events to\n normalize payload sizes as a mitigation to certain side-channel attacks.\n These obfuscation fields are included by default, but add a small amount\n of overhead to the data stream. You can set `include_obfuscation` to\n false to optimize for bandwidth if you trust the network links between\n your application and the OpenAI API." } }, "description": "Options for streaming response. Only set this when you set `stream: true`." }, "OpenAI.ChatCompletionStreamResponseDelta": { "type": "object", "properties": { "content": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "function_call": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionStreamResponseDeltaFunctionCall" } ], "description": "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.", "deprecated": true }, "tool_calls": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageToolCallChunk" } }, "role": { "type": "string", "enum": [ "developer", "system", "user", "assistant", "tool" ], "description": "The role of the author of this message." }, "refusal": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "reasoning_content": { "type": "string", "description": "An Azure-specific extension property containing generated reasoning content from supported models." } }, "description": "A chat completion delta generated by streamed model responses." }, "OpenAI.ChatCompletionStreamResponseDeltaFunctionCall": { "type": "object", "properties": { "arguments": { "type": "string" }, "name": { "type": "string" } } }, "OpenAI.ChatCompletionTokenLogprob": { "type": "object", "required": [ "token", "logprob", "bytes", "top_logprobs" ], "properties": { "token": { "type": "string", "description": "The token." 
}, "logprob": { "type": "number", "description": "The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely." }, "bytes": { "anyOf": [ { "type": "array", "items": { "type": "integer" } }, { "type": "null" } ] }, "top_logprobs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTokenLogprobTopLogprobs" }, "description": "List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned." } } }, "OpenAI.ChatCompletionTokenLogprobTopLogprobs": { "type": "object", "required": [ "token", "logprob", "bytes" ], "properties": { "token": { "type": "string" }, "logprob": { "type": "number" }, "bytes": { "anyOf": [ { "type": "array", "items": { "type": "integer" } }, { "type": "null" } ] } } }, "OpenAI.ChatCompletionTool": { "type": "object", "required": [ "type", "function" ], "properties": { "type": { "type": "string", "enum": [ "function" ], "description": "The type of the tool. 
Currently, only `function` is supported.", "x-stainless-const": true }, "function": { "$ref": "#/components/schemas/OpenAI.FunctionObject" } }, "description": "A function tool that can be used to generate a response.", "title": "Function tool" }, "OpenAI.ChatCompletionToolChoiceOption": { "anyOf": [ { "type": "string", "enum": [ "none", "auto", "required" ] }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionAllowedToolsChoice" }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionNamedToolChoice" }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionNamedToolChoiceCustom" } ], "description": "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n`none` is the default when no tools are present. `auto` is the default if tools are present.", "x-stainless-go-variant-constructor": { "naming": "tool_choice_option_{variant}" } }, "OpenAI.ChunkingStrategyRequestParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyRequestParamType" } }, "discriminator": { "propertyName": "type", "mapping": { "auto": "#/components/schemas/OpenAI.AutoChunkingStrategyRequestParam", "static": "#/components/schemas/OpenAI.StaticChunkingStrategyRequestParam" } }, "description": "The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty." 
}, "OpenAI.ChunkingStrategyRequestParamType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "auto", "static" ] } ] }, "OpenAI.ChunkingStrategyResponse": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyResponseType" } }, "discriminator": { "propertyName": "type", "mapping": { "static": "#/components/schemas/OpenAI.StaticChunkingStrategyResponseParam", "other": "#/components/schemas/OpenAI.OtherChunkingStrategyResponseParam" } }, "description": "The strategy used to chunk the file." }, "OpenAI.ChunkingStrategyResponseType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "static", "other" ] } ] }, "OpenAI.ClickButtonType": { "type": "string", "enum": [ "left", "right", "wheel", "back", "forward" ] }, "OpenAI.ClickParam": { "type": "object", "required": [ "type", "button", "x", "y" ], "properties": { "type": { "type": "string", "enum": [ "click" ], "description": "Specifies the event type. For a click action, this property is always `click`.", "x-stainless-const": true, "default": "click" }, "button": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ClickButtonType" } ], "description": "Indicates which mouse button was pressed during the click. One of `left`, `right`, `wheel`, `back`, or `forward`." }, "x": { "type": "integer", "description": "The x-coordinate where the click occurred." }, "y": { "type": "integer", "description": "The y-coordinate where the click occurred." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A click action.", "title": "Click" }, "OpenAI.CodeInterpreterContainerAuto": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "auto" ], "description": "Always `auto`.", "x-stainless-const": true, "default": "auto" }, "file_ids": { "type": "array", "items": { "type": "string" }, "maxItems": 50, "description": "An optional list of uploaded files to make available to your code." }, "memory_limit": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ContainerMemoryLimit" }, { "type": "null" } ] } }, "description": "Configuration for a code interpreter container. Optionally specify the IDs of the files to run the code on.", "title": "CodeInterpreterToolAuto", "x-stainless-naming": { "go": { "type_name": "ToolCodeInterpreterContainerCodeInterpreterContainerAuto" } } }, "OpenAI.CodeInterpreterOutputImage": { "type": "object", "required": [ "type", "url" ], "properties": { "type": { "type": "string", "enum": [ "image" ], "description": "The type of the output. Always `image`.", "x-stainless-const": true, "default": "image" }, "url": { "type": "string", "format": "uri", "description": "The URL of the image output from the code interpreter." } }, "description": "The image output from the code interpreter.", "title": "Code interpreter output image" }, "OpenAI.CodeInterpreterOutputLogs": { "type": "object", "required": [ "type", "logs" ], "properties": { "type": { "type": "string", "enum": [ "logs" ], "description": "The type of the output. Always `logs`.", "x-stainless-const": true, "default": "logs" }, "logs": { "type": "string", "description": "The logs output from the code interpreter." 
} }, "description": "The logs output from the code interpreter.", "title": "Code interpreter output logs" }, "OpenAI.CodeInterpreterTool": { "type": "object", "required": [ "type", "container" ], "properties": { "type": { "type": "string", "enum": [ "code_interpreter" ], "description": "The type of the code interpreter tool. Always `code_interpreter`.", "x-stainless-const": true }, "container": { "anyOf": [ { "type": "string" }, { "$ref": "#/components/schemas/OpenAI.CodeInterpreterContainerAuto" } ], "description": "The code interpreter container. Can be a container ID or an object that\n specifies uploaded file IDs to make available to your code, along with an\n optional `memory_limit` setting." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A tool that runs Python code to help generate a response to a prompt.", "title": "Code interpreter" }, "OpenAI.ComparisonFilter": { "type": "object", "required": [ "type", "key", "value" ], "properties": { "type": { "type": "string", "enum": [ "eq", "ne", "gt", "gte", "lt", "lte", "in", "nin" ], "description": "Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`, `nin`.\n - `eq`: equals\n - `ne`: not equal\n - `gt`: greater than\n - `gte`: greater than or equal\n - `lt`: less than\n - `lte`: less than or equal\n - `in`: in\n - `nin`: not in", "default": "eq" }, "key": { "type": "string", "description": "The key to compare against the value." }, "value": { "anyOf": [ { "type": "string" }, { "type": "number" }, { "type": "boolean" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ComparisonFilterValueItems" } } ], "description": "The value to compare against the attribute key; supports string, number, or boolean types." 
} }, "description": "A filter used to compare a specified attribute key to a given value using a defined comparison operation.", "title": "Comparison Filter", "x-oaiMeta": { "name": "ComparisonFilter" } }, "OpenAI.ComparisonFilterValueItems": { "anyOf": [ { "type": "string" }, { "type": "number" } ] }, "OpenAI.CompletionUsage": { "type": "object", "required": [ "completion_tokens", "prompt_tokens", "total_tokens" ], "properties": { "completion_tokens": { "type": "integer", "description": "Number of tokens in the generated completion." }, "prompt_tokens": { "type": "integer", "description": "Number of tokens in the prompt." }, "total_tokens": { "type": "integer", "description": "Total number of tokens used in the request (prompt + completion)." }, "completion_tokens_details": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.CompletionUsageCompletionTokensDetails" } ], "description": "Breakdown of tokens used in a completion." }, "prompt_tokens_details": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.CompletionUsagePromptTokensDetails" } ], "description": "Breakdown of tokens used in the prompt." } }, "description": "Usage statistics for the completion request." }, "OpenAI.CompletionUsageCompletionTokensDetails": { "type": "object", "properties": { "accepted_prediction_tokens": { "type": "integer" }, "audio_tokens": { "type": "integer" }, "reasoning_tokens": { "type": "integer" }, "rejected_prediction_tokens": { "type": "integer" } } }, "OpenAI.CompletionUsagePromptTokensDetails": { "type": "object", "properties": { "audio_tokens": { "type": "integer" }, "cached_tokens": { "type": "integer" } } }, "OpenAI.CompoundFilter": { "type": "object", "required": [ "type", "filters" ], "properties": { "type": { "type": "string", "enum": [ "and", "or" ], "description": "Type of operation: `and` or `or`." }, "filters": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ComparisonFilter" }, { "$ref": "#/components/schemas/OpenAI.CompoundFilter" } ] }, "description": "Array of filters to combine. 
Items can be `ComparisonFilter` or `CompoundFilter`." } }, "description": "Combine multiple filters using `and` or `or`.", "title": "Compound Filter", "x-oaiMeta": { "name": "CompoundFilter" } }, "OpenAI.ComputerAction": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ComputerActionType" } }, "discriminator": { "propertyName": "type", "mapping": { "click": "#/components/schemas/OpenAI.ClickParam", "double_click": "#/components/schemas/OpenAI.DoubleClickAction", "drag": "#/components/schemas/OpenAI.Drag", "keypress": "#/components/schemas/OpenAI.KeyPressAction", "move": "#/components/schemas/OpenAI.Move", "screenshot": "#/components/schemas/OpenAI.Screenshot", "scroll": "#/components/schemas/OpenAI.Scroll", "type": "#/components/schemas/OpenAI.Type", "wait": "#/components/schemas/OpenAI.Wait" } } }, "OpenAI.ComputerActionType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "click", "double_click", "drag", "keypress", "move", "screenshot", "scroll", "type", "wait" ] } ] }, "OpenAI.ComputerCallSafetyCheckParam": { "type": "object", "required": [ "id" ], "properties": { "id": { "type": "string", "description": "The ID of the pending safety check." }, "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "description": "A pending safety check for the computer call." }, "OpenAI.ComputerEnvironment": { "type": "string", "enum": [ "windows", "mac", "linux", "ubuntu", "browser" ] }, "OpenAI.ComputerScreenshotContent": { "type": "object", "required": [ "type", "image_url", "file_id" ], "properties": { "type": { "type": "string", "enum": [ "computer_screenshot" ], "description": "Specifies the event type. 
For a computer screenshot, this property is always set to `computer_screenshot`.", "x-stainless-const": true, "default": "computer_screenshot" }, "image_url": { "anyOf": [ { "type": "string", "format": "uri" }, { "type": "null" } ] }, "file_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "description": "A screenshot of a computer.", "title": "Computer screenshot" }, "OpenAI.ComputerScreenshotImage": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "computer_screenshot" ], "description": "Specifies the event type. For a computer screenshot, this property is\n always set to `computer_screenshot`.", "x-stainless-const": true, "default": "computer_screenshot" }, "image_url": { "type": "string", "format": "uri", "description": "The URL of the screenshot image." }, "file_id": { "type": "string", "description": "The identifier of an uploaded file that contains the screenshot." } }, "description": "A computer screenshot image used with the computer use tool." }, "OpenAI.ComputerUsePreviewTool": { "type": "object", "required": [ "type", "environment", "display_width", "display_height" ], "properties": { "type": { "type": "string", "enum": [ "computer_use_preview" ], "description": "The type of the computer use tool. Always `computer_use_preview`.", "x-stainless-const": true, "default": "computer_use_preview" }, "environment": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerEnvironment" } ], "description": "The type of computer environment to control." }, "display_width": { "type": "integer", "description": "The width of the computer display." }, "display_height": { "type": "integer", "description": "The height of the computer display." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A tool that controls a virtual computer. 
Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).", "title": "Computer use preview" }, "OpenAI.ContainerFileCitationBody": { "type": "object", "required": [ "type", "container_id", "file_id", "start_index", "end_index", "filename" ], "properties": { "type": { "type": "string", "enum": [ "container_file_citation" ], "description": "The type of the container file citation. Always `container_file_citation`.", "x-stainless-const": true, "default": "container_file_citation" }, "container_id": { "type": "string", "description": "The ID of the container file." }, "file_id": { "type": "string", "description": "The ID of the file." }, "start_index": { "type": "integer", "description": "The index of the first character of the container file citation in the message." }, "end_index": { "type": "integer", "description": "The index of the last character of the container file citation in the message." }, "filename": { "type": "string", "description": "The filename of the container file cited." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Annotation" } ], "description": "A citation for a container file used to generate a model response.", "title": "Container file citation" }, "OpenAI.ContainerFileListResource": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of object returned, must be 'list'." }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ContainerFileResource" }, "description": "A list of container files." }, "first_id": { "type": "string", "description": "The ID of the first file in the list." }, "last_id": { "type": "string", "description": "The ID of the last file in the list." }, "has_more": { "type": "boolean", "description": "Whether there are more files available." 
} } }, "OpenAI.ContainerFileResource": { "type": "object", "required": [ "id", "object", "container_id", "created_at", "bytes", "path", "source" ], "properties": { "id": { "type": "string", "description": "Unique identifier for the file." }, "object": { "type": "string", "enum": [ "container.file" ], "description": "The type of this object (`container.file`)." }, "container_id": { "type": "string", "description": "The container this file belongs to." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) when the file was created." }, "bytes": { "type": "integer", "description": "Size of the file in bytes." }, "path": { "type": "string", "description": "Path of the file in the container." }, "source": { "type": "string", "description": "Source of the file (e.g., `user`, `assistant`)." } }, "title": "The container file object", "x-oaiMeta": { "name": "The container file object", "example": "{\n \"id\": \"cfile_682e0e8a43c88191a7978f477a09bdf5\",\n \"object\": \"container.file\",\n \"created_at\": 1747848842,\n \"bytes\": 880,\n \"container_id\": \"cntr_682e0e7318108198aa783fd921ff305e08e78805b9fdbb04\",\n \"path\": \"/mnt/data/88e12fa445d32636f190a0b33daed6cb-tsconfig.json\",\n \"source\": \"user\"\n}\n" } }, "OpenAI.ContainerListResource": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of object returned, must be 'list'." }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ContainerResource" }, "description": "A list of containers." }, "first_id": { "type": "string", "description": "The ID of the first container in the list." }, "last_id": { "type": "string", "description": "The ID of the last container in the list." }, "has_more": { "type": "boolean", "description": "Whether there are more containers available." 
} } }, "OpenAI.ContainerMemoryLimit": { "type": "string", "enum": [ "1g", "4g", "16g", "64g" ] }, "OpenAI.ContainerResource": { "type": "object", "required": [ "id", "object", "name", "created_at", "status" ], "properties": { "id": { "type": "string", "description": "Unique identifier for the container." }, "object": { "type": "string", "description": "The type of this object." }, "name": { "type": "string", "description": "Name of the container." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) when the container was created." }, "status": { "type": "string", "description": "Status of the container (e.g., active, deleted)." }, "last_active_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) when the container was last active." }, "expires_after": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ContainerResourceExpiresAfter" } ], "description": "The container will expire after this time period.\n The anchor is the reference point for the expiration.\n The minutes is the number of minutes after the anchor before the container expires." }, "memory_limit": { "type": "string", "enum": [ "1g", "4g", "16g", "64g" ], "description": "The memory limit configured for the container." 
} }, "title": "The container object", "x-oaiMeta": { "name": "The container object", "example": "{\n \"id\": \"cntr_682dfebaacac8198bbfe9c2474fb6f4a085685cbe3cb5863\",\n \"object\": \"container\",\n \"created_at\": 1747844794,\n \"status\": \"running\",\n \"expires_after\": {\n \"anchor\": \"last_active_at\",\n \"minutes\": 20\n },\n \"last_active_at\": 1747844794,\n \"memory_limit\": \"1g\",\n \"name\": \"My Container\"\n}\n" } }, "OpenAI.ContainerResourceExpiresAfter": { "type": "object", "properties": { "anchor": { "type": "string", "enum": [ "last_active_at" ] }, "minutes": { "type": "integer" } } }, "OpenAI.ConversationItem": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ConversationItemType" } }, "discriminator": { "propertyName": "type", "mapping": { "message": "#/components/schemas/OpenAI.ConversationItemMessage", "function_call": "#/components/schemas/OpenAI.ConversationItemFunctionToolCallResource", "function_call_output": "#/components/schemas/OpenAI.ConversationItemFunctionToolCallOutputResource", "file_search_call": "#/components/schemas/OpenAI.ConversationItemFileSearchToolCall", "web_search_call": "#/components/schemas/OpenAI.ConversationItemWebSearchToolCall", "image_generation_call": "#/components/schemas/OpenAI.ConversationItemImageGenToolCall", "computer_call": "#/components/schemas/OpenAI.ConversationItemComputerToolCall", "computer_call_output": "#/components/schemas/OpenAI.ConversationItemComputerToolCallOutputResource", "reasoning": "#/components/schemas/OpenAI.ConversationItemReasoningItem", "code_interpreter_call": "#/components/schemas/OpenAI.ConversationItemCodeInterpreterToolCall", "local_shell_call": "#/components/schemas/OpenAI.ConversationItemLocalShellToolCall", "local_shell_call_output": "#/components/schemas/OpenAI.ConversationItemLocalShellToolCallOutput", "shell_call": "#/components/schemas/OpenAI.ConversationItemFunctionShellCall", "shell_call_output": 
"#/components/schemas/OpenAI.ConversationItemFunctionShellCallOutput", "apply_patch_call": "#/components/schemas/OpenAI.ConversationItemApplyPatchToolCall", "apply_patch_call_output": "#/components/schemas/OpenAI.ConversationItemApplyPatchToolCallOutput", "mcp_list_tools": "#/components/schemas/OpenAI.ConversationItemMcpListTools", "mcp_approval_request": "#/components/schemas/OpenAI.ConversationItemMcpApprovalRequest", "mcp_approval_response": "#/components/schemas/OpenAI.ConversationItemMcpApprovalResponseResource", "mcp_call": "#/components/schemas/OpenAI.ConversationItemMcpToolCall", "custom_tool_call": "#/components/schemas/OpenAI.ConversationItemCustomToolCall", "custom_tool_call_output": "#/components/schemas/OpenAI.ConversationItemCustomToolCallOutput" } }, "description": "A single item within a conversation. The set of possible types are the same as the `output` type of a [Response object](https://platform.openai.com/docs/api-reference/responses/object#responses/object-output).", "title": "Conversation item" }, "OpenAI.ConversationItemApplyPatchToolCall": { "type": "object", "required": [ "type", "id", "call_id", "status", "operation" ], "properties": { "type": { "type": "string", "enum": [ "apply_patch_call" ], "description": "The type of the item. Always `apply_patch_call`.", "x-stainless-const": true, "default": "apply_patch_call" }, "id": { "type": "string", "description": "The unique ID of the apply patch tool call. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the apply patch tool call generated by the model." }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchCallStatus" } ], "description": "The status of the apply patch tool call. One of `in_progress` or `completed`." 
}, "operation": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchFileOperation" } ], "description": "One of the create_file, delete_file, or update_file operations applied via apply_patch." }, "created_by": { "type": "string", "description": "The ID of the entity that created this tool call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "A tool call that applies file diffs by creating, deleting, or updating files.", "title": "Apply patch tool call" }, "OpenAI.ConversationItemApplyPatchToolCallOutput": { "type": "object", "required": [ "type", "id", "call_id", "status" ], "properties": { "type": { "type": "string", "enum": [ "apply_patch_call_output" ], "description": "The type of the item. Always `apply_patch_call_output`.", "x-stainless-const": true, "default": "apply_patch_call_output" }, "id": { "type": "string", "description": "The unique ID of the apply patch tool call output. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the apply patch tool call generated by the model." }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchCallOutputStatus" } ], "description": "The status of the apply patch tool call output. One of `completed` or `failed`." }, "output": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "created_by": { "type": "string", "description": "The ID of the entity that created this tool call output." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "The output emitted by an apply patch tool call.", "title": "Apply patch tool call output" }, "OpenAI.ConversationItemCodeInterpreterToolCall": { "type": "object", "required": [ "type", "id", "status", "container_id", "code", "outputs" ], "properties": { "type": { "type": "string", "enum": [ "code_interpreter_call" ], "description": "The type of the code interpreter tool call. 
Always `code_interpreter_call`.", "x-stainless-const": true, "default": "code_interpreter_call" }, "id": { "type": "string", "description": "The unique ID of the code interpreter tool call." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete", "interpreting", "failed" ], "description": "The status of the code interpreter tool call. Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`." }, "container_id": { "type": "string", "description": "The ID of the container used to run the code." }, "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "outputs": { "anyOf": [ { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CodeInterpreterOutputLogs" }, { "$ref": "#/components/schemas/OpenAI.CodeInterpreterOutputImage" } ] } }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "A tool call to run code.", "title": "Code interpreter tool call" }, "OpenAI.ConversationItemComputerToolCall": { "type": "object", "required": [ "type", "id", "call_id", "action", "pending_safety_checks", "status" ], "properties": { "type": { "type": "string", "enum": [ "computer_call" ], "description": "The type of the computer call. Always `computer_call`.", "default": "computer_call" }, "id": { "type": "string", "description": "The unique ID of the computer call." }, "call_id": { "type": "string", "description": "An identifier used when responding to the tool call with output." }, "action": { "$ref": "#/components/schemas/OpenAI.ComputerAction" }, "pending_safety_checks": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ComputerCallSafetyCheckParam" }, "description": "The pending safety checks for the computer call." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n `incomplete`. 
Populated when items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "A tool call to a computer use tool. See the\n[computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.", "title": "Computer tool call" }, "OpenAI.ConversationItemComputerToolCallOutputResource": { "type": "object", "required": [ "type", "call_id", "output" ], "properties": { "type": { "type": "string", "enum": [ "computer_call_output" ], "description": "The type of the computer tool call output. Always `computer_call_output`.", "x-stainless-const": true, "default": "computer_call_output" }, "id": { "type": "string", "description": "The ID of the computer tool call output." }, "call_id": { "type": "string", "description": "The ID of the computer tool call that produced the output." }, "acknowledged_safety_checks": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ComputerCallSafetyCheckParam" }, "description": "The safety checks reported by the API that have been acknowledged by the\n developer." }, "output": { "$ref": "#/components/schemas/OpenAI.ComputerScreenshotImage" }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the message input. One of `in_progress`, `completed`, or\n `incomplete`. Populated when input items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ] }, "OpenAI.ConversationItemCustomToolCall": { "type": "object", "required": [ "type", "call_id", "name", "input" ], "properties": { "type": { "type": "string", "enum": [ "custom_tool_call" ], "description": "The type of the custom tool call. Always `custom_tool_call`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the custom tool call in the OpenAI platform." 
}, "call_id": { "type": "string", "description": "An identifier used to map this custom tool call to a tool call output." }, "name": { "type": "string", "description": "The name of the custom tool being called." }, "input": { "type": "string", "description": "The input for the custom tool call generated by the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "A call to a custom tool created by the model.", "title": "Custom tool call" }, "OpenAI.ConversationItemCustomToolCallOutput": { "type": "object", "required": [ "type", "call_id", "output" ], "properties": { "type": { "type": "string", "enum": [ "custom_tool_call_output" ], "description": "The type of the custom tool call output. Always `custom_tool_call_output`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the custom tool call output in the OpenAI platform." }, "call_id": { "type": "string", "description": "The call ID, used to map this custom tool call output to a custom tool call." }, "output": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FunctionAndCustomToolCallOutput" } } ], "description": "The output from the custom tool call generated by your code.\n Can be a string or a list of output content." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "The output of a custom tool call from your code, being sent back to the model.", "title": "Custom tool call output" }, "OpenAI.ConversationItemFileSearchToolCall": { "type": "object", "required": [ "id", "type", "status", "queries" ], "properties": { "id": { "type": "string", "description": "The unique ID of the file search tool call." }, "type": { "type": "string", "enum": [ "file_search_call" ], "description": "The type of the file search tool call. 
Always `file_search_call`.", "x-stainless-const": true }, "status": { "type": "string", "enum": [ "in_progress", "searching", "completed", "incomplete", "failed" ], "description": "The status of the file search tool call. One of `in_progress`,\n `searching`, `completed`, `incomplete` or `failed`." }, "queries": { "type": "array", "items": { "type": "string" }, "description": "The queries used to search for files." }, "results": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FileSearchToolCallResults" } }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "The results of a file search tool call. See the\n[file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.", "title": "File search tool call" }, "OpenAI.ConversationItemFunctionShellCall": { "type": "object", "required": [ "type", "id", "call_id", "action", "status" ], "properties": { "type": { "type": "string", "enum": [ "shell_call" ], "description": "The type of the item. Always `shell_call`.", "x-stainless-const": true, "default": "shell_call" }, "id": { "type": "string", "description": "The unique ID of the shell tool call. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the shell tool call generated by the model." }, "action": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.FunctionShellAction" } ], "description": "The shell commands and limits that describe how to run the tool call." }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.LocalShellCallStatus" } ], "description": "The status of the shell call. One of `in_progress`, `completed`, or `incomplete`." }, "created_by": { "type": "string", "description": "The ID of the entity that created this tool call." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "A tool call that executes one or more shell commands in a managed environment.", "title": "Shell tool call" }, "OpenAI.ConversationItemFunctionShellCallOutput": { "type": "object", "required": [ "type", "id", "call_id", "output", "max_output_length" ], "properties": { "type": { "type": "string", "enum": [ "shell_call_output" ], "description": "The type of the shell call output. Always `shell_call_output`.", "x-stainless-const": true, "default": "shell_call_output" }, "id": { "type": "string", "description": "The unique ID of the shell call output. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the shell tool call generated by the model." }, "output": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FunctionShellCallOutputContent" }, "description": "An array of shell call output contents" }, "max_output_length": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "created_by": { "type": "string", "description": "The identifier of the actor that created the item." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "The output of a shell tool call that was emitted.", "title": "Shell call output" }, "OpenAI.ConversationItemFunctionToolCallOutputResource": { "type": "object", "required": [ "type", "call_id", "output" ], "properties": { "id": { "type": "string", "description": "The unique ID of the function tool call output. Populated when this item\n is returned via API." }, "type": { "type": "string", "enum": [ "function_call_output" ], "description": "The type of the function tool call output. Always `function_call_output`.", "x-stainless-const": true }, "call_id": { "type": "string", "description": "The unique ID of the function tool call generated by the model." 
}, "output": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FunctionAndCustomToolCallOutput" } } ], "description": "The output from the function call generated by your code.\n Can be a string or a list of output content." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n `incomplete`. Populated when items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ] }, "OpenAI.ConversationItemFunctionToolCallResource": { "type": "object", "required": [ "type", "call_id", "name", "arguments" ], "properties": { "id": { "type": "string", "description": "The unique ID of the function tool call." }, "type": { "type": "string", "enum": [ "function_call" ], "description": "The type of the function tool call. Always `function_call`.", "x-stainless-const": true }, "call_id": { "type": "string", "description": "The unique ID of the function tool call generated by the model." }, "name": { "type": "string", "description": "The name of the function to run." }, "arguments": { "type": "string", "description": "A JSON string of the arguments to pass to the function." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n `incomplete`. Populated when items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ] }, "OpenAI.ConversationItemImageGenToolCall": { "type": "object", "required": [ "type", "id", "status", "result" ], "properties": { "type": { "type": "string", "enum": [ "image_generation_call" ], "description": "The type of the image generation call. Always `image_generation_call`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the image generation call." 
}, "status": { "type": "string", "enum": [ "in_progress", "completed", "generating", "failed" ], "description": "The status of the image generation call." }, "result": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "An image generation request made by the model.", "title": "Image generation call" }, "OpenAI.ConversationItemList": { "type": "object", "required": [ "object", "data", "has_more", "first_id", "last_id" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of object returned, must be `list`.", "x-stainless-const": true }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ConversationItem" }, "description": "A list of conversation items." }, "has_more": { "type": "boolean", "description": "Whether there are more items available." }, "first_id": { "type": "string", "description": "The ID of the first item in the list." }, "last_id": { "type": "string", "description": "The ID of the last item in the list." } }, "description": "A list of Conversation items.", "title": "The conversation item list", "x-oaiMeta": { "name": "The item list", "group": "conversations" } }, "OpenAI.ConversationItemLocalShellToolCall": { "type": "object", "required": [ "type", "id", "call_id", "action", "status" ], "properties": { "type": { "type": "string", "enum": [ "local_shell_call" ], "description": "The type of the local shell call. Always `local_shell_call`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the local shell call." }, "call_id": { "type": "string", "description": "The unique ID of the local shell tool call generated by the model." }, "action": { "$ref": "#/components/schemas/OpenAI.LocalShellExecAction" }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the local shell call." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "A tool call to run a command on the local shell.", "title": "Local shell call" }, "OpenAI.ConversationItemLocalShellToolCallOutput": { "type": "object", "required": [ "type", "id", "output" ], "properties": { "type": { "type": "string", "enum": [ "local_shell_call_output" ], "description": "The type of the local shell tool call output. Always `local_shell_call_output`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the local shell tool call generated by the model." }, "output": { "type": "string", "description": "A JSON string of the output of the local shell tool call." }, "status": { "anyOf": [ { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ] }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "The output of a local shell tool call.", "title": "Local shell call output" }, "OpenAI.ConversationItemMcpApprovalRequest": { "type": "object", "required": [ "type", "id", "server_label", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "mcp_approval_request" ], "description": "The type of the item. Always `mcp_approval_request`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the approval request." }, "server_label": { "type": "string", "description": "The label of the MCP server making the request." }, "name": { "type": "string", "description": "The name of the tool to run." }, "arguments": { "type": "string", "description": "A JSON string of arguments for the tool." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "A request for human approval of a tool invocation.", "title": "MCP approval request" }, "OpenAI.ConversationItemMcpApprovalResponseResource": { "type": "object", "required": [ "type", "id", "approval_request_id", "approve" ], "properties": { "type": { "type": "string", "enum": [ "mcp_approval_response" ], "description": "The type of the item. Always `mcp_approval_response`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the approval response" }, "approval_request_id": { "type": "string", "description": "The ID of the approval request being answered." }, "approve": { "type": "boolean", "description": "Whether the request was approved." }, "reason": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "A response to an MCP approval request.", "title": "MCP approval response" }, "OpenAI.ConversationItemMcpListTools": { "type": "object", "required": [ "type", "id", "server_label", "tools" ], "properties": { "type": { "type": "string", "enum": [ "mcp_list_tools" ], "description": "The type of the item. Always `mcp_list_tools`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the list." }, "server_label": { "type": "string", "description": "The label of the MCP server." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.MCPListToolsTool" }, "description": "The tools available on the server." 
}, "error": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "A list of tools available on an MCP server.", "title": "MCP list tools" }, "OpenAI.ConversationItemMcpToolCall": { "type": "object", "required": [ "type", "id", "server_label", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "mcp_call" ], "description": "The type of the item. Always `mcp_call`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the tool call." }, "server_label": { "type": "string", "description": "The label of the MCP server running the tool." }, "name": { "type": "string", "description": "The name of the tool that was run." }, "arguments": { "type": "string", "description": "A JSON string of the arguments passed to the tool." }, "output": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "error": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.MCPToolCallStatus" } ], "description": "The status of the tool call. One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`." }, "approval_request_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "An invocation of a tool on an MCP server.", "title": "MCP tool call" }, "OpenAI.ConversationItemMessage": { "type": "object", "required": [ "type", "id", "status", "role", "content" ], "properties": { "type": { "type": "string", "enum": [ "message" ], "description": "The type of the message. Always set to `message`.", "x-stainless-const": true, "default": "message" }, "id": { "type": "string", "description": "The unique ID of the message." }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.MessageStatus" } ], "description": "The status of item. 
One of `in_progress`, `completed`, or `incomplete`. Populated when items are returned via API." }, "role": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.MessageRole" } ], "description": "The role of the message. One of `unknown`, `user`, `assistant`, `system`, `critic`, `discriminator`, `developer`, or `tool`." }, "content": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.InputTextContent" }, { "$ref": "#/components/schemas/OpenAI.OutputTextContent" }, { "$ref": "#/components/schemas/OpenAI.TextContent" }, { "$ref": "#/components/schemas/OpenAI.SummaryTextContent" }, { "$ref": "#/components/schemas/OpenAI.ReasoningTextContent" }, { "$ref": "#/components/schemas/OpenAI.RefusalContent" }, { "$ref": "#/components/schemas/OpenAI.InputImageContent" }, { "$ref": "#/components/schemas/OpenAI.ComputerScreenshotContent" }, { "$ref": "#/components/schemas/OpenAI.InputFileContent" } ] }, "description": "The content of the message" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "A message to or from the model.", "title": "Message" }, "OpenAI.ConversationItemReasoningItem": { "type": "object", "required": [ "type", "id", "summary" ], "properties": { "type": { "type": "string", "enum": [ "reasoning" ], "description": "The type of the object. Always `reasoning`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique identifier of the reasoning content." }, "encrypted_content": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "summary": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Summary" }, "description": "Reasoning summary content." }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ReasoningTextContent" }, "description": "Reasoning text content." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. 
One of `in_progress`, `completed`, or\n `incomplete`. Populated when items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "A description of the chain of thought used by a reasoning model while generating\na response. Be sure to include these items in your `input` to the Responses API\nfor subsequent turns of a conversation if you are manually\n[managing context](https://platform.openai.com/docs/guides/conversation-state).", "title": "Reasoning" }, "OpenAI.ConversationItemType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "message", "function_call", "function_call_output", "file_search_call", "web_search_call", "image_generation_call", "computer_call", "computer_call_output", "reasoning", "code_interpreter_call", "local_shell_call", "local_shell_call_output", "shell_call", "shell_call_output", "apply_patch_call", "apply_patch_call_output", "mcp_list_tools", "mcp_approval_request", "mcp_approval_response", "mcp_call", "custom_tool_call", "custom_tool_call_output" ] } ] }, "OpenAI.ConversationItemWebSearchToolCall": { "type": "object", "required": [ "id", "type", "status", "action" ], "properties": { "id": { "type": "string", "description": "The unique ID of the web search tool call." }, "type": { "type": "string", "enum": [ "web_search_call" ], "description": "The type of the web search tool call. Always `web_search_call`.", "x-stainless-const": true }, "status": { "type": "string", "enum": [ "in_progress", "searching", "completed", "failed" ], "description": "The status of the web search tool call." 
}, "action": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.WebSearchActionSearch" }, { "$ref": "#/components/schemas/OpenAI.WebSearchActionOpenPage" }, { "$ref": "#/components/schemas/OpenAI.WebSearchActionFind" } ], "description": "An object describing the specific action taken in this web search call.\n Includes details on how the model used the web (search, open_page, find)." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationItem" } ], "description": "The results of a web search tool call. See the\n[web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.", "title": "Web search tool call" }, "OpenAI.ConversationParam": { "anyOf": [ { "type": "string" }, { "$ref": "#/components/schemas/OpenAI.ConversationParam-2" } ], "description": "The conversation that this response belongs to. Items from this conversation are prepended to `input_items` for this response request.\nInput items and output items from this response are automatically added to this conversation after this response completes." }, "OpenAI.ConversationParam-2": { "type": "object", "required": [ "id" ], "properties": { "id": { "type": "string", "description": "The unique ID of the conversation." } }, "description": "The conversation that this response belongs to.", "title": "Conversation object" }, "OpenAI.ConversationReference": { "type": "object", "required": [ "id" ], "properties": { "id": { "type": "string", "description": "The unique ID of the conversation that this response was associated with." } }, "description": "The conversation that this response belonged to. Input items and output items from this response were automatically added to this conversation.", "title": "Conversation" }, "OpenAI.ConversationResource": { "type": "object", "required": [ "id", "object", "metadata", "created_at" ], "properties": { "id": { "type": "string", "description": "The unique ID of the conversation." 
}, "object": { "type": "string", "enum": [ "conversation" ], "description": "The object type, which is always `conversation`.", "x-stainless-const": true, "default": "conversation" }, "metadata": { "description": "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.\n Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The time at which the conversation was created, measured in seconds since the Unix epoch." } } }, "OpenAI.CreateChatCompletionRequestAudio": { "type": "object", "required": [ "voice", "format" ], "properties": { "voice": { "$ref": "#/components/schemas/OpenAI.VoiceIdsShared" }, "format": { "type": "string", "enum": [ "wav", "aac", "mp3", "flac", "opus", "pcm16" ] } } }, "OpenAI.CreateChatCompletionRequestResponseFormat": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.CreateChatCompletionRequestResponseFormatType" } }, "discriminator": { "propertyName": "type", "mapping": { "json_schema": "#/components/schemas/OpenAI.ResponseFormatJsonSchema", "text": "#/components/schemas/OpenAI.CreateChatCompletionRequestResponseFormatResponseFormatText", "json_object": "#/components/schemas/OpenAI.CreateChatCompletionRequestResponseFormatResponseFormatJsonObject" } }, "description": "An object specifying the format that the model must output.\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables\nStructured Outputs which ensures the model will match your supplied JSON\nschema. 
Learn more in the [Structured Outputs\nguide](https://platform.openai.com/docs/guides/structured-outputs).\nSetting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\nensures the message the model generates is valid JSON. Using `json_schema`\nis preferred for models that support it." }, "OpenAI.CreateChatCompletionRequestResponseFormatResponseFormatJsonObject": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "json_object" ], "description": "The type of response format being defined. Always `json_object`.", "x-stainless-const": true } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.CreateChatCompletionRequestResponseFormat" } ], "description": "JSON object response format. An older method of generating JSON responses.\nUsing `json_schema` is recommended for models that support it. Note that the\nmodel will not generate JSON without a system or user message instructing it\nto do so.", "title": "JSON object" }, "OpenAI.CreateChatCompletionRequestResponseFormatResponseFormatText": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "The type of response format being defined. Always `text`.", "x-stainless-const": true } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.CreateChatCompletionRequestResponseFormat" } ], "description": "Default response format. 
Used to generate text responses.", "title": "Text" }, "OpenAI.CreateChatCompletionRequestResponseFormatType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "text", "json_schema", "json_object" ] } ] }, "OpenAI.CreateChatCompletionResponseChoices": { "type": "object", "required": [ "finish_reason", "index", "message", "logprobs" ], "properties": { "finish_reason": { "type": "string", "enum": [ "stop", "length", "tool_calls", "content_filter", "function_call" ] }, "index": { "type": "integer" }, "message": { "$ref": "#/components/schemas/OpenAI.ChatCompletionResponseMessage" }, "logprobs": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateChatCompletionResponseChoicesLogprobs" }, { "type": "null" } ] }, "content_filter_results": { "$ref": "#/components/schemas/AzureContentFilterResultForChoice" } } }, "OpenAI.CreateChatCompletionResponseChoicesLogprobs": { "type": "object", "required": [ "content", "refusal" ], "properties": { "content": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTokenLogprob" } }, { "type": "null" } ] }, "refusal": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTokenLogprob" } }, { "type": "null" } ] } } }, "OpenAI.CreateChatCompletionStreamResponseChoices": { "type": "object", "required": [ "delta", "finish_reason", "index" ], "properties": { "delta": { "$ref": "#/components/schemas/OpenAI.ChatCompletionStreamResponseDelta" }, "logprobs": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateChatCompletionStreamResponseChoicesLogprobs" }, { "type": "null" } ] }, "finish_reason": { "anyOf": [ { "type": "string", "enum": [ "stop", "length", "tool_calls", "content_filter", "function_call" ] }, { "type": "null" } ] }, "index": { "type": "integer" } } }, "OpenAI.CreateChatCompletionStreamResponseChoicesLogprobs": { "type": "object", "required": [ "content", "refusal" ], "properties": { "content": { "anyOf": [ { "type": 
"array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTokenLogprob" } }, { "type": "null" } ] }, "refusal": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTokenLogprob" } }, { "type": "null" } ] } } }, "OpenAI.CreateCompletionResponseChoices": { "type": "object", "required": [ "finish_reason", "index", "logprobs", "text" ], "properties": { "finish_reason": { "type": "string", "enum": [ "stop", "length", "content_filter" ] }, "index": { "type": "integer", "format": "int32" }, "logprobs": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateCompletionResponseChoicesLogprobs" }, { "type": "null" } ] }, "text": { "type": "string" }, "content_filter_results": { "$ref": "#/components/schemas/AzureContentFilterResultForChoice" } } }, "OpenAI.CreateCompletionResponseChoicesLogprobs": { "type": "object", "properties": { "text_offset": { "type": "array", "items": { "type": "integer", "format": "int32" } }, "token_logprobs": { "type": "array", "items": { "type": "number", "format": "float" } }, "tokens": { "type": "array", "items": { "type": "string" } }, "top_logprobs": { "type": "array", "items": { "type": "object", "unevaluatedProperties": { "type": "number", "format": "float" } } } } }, "OpenAI.CreateContainerBody": { "type": "object", "required": [ "name" ], "properties": { "name": { "type": "string", "description": "Name of the container to create." }, "file_ids": { "type": "array", "items": { "type": "string" }, "description": "IDs of files to copy to the container." }, "expires_after": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.CreateContainerBodyExpiresAfter" } ], "description": "Container expiration time in seconds relative to the 'anchor' time." }, "memory_limit": { "type": "string", "enum": [ "1g", "4g", "16g", "64g" ], "description": "Optional memory limit for the container. Defaults to \"1g\"." 
} } }, "OpenAI.CreateContainerBodyExpiresAfter": { "type": "object", "required": [ "anchor", "minutes" ], "properties": { "anchor": { "type": "string", "enum": [ "last_active_at" ] }, "minutes": { "type": "integer" } } }, "OpenAI.CreateContainerFileBody": { "type": "object", "properties": { "file_id": { "type": "string", "description": "Name of the file to create." }, "file": { "description": "The File object (not file name) to be uploaded." } }, "required": [] }, "OpenAI.CreateConversationBody": { "type": "object", "properties": { "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "items": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.InputItem" } }, { "type": "null" } ] } } }, "OpenAI.CreateConversationItemsParametersBody": { "type": "object", "required": [ "items" ], "properties": { "items": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.InputItem" }, "maxItems": 20 } } }, "OpenAI.CreateEmbeddingRequest": { "type": "object", "required": [ "input", "model" ], "properties": { "input": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "type": "string" } }, { "type": "array", "items": { "type": "integer" } }, { "type": "array", "items": { "type": "array", "items": { "type": "integer" } } } ], "description": "Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for all embedding models), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. In addition to the per-input token limit, all embedding models enforce a maximum of 300,000 tokens summed across all inputs in a single request." 
}, "model": { "type": "string", "description": "ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them.", "x-oaiTypeLabel": "string" }, "encoding_format": { "type": "string", "enum": [ "float", "base64" ], "description": "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).", "default": "float" }, "dimensions": { "type": "integer", "minimum": 1, "description": "The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models." }, "user": { "type": "string", "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids)." } } }, "OpenAI.CreateEmbeddingResponse": { "type": "object", "required": [ "data", "model", "object", "usage" ], "properties": { "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Embedding" }, "description": "The list of embeddings generated by the model." }, "model": { "type": "string", "description": "The name of the model used to generate the embedding." }, "object": { "type": "string", "enum": [ "list" ], "description": "The object type, which is always \"list\".", "x-stainless-const": true }, "usage": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.CreateEmbeddingResponseUsage" } ], "description": "The usage information for the request." 
} } }, "OpenAI.CreateEmbeddingResponseUsage": { "type": "object", "required": [ "prompt_tokens", "total_tokens" ], "properties": { "prompt_tokens": { "type": "integer" }, "total_tokens": { "type": "integer" } } }, "OpenAI.CreateEvalCompletionsRunDataSource": { "type": "object", "required": [ "type", "source" ], "properties": { "type": { "type": "string", "enum": [ "completions" ], "description": "The type of run data source. Always `completions`.", "default": "completions" }, "input_messages": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateEvalCompletionsRunDataSourceInputMessagesTemplate" }, { "$ref": "#/components/schemas/OpenAI.CreateEvalCompletionsRunDataSourceInputMessagesItemReference" } ], "description": "Used when sampling from a model. Dictates the structure of the messages passed into the model. Can either be a reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template with variable references to the `item` namespace." }, "sampling_params": { "$ref": "#/components/schemas/AzureCompletionsSamplingParams" }, "model": { "type": "string", "description": "The name of the model to use for generating completions (e.g. \"o3-mini\")." }, "source": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.EvalJsonlFileContentSource" }, { "$ref": "#/components/schemas/OpenAI.EvalJsonlFileIdSource" }, { "$ref": "#/components/schemas/OpenAI.EvalStoredCompletionsSource" } ], "description": "Determines what populates the `item` namespace in this run's data source." 
} }, "description": "A CompletionsRunDataSource object describing a model sampling configuration.", "title": "CompletionsRunDataSource", "x-oaiMeta": { "name": "The completions data source object used to configure an individual run", "group": "eval runs", "example": "{\n \"name\": \"gpt-4o-mini-2024-07-18\",\n \"data_source\": {\n \"type\": \"completions\",\n \"input_messages\": {\n \"type\": \"item_reference\",\n \"item_reference\": \"item.input\"\n },\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"source\": {\n \"type\": \"stored_completions\",\n \"model\": \"gpt-4o-mini-2024-07-18\"\n }\n }\n}\n" } }, "OpenAI.CreateEvalCompletionsRunDataSourceInputMessagesItemReference": { "type": "object", "required": [ "type", "item_reference" ], "properties": { "type": { "type": "string", "enum": [ "item_reference" ] }, "item_reference": { "type": "string" } } }, "OpenAI.CreateEvalCompletionsRunDataSourceInputMessagesTemplate": { "type": "object", "required": [ "type", "template" ], "properties": { "type": { "type": "string", "enum": [ "template" ] }, "template": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.EasyInputMessage" }, { "$ref": "#/components/schemas/OpenAI.EvalItem" } ] } } } }, "OpenAI.CreateEvalCompletionsRunDataSourceSamplingParams": { "type": "object", "properties": { "reasoning_effort": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ReasoningEffort" } ], "description": "Controls the level of reasoning effort applied during generation." 
}, "temperature": { "type": "number", "description": "A higher temperature increases randomness in the outputs.", "default": 1 }, "max_completion_tokens": { "type": "integer" }, "top_p": { "type": "number", "description": "An alternative to temperature for nucleus sampling; 1.0 includes all tokens.", "default": 1 }, "seed": { "type": "integer", "description": "A seed value initializes the randomness during sampling.", "default": 42 }, "response_format": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseFormatText" }, { "$ref": "#/components/schemas/OpenAI.ResponseFormatJsonSchema" }, { "$ref": "#/components/schemas/OpenAI.ResponseFormatJsonObject" } ] }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTool" } } } }, "OpenAI.CreateEvalCustomDataSourceConfig": { "type": "object", "required": [ "type", "item_schema" ], "properties": { "type": { "type": "string", "enum": [ "custom" ], "description": "The type of data source. Always `custom`.", "x-stainless-const": true, "default": "custom" }, "item_schema": { "type": "object", "unevaluatedProperties": {}, "description": "The json schema for each row in the data source." 
}, "include_sample_schema": { "type": "boolean", "description": "Whether the eval should expect you to populate the sample namespace (ie, by generating responses off of your data source)" } }, "description": "A CustomDataSourceConfig object that defines the schema for the data source used for the evaluation runs.\nThis schema is used to define the shape of the data that will be:\n- Used to define your testing criteria and\n- What data is required when creating a run", "title": "CustomDataSourceConfig", "x-oaiMeta": { "name": "The eval file data source config object", "group": "evals", "example": "{\n \"type\": \"custom\",\n \"item_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"age\": {\"type\": \"integer\"}\n },\n \"required\": [\"name\", \"age\"]\n },\n \"include_sample_schema\": true\n}\n" } }, "OpenAI.CreateEvalItem": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "description": "The role of the message (e.g. \"system\", \"assistant\", \"user\")." }, "content": { "type": "string", "description": "The content of the message." } }, "description": "A chat message that makes up the prompt or context. May include variable references to the `item` namespace, ie {{item.name}}.", "title": "SimpleInputMessage", "x-oaiMeta": { "name": "The chat message object used to configure an individual run" } }, "OpenAI.CreateEvalJsonlRunDataSource": { "type": "object", "required": [ "type", "source" ], "properties": { "type": { "type": "string", "enum": [ "jsonl" ], "description": "The type of data source. Always `jsonl`.", "x-stainless-const": true, "default": "jsonl" }, "source": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.EvalJsonlFileContentSource" }, { "$ref": "#/components/schemas/OpenAI.EvalJsonlFileIdSource" } ], "description": "Determines what populates the `item` namespace in the data source." 
} }, "description": "A JsonlRunDataSource object with that specifies a JSONL file that matches the eval", "title": "JsonlRunDataSource", "x-oaiMeta": { "name": "The file data source object for the eval run configuration", "group": "evals", "example": "{\n \"type\": \"jsonl\",\n \"source\": {\n \"type\": \"file_id\",\n \"id\": \"file-9GYS6xbkWgWhmE7VoLUWFg\"\n }\n}\n" } }, "OpenAI.CreateEvalLabelModelGrader": { "type": "object", "required": [ "type", "name", "model", "input", "labels", "passing_labels" ], "properties": { "type": { "type": "string", "enum": [ "label_model" ], "description": "The object type, which is always `label_model`.", "x-stainless-const": true }, "name": { "type": "string", "description": "The name of the grader." }, "model": { "type": "string", "description": "The model to use for the evaluation. Must support structured outputs." }, "input": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CreateEvalItem" }, "description": "A list of chat messages forming the prompt or context. May include variable references to the `item` namespace, ie {{item.name}}." }, "labels": { "type": "array", "items": { "type": "string" }, "description": "The labels to classify to each item in the evaluation." }, "passing_labels": { "type": "array", "items": { "type": "string" }, "description": "The labels that indicate a passing result. Must be a subset of labels." 
} }, "description": "A LabelModelGrader object which uses a model to assign labels to each item\nin the evaluation.", "title": "LabelModelGrader", "x-oaiMeta": { "name": "The eval label model grader object", "group": "evals", "example": "{\n \"type\": \"label_model\",\n \"model\": \"gpt-4o-2024-08-06\",\n \"input\": [\n {\n \"role\": \"system\",\n \"content\": \"Classify the sentiment of the following statement as one of 'positive', 'neutral', or 'negative'\"\n },\n {\n \"role\": \"user\",\n \"content\": \"Statement: {{item.response}}\"\n }\n ],\n \"passing_labels\": [\"positive\"],\n \"labels\": [\"positive\", \"neutral\", \"negative\"],\n \"name\": \"Sentiment label grader\"\n}\n" } }, "OpenAI.CreateEvalLogsDataSourceConfig": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "logs" ], "description": "The type of data source. Always `logs`.", "x-stainless-const": true, "default": "logs" }, "metadata": { "type": "object", "unevaluatedProperties": {}, "description": "Metadata filters for the logs data source." } }, "description": "A data source config which specifies the metadata property of your logs query.\nThis is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc.", "title": "LogsDataSourceConfig", "x-oaiMeta": { "name": "The logs data source object for evals", "group": "evals", "example": "{\n \"type\": \"logs\",\n \"metadata\": {\n \"use_case\": \"customer_support_agent\"\n }\n}\n" } }, "OpenAI.CreateEvalResponsesRunDataSource": { "type": "object", "required": [ "type", "source" ], "properties": { "type": { "type": "string", "enum": [ "responses" ], "description": "The type of run data source. 
Always `responses`.", "default": "responses" }, "input_messages": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateEvalResponsesRunDataSourceInputMessagesTemplate" }, { "$ref": "#/components/schemas/OpenAI.CreateEvalResponsesRunDataSourceInputMessagesItemReference" } ], "description": "Used when sampling from a model. Dictates the structure of the messages passed into the model. Can either be a reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template with variable references to the `item` namespace." }, "sampling_params": { "$ref": "#/components/schemas/AzureResponsesSamplingParams" }, "model": { "type": "string", "description": "The name of the model to use for generating completions (e.g. \"o3-mini\")." }, "source": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.EvalJsonlFileContentSource" }, { "$ref": "#/components/schemas/OpenAI.EvalJsonlFileIdSource" }, { "$ref": "#/components/schemas/OpenAI.EvalResponsesSource" } ], "description": "Determines what populates the `item` namespace in this run's data source." 
} }, "description": "A ResponsesRunDataSource object describing a model sampling configuration.", "title": "CreateEvalResponsesRunDataSource", "x-oaiMeta": { "name": "The completions data source object used to configure an individual run", "group": "eval runs", "example": "{\n \"name\": \"gpt-4o-mini-2024-07-18\",\n \"data_source\": {\n \"type\": \"responses\",\n \"input_messages\": {\n \"type\": \"item_reference\",\n \"item_reference\": \"item.input\"\n },\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"source\": {\n \"type\": \"responses\",\n \"model\": \"gpt-4o-mini-2024-07-18\"\n }\n }\n}\n" } }, "OpenAI.CreateEvalResponsesRunDataSourceInputMessagesItemReference": { "type": "object", "required": [ "type", "item_reference" ], "properties": { "type": { "type": "string", "enum": [ "item_reference" ] }, "item_reference": { "type": "string" } } }, "OpenAI.CreateEvalResponsesRunDataSourceInputMessagesTemplate": { "type": "object", "required": [ "type", "template" ], "properties": { "type": { "type": "string", "enum": [ "template" ] }, "template": { "type": "array", "items": { "anyOf": [ { "type": "object", "properties": { "role": { "type": "string" }, "content": { "type": "string" } }, "required": [ "role", "content" ] }, { "$ref": "#/components/schemas/OpenAI.EvalItem" } ] } } } }, "OpenAI.CreateEvalResponsesRunDataSourceSamplingParams": { "type": "object", "properties": { "reasoning_effort": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ReasoningEffort" } ], "description": "Controls the level of reasoning effort applied during generation." 
}, "temperature": { "type": "number", "description": "A higher temperature increases randomness in the outputs.", "default": 1 }, "top_p": { "type": "number", "description": "An alternative to temperature for nucleus sampling; 1.0 includes all tokens.", "default": 1 }, "seed": { "type": "integer", "description": "A seed value initializes the randomness during sampling.", "default": 42 }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Tool" } }, "text": { "$ref": "#/components/schemas/OpenAI.CreateEvalResponsesRunDataSourceSamplingParamsText" } } }, "OpenAI.CreateEvalResponsesRunDataSourceSamplingParamsText": { "type": "object", "properties": { "format": { "$ref": "#/components/schemas/OpenAI.TextResponseFormatConfiguration" } } }, "OpenAI.CreateEvalRunRequest": { "type": "object", "required": [ "data_source" ], "properties": { "name": { "type": "string", "description": "The name of the run." }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "data_source": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateEvalJsonlRunDataSource" }, { "$ref": "#/components/schemas/OpenAI.CreateEvalCompletionsRunDataSource" }, { "$ref": "#/components/schemas/OpenAI.CreateEvalResponsesRunDataSource" } ], "description": "Details about the run's data source." } }, "title": "CreateEvalRunRequest" }, "OpenAI.CreateEvalStoredCompletionsDataSourceConfig": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "stored_completions" ], "description": "The type of data source. Always `stored_completions`.", "x-stainless-const": true, "default": "stored_completions" }, "metadata": { "type": "object", "unevaluatedProperties": {}, "description": "Metadata filters for the stored completions data source." 
} }, "description": "Deprecated in favor of LogsDataSourceConfig.", "title": "StoredCompletionsDataSourceConfig", "deprecated": true, "x-oaiMeta": { "name": "The stored completions data source object for evals", "group": "evals", "example": "{\n \"type\": \"stored_completions\",\n \"metadata\": {\n \"use_case\": \"customer_support_agent\"\n }\n}\n" } }, "OpenAI.CreateFileRequest": { "type": "object", "properties": { "file": { "description": "The File object (not file name) to be uploaded.", "x-oaiMeta": { "exampleFilePath": "fine-tune.jsonl" } }, "expires_after": { "type": "object", "properties": { "seconds": { "type": "integer", "format": "int32" }, "anchor": { "$ref": "#/components/schemas/AzureFileExpiryAnchor" } }, "required": [ "seconds", "anchor" ] }, "purpose": { "type": "string", "enum": [ "assistants", "batch", "fine-tune", "evals" ], "description": "The intended purpose of the uploaded file. One of: - `assistants`: Used in the Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for fine-tuning - `evals`: Used for eval data sets" } }, "required": [ "file", "expires_after", "purpose" ] }, "OpenAI.CreateFineTuningCheckpointPermissionRequest": { "type": "object", "required": [ "project_ids" ], "properties": { "project_ids": { "type": "array", "items": { "type": "string" }, "description": "The project identifiers to grant access to." } } }, "OpenAI.CreateFineTuningJobRequest": { "type": "object", "required": [ "model", "training_file" ], "properties": { "model": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] } ], "description": "The name of the model to fine-tune. 
You can select one of the\n [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).", "x-oaiTypeLabel": "string" }, "training_file": { "type": "string", "description": "The ID of an uploaded file that contains training data.\n See [upload file](https://platform.openai.com/docs/api-reference/files/create) for how to upload a file.\n Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.\n The contents of the file should differ depending on if the model uses the [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input), [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input) format, or if the fine-tuning method uses the [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input) format.\n See the [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more details." }, "hyperparameters": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.CreateFineTuningJobRequestHyperparameters" } ], "description": "The hyperparameters used for the fine-tuning job.\n This value is now deprecated in favor of `method`, and should be passed in under the `method` parameter.", "deprecated": true }, "suffix": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "minLength": 1, "maxLength": 64, "description": "A string of up to 64 characters that will be added to your fine-tuned model name.\n For example, a `suffix` of \"custom-model-name\" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`." }, "validation_file": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "description": "The ID of an uploaded file that contains validation data.\n If you provide this file, the data is used to generate validation\n metrics periodically during fine-tuning. 
These metrics can be viewed in\n the fine-tuning results file.\n The same data should not be present in both train and validation files.\n Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`.\n See the [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization) for more details." }, "integrations": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CreateFineTuningJobRequestIntegrations" } }, { "type": "null" } ], "description": "A list of integrations to enable for your fine-tuning job." }, "seed": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 0, "maximum": 2147483647, "description": "The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases.\n If a seed is not specified, one will be generated for you." }, "method": { "$ref": "#/components/schemas/OpenAI.FineTuneMethod" }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } } }, "OpenAI.CreateFineTuningJobRequestHyperparameters": { "type": "object", "properties": { "batch_size": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" } ], "default": "auto" }, "learning_rate_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number" } ] }, "n_epochs": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" } ], "default": "auto" } } }, "OpenAI.CreateFineTuningJobRequestIntegrations": { "type": "object", "required": [ "type", "wandb" ], "properties": { "type": { "type": "string", "enum": [ "wandb" ], "x-stainless-const": true }, "wandb": { "$ref": "#/components/schemas/OpenAI.CreateFineTuningJobRequestIntegrationsWandb" } } }, "OpenAI.CreateFineTuningJobRequestIntegrationsWandb": { "type": "object", "required": [ "project" ], "properties": { "project": { "type": "string" }, "name": { 
"anyOf": [ { "type": "string" }, { "type": "null" } ] }, "entity": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "tags": { "type": "array", "items": { "type": "string" } } } }, "OpenAI.CreateMessageRequest": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "enum": [ "user", "assistant" ], "description": "The role of the entity that is creating the message. Allowed values include:\n - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages.\n - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation." }, "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.MessageContentImageFileObject" }, { "$ref": "#/components/schemas/OpenAI.MessageContentImageUrlObject" }, { "$ref": "#/components/schemas/OpenAI.MessageRequestContentTextObject" } ] } } ] }, "attachments": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CreateMessageRequestAttachments" } }, { "type": "null" } ] }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } } }, "OpenAI.CreateMessageRequestAttachments": { "type": "object", "properties": { "file_id": { "type": "string" }, "tools": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.AssistantToolsCode" }, { "$ref": "#/components/schemas/OpenAI.AssistantToolsFileSearchTypeOnly" } ] } } } }, "OpenAI.CreateResponse": { "type": "object", "properties": { "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "top_logprobs": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "top_p": { "anyOf": [ { "type": "number" }, { "type": 
"null" } ], "default": 1 }, "user": { "type": "string", "description": "This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use `prompt_cache_key` instead to maintain caching optimizations.\n A stable identifier for your end-users.\n Used to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).", "deprecated": true }, "safety_identifier": { "type": "string", "description": "A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.\n The IDs should be a string that uniquely identifies each user. We recommend hashing their username or email address, in order to avoid sending us any identifying information. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers)." }, "prompt_cache_key": { "type": "string", "description": "Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching)." }, "prompt_cache_retention": { "anyOf": [ { "type": "string", "enum": [ "in-memory", "24h" ] }, { "type": "null" } ] }, "previous_response_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "model": { "type": "string", "description": "Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI\n offers a wide range of models with different capabilities, performance\n characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)\n to browse and compare available models." 
}, "reasoning": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Reasoning" }, { "type": "null" } ] }, "background": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ] }, "max_output_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "max_tool_calls": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "text": { "$ref": "#/components/schemas/OpenAI.ResponseTextParam" }, "tools": { "$ref": "#/components/schemas/OpenAI.ToolsArray", "description": "An array of tools the model may call while generating a response. You\ncan specify which tool to use by setting the `tool_choice` parameter.\n\nThe two categories of tools you can provide the model are:\n\n- **Built-in tools**: Tools that are provided by OpenAI that extend the\n model's capabilities, like file search.\n- **Function calls (custom tools)**: Functions that are defined by you,\n enabling the model to call your own code." }, "tool_choice": { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" }, "prompt": { "$ref": "#/components/schemas/OpenAI.Prompt" }, "truncation": { "anyOf": [ { "type": "string", "enum": [ "auto", "disabled" ] }, { "type": "null" } ], "default": "disabled" }, "input": { "$ref": "#/components/schemas/OpenAI.InputParam" }, "include": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.IncludeEnum" } }, { "type": "null" } ] }, "parallel_tool_calls": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ], "default": true }, "store": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ], "default": true }, "instructions": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "stream": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ] }, "stream_options": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamOptions" }, { "type": "null" } ] }, "conversation": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationParam" }, { "type": "null" } ] } } }, "OpenAI.CreateRunRequest": { "type": "object", 
"required": [ "assistant_id" ], "properties": { "assistant_id": { "type": "string", "description": "The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run." }, "model": { "type": "string", "description": "The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.", "x-oaiTypeLabel": "string" }, "reasoning_effort": { "$ref": "#/components/schemas/OpenAI.ReasoningEffort" }, "instructions": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "description": "Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis." }, "additional_instructions": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "description": "Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions." }, "additional_messages": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CreateMessageRequest" } }, { "type": "null" } ], "description": "Adds additional messages to the thread before creating the run." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.AssistantTool" }, "maxItems": 20, "description": "Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis." }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "minimum": 0, "maximum": 2, "description": "What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.", "default": 1 }, "top_p": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "minimum": 0, "maximum": 1, "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n We generally recommend altering this or temperature but not both.", "default": 1 }, "stream": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ], "description": "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message." }, "max_prompt_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 256, "description": "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info." }, "max_completion_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 256, "description": "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info." 
}, "truncation_strategy": { "$ref": "#/components/schemas/OpenAI.TruncationObject" }, "tool_choice": { "$ref": "#/components/schemas/OpenAI.AssistantsApiToolChoiceOption" }, "parallel_tool_calls": { "$ref": "#/components/schemas/OpenAI.ParallelToolCalls" }, "response_format": { "$ref": "#/components/schemas/OpenAI.AssistantsApiResponseFormatOption" } } }, "OpenAI.CreateThreadAndRunRequest": { "type": "object", "required": [ "assistant_id" ], "properties": { "assistant_id": { "type": "string", "description": "The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run." }, "thread": { "$ref": "#/components/schemas/OpenAI.CreateThreadRequest" }, "model": { "type": "string", "description": "The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.", "x-oaiTypeLabel": "string" }, "instructions": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "description": "Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.AssistantTool" }, "maxItems": 20, "description": "Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis." }, "tool_resources": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateThreadAndRunRequestToolResources" }, { "type": "null" } ], "description": "A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs." 
}, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "minimum": 0, "maximum": 2, "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.", "default": 1 }, "top_p": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "minimum": 0, "maximum": 1, "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n We generally recommend altering this or temperature but not both.", "default": 1 }, "stream": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ], "description": "If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message." }, "max_prompt_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 256, "description": "The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info." }, "max_completion_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 256, "description": "The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. 
See `incomplete_details` for more info." }, "truncation_strategy": { "$ref": "#/components/schemas/OpenAI.TruncationObject" }, "tool_choice": { "$ref": "#/components/schemas/OpenAI.AssistantsApiToolChoiceOption" }, "parallel_tool_calls": { "$ref": "#/components/schemas/OpenAI.ParallelToolCalls" }, "response_format": { "$ref": "#/components/schemas/OpenAI.AssistantsApiResponseFormatOption" } } }, "OpenAI.CreateThreadAndRunRequestToolResources": { "type": "object", "properties": { "code_interpreter": { "$ref": "#/components/schemas/OpenAI.CreateThreadAndRunRequestToolResourcesCodeInterpreter" }, "file_search": { "$ref": "#/components/schemas/OpenAI.CreateThreadAndRunRequestToolResourcesFileSearch" } } }, "OpenAI.CreateThreadAndRunRequestToolResourcesCodeInterpreter": { "type": "object", "properties": { "file_ids": { "type": "array", "items": { "type": "string" }, "maxItems": 20, "default": [] } } }, "OpenAI.CreateThreadAndRunRequestToolResourcesFileSearch": { "type": "object", "properties": { "vector_store_ids": { "type": "array", "items": { "type": "string" }, "maxItems": 1 } } }, "OpenAI.CreateThreadRequest": { "type": "object", "properties": { "messages": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CreateMessageRequest" }, "description": "A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with." }, "tool_resources": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateThreadRequestToolResources" }, { "type": "null" } ] }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } }, "description": "Options to create a new thread. If no thread is provided when running a\nrequest, an empty thread will be created." 
}, "OpenAI.CreateThreadRequestToolResources": { "type": "object", "properties": { "code_interpreter": { "$ref": "#/components/schemas/OpenAI.CreateThreadRequestToolResourcesCodeInterpreter" }, "file_search": { "anyOf": [ {}, {} ] } } }, "OpenAI.CreateThreadRequestToolResourcesCodeInterpreter": { "type": "object", "properties": { "file_ids": { "type": "array", "items": { "type": "string" }, "maxItems": 20 } } }, "OpenAI.CreateVectorStoreFileBatchRequest": { "type": "object", "properties": { "file_ids": { "type": "array", "items": { "type": "string" }, "minItems": 1, "maxItems": 500, "description": "A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files. If `attributes` or `chunking_strategy` are provided, they will be applied to all files in the batch. Mutually exclusive with `files`." }, "files": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CreateVectorStoreFileRequest" }, "minItems": 1, "maxItems": 500, "description": "A list of objects that each include a `file_id` plus optional `attributes` or `chunking_strategy`. Use this when you need to override metadata for specific files. The global `attributes` or `chunking_strategy` will be ignored and must be specified for each file. Mutually exclusive with `file_ids`." }, "chunking_strategy": { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyRequestParam" }, "attributes": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" }, { "type": "null" } ] } } }, "OpenAI.CreateVectorStoreFileRequest": { "type": "object", "required": [ "file_id" ], "properties": { "file_id": { "type": "string", "description": "A [File](https://platform.openai.com/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files." 
}, "chunking_strategy": { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyRequestParam" }, "attributes": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" }, { "type": "null" } ] } } }, "OpenAI.CreateVectorStoreRequest": { "type": "object", "properties": { "file_ids": { "type": "array", "items": { "type": "string" }, "maxItems": 500, "description": "A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files." }, "name": { "type": "string", "description": "The name of the vector store." }, "description": { "type": "string", "description": "A description for the vector store. Can be used to describe the vector store's purpose." }, "expires_after": { "$ref": "#/components/schemas/OpenAI.VectorStoreExpirationAfter" }, "chunking_strategy": { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyRequestParam" }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } } }, "OpenAI.CustomGrammarFormatParam": { "type": "object", "required": [ "type", "syntax", "definition" ], "properties": { "type": { "type": "string", "enum": [ "grammar" ], "description": "Grammar format. Always `grammar`.", "x-stainless-const": true, "default": "grammar" }, "syntax": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.GrammarSyntax1" } ], "description": "The syntax of the grammar definition. One of `lark` or `regex`." }, "definition": { "type": "string", "description": "The grammar definition." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.CustomToolParamFormat" } ], "description": "A grammar defined by the user.", "title": "Grammar format" }, "OpenAI.CustomTextFormatParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "Unconstrained text format. 
Always `text`.", "x-stainless-const": true, "default": "text" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.CustomToolParamFormat" } ], "description": "Unconstrained free-form text.", "title": "Text format" }, "OpenAI.CustomToolChatCompletions": { "type": "object", "required": [ "type", "custom" ], "properties": { "type": { "type": "string", "enum": [ "custom" ], "description": "The type of the custom tool. Always `custom`.", "x-stainless-const": true }, "custom": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.CustomToolChatCompletionsCustom" } ], "description": "Properties of the custom tool.", "title": "Custom tool properties" } }, "description": "A custom tool that processes input using a specified format.", "title": "Custom tool" }, "OpenAI.CustomToolChatCompletionsCustom": { "type": "object", "required": [ "name" ], "properties": { "name": { "type": "string" }, "description": { "type": "string" }, "format": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CustomToolChatCompletionsCustomFormatText" }, { "$ref": "#/components/schemas/OpenAI.CustomToolChatCompletionsCustomFormatGrammar" } ] } } }, "OpenAI.CustomToolChatCompletionsCustomFormatGrammar": { "type": "object", "required": [ "type", "grammar" ], "properties": { "type": { "type": "string", "enum": [ "grammar" ], "x-stainless-const": true }, "grammar": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.CustomToolChatCompletionsCustomFormatGrammarGrammar" } ], "title": "Grammar format" } } }, "OpenAI.CustomToolChatCompletionsCustomFormatGrammarGrammar": { "type": "object", "required": [ "definition", "syntax" ], "properties": { "definition": { "type": "string" }, "syntax": { "type": "string", "enum": [ "lark", "regex" ] } } }, "OpenAI.CustomToolChatCompletionsCustomFormatText": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "x-stainless-const": true } } }, "OpenAI.CustomToolParam": { "type": "object", "required": [ "type", 
"name" ], "properties": { "type": { "type": "string", "enum": [ "custom" ], "description": "The type of the custom tool. Always `custom`.", "x-stainless-const": true, "default": "custom" }, "name": { "type": "string", "description": "The name of the custom tool, used to identify it in tool calls." }, "description": { "type": "string", "description": "Optional description of the custom tool, used to provide more context." }, "format": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.CustomToolParamFormat" } ], "description": "The input format for the custom tool. Default is unconstrained text." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A custom tool that processes input using a specified format. Learn more about [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)", "title": "Custom tool" }, "OpenAI.CustomToolParamFormat": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.CustomToolParamFormatType" } }, "discriminator": { "propertyName": "type", "mapping": { "text": "#/components/schemas/OpenAI.CustomTextFormatParam", "grammar": "#/components/schemas/OpenAI.CustomGrammarFormatParam" } }, "description": "The input format for the custom tool. Default is unconstrained text." }, "OpenAI.CustomToolParamFormatType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "text", "grammar" ] } ] }, "OpenAI.DeleteFileResponse": { "type": "object", "required": [ "id", "object", "deleted" ], "properties": { "id": { "type": "string" }, "object": { "type": "string", "enum": [ "file" ], "x-stainless-const": true }, "deleted": { "type": "boolean" } } }, "OpenAI.DeleteFineTuningCheckpointPermissionResponse": { "type": "object", "required": [ "id", "object", "deleted" ], "properties": { "id": { "type": "string", "description": "The ID of the fine-tuned model checkpoint permission that was deleted." 
}, "object": { "type": "string", "enum": [ "checkpoint.permission" ], "description": "The object type, which is always \"checkpoint.permission\".", "x-stainless-const": true }, "deleted": { "type": "boolean", "description": "Whether the fine-tuned model checkpoint permission was successfully deleted." } } }, "OpenAI.DeleteMessageResponse": { "type": "object", "required": [ "id", "deleted", "object" ], "properties": { "id": { "type": "string" }, "deleted": { "type": "boolean" }, "object": { "type": "string", "enum": [ "thread.message.deleted" ], "x-stainless-const": true } } }, "OpenAI.DeleteModelResponse": { "type": "object", "required": [ "id", "deleted", "object" ], "properties": { "id": { "type": "string" }, "deleted": { "type": "boolean" }, "object": { "type": "string" } } }, "OpenAI.DeleteThreadResponse": { "type": "object", "required": [ "id", "deleted", "object" ], "properties": { "id": { "type": "string" }, "deleted": { "type": "boolean" }, "object": { "type": "string", "enum": [ "thread.deleted" ], "x-stainless-const": true } } }, "OpenAI.DeleteVectorStoreFileResponse": { "type": "object", "required": [ "id", "deleted", "object" ], "properties": { "id": { "type": "string" }, "deleted": { "type": "boolean" }, "object": { "type": "string", "enum": [ "vector_store.file.deleted" ], "x-stainless-const": true } } }, "OpenAI.DeleteVectorStoreResponse": { "type": "object", "required": [ "id", "deleted", "object" ], "properties": { "id": { "type": "string" }, "deleted": { "type": "boolean" }, "object": { "type": "string", "enum": [ "vector_store.deleted" ], "x-stainless-const": true } } }, "OpenAI.DeletedConversationResource": { "type": "object", "required": [ "object", "deleted", "id" ], "properties": { "object": { "type": "string", "enum": [ "conversation.deleted" ], "x-stainless-const": true, "default": "conversation.deleted" }, "deleted": { "type": "boolean" }, "id": { "type": "string" } } }, "OpenAI.DoubleClickAction": { "type": "object", "required": [ "type", 
"x", "y" ], "properties": { "type": { "type": "string", "enum": [ "double_click" ], "description": "Specifies the event type. For a double click action, this property is always set to `double_click`.", "x-stainless-const": true, "default": "double_click" }, "x": { "type": "integer", "description": "The x-coordinate where the double click occurred." }, "y": { "type": "integer", "description": "The y-coordinate where the double click occurred." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A double click action.", "title": "DoubleClick" }, "OpenAI.Drag": { "type": "object", "required": [ "type", "path" ], "properties": { "type": { "type": "string", "enum": [ "drag" ], "description": "Specifies the event type. For a drag action, this property is\n always set to `drag`.", "x-stainless-const": true, "default": "drag" }, "path": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.DragPoint" }, "description": "An array of coordinates representing the path of the drag action. Coordinates will appear as an array\n of objects, eg\n ```\n [\n { x: 100, y: 200 },\n { x: 200, y: 300 }\n ]\n ```" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A drag action.", "title": "Drag" }, "OpenAI.DragPoint": { "type": "object", "required": [ "x", "y" ], "properties": { "x": { "type": "integer", "description": "The x-coordinate." }, "y": { "type": "integer", "description": "The y-coordinate." } }, "description": "An x/y coordinate pair, e.g. `{ x: 100, y: 200 }`.", "title": "Coordinate" }, "OpenAI.EasyInputMessage": { "type": "object", "required": [ "role", "content", "type" ], "properties": { "role": { "type": "string", "enum": [ "user", "assistant", "system", "developer" ], "description": "The role of the message input. One of `user`, `assistant`, `system`, or\n `developer`." 
}, "content": { "anyOf": [ { "type": "string" }, { "$ref": "#/components/schemas/OpenAI.InputMessageContentList" } ], "description": "Text, image, or audio input to the model, used to generate a response.\n Can also contain previous assistant responses." }, "type": { "type": "string", "enum": [ "message" ], "description": "The type of the message input. Always `message`.", "x-stainless-const": true } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.InputItem" } ], "description": "A message input to the model with a role indicating instruction following\nhierarchy. Instructions given with the `developer` or `system` role take\nprecedence over instructions given with the `user` role. Messages with the\n`assistant` role are presumed to have been generated by the model in previous\ninteractions.", "title": "Input message" }, "OpenAI.Embedding": { "type": "object", "required": [ "index", "embedding", "object" ], "properties": { "index": { "type": "integer", "description": "The index of the embedding in the list of embeddings." }, "embedding": { "type": "array", "items": { "type": "number", "format": "float" }, "description": "The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings)." }, "object": { "type": "string", "enum": [ "embedding" ], "description": "The object type, which is always \"embedding\".", "x-stainless-const": true } }, "description": "Represents an embedding vector returned by embedding endpoint.", "x-oaiMeta": { "name": "The embedding object", "example": "{\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0023064255,\n -0.009327292,\n .... 
(1536 floats total for ada-002)\n -0.0028842222,\n ],\n \"index\": 0\n}\n" } }, "OpenAI.Eval": { "type": "object", "required": [ "object", "id", "name", "data_source_config", "testing_criteria", "created_at", "metadata" ], "properties": { "object": { "type": "string", "enum": [ "eval" ], "description": "The object type.", "x-stainless-const": true, "default": "eval" }, "id": { "type": "string", "description": "Unique identifier for the evaluation." }, "name": { "type": "string", "description": "The name of the evaluation." }, "data_source_config": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.EvalCustomDataSourceConfig" }, { "$ref": "#/components/schemas/OpenAI.EvalLogsDataSourceConfig" }, { "$ref": "#/components/schemas/OpenAI.EvalStoredCompletionsDataSourceConfig" } ], "description": "Configuration of data sources used in runs of the evaluation." }, "testing_criteria": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateEvalLabelModelGrader" }, { "$ref": "#/components/schemas/OpenAI.EvalGraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.EvalGraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.EvalGraderPython" }, { "$ref": "#/components/schemas/OpenAI.EvalGraderScoreModel" }, { "$ref": "#/components/schemas/EvalGraderEndpoint" } ] }, "description": "A list of testing criteria." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the eval was created." 
}, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } }, "description": "An Eval object with a data source config and testing criteria.\nAn Eval represents a task to be done for your LLM integration.\nLike:\n- Improve the quality of my chatbot\n- See how well my chatbot handles customer support\n- Check if o4-mini is better at my usecase than gpt-4o", "title": "Eval", "x-oaiMeta": { "name": "The eval object", "group": "evals", "example": "{\n \"object\": \"eval\",\n \"id\": \"eval_67abd54d9b0081909a86353f6fb9317a\",\n \"data_source_config\": {\n \"type\": \"custom\",\n \"item_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"label\": {\"type\": \"string\"},\n },\n \"required\": [\"label\"]\n },\n \"include_sample_schema\": true\n },\n \"testing_criteria\": [\n {\n \"name\": \"My string check grader\",\n \"type\": \"string_check\",\n \"input\": \"{{sample.output_text}}\",\n \"reference\": \"{{item.label}}\",\n \"operation\": \"eq\",\n }\n ],\n \"name\": \"External Data Eval\",\n \"created_at\": 1739314509,\n \"metadata\": {\n \"test\": \"synthetics\",\n }\n}\n" } }, "OpenAI.EvalApiError": { "type": "object", "required": [ "code", "message" ], "properties": { "code": { "type": "string", "description": "The error code." }, "message": { "type": "string", "description": "The error message." } }, "description": "An object representing an error response from the Eval API.", "title": "EvalApiError", "x-oaiMeta": { "name": "The API error object", "group": "evals", "example": "{\n \"code\": \"internal_error\",\n \"message\": \"The eval run failed due to an internal error.\"\n}\n" } }, "OpenAI.EvalCustomDataSourceConfig": { "type": "object", "required": [ "type", "schema" ], "properties": { "type": { "type": "string", "enum": [ "custom" ], "description": "The type of data source. 
Always `custom`.", "x-stainless-const": true, "default": "custom" }, "schema": { "type": "object", "unevaluatedProperties": {}, "description": "The json schema for the run data source items.\n Learn how to build JSON schemas [here](https://json-schema.org/)." } }, "description": "A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces.\nThe response schema defines the shape of the data that will be:\n- Used to define your testing criteria and\n- What data is required when creating a run", "title": "CustomDataSourceConfig", "x-oaiMeta": { "name": "The eval custom data source config object", "group": "evals", "example": "{\n \"type\": \"custom\",\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"item\": {\n \"type\": \"object\",\n \"properties\": {\n \"label\": {\"type\": \"string\"}\n },\n \"required\": [\"label\"]\n }\n },\n \"required\": [\"item\"]\n }\n}\n" } }, "OpenAI.EvalGraderPython": { "type": "object", "required": [ "type", "name", "source" ], "properties": { "type": { "type": "string", "enum": [ "python" ], "description": "The object type, which is always `python`.", "x-stainless-const": true }, "name": { "type": "string", "description": "The name of the grader." }, "source": { "type": "string", "description": "The source code of the python script." }, "image_tag": { "type": "string", "description": "The image tag to use for the python script." }, "pass_threshold": { "type": "number", "description": "The threshold for the score." } }, "title": "EvalGraderPython" }, "OpenAI.EvalGraderScoreModel": { "type": "object", "required": [ "type", "name", "model", "input" ], "properties": { "type": { "type": "string", "enum": [ "score_model" ], "description": "The object type, which is always `score_model`.", "x-stainless-const": true }, "name": { "type": "string", "description": "The name of the grader." }, "model": { "type": "string", "description": "The model to use for the evaluation." 
}, "sampling_params": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalGraderScoreModelSamplingParams" } ], "description": "The sampling parameters for the model." }, "input": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalItem" }, "description": "The input messages evaluated by the grader. Supports text, output text, input image, and input audio content blocks, and may include template strings." }, "range": { "type": "array", "items": { "type": "number" }, "description": "The range of the score. Defaults to `[0, 1]`." }, "pass_threshold": { "type": "number", "description": "The threshold for the score." } }, "title": "EvalGraderScoreModel" }, "OpenAI.EvalGraderScoreModelSamplingParams": { "type": "object", "properties": { "seed": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "top_p": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ] }, "max_completions_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "reasoning_effort": { "$ref": "#/components/schemas/OpenAI.ReasoningEffort" } } }, "OpenAI.EvalGraderStringCheck": { "type": "object", "required": [ "type", "name", "input", "reference", "operation" ], "properties": { "type": { "type": "string", "enum": [ "string_check" ], "description": "The object type, which is always `string_check`.", "x-stainless-const": true }, "name": { "type": "string", "description": "The name of the grader." }, "input": { "type": "string", "description": "The input text. This may include template strings." }, "reference": { "type": "string", "description": "The reference text. This may include template strings." }, "operation": { "type": "string", "enum": [ "eq", "ne", "like", "ilike" ], "description": "The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`." 
} }, "title": "StringCheckGrader" }, "OpenAI.EvalGraderTextSimilarity": { "type": "object", "required": [ "type", "name", "input", "reference", "evaluation_metric", "pass_threshold" ], "properties": { "type": { "type": "string", "enum": [ "text_similarity" ], "description": "The type of grader.", "x-stainless-const": true, "default": "text_similarity" }, "name": { "type": "string", "description": "The name of the grader." }, "input": { "type": "string", "description": "The text being graded." }, "reference": { "type": "string", "description": "The text being graded against." }, "evaluation_metric": { "type": "string", "enum": [ "cosine", "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l" ], "description": "The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`,\n `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`,\n or `rouge_l`." }, "pass_threshold": { "type": "number", "description": "The threshold for the score." } }, "title": "EvalGraderTextSimilarity" }, "OpenAI.EvalItem": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "enum": [ "user", "assistant", "system", "developer" ], "description": "The role of the message input. One of `user`, `assistant`, `system`, or\n `developer`." }, "content": { "$ref": "#/components/schemas/OpenAI.EvalItemContent" }, "type": { "type": "string", "enum": [ "message" ], "description": "The type of the message input. Always `message`.", "x-stainless-const": true } }, "description": "A message input to the model with a role indicating instruction following\nhierarchy. Instructions given with the `developer` or `system` role take\nprecedence over instructions given with the `user` role. 
Messages with the\n`assistant` role are presumed to have been generated by the model in previous\ninteractions.", "title": "EvalItem" }, "OpenAI.EvalItemContent": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.EvalItemContentItem" }, { "$ref": "#/components/schemas/OpenAI.EvalItemContentArray" } ], "description": "Inputs to the model - can contain template strings. Supports text, output text, input images, and input audio, either as a single item or an array of items.", "title": "Eval content" }, "OpenAI.EvalItemContentArray": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalItemContentItem" }, "description": "A list of inputs, each of which may be either an input text, output text, input\nimage, or input audio object.", "title": "An array of Input text, Output text, Input image, and Input audio" }, "OpenAI.EvalItemContentItem": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.EvalItemContentText" }, { "$ref": "#/components/schemas/OpenAI.EvalItemContentItemObject" } ], "description": "A single content item: input text, output text, input image, or input audio.", "title": "Eval content item", "x-stainless-naming": { "ruby": { "model_name": "GraderInputItem" } } }, "OpenAI.EvalItemContentItemObject": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.EvalItemContentItemObjectType" } }, "discriminator": { "propertyName": "type", "mapping": { "output_text": "#/components/schemas/OpenAI.EvalItemContentOutputText", "input_image": "#/components/schemas/OpenAI.EvalItemInputImage", "input_audio": "#/components/schemas/OpenAI.InputAudio", "input_text": "#/components/schemas/OpenAI.EvalItemContentItemObjectInputTextContent" } }, "description": "A single content item: input text, output text, input image, or input audio.", "title": "Eval content item", "x-stainless-naming": { "ruby": { "model_name": "GraderInputItem" } } }, "OpenAI.EvalItemContentItemObjectInputTextContent": { "type": 
"object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "input_text" ], "description": "The type of the input item. Always `input_text`.", "x-stainless-const": true, "default": "input_text" }, "text": { "type": "string", "description": "The text input to the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalItemContentItemObject" } ], "description": "A text input to the model.", "title": "Input text" }, "OpenAI.EvalItemContentItemObjectType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "input_text", "output_text", "input_image", "input_audio" ] } ] }, "OpenAI.EvalItemContentOutputText": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "output_text" ], "description": "The type of the output text. Always `output_text`.", "x-stainless-const": true }, "text": { "type": "string", "description": "The text output from the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalItemContentItemObject" } ], "description": "A text output from the model.", "title": "Output text" }, "OpenAI.EvalItemContentText": { "type": "string", "description": "A text input to the model.", "title": "Text input" }, "OpenAI.EvalItemInputImage": { "type": "object", "required": [ "type", "image_url" ], "properties": { "type": { "type": "string", "enum": [ "input_image" ], "description": "The type of the image input. Always `input_image`.", "x-stainless-const": true }, "image_url": { "type": "string", "format": "uri", "description": "The URL of the image input." }, "detail": { "type": "string", "description": "The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalItemContentItemObject" } ], "description": "An image input block used within EvalItem content arrays.", "title": "Input image" }, "OpenAI.EvalJsonlFileContentSource": { "type": "object", "required": [ "type", "content" ], "properties": { "type": { "type": "string", "enum": [ "file_content" ], "description": "The type of jsonl source. Always `file_content`.", "x-stainless-const": true, "default": "file_content" }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalJsonlFileContentSourceContent" }, "description": "The content of the jsonl file." } }, "title": "EvalJsonlFileContentSource" }, "OpenAI.EvalJsonlFileContentSourceContent": { "type": "object", "required": [ "item" ], "properties": { "item": { "type": "object", "unevaluatedProperties": {} }, "sample": { "type": "object", "unevaluatedProperties": {} } } }, "OpenAI.EvalJsonlFileIdSource": { "type": "object", "required": [ "type", "id" ], "properties": { "type": { "type": "string", "enum": [ "file_id" ], "description": "The type of jsonl source. Always `file_id`.", "x-stainless-const": true, "default": "file_id" }, "id": { "type": "string", "description": "The identifier of the file." } }, "title": "EvalJsonlFileIdSource" }, "OpenAI.EvalList": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of this object. It is always set to \"list\".", "x-stainless-const": true, "default": "list" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Eval" }, "description": "An array of eval objects." }, "first_id": { "type": "string", "description": "The identifier of the first eval in the data array." }, "last_id": { "type": "string", "description": "The identifier of the last eval in the data array." 
}, "has_more": { "type": "boolean", "description": "Indicates whether there are more evals available." } }, "description": "An object representing a list of evals.", "title": "EvalList", "x-oaiMeta": { "name": "The eval list object", "group": "evals", "example": "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"eval\",\n \"id\": \"eval_67abd54d9b0081909a86353f6fb9317a\",\n \"data_source_config\": {\n \"type\": \"custom\",\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"item\": {\n \"type\": \"object\",\n \"properties\": {\n \"input\": {\n \"type\": \"string\"\n },\n \"ground_truth\": {\n \"type\": \"string\"\n }\n },\n \"required\": [\n \"input\",\n \"ground_truth\"\n ]\n }\n },\n \"required\": [\n \"item\"\n ]\n }\n },\n \"testing_criteria\": [\n {\n \"name\": \"String check\",\n \"id\": \"String check-2eaf2d8d-d649-4335-8148-9535a7ca73c2\",\n \"type\": \"string_check\",\n \"input\": \"{{item.input}}\",\n \"reference\": \"{{item.ground_truth}}\",\n \"operation\": \"eq\"\n }\n ],\n \"name\": \"External Data Eval\",\n \"created_at\": 1739314509,\n \"metadata\": {},\n }\n ],\n \"first_id\": \"eval_67abd54d9b0081909a86353f6fb9317a\",\n \"last_id\": \"eval_67abd54d9b0081909a86353f6fb9317a\",\n \"has_more\": true\n}\n" } }, "OpenAI.EvalLogsDataSourceConfig": { "type": "object", "required": [ "type", "schema" ], "properties": { "type": { "type": "string", "enum": [ "logs" ], "description": "The type of data source. Always `logs`.", "x-stainless-const": true, "default": "logs" }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "schema": { "type": "object", "unevaluatedProperties": {}, "description": "The json schema for the run data source items.\n Learn how to build JSON schemas [here](https://json-schema.org/)." 
} }, "description": "A LogsDataSourceConfig which specifies the metadata property of your logs query.\nThis is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc.\nThe schema returned by this data source config is used to defined what variables are available in your evals.\n`item` and `sample` are both defined when using this data source config.", "title": "LogsDataSourceConfig", "x-oaiMeta": { "name": "The logs data source object for evals", "group": "evals", "example": "{\n \"type\": \"logs\",\n \"metadata\": {\n \"language\": \"english\"\n },\n \"schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"item\": {\n \"type\": \"object\"\n },\n \"sample\": {\n \"type\": \"object\"\n }\n },\n \"required\": [\n \"item\",\n \"sample\"\n }\n}\n" } }, "OpenAI.EvalResponsesSource": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "responses" ], "description": "The type of run data source. Always `responses`." }, "metadata": { "anyOf": [ { "type": "object", "unevaluatedProperties": {} }, { "type": "null" } ] }, "model": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "instructions_search": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "created_after": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "created_before": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "reasoning_effort": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ReasoningEffort" }, { "type": "null" } ] }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ] }, "top_p": { "anyOf": [ { "type": "number" }, { "type": "null" } ] }, "users": { "anyOf": [ { "type": "array", "items": { "type": "string" } }, { "type": "null" } ] }, "tools": { "anyOf": [ { "type": "array", "items": { "type": "string" } }, { "type": "null" } ] } }, "description": "A EvalResponsesSource object describing a run data source configuration.", "title": "EvalResponsesSource", "x-oaiMeta": { "name": 
"The run data source object used to configure an individual run", "group": "eval runs", "example": "{\n \"type\": \"responses\",\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"temperature\": 0.7,\n \"top_p\": 1.0,\n \"users\": [\"user1\", \"user2\"],\n \"tools\": [\"tool1\", \"tool2\"],\n \"instructions_search\": \"You are a coding assistant\"\n}\n" } }, "OpenAI.EvalRun": { "type": "object", "required": [ "object", "id", "eval_id", "status", "model", "name", "created_at", "report_url", "result_counts", "per_model_usage", "per_testing_criteria_results", "data_source", "metadata", "error" ], "properties": { "object": { "type": "string", "enum": [ "eval.run" ], "description": "The type of the object. Always \"eval.run\".", "x-stainless-const": true, "default": "eval.run" }, "id": { "type": "string", "description": "Unique identifier for the evaluation run." }, "eval_id": { "type": "string", "description": "The identifier of the associated evaluation." }, "status": { "type": "string", "description": "The status of the evaluation run." }, "model": { "type": "string", "description": "The model that is evaluated, if applicable." }, "name": { "type": "string", "description": "The name of the evaluation run." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) when the evaluation run was created." }, "report_url": { "type": "string", "format": "uri", "description": "The URL to the rendered evaluation run report on the UI dashboard." }, "result_counts": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunResultCounts" } ], "description": "Counters summarizing the outcomes of the evaluation run." }, "per_model_usage": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalRunPerModelUsage" }, "description": "Usage statistics for each model during the evaluation run." 
}, "per_testing_criteria_results": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalRunPerTestingCriteriaResults" }, "description": "Results per testing criteria applied during the evaluation run." }, "data_source": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CreateEvalJsonlRunDataSource" }, { "$ref": "#/components/schemas/OpenAI.CreateEvalCompletionsRunDataSource" }, { "$ref": "#/components/schemas/OpenAI.CreateEvalResponsesRunDataSource" } ], "description": "Information about the run's data source." }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "error": { "$ref": "#/components/schemas/OpenAI.EvalApiError" } }, "description": "A schema representing an evaluation run.", "title": "EvalRun", "x-oaiMeta": { "name": "The eval run object", "group": "evals", "example": "{\n \"object\": \"eval.run\",\n \"id\": \"evalrun_67e57965b480819094274e3a32235e4c\",\n \"eval_id\": \"eval_67e579652b548190aaa83ada4b125f47\",\n \"report_url\": \"https://platform.openai.com/evaluations/eval_67e579652b548190aaa83ada4b125f47?run_id=evalrun_67e57965b480819094274e3a32235e4c\",\n \"status\": \"queued\",\n \"model\": \"gpt-4o-mini\",\n \"name\": \"gpt-4o-mini\",\n \"created_at\": 1743092069,\n \"result_counts\": {\n \"total\": 0,\n \"errored\": 0,\n \"failed\": 0,\n \"passed\": 0\n },\n \"per_model_usage\": null,\n \"per_testing_criteria_results\": null,\n \"data_source\": {\n \"type\": \"completions\",\n \"source\": {\n \"type\": \"file_content\",\n \"content\": [\n {\n \"item\": {\n \"input\": \"Tech Company Launches Advanced Artificial Intelligence Platform\",\n \"ground_truth\": \"Technology\"\n }\n },\n {\n \"item\": {\n \"input\": \"Central Bank Increases Interest Rates Amid Inflation Concerns\",\n \"ground_truth\": \"Markets\"\n }\n },\n {\n \"item\": {\n \"input\": \"International Summit Addresses Climate Change Strategies\",\n \"ground_truth\": \"World\"\n }\n },\n {\n \"item\": {\n \"input\": 
\"Major Retailer Reports Record-Breaking Holiday Sales\",\n \"ground_truth\": \"Business\"\n }\n },\n {\n \"item\": {\n \"input\": \"National Team Qualifies for World Championship Finals\",\n \"ground_truth\": \"Sports\"\n }\n },\n {\n \"item\": {\n \"input\": \"Stock Markets Rally After Positive Economic Data Released\",\n \"ground_truth\": \"Markets\"\n }\n },\n {\n \"item\": {\n \"input\": \"Global Manufacturer Announces Merger with Competitor\",\n \"ground_truth\": \"Business\"\n }\n },\n {\n \"item\": {\n \"input\": \"Breakthrough in Renewable Energy Technology Unveiled\",\n \"ground_truth\": \"Technology\"\n }\n },\n {\n \"item\": {\n \"input\": \"World Leaders Sign Historic Climate Agreement\",\n \"ground_truth\": \"World\"\n }\n },\n {\n \"item\": {\n \"input\": \"Professional Athlete Sets New Record in Championship Event\",\n \"ground_truth\": \"Sports\"\n }\n },\n {\n \"item\": {\n \"input\": \"Financial Institutions Adapt to New Regulatory Requirements\",\n \"ground_truth\": \"Business\"\n }\n },\n {\n \"item\": {\n \"input\": \"Tech Conference Showcases Advances in Artificial Intelligence\",\n \"ground_truth\": \"Technology\"\n }\n },\n {\n \"item\": {\n \"input\": \"Global Markets Respond to Oil Price Fluctuations\",\n \"ground_truth\": \"Markets\"\n }\n },\n {\n \"item\": {\n \"input\": \"International Cooperation Strengthened Through New Treaty\",\n \"ground_truth\": \"World\"\n }\n },\n {\n \"item\": {\n \"input\": \"Sports League Announces Revised Schedule for Upcoming Season\",\n \"ground_truth\": \"Sports\"\n }\n }\n ]\n },\n \"input_messages\": {\n \"type\": \"template\",\n \"template\": [\n {\n \"type\": \"message\",\n \"role\": \"developer\",\n \"content\": {\n \"type\": \"input_text\",\n \"text\": \"Categorize a given news headline into one of the following topics: Technology, Markets, World, Business, or Sports.\n\n# Steps\n\n1. Analyze the content of the news headline to understand its primary focus.\n2. 
Extract the subject matter, identifying any key indicators or keywords.\n3. Use the identified indicators to determine the most suitable category out of the five options: Technology, Markets, World, Business, or Sports.\n4. Ensure only one category is selected per headline.\n\n# Output Format\n\nRespond with the chosen category as a single word. For instance: \"Technology\", \"Markets\", \"World\", \"Business\", or \"Sports\".\n\n# Examples\n\n**Input**: \"Apple Unveils New iPhone Model, Featuring Advanced AI Features\"\n**Output**: \"Technology\"\n\n**Input**: \"Global Stocks Mixed as Investors Await Central Bank Decisions\"\n**Output**: \"Markets\"\n\n**Input**: \"War in Ukraine: Latest Updates on Negotiation Status\"\n**Output**: \"World\"\n\n**Input**: \"Microsoft in Talks to Acquire Gaming Company for $2 Billion\"\n**Output**: \"Business\"\n\n**Input**: \"Manchester United Secures Win in Premier League Football Match\"\n**Output**: \"Sports\"\n\n# Notes\n\n- If the headline appears to fit into more than one category, choose the most dominant theme.\n- Keywords or phrases such as \"stocks\", \"company acquisition\", \"match\", or technological brands can be good indicators for classification.\n\"\n }\n },\n {\n \"type\": \"message\",\n \"role\": \"user\",\n \"content\": {\n \"type\": \"input_text\",\n \"text\": \"{{item.input}}\"\n }\n }\n ]\n },\n \"model\": \"gpt-4o-mini\",\n \"sampling_params\": {\n \"seed\": 42,\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_completions_tokens\": 2048\n }\n },\n \"error\": null,\n \"metadata\": {}\n}\n" } }, "OpenAI.EvalRunList": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of this object. 
It is always set to \"list\".", "x-stainless-const": true, "default": "list" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalRun" }, "description": "An array of eval run objects." }, "first_id": { "type": "string", "description": "The identifier of the first eval run in the data array." }, "last_id": { "type": "string", "description": "The identifier of the last eval run in the data array." }, "has_more": { "type": "boolean", "description": "Indicates whether there are more evals available." } }, "description": "An object representing a list of runs for an evaluation.", "title": "EvalRunList", "x-oaiMeta": { "name": "The eval run list object", "group": "evals", "example": "{\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"eval.run\",\n \"id\": \"evalrun_67b7fbdad46c819092f6fe7a14189620\",\n \"eval_id\": \"eval_67b7fa9a81a88190ab4aa417e397ea21\",\n \"report_url\": \"https://platform.openai.com/evaluations/eval_67b7fa9a81a88190ab4aa417e397ea21?run_id=evalrun_67b7fbdad46c819092f6fe7a14189620\",\n \"status\": \"completed\",\n \"model\": \"o3-mini\",\n \"name\": \"Academic Assistant\",\n \"created_at\": 1740110812,\n \"result_counts\": {\n \"total\": 171,\n \"errored\": 0,\n \"failed\": 80,\n \"passed\": 91\n },\n \"per_model_usage\": null,\n \"per_testing_criteria_results\": [\n {\n \"testing_criteria\": \"String check grader\",\n \"passed\": 91,\n \"failed\": 80\n }\n ],\n \"run_data_source\": {\n \"type\": \"completions\",\n \"template_messages\": [\n {\n \"type\": \"message\",\n \"role\": \"system\",\n \"content\": {\n \"type\": \"input_text\",\n \"text\": \"You are a helpful assistant.\"\n }\n },\n {\n \"type\": \"message\",\n \"role\": \"user\",\n \"content\": {\n \"type\": \"input_text\",\n \"text\": \"Hello, can you help me with my homework?\"\n }\n }\n ],\n \"datasource_reference\": null,\n \"model\": \"o3-mini\",\n \"max_completion_tokens\": null,\n \"seed\": null,\n \"temperature\": null,\n \"top_p\": null\n },\n 
\"error\": null,\n \"metadata\": {\"test\": \"synthetics\"}\n }\n ],\n \"first_id\": \"evalrun_67abd54d60ec8190832b46859da808f7\",\n \"last_id\": \"evalrun_67abd54d60ec8190832b46859da808f7\",\n \"has_more\": false\n}\n" } }, "OpenAI.EvalRunOutputItem": { "type": "object", "required": [ "object", "id", "run_id", "eval_id", "created_at", "status", "datasource_item_id", "datasource_item", "results", "sample" ], "properties": { "object": { "type": "string", "enum": [ "eval.run.output_item" ], "description": "The type of the object. Always \"eval.run.output_item\".", "x-stainless-const": true, "default": "eval.run.output_item" }, "id": { "type": "string", "description": "Unique identifier for the evaluation run output item." }, "run_id": { "type": "string", "description": "The identifier of the evaluation run associated with this output item." }, "eval_id": { "type": "string", "description": "The identifier of the evaluation group." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) when the evaluation run was created." }, "status": { "type": "string", "description": "The status of the evaluation run." }, "datasource_item_id": { "type": "integer", "description": "The identifier for the data source item." }, "datasource_item": { "type": "object", "unevaluatedProperties": {}, "description": "Details of the input data source item." }, "results": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalRunOutputItemResult" }, "description": "A list of grader results for this output item." }, "sample": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunOutputItemSample" } ], "description": "A sample containing the input and output of the evaluation run." 
} }, "description": "A schema representing an evaluation run output item.", "title": "EvalRunOutputItem", "x-oaiMeta": { "name": "The eval run output item object", "group": "evals", "example": "{\n  \"object\": \"eval.run.output_item\",\n  \"id\": \"outputitem_67abd55eb6548190bb580745d5644a33\",\n  \"run_id\": \"evalrun_67abd54d60ec8190832b46859da808f7\",\n  \"eval_id\": \"eval_67abd54d9b0081909a86353f6fb9317a\",\n  \"created_at\": 1739314509,\n  \"status\": \"pass\",\n  \"datasource_item_id\": 137,\n  \"datasource_item\": {\n    \"teacher\": \"To grade essays, I only check for style, content, and grammar.\",\n    \"student\": \"I am a student who is trying to write the best essay.\"\n  },\n  \"results\": [\n    {\n      \"name\": \"String Check Grader\",\n      \"type\": \"string-check-grader\",\n      \"score\": 1.0,\n      \"passed\": true\n    }\n  ],\n  \"sample\": {\n    \"input\": [\n      {\n        \"role\": \"system\",\n        \"content\": \"You are an evaluator bot...\"\n      },\n      {\n        \"role\": \"user\",\n        \"content\": \"You are assessing...\"\n      }\n    ],\n    \"output\": [\n      {\n        \"role\": \"assistant\",\n        \"content\": \"The rubric is not clear nor concise.\"\n      }\n    ],\n    \"finish_reason\": \"stop\",\n    \"model\": \"gpt-4o-2024-08-06\",\n    \"usage\": {\n      \"total_tokens\": 521,\n      \"completion_tokens\": 2,\n      \"prompt_tokens\": 519,\n      \"cached_tokens\": 0\n    },\n    \"error\": null,\n    \"temperature\": 1.0,\n    \"max_completion_tokens\": 2048,\n    \"top_p\": 1.0,\n    \"seed\": 42\n  }\n}\n" } }, "OpenAI.EvalRunOutputItemList": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of this object. It is always set to \"list\".", "x-stainless-const": true, "default": "list" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalRunOutputItem" }, "description": "An array of eval run output item objects." 
}, "first_id": { "type": "string", "description": "The identifier of the first eval run output item in the data array." }, "last_id": { "type": "string", "description": "The identifier of the last eval run output item in the data array." }, "has_more": { "type": "boolean", "description": "Indicates whether there are more eval run output items available." } }, "description": "An object representing a list of output items for an evaluation run.", "title": "EvalRunOutputItemList", "x-oaiMeta": { "name": "The eval run output item list object", "group": "evals", "example": "{\n  \"object\": \"list\",\n  \"data\": [\n    {\n      \"object\": \"eval.run.output_item\",\n      \"id\": \"outputitem_67abd55eb6548190bb580745d5644a33\",\n      \"run_id\": \"evalrun_67abd54d60ec8190832b46859da808f7\",\n      \"eval_id\": \"eval_67abd54d9b0081909a86353f6fb9317a\",\n      \"created_at\": 1739314509,\n      \"status\": \"pass\",\n      \"datasource_item_id\": 137,\n      \"datasource_item\": {\n        \"teacher\": \"To grade essays, I only check for style, content, and grammar.\",\n        \"student\": \"I am a student who is trying to write the best essay.\"\n      },\n      \"results\": [\n        {\n          \"name\": \"String Check Grader\",\n          \"type\": \"string-check-grader\",\n          \"score\": 1.0,\n          \"passed\": true\n        }\n      ],\n      \"sample\": {\n        \"input\": [\n          {\n            \"role\": \"system\",\n            \"content\": \"You are an evaluator bot...\"\n          },\n          {\n            \"role\": \"user\",\n            \"content\": \"You are assessing...\"\n          }\n        ],\n        \"output\": [\n          {\n            \"role\": \"assistant\",\n            \"content\": \"The rubric is not clear nor concise.\"\n          }\n        ],\n        \"finish_reason\": \"stop\",\n        \"model\": \"gpt-4o-2024-08-06\",\n        \"usage\": {\n          \"total_tokens\": 521,\n          \"completion_tokens\": 2,\n          \"prompt_tokens\": 519,\n          \"cached_tokens\": 0\n        },\n        \"error\": null,\n        \"temperature\": 1.0,\n        \"max_completion_tokens\": 2048,\n        \"top_p\": 1.0,\n        \"seed\": 42\n      }\n    }\n  ],\n  \"first_id\": \"outputitem_67abd55eb6548190bb580745d5644a33\",\n  \"last_id\": \"outputitem_67abd55eb6548190bb580745d5644a33\",\n  
\"has_more\": false\n}\n" } }, "OpenAI.EvalRunOutputItemResult": { "type": "object", "required": [ "name", "score", "passed" ], "properties": { "name": { "type": "string", "description": "The name of the grader." }, "type": { "type": "string", "description": "The grader type (for example, \"string-check-grader\")." }, "score": { "type": "number", "description": "The numeric score produced by the grader." }, "passed": { "type": "boolean", "description": "Whether the grader considered the output a pass." }, "sample": { "anyOf": [ { "type": "object", "unevaluatedProperties": {} }, { "type": "null" } ], "description": "Optional sample or intermediate data produced by the grader." } }, "unevaluatedProperties": {}, "description": "A single grader result for an evaluation run output item.", "title": "EvalRunOutputItemResult" }, "OpenAI.EvalRunOutputItemSample": { "type": "object", "required": [ "input", "output", "finish_reason", "model", "usage", "error", "temperature", "max_completion_tokens", "top_p", "seed" ], "properties": { "input": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalRunOutputItemSampleInput" } }, "output": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalRunOutputItemSampleOutput" } }, "finish_reason": { "type": "string" }, "model": { "type": "string" }, "usage": { "$ref": "#/components/schemas/OpenAI.EvalRunOutputItemSampleUsage" }, "error": { "$ref": "#/components/schemas/OpenAI.EvalApiError" }, "temperature": { "type": "number" }, "max_completion_tokens": { "type": "integer" }, "top_p": { "type": "number" }, "seed": { "type": "integer" } } }, "OpenAI.EvalRunOutputItemSampleInput": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string" }, "content": { "type": "string" } } }, "OpenAI.EvalRunOutputItemSampleOutput": { "type": "object", "properties": { "role": { "type": "string" }, "content": { "type": "string" } } }, "OpenAI.EvalRunOutputItemSampleUsage": { "type": 
"object", "required": [ "total_tokens", "completion_tokens", "prompt_tokens", "cached_tokens" ], "properties": { "total_tokens": { "type": "integer" }, "completion_tokens": { "type": "integer" }, "prompt_tokens": { "type": "integer" }, "cached_tokens": { "type": "integer" } } }, "OpenAI.EvalRunPerModelUsage": { "type": "object", "required": [ "model_name", "invocation_count", "prompt_tokens", "completion_tokens", "total_tokens", "cached_tokens" ], "properties": { "model_name": { "type": "string", "x-stainless-naming": { "python": { "property_name": "run_model_name" } } }, "invocation_count": { "type": "integer" }, "prompt_tokens": { "type": "integer" }, "completion_tokens": { "type": "integer" }, "total_tokens": { "type": "integer" }, "cached_tokens": { "type": "integer" } } }, "OpenAI.EvalRunPerTestingCriteriaResults": { "type": "object", "required": [ "testing_criteria", "passed", "failed" ], "properties": { "testing_criteria": { "type": "string" }, "passed": { "type": "integer" }, "failed": { "type": "integer" } } }, "OpenAI.EvalRunResultCounts": { "type": "object", "required": [ "total", "errored", "failed", "passed" ], "properties": { "total": { "type": "integer" }, "errored": { "type": "integer" }, "failed": { "type": "integer" }, "passed": { "type": "integer" } } }, "OpenAI.EvalStoredCompletionsDataSourceConfig": { "type": "object", "required": [ "type", "schema" ], "properties": { "type": { "type": "string", "enum": [ "stored_completions" ], "description": "The type of data source. Always `stored_completions`.", "x-stainless-const": true, "default": "stored_completions" }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "schema": { "type": "object", "unevaluatedProperties": {}, "description": "The json schema for the run data source items.\n Learn how to build JSON schemas [here](https://json-schema.org/)." 
} }, "description": "Deprecated in favor of LogsDataSourceConfig.", "title": "StoredCompletionsDataSourceConfig", "deprecated": true, "x-oaiMeta": { "name": "The stored completions data source object for evals", "group": "evals", "example": "{\n  \"type\": \"stored_completions\",\n  \"metadata\": {\n    \"language\": \"english\"\n  },\n  \"schema\": {\n    \"type\": \"object\",\n    \"properties\": {\n      \"item\": {\n        \"type\": \"object\"\n      },\n      \"sample\": {\n        \"type\": \"object\"\n      }\n    },\n    \"required\": [\n      \"item\",\n      \"sample\"\n    ]\n  }\n}\n" } }, "OpenAI.EvalStoredCompletionsSource": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "stored_completions" ], "description": "The type of source. Always `stored_completions`.", "x-stainless-const": true, "default": "stored_completions" }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "model": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "created_after": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "created_before": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "limit": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] } }, "description": "A StoredCompletionsRunDataSource configuration describing a set of filters", "title": "StoredCompletionsRunDataSource", "x-oaiMeta": { "name": "The stored completions data source object used to configure an individual run", "group": "eval runs", "example": "{\n  \"type\": \"stored_completions\",\n  \"model\": \"gpt-4o\",\n  \"created_after\": 1668124800,\n  \"created_before\": 1668124900,\n  \"limit\": 100,\n  \"metadata\": {}\n}\n" } }, "OpenAI.FileCitationBody": { "type": "object", "required": [ "type", "file_id", "index", "filename" ], "properties": { "type": { "type": "string", "enum": [ "file_citation" ], "description": "The type of the file citation. 
Always `file_citation`.", "x-stainless-const": true, "default": "file_citation" }, "file_id": { "type": "string", "description": "The ID of the file." }, "index": { "type": "integer", "description": "The index of the file in the list of files." }, "filename": { "type": "string", "description": "The filename of the file cited." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Annotation" } ], "description": "A citation to a file.", "title": "File citation" }, "OpenAI.FilePath": { "type": "object", "required": [ "type", "file_id", "index" ], "properties": { "type": { "type": "string", "enum": [ "file_path" ], "description": "The type of the file path. Always `file_path`.", "x-stainless-const": true }, "file_id": { "type": "string", "description": "The ID of the file." }, "index": { "type": "integer", "description": "The index of the file in the list of files." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Annotation" } ], "description": "A path to a file.", "title": "File path" }, "OpenAI.FileSearchRanker": { "type": "string", "enum": [ "auto", "default_2024_08_21" ], "description": "The ranker to use for the file search. If not specified will use the `auto` ranker." }, "OpenAI.FileSearchRankingOptions": { "type": "object", "required": [ "score_threshold" ], "properties": { "ranker": { "$ref": "#/components/schemas/OpenAI.FileSearchRanker" }, "score_threshold": { "type": "number", "minimum": 0, "maximum": 1, "description": "The score threshold for the file search. All values must be a floating point number between 0 and 1." } }, "description": "The ranking options for the file search. 
If not specified, the file search tool will use the `auto` ranker and a score_threshold of 0.\nSee the [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings) for more information.", "title": "File search tool call ranking options" }, "OpenAI.FileSearchTool": { "type": "object", "required": [ "type", "vector_store_ids" ], "properties": { "type": { "type": "string", "enum": [ "file_search" ], "description": "The type of the file search tool. Always `file_search`.", "x-stainless-const": true, "default": "file_search" }, "vector_store_ids": { "type": "array", "items": { "type": "string" }, "description": "The IDs of the vector stores to search." }, "max_num_results": { "type": "integer", "description": "The maximum number of results to return. This number should be between 1 and 50 inclusive." }, "ranking_options": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RankingOptions" } ], "description": "Ranking options for search." }, "filters": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Filters" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A tool that searches for relevant content from uploaded files. 
Learn more about the [file search tool](https://platform.openai.com/docs/guides/tools-file-search).", "title": "File search" }, "OpenAI.FileSearchToolCallResults": { "type": "object", "properties": { "file_id": { "type": "string" }, "text": { "type": "string" }, "filename": { "type": "string" }, "attributes": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" }, { "type": "null" } ] }, "score": { "type": "number", "format": "float" } } }, "OpenAI.Filters": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ComparisonFilter" }, { "$ref": "#/components/schemas/OpenAI.CompoundFilter" } ] }, "OpenAI.FineTuneDPOHyperparameters": { "type": "object", "properties": { "beta": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number" } ], "description": "The beta value for the DPO method. A higher beta value will increase the weight of the penalty between the policy and reference model." }, "batch_size": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" } ], "description": "Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.", "default": "auto" }, "learning_rate_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number" } ], "description": "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting." }, "n_epochs": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" } ], "description": "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.", "default": "auto" } }, "description": "The hyperparameters used for the DPO fine-tuning job." }, "OpenAI.FineTuneDPOMethod": { "type": "object", "properties": { "hyperparameters": { "$ref": "#/components/schemas/OpenAI.FineTuneDPOHyperparameters" } }, "description": "Configuration for the DPO fine-tuning method." 
}, "OpenAI.FineTuneMethod": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "supervised", "dpo", "reinforcement" ], "description": "The type of method. Is either `supervised`, `dpo`, or `reinforcement`." }, "supervised": { "$ref": "#/components/schemas/OpenAI.FineTuneSupervisedMethod" }, "dpo": { "$ref": "#/components/schemas/OpenAI.FineTuneDPOMethod" }, "reinforcement": { "$ref": "#/components/schemas/AzureFineTuneReinforcementMethod" } }, "description": "The method used for fine-tuning." }, "OpenAI.FineTuneReinforcementHyperparameters": { "type": "object", "properties": { "batch_size": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" } ], "description": "Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.", "default": "auto" }, "learning_rate_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number" } ], "description": "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting." }, "n_epochs": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" } ], "description": "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.", "default": "auto" }, "reasoning_effort": { "type": "string", "enum": [ "default", "low", "medium", "high" ], "description": "Level of reasoning effort.", "default": "default" }, "compute_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number" } ], "description": "Multiplier on amount of compute used for exploring search space during training." 
}, "eval_interval": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" } ], "description": "The number of training steps between evaluation runs.", "default": "auto" }, "eval_samples": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" } ], "description": "Number of evaluation samples to generate per training step.", "default": "auto" } }, "description": "The hyperparameters used for the reinforcement fine-tuning job." }, "OpenAI.FineTuneSupervisedHyperparameters": { "type": "object", "properties": { "batch_size": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" } ], "description": "Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.", "default": "auto" }, "learning_rate_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number" } ], "description": "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting." }, "n_epochs": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" } ], "description": "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.", "default": "auto" } }, "description": "The hyperparameters used for the fine-tuning job." }, "OpenAI.FineTuneSupervisedMethod": { "type": "object", "properties": { "hyperparameters": { "$ref": "#/components/schemas/OpenAI.FineTuneSupervisedHyperparameters" } }, "description": "Configuration for the supervised fine-tuning method." }, "OpenAI.FineTuningCheckpointPermission": { "type": "object", "required": [ "id", "created_at", "project_id", "object" ], "properties": { "id": { "type": "string", "description": "The permission identifier, which can be referenced in the API endpoints." 
}, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the permission was created." }, "project_id": { "type": "string", "description": "The project identifier that the permission is for." }, "object": { "type": "string", "enum": [ "checkpoint.permission" ], "description": "The object type, which is always \"checkpoint.permission\".", "x-stainless-const": true } }, "description": "The `checkpoint.permission` object represents a permission for a fine-tuned model checkpoint.", "title": "FineTuningCheckpointPermission", "x-oaiMeta": { "name": "The fine-tuned model checkpoint permission object", "example": "{\n \"object\": \"checkpoint.permission\",\n \"id\": \"cp_zc4Q7MP6XxulcVzj4MZdwsAB\",\n \"created_at\": 1712211699,\n \"project_id\": \"proj_abGMw1llN8IrBb6SvvY5A1iH\"\n}\n" } }, "OpenAI.FineTuningIntegration": { "type": "object", "required": [ "type", "wandb" ], "properties": { "type": { "type": "string", "enum": [ "wandb" ], "description": "The type of the integration being enabled for the fine-tuning job", "x-stainless-const": true }, "wandb": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.FineTuningIntegrationWandb" } ], "description": "The settings for your integration with Weights and Biases. This payload specifies the project that\n metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags\n to your run, and set a default entity (team, username, etc) to be associated with your run." 
} }, "title": "Fine-Tuning Job Integration" }, "OpenAI.FineTuningIntegrationWandb": { "type": "object", "required": [ "project" ], "properties": { "project": { "type": "string" }, "name": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "entity": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "tags": { "type": "array", "items": { "type": "string" } } } }, "OpenAI.FineTuningJob": { "type": "object", "required": [ "id", "created_at", "error", "fine_tuned_model", "finished_at", "hyperparameters", "model", "object", "organization_id", "result_files", "status", "trained_tokens", "training_file", "validation_file", "seed" ], "properties": { "id": { "type": "string", "description": "The object identifier, which can be referenced in the API endpoints." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the fine-tuning job was created." }, "error": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.FineTuningJobError" }, { "type": "null" } ] }, "fine_tuned_model": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "finished_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ], "description": "The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running." }, "hyperparameters": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.FineTuningJobHyperparameters" } ], "description": "The hyperparameters used for the fine-tuning job. This value will only be returned when running `supervised` jobs." }, "model": { "type": "string", "description": "The base model that is being fine-tuned." }, "object": { "type": "string", "enum": [ "fine_tuning.job" ], "description": "The object type, which is always \"fine_tuning.job\".", "x-stainless-const": true }, "organization_id": { "type": "string", "description": "The organization that owns the fine-tuning job." }, "result_files": { "type": "array", "items": { "type": "string" }, "description": "The compiled results file ID(s) for the fine-tuning job. 
You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents)." }, "status": { "type": "string", "enum": [ "validating_files", "queued", "running", "succeeded", "failed", "cancelled" ], "description": "The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`." }, "trained_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "training_file": { "type": "string", "description": "The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents)." }, "validation_file": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "integrations": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FineTuningIntegration" } }, { "type": "null" } ] }, "seed": { "type": "integer", "description": "The seed used for the fine-tuning job." 
}, "estimated_finish": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ], "description": "The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running." }, "method": { "$ref": "#/components/schemas/OpenAI.FineTuneMethod" }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } }, "description": "The `fine_tuning.job` object represents a fine-tuning job that has been created through the API.", "title": "FineTuningJob", "x-oaiMeta": { "name": "The fine-tuning job object", "example": "{\n  \"object\": \"fine_tuning.job\",\n  \"id\": \"ftjob-abc123\",\n  \"model\": \"davinci-002\",\n  \"created_at\": 1692661014,\n  \"finished_at\": 1692661190,\n  \"fine_tuned_model\": \"ft:davinci-002:my-org:custom_suffix:7q8mpxmy\",\n  \"organization_id\": \"org-123\",\n  \"result_files\": [\n    \"file-abc123\"\n  ],\n  \"status\": \"succeeded\",\n  \"validation_file\": null,\n  \"training_file\": \"file-abc123\",\n  \"hyperparameters\": {\n    \"n_epochs\": 4,\n    \"batch_size\": 1,\n    \"learning_rate_multiplier\": 1.0\n  },\n  \"trained_tokens\": 5768,\n  \"integrations\": [],\n  \"seed\": 0,\n  \"estimated_finish\": 0,\n  \"method\": {\n    \"type\": \"supervised\",\n    \"supervised\": {\n      \"hyperparameters\": {\n        \"n_epochs\": 4,\n        \"batch_size\": 1,\n        \"learning_rate_multiplier\": 1.0\n      }\n    }\n  },\n  \"metadata\": {\n    \"key\": \"value\"\n  }\n}\n" } }, "OpenAI.FineTuningJobCheckpoint": { "type": "object", "required": [ "id", "created_at", "fine_tuned_model_checkpoint", "step_number", "metrics", "fine_tuning_job_id", "object" ], "properties": { "id": { "type": "string", "description": "The checkpoint identifier, which can be referenced in the API endpoints." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the checkpoint was created." }, "fine_tuned_model_checkpoint": { "type": "string", "description": "The name of the fine-tuned checkpoint model that is created." 
}, "step_number": { "type": "integer", "description": "The step number that the checkpoint was created at." }, "metrics": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.FineTuningJobCheckpointMetrics" } ], "description": "Metrics at the step number during the fine-tuning job." }, "fine_tuning_job_id": { "type": "string", "description": "The name of the fine-tuning job that this checkpoint was created from." }, "object": { "type": "string", "enum": [ "fine_tuning.job.checkpoint" ], "description": "The object type, which is always \"fine_tuning.job.checkpoint\".", "x-stainless-const": true } }, "description": "The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use.", "title": "FineTuningJobCheckpoint", "x-oaiMeta": { "name": "The fine-tuning job checkpoint object", "example": "{\n \"object\": \"fine_tuning.job.checkpoint\",\n \"id\": \"ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P\",\n \"created_at\": 1712211699,\n \"fine_tuned_model_checkpoint\": \"ft:gpt-4o-mini-2024-07-18:my-org:custom_suffix:9ABel2dg:ckpt-step-88\",\n \"fine_tuning_job_id\": \"ftjob-fpbNQ3H1GrMehXRf8cO97xTN\",\n \"metrics\": {\n \"step\": 88,\n \"train_loss\": 0.478,\n \"train_mean_token_accuracy\": 0.924,\n \"valid_loss\": 10.112,\n \"valid_mean_token_accuracy\": 0.145,\n \"full_valid_loss\": 0.567,\n \"full_valid_mean_token_accuracy\": 0.944\n },\n \"step_number\": 88\n}\n" } }, "OpenAI.FineTuningJobCheckpointMetrics": { "type": "object", "properties": { "step": { "type": "number" }, "train_loss": { "type": "number" }, "train_mean_token_accuracy": { "type": "number" }, "valid_loss": { "type": "number" }, "valid_mean_token_accuracy": { "type": "number" }, "full_valid_loss": { "type": "number" }, "full_valid_mean_token_accuracy": { "type": "number" } } }, "OpenAI.FineTuningJobError": { "type": "object", "required": [ "code", "message", "param" ], "properties": { "code": { "type": "string" }, "message": { "type": "string" }, "param": { "anyOf": [ { 
"type": "string" }, { "type": "null" } ] } } }, "OpenAI.FineTuningJobEvent": { "type": "object", "required": [ "object", "id", "created_at", "level", "message" ], "properties": { "object": { "type": "string", "enum": [ "fine_tuning.job.event" ], "description": "The object type, which is always \"fine_tuning.job.event\".", "x-stainless-const": true }, "id": { "type": "string", "description": "The object identifier." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the fine-tuning job was created." }, "level": { "type": "string", "enum": [ "info", "warn", "error" ], "description": "The log level of the event." }, "message": { "type": "string", "description": "The message of the event." }, "type": { "type": "string", "enum": [ "message", "metrics" ], "description": "The type of event." }, "data": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.FineTuningJobEventData" } ], "description": "The data associated with the event." 
} }, "description": "Fine-tuning job event object", "x-oaiMeta": { "name": "The fine-tuning job event object", "example": "{\n  \"object\": \"fine_tuning.job.event\",\n  \"id\": \"ftevent-abc123\",\n  \"created_at\": 1677610602,\n  \"level\": \"info\",\n  \"message\": \"Created fine-tuning job\",\n  \"data\": {},\n  \"type\": \"message\"\n}\n" } }, "OpenAI.FineTuningJobEventData": { "type": "object" }, "OpenAI.FineTuningJobHyperparameters": { "type": "object", "properties": { "batch_size": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" }, { "type": "null" } ], "default": "auto" }, "learning_rate_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number" } ] }, "n_epochs": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer" } ], "default": "auto" } } }, "OpenAI.FunctionAndCustomToolCallOutput": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.FunctionAndCustomToolCallOutputType" } }, "discriminator": { "propertyName": "type", "mapping": { "input_text": "#/components/schemas/OpenAI.FunctionAndCustomToolCallOutputInputTextContent", "input_image": "#/components/schemas/OpenAI.FunctionAndCustomToolCallOutputInputImageContent", "input_file": "#/components/schemas/OpenAI.FunctionAndCustomToolCallOutputInputFileContent" } } }, "OpenAI.FunctionAndCustomToolCallOutputInputFileContent": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "input_file" ], "description": "The type of the input item. Always `input_file`.", "x-stainless-const": true, "default": "input_file" }, "file_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "filename": { "type": "string", "description": "The name of the file to be sent to the model." }, "file_url": { "type": "string", "format": "uri", "description": "The URL of the file to be sent to the model." 
}, "file_data": { "type": "string", "description": "The content of the file to be sent to the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.FunctionAndCustomToolCallOutput" } ], "description": "A file input to the model.", "title": "Input file" }, "OpenAI.FunctionAndCustomToolCallOutputInputImageContent": { "type": "object", "required": [ "type", "detail" ], "properties": { "type": { "type": "string", "enum": [ "input_image" ], "description": "The type of the input item. Always `input_image`.", "x-stainless-const": true, "default": "input_image" }, "image_url": { "anyOf": [ { "type": "string", "format": "uri" }, { "type": "null" } ] }, "file_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "detail": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ImageDetail" } ], "description": "The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.FunctionAndCustomToolCallOutput" } ], "description": "An image input to the model. Learn about [image inputs](https://platform.openai.com/docs/guides/vision).", "title": "Input image" }, "OpenAI.FunctionAndCustomToolCallOutputInputTextContent": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "input_text" ], "description": "The type of the input item. Always `input_text`.", "x-stainless-const": true, "default": "input_text" }, "text": { "type": "string", "description": "The text input to the model." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.FunctionAndCustomToolCallOutput" } ], "description": "A text input to the model.", "title": "Input text" }, "OpenAI.FunctionAndCustomToolCallOutputType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "input_text", "input_image", "input_file" ] } ] }, "OpenAI.FunctionObject": { "type": "object", "required": [ "name" ], "properties": { "description": { "type": "string", "description": "A description of what the function does, used by the model to choose when and how to call the function." }, "name": { "type": "string", "description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64." }, "parameters": { "$ref": "#/components/schemas/OpenAI.FunctionParameters" }, "strict": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ] } } }, "OpenAI.FunctionParameters": { "type": "object", "unevaluatedProperties": {}, "description": "The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\nOmitting `parameters` defines a function with an empty parameter list." }, "OpenAI.FunctionShellAction": { "type": "object", "required": [ "commands", "timeout_ms", "max_output_length" ], "properties": { "commands": { "type": "array", "items": { "type": "string" } }, "timeout_ms": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "max_output_length": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] } }, "description": "Execute a shell command.", "title": "Shell exec action" }, "OpenAI.FunctionShellCallOutputContent": { "type": "object", "required": [ "stdout", "stderr", "outcome" ], "properties": { "stdout": { "type": "string", "description": "The standard output that was captured." 
}, "stderr": { "type": "string", "description": "The standard error output that was captured." }, "outcome": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.FunctionShellCallOutputOutcome" } ], "description": "Represents either an exit outcome (with an exit code) or a timeout outcome for a shell call output chunk." }, "created_by": { "type": "string", "description": "The identifier of the actor that created the item." } }, "description": "The content of a shell tool call output that was emitted.", "title": "Shell call output content" }, "OpenAI.FunctionShellCallOutputExitOutcome": { "type": "object", "required": [ "type", "exit_code" ], "properties": { "type": { "type": "string", "enum": [ "exit" ], "description": "The outcome type. Always `exit`.", "x-stainless-const": true, "default": "exit" }, "exit_code": { "type": "integer", "description": "Exit code from the shell process." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.FunctionShellCallOutputOutcome" } ], "description": "Indicates that the shell commands finished and returned an exit code.", "title": "Shell call exit outcome" }, "OpenAI.FunctionShellCallOutputOutcome": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.FunctionShellCallOutputOutcomeType" } }, "discriminator": { "propertyName": "type", "mapping": { "timeout": "#/components/schemas/OpenAI.FunctionShellCallOutputTimeoutOutcome", "exit": "#/components/schemas/OpenAI.FunctionShellCallOutputExitOutcome" } }, "description": "Represents either an exit outcome (with an exit code) or a timeout outcome for a shell call output chunk.", "title": "Shell call outcome" }, "OpenAI.FunctionShellCallOutputOutcomeType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "timeout", "exit" ] } ] }, "OpenAI.FunctionShellCallOutputTimeoutOutcome": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "timeout" ], "description": "The outcome 
type. Always `timeout`.", "x-stainless-const": true, "default": "timeout" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.FunctionShellCallOutputOutcome" } ], "description": "Indicates that the shell call exceeded its configured time limit.", "title": "Shell call timeout outcome" }, "OpenAI.FunctionShellToolParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "shell" ], "description": "The type of the shell tool. Always `shell`.", "x-stainless-const": true, "default": "shell" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A tool that allows the model to execute shell commands.", "title": "Shell tool" }, "OpenAI.FunctionTool": { "type": "object", "required": [ "type", "name", "parameters", "strict" ], "properties": { "type": { "type": "string", "enum": [ "function" ], "description": "The type of the function tool. Always `function`.", "x-stainless-const": true, "default": "function" }, "name": { "type": "string", "description": "The name of the function to call." }, "description": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "parameters": { "anyOf": [ { "type": "object", "unevaluatedProperties": {} }, { "type": "null" } ] }, "strict": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "Defines a function in your own code the model can choose to call. Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling).", "title": "Function" }, "OpenAI.GraderMulti": { "type": "object", "required": [ "type", "name", "graders", "calculate_output" ], "properties": { "type": { "type": "string", "enum": [ "multi" ], "description": "The object type, which is always `multi`.", "x-stainless-const": true, "default": "multi" }, "name": { "type": "string", "description": "The name of the grader." 
}, "graders": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.GraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.GraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.GraderScoreModel" }, { "$ref": "#/components/schemas/GraderEndpoint" } ] }, "calculate_output": { "type": "string", "description": "A formula to calculate the output based on grader results." } }, "description": "A MultiGrader object combines the output of multiple graders to produce a single score.", "title": "MultiGrader", "x-oaiMeta": { "name": "Multi Grader", "group": "graders", "example": "{\n \"type\": \"multi\",\n \"name\": \"example multi grader\",\n \"graders\": [\n {\n \"type\": \"text_similarity\",\n \"name\": \"example text similarity grader\",\n \"input\": \"The graded text\",\n \"reference\": \"The reference text\",\n \"evaluation_metric\": \"fuzzy_match\"\n },\n {\n \"type\": \"string_check\",\n \"name\": \"Example string check grader\",\n \"input\": \"{{sample.output_text}}\",\n \"reference\": \"{{item.label}}\",\n \"operation\": \"eq\"\n }\n ],\n \"calculate_output\": \"0.5 * text_similarity_score + 0.5 * string_check_score\"\n}\n" } }, "OpenAI.GraderPython": { "type": "object", "required": [ "type", "name", "source" ], "properties": { "type": { "type": "string", "enum": [ "python" ], "description": "The object type, which is always `python`.", "x-stainless-const": true }, "name": { "type": "string", "description": "The name of the grader." }, "source": { "type": "string", "description": "The source code of the python script." }, "image_tag": { "type": "string", "description": "The image tag to use for the python script." 
} }, "description": "A PythonGrader object that runs a python script on the input.", "title": "PythonGrader", "x-oaiMeta": { "name": "Python Grader", "group": "graders", "example": "{\n \"type\": \"python\",\n \"name\": \"Example python grader\",\n \"image_tag\": \"2025-05-08\",\n \"source\": \"\"\"\ndef grade(sample: dict, item: dict) -> float:\n \"\"\"\n Returns 1.0 if `output_text` equals `label`, otherwise 0.0.\n \"\"\"\n output = sample.get(\"output_text\")\n label = item.get(\"label\")\n return 1.0 if output == label else 0.0\n\"\"\",\n}\n" } }, "OpenAI.GraderScoreModel": { "type": "object", "required": [ "type", "name", "model", "input" ], "properties": { "type": { "type": "string", "enum": [ "score_model" ], "description": "The object type, which is always `score_model`.", "x-stainless-const": true }, "name": { "type": "string", "description": "The name of the grader." }, "model": { "type": "string", "description": "The model to use for the evaluation." }, "sampling_params": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalGraderScoreModelSamplingParams" } ], "description": "The sampling parameters for the model." }, "input": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalItem" }, "description": "The input messages evaluated by the grader. Supports text, output text, input image, and input audio content blocks, and may include template strings." }, "range": { "type": "array", "items": { "type": "number" }, "description": "The range of the score. Defaults to `[0, 1]`." } }, "description": "A ScoreModelGrader object that uses a model to assign a score to the input.", "title": "ScoreModelGrader", "x-oaiMeta": { "name": "Score Model Grader", "group": "graders", "example": "{\n \"type\": \"score_model\",\n \"name\": \"Example score model grader\",\n \"input\": [\n {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"input_text\",\n \"text\": (\n \"Score how close the reference answer is to the model answer. 
Score 1.0 if they are the same and 0.0 if they are different.\"\n \" Return just a floating point score\n\n\"\n \" Reference answer: {{item.label}}\n\n\"\n \" Model answer: {{sample.output_text}}\"\n )\n },\n {\n \"type\": \"input_image\",\n \"image_url\": \"https://example.com/reference.png\",\n \"file_id\": null,\n \"detail\": \"auto\"\n }\n ],\n }\n ],\n \"model\": \"gpt-5-mini\",\n \"sampling_params\": {\n \"temperature\": 1,\n \"top_p\": 1,\n \"seed\": 42,\n \"max_completions_tokens\": 32768,\n \"reasoning_effort\": \"medium\"\n },\n}\n" } }, "OpenAI.GraderStringCheck": { "type": "object", "required": [ "type", "name", "input", "reference", "operation" ], "properties": { "type": { "type": "string", "enum": [ "string_check" ], "description": "The object type, which is always `string_check`.", "x-stainless-const": true }, "name": { "type": "string", "description": "The name of the grader." }, "input": { "type": "string", "description": "The input text. This may include template strings." }, "reference": { "type": "string", "description": "The reference text. This may include template strings." }, "operation": { "type": "string", "enum": [ "eq", "ne", "like", "ilike" ], "description": "The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`." 
} }, "description": "A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.", "title": "StringCheckGrader", "x-oaiMeta": { "name": "String Check Grader", "group": "graders", "example": "{\n \"type\": \"string_check\",\n \"name\": \"Example string check grader\",\n \"input\": \"{{sample.output_text}}\",\n \"reference\": \"{{item.label}}\",\n \"operation\": \"eq\"\n}\n" } }, "OpenAI.GraderTextSimilarity": { "type": "object", "required": [ "type", "name", "input", "reference", "evaluation_metric" ], "properties": { "type": { "type": "string", "enum": [ "text_similarity" ], "description": "The type of grader.", "x-stainless-const": true, "default": "text_similarity" }, "name": { "type": "string", "description": "The name of the grader." }, "input": { "type": "string", "description": "The text being graded." }, "reference": { "type": "string", "description": "The text being graded against." }, "evaluation_metric": { "type": "string", "enum": [ "cosine", "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l" ], "description": "The evaluation metric to use. One of `cosine`, `fuzzy_match`, `bleu`,\n `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`,\n or `rouge_l`." 
} }, "description": "A TextSimilarityGrader object which grades text based on similarity metrics.", "title": "TextSimilarityGrader", "x-oaiMeta": { "name": "Text Similarity Grader", "group": "graders", "example": "{\n \"type\": \"text_similarity\",\n \"name\": \"Example text similarity grader\",\n \"input\": \"{{sample.output_text}}\",\n \"reference\": \"{{item.label}}\",\n \"evaluation_metric\": \"fuzzy_match\"\n}\n" } }, "OpenAI.GrammarSyntax1": { "type": "string", "enum": [ "lark", "regex" ] }, "OpenAI.HybridSearchOptions": { "type": "object", "required": [ "embedding_weight", "text_weight" ], "properties": { "embedding_weight": { "type": "number", "description": "The weight of the embedding in the reciprocal ranking fusion." }, "text_weight": { "type": "number", "description": "The weight of the text in the reciprocal ranking fusion." } } }, "OpenAI.ImageDetail": { "type": "string", "enum": [ "low", "high", "auto" ] }, "OpenAI.ImageGenTool": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "image_generation" ], "description": "The type of the image generation tool. Always `image_generation`.", "x-stainless-const": true, "default": "image_generation" }, "model": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "gpt-image-1", "gpt-image-1-mini" ] } ], "default": "gpt-image-1" }, "quality": { "type": "string", "enum": [ "low", "medium", "high", "auto" ], "description": "The quality of the generated image. One of `low`, `medium`, `high`,\n or `auto`. Default: `auto`.", "default": "auto" }, "size": { "type": "string", "enum": [ "1024x1024", "1024x1536", "1536x1024", "auto" ], "description": "The size of the generated image. One of `1024x1024`, `1024x1536`,\n `1536x1024`, or `auto`. Default: `auto`.", "default": "auto" }, "output_format": { "type": "string", "enum": [ "png", "webp", "jpeg" ], "description": "The output format of the generated image. One of `png`, `webp`, or\n `jpeg`. 
Default: `png`.", "default": "png" }, "output_compression": { "type": "integer", "minimum": 0, "maximum": 100, "description": "Compression level for the output image. Default: 100.", "default": 100 }, "moderation": { "type": "string", "enum": [ "auto", "low" ], "description": "Moderation level for the generated image. Default: `auto`.", "default": "auto" }, "background": { "type": "string", "enum": [ "transparent", "opaque", "auto" ], "description": "Background type for the generated image. One of `transparent`,\n `opaque`, or `auto`. Default: `auto`.", "default": "auto" }, "input_fidelity": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.InputFidelity" }, { "type": "null" } ] }, "input_image_mask": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ImageGenToolInputImageMask" } ], "description": "Optional mask for inpainting. Contains `image_url`\n (string, optional) and `file_id` (string, optional)." }, "partial_images": { "type": "integer", "minimum": 0, "maximum": 3, "description": "Number of partial images to generate in streaming mode, from 0 (default value) to 3." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A tool that generates images using the GPT image models.", "title": "Image generation tool" }, "OpenAI.ImageGenToolInputImageMask": { "type": "object", "properties": { "image_url": { "type": "string" }, "file_id": { "type": "string" } } }, "OpenAI.IncludeEnum": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "file_search_call.results", "web_search_call.results", "web_search_call.action.sources", "message.input_image.image_url", "computer_call_output.output.image_url", "code_interpreter_call.outputs", "reasoning.encrypted_content", "message.output_text.logprobs" ] } ], "description": "Specify additional output data to include in the model response. 
Currently supported values are:\n- `web_search_call.action.sources`: Include the sources of the web search tool call.\n- `code_interpreter_call.outputs`: Includes the outputs of python code execution in code interpreter tool call items.\n- `computer_call_output.output.image_url`: Include image urls from the computer call output.\n- `file_search_call.results`: Include the search results of the file search tool call.\n- `message.input_image.image_url`: Include image urls from the input message.\n- `message.output_text.logprobs`: Include logprobs with assistant messages.\n- `reasoning.encrypted_content`: Includes an encrypted version of reasoning tokens in reasoning item outputs. This enables reasoning items to be used in multi-turn conversations when using the Responses API statelessly (like when the `store` parameter is set to `false`, or when an organization is enrolled in the zero data retention program)." }, "OpenAI.InputAudio": { "type": "object", "required": [ "type", "input_audio" ], "properties": { "type": { "type": "string", "enum": [ "input_audio" ], "description": "The type of the input item. 
Always `input_audio`.", "x-stainless-const": true }, "input_audio": { "$ref": "#/components/schemas/OpenAI.InputAudioInputAudio" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalItemContentItemObject" } ], "description": "An audio input to the model.", "title": "Input audio" }, "OpenAI.InputAudioInputAudio": { "type": "object", "required": [ "data", "format" ], "properties": { "data": { "type": "string" }, "format": { "type": "string", "enum": [ "mp3", "wav" ] } } }, "OpenAI.InputContent": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.InputContentType" } }, "discriminator": { "propertyName": "type", "mapping": { "input_text": "#/components/schemas/OpenAI.InputContentInputTextContent", "input_image": "#/components/schemas/OpenAI.InputContentInputImageContent", "input_file": "#/components/schemas/OpenAI.InputContentInputFileContent" } } }, "OpenAI.InputContentInputFileContent": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "input_file" ], "description": "The type of the input item. Always `input_file`.", "x-stainless-const": true, "default": "input_file" }, "file_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "filename": { "type": "string", "description": "The name of the file to be sent to the model." }, "file_url": { "type": "string", "format": "uri", "description": "The URL of the file to be sent to the model." }, "file_data": { "type": "string", "description": "The content of the file to be sent to the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.InputContent" } ], "description": "A file input to the model.", "title": "Input file" }, "OpenAI.InputContentInputImageContent": { "type": "object", "required": [ "type", "detail" ], "properties": { "type": { "type": "string", "enum": [ "input_image" ], "description": "The type of the input item. 
Always `input_image`.", "x-stainless-const": true, "default": "input_image" }, "image_url": { "anyOf": [ { "type": "string", "format": "uri" }, { "type": "null" } ] }, "file_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "detail": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ImageDetail" } ], "description": "The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.InputContent" } ], "description": "An image input to the model. Learn about [image inputs](https://platform.openai.com/docs/guides/vision).", "title": "Input image" }, "OpenAI.InputContentInputTextContent": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "input_text" ], "description": "The type of the input item. Always `input_text`.", "x-stainless-const": true, "default": "input_text" }, "text": { "type": "string", "description": "The text input to the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.InputContent" } ], "description": "A text input to the model.", "title": "Input text" }, "OpenAI.InputContentType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "input_text", "input_image", "input_file" ] } ] }, "OpenAI.InputFidelity": { "type": "string", "enum": [ "high", "low" ], "description": "Control how much effort the model will exert to match the style and features, especially facial features, of input images. This parameter is only supported for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and `low`. Defaults to `low`." }, "OpenAI.InputFileContent": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "input_file" ], "description": "The type of the input item. 
Always `input_file`.", "x-stainless-const": true, "default": "input_file" }, "file_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "filename": { "type": "string", "description": "The name of the file to be sent to the model." }, "file_url": { "type": "string", "format": "uri", "description": "The URL of the file to be sent to the model." }, "file_data": { "type": "string", "description": "The content of the file to be sent to the model." } }, "description": "A file input to the model.", "title": "Input file" }, "OpenAI.InputImageContent": { "type": "object", "required": [ "type", "detail" ], "properties": { "type": { "type": "string", "enum": [ "input_image" ], "description": "The type of the input item. Always `input_image`.", "x-stainless-const": true, "default": "input_image" }, "image_url": { "anyOf": [ { "type": "string", "format": "uri" }, { "type": "null" } ] }, "file_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "detail": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ImageDetail" } ], "description": "The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`." } }, "description": "An image input to the model. 
Learn about [image inputs](https://platform.openai.com/docs/guides/vision).", "title": "Input image" }, "OpenAI.InputItem": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.InputItemType" } }, "discriminator": { "propertyName": "type", "mapping": { "message": "#/components/schemas/OpenAI.EasyInputMessage", "item_reference": "#/components/schemas/OpenAI.ItemReferenceParam" } } }, "OpenAI.InputItemType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "message", "item_reference" ] } ] }, "OpenAI.InputMessageContentList": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.InputContent" }, "description": "A list of one or many input items to the model, containing different content\ntypes.", "title": "Input item content list" }, "OpenAI.InputMessageResource": { "type": "object", "required": [ "type", "role", "content", "id" ], "properties": { "type": { "type": "string", "enum": [ "message" ], "description": "The type of the message input. Always set to `message`.", "x-stainless-const": true }, "role": { "type": "string", "enum": [ "user", "system", "developer" ], "description": "The role of the message input. One of `user`, `system`, or `developer`." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of item. One of `in_progress`, `completed`, or\n `incomplete`. Populated when items are returned via API." }, "content": { "$ref": "#/components/schemas/OpenAI.InputMessageContentList" }, "id": { "type": "string", "description": "The unique ID of the message input." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ] }, "OpenAI.InputParam": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.InputItem" } } ], "description": "Text, image, or file inputs to the model, used to generate a response.\nLearn more:\n- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)\n- [Image inputs](https://platform.openai.com/docs/guides/images)\n- [File inputs](https://platform.openai.com/docs/guides/pdf-files)\n- [Conversation state](https://platform.openai.com/docs/guides/conversation-state)\n- [Function calling](https://platform.openai.com/docs/guides/function-calling)" }, "OpenAI.InputTextContent": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "input_text" ], "description": "The type of the input item. Always `input_text`.", "x-stainless-const": true, "default": "input_text" }, "text": { "type": "string", "description": "The text input to the model." } }, "description": "A text input to the model.", "title": "Input text" }, "OpenAI.ItemReferenceParam": { "type": "object", "required": [ "type", "id" ], "properties": { "type": { "type": "string", "enum": [ "item_reference" ], "description": "The type of item to reference. Always `item_reference`.", "x-stainless-const": true, "default": "item_reference" }, "id": { "type": "string", "description": "The ID of the item to reference." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.InputItem" } ], "description": "An internal identifier for an item to reference.", "title": "Item reference" }, "OpenAI.ItemResource": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ItemResourceType" } }, "discriminator": { "propertyName": "type", "mapping": { "message": "#/components/schemas/OpenAI.InputMessageResource", "output_message": "#/components/schemas/OpenAI.ItemResourceOutputMessage", "file_search_call": "#/components/schemas/OpenAI.ItemResourceFileSearchToolCall", "computer_call": "#/components/schemas/OpenAI.ItemResourceComputerToolCall", "computer_call_output": "#/components/schemas/OpenAI.ItemResourceComputerToolCallOutputResource", "web_search_call": "#/components/schemas/OpenAI.ItemResourceWebSearchToolCall", "function_call": "#/components/schemas/OpenAI.ItemResourceFunctionToolCallResource", "function_call_output": "#/components/schemas/OpenAI.ItemResourceFunctionToolCallOutputResource", "image_generation_call": "#/components/schemas/OpenAI.ItemResourceImageGenToolCall", "code_interpreter_call": "#/components/schemas/OpenAI.ItemResourceCodeInterpreterToolCall", "local_shell_call": "#/components/schemas/OpenAI.ItemResourceLocalShellToolCall", "local_shell_call_output": "#/components/schemas/OpenAI.ItemResourceLocalShellToolCallOutput", "shell_call": "#/components/schemas/OpenAI.ItemResourceFunctionShellCall", "shell_call_output": "#/components/schemas/OpenAI.ItemResourceFunctionShellCallOutput", "apply_patch_call": "#/components/schemas/OpenAI.ItemResourceApplyPatchToolCall", "apply_patch_call_output": "#/components/schemas/OpenAI.ItemResourceApplyPatchToolCallOutput", "mcp_list_tools": "#/components/schemas/OpenAI.ItemResourceMcpListTools", "mcp_approval_request": "#/components/schemas/OpenAI.ItemResourceMcpApprovalRequest", "mcp_approval_response": "#/components/schemas/OpenAI.ItemResourceMcpApprovalResponseResource", "mcp_call": 
"#/components/schemas/OpenAI.ItemResourceMcpToolCall" } }, "description": "Content item used to generate a response." }, "OpenAI.ItemResourceApplyPatchToolCall": { "type": "object", "required": [ "type", "id", "call_id", "status", "operation" ], "properties": { "type": { "type": "string", "enum": [ "apply_patch_call" ], "description": "The type of the item. Always `apply_patch_call`.", "x-stainless-const": true, "default": "apply_patch_call" }, "id": { "type": "string", "description": "The unique ID of the apply patch tool call. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the apply patch tool call generated by the model." }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchCallStatus" } ], "description": "The status of the apply patch tool call. One of `in_progress` or `completed`." }, "operation": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchFileOperation" } ], "description": "One of the create_file, delete_file, or update_file operations applied via apply_patch." }, "created_by": { "type": "string", "description": "The ID of the entity that created this tool call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A tool call that applies file diffs by creating, deleting, or updating files.", "title": "Apply patch tool call" }, "OpenAI.ItemResourceApplyPatchToolCallOutput": { "type": "object", "required": [ "type", "id", "call_id", "status" ], "properties": { "type": { "type": "string", "enum": [ "apply_patch_call_output" ], "description": "The type of the item. Always `apply_patch_call_output`.", "x-stainless-const": true, "default": "apply_patch_call_output" }, "id": { "type": "string", "description": "The unique ID of the apply patch tool call output. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the apply patch tool call generated by the model." 
}, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchCallOutputStatus" } ], "description": "The status of the apply patch tool call output. One of `completed` or `failed`." }, "output": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "created_by": { "type": "string", "description": "The ID of the entity that created this tool call output." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "The output emitted by an apply patch tool call.", "title": "Apply patch tool call output" }, "OpenAI.ItemResourceCodeInterpreterToolCall": { "type": "object", "required": [ "type", "id", "status", "container_id", "code", "outputs" ], "properties": { "type": { "type": "string", "enum": [ "code_interpreter_call" ], "description": "The type of the code interpreter tool call. Always `code_interpreter_call`.", "x-stainless-const": true, "default": "code_interpreter_call" }, "id": { "type": "string", "description": "The unique ID of the code interpreter tool call." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete", "interpreting", "failed" ], "description": "The status of the code interpreter tool call. Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`." }, "container_id": { "type": "string", "description": "The ID of the container used to run the code." 
}, "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "outputs": { "anyOf": [ { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CodeInterpreterOutputLogs" }, { "$ref": "#/components/schemas/OpenAI.CodeInterpreterOutputImage" } ] } }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A tool call to run code.", "title": "Code interpreter tool call" }, "OpenAI.ItemResourceComputerToolCall": { "type": "object", "required": [ "type", "id", "call_id", "action", "pending_safety_checks", "status" ], "properties": { "type": { "type": "string", "enum": [ "computer_call" ], "description": "The type of the computer call. Always `computer_call`.", "default": "computer_call" }, "id": { "type": "string", "description": "The unique ID of the computer call." }, "call_id": { "type": "string", "description": "An identifier used when responding to the tool call with output." }, "action": { "$ref": "#/components/schemas/OpenAI.ComputerAction" }, "pending_safety_checks": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ComputerCallSafetyCheckParam" }, "description": "The pending safety checks for the computer call." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n `incomplete`. Populated when items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A tool call to a computer use tool. See the\n[computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.", "title": "Computer tool call" }, "OpenAI.ItemResourceComputerToolCallOutputResource": { "type": "object", "required": [ "type", "call_id", "output" ], "properties": { "type": { "type": "string", "enum": [ "computer_call_output" ], "description": "The type of the computer tool call output. 
Always `computer_call_output`.", "x-stainless-const": true, "default": "computer_call_output" }, "id": { "type": "string", "description": "The ID of the computer tool call output." }, "call_id": { "type": "string", "description": "The ID of the computer tool call that produced the output." }, "acknowledged_safety_checks": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ComputerCallSafetyCheckParam" }, "description": "The safety checks reported by the API that have been acknowledged by the\n developer." }, "output": { "$ref": "#/components/schemas/OpenAI.ComputerScreenshotImage" }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the computer tool call output. One of `in_progress`, `completed`, or\n `incomplete`. Populated when items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ] }, "OpenAI.ItemResourceFileSearchToolCall": { "type": "object", "required": [ "id", "type", "status", "queries" ], "properties": { "id": { "type": "string", "description": "The unique ID of the file search tool call." }, "type": { "type": "string", "enum": [ "file_search_call" ], "description": "The type of the file search tool call. Always `file_search_call`.", "x-stainless-const": true }, "status": { "type": "string", "enum": [ "in_progress", "searching", "completed", "incomplete", "failed" ], "description": "The status of the file search tool call. One of `in_progress`,\n `searching`, `completed`, `incomplete` or `failed`." }, "queries": { "type": "array", "items": { "type": "string" }, "description": "The queries used to search for files." }, "results": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FileSearchToolCallResults" } }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "The results of a file search tool call. 
See the\n[file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.", "title": "File search tool call" }, "OpenAI.ItemResourceFunctionShellCall": { "type": "object", "required": [ "type", "id", "call_id", "action", "status" ], "properties": { "type": { "type": "string", "enum": [ "shell_call" ], "description": "The type of the item. Always `shell_call`.", "x-stainless-const": true, "default": "shell_call" }, "id": { "type": "string", "description": "The unique ID of the shell tool call. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the shell tool call generated by the model." }, "action": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.FunctionShellAction" } ], "description": "The shell commands and limits that describe how to run the tool call." }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.LocalShellCallStatus" } ], "description": "The status of the shell call. One of `in_progress`, `completed`, or `incomplete`." }, "created_by": { "type": "string", "description": "The ID of the entity that created this tool call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A tool call that executes one or more shell commands in a managed environment.", "title": "Shell tool call" }, "OpenAI.ItemResourceFunctionShellCallOutput": { "type": "object", "required": [ "type", "id", "call_id", "output", "max_output_length" ], "properties": { "type": { "type": "string", "enum": [ "shell_call_output" ], "description": "The type of the shell call output. Always `shell_call_output`.", "x-stainless-const": true, "default": "shell_call_output" }, "id": { "type": "string", "description": "The unique ID of the shell call output. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the shell tool call generated by the model." 
}, "output": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FunctionShellCallOutputContent" }, "description": "An array of shell call output contents" }, "max_output_length": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "created_by": { "type": "string", "description": "The identifier of the actor that created the item." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "The output of a shell tool call that was emitted.", "title": "Shell call output" }, "OpenAI.ItemResourceFunctionToolCallOutputResource": { "type": "object", "required": [ "type", "call_id", "output" ], "properties": { "id": { "type": "string", "description": "The unique ID of the function tool call output. Populated when this item\n is returned via API." }, "type": { "type": "string", "enum": [ "function_call_output" ], "description": "The type of the function tool call output. Always `function_call_output`.", "x-stainless-const": true }, "call_id": { "type": "string", "description": "The unique ID of the function tool call generated by the model." }, "output": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FunctionAndCustomToolCallOutput" } } ], "description": "The output from the function call generated by your code.\n Can be a string or an list of output content." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n `incomplete`. Populated when items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ] }, "OpenAI.ItemResourceFunctionToolCallResource": { "type": "object", "required": [ "type", "call_id", "name", "arguments" ], "properties": { "id": { "type": "string", "description": "The unique ID of the function tool call." 
}, "type": { "type": "string", "enum": [ "function_call" ], "description": "The type of the function tool call. Always `function_call`.", "x-stainless-const": true }, "call_id": { "type": "string", "description": "The unique ID of the function tool call generated by the model." }, "name": { "type": "string", "description": "The name of the function to run." }, "arguments": { "type": "string", "description": "A JSON string of the arguments to pass to the function." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n `incomplete`. Populated when items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ] }, "OpenAI.ItemResourceImageGenToolCall": { "type": "object", "required": [ "type", "id", "status", "result" ], "properties": { "type": { "type": "string", "enum": [ "image_generation_call" ], "description": "The type of the image generation call. Always `image_generation_call`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the image generation call." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "generating", "failed" ], "description": "The status of the image generation call." }, "result": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "An image generation request made by the model.", "title": "Image generation call" }, "OpenAI.ItemResourceLocalShellToolCall": { "type": "object", "required": [ "type", "id", "call_id", "action", "status" ], "properties": { "type": { "type": "string", "enum": [ "local_shell_call" ], "description": "The type of the local shell call. Always `local_shell_call`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the local shell call." 
}, "call_id": { "type": "string", "description": "The unique ID of the local shell tool call generated by the model." }, "action": { "$ref": "#/components/schemas/OpenAI.LocalShellExecAction" }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the local shell call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A tool call to run a command on the local shell.", "title": "Local shell call" }, "OpenAI.ItemResourceLocalShellToolCallOutput": { "type": "object", "required": [ "type", "id", "output" ], "properties": { "type": { "type": "string", "enum": [ "local_shell_call_output" ], "description": "The type of the local shell tool call output. Always `local_shell_call_output`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the local shell tool call generated by the model." }, "output": { "type": "string", "description": "A JSON string of the output of the local shell tool call." }, "status": { "anyOf": [ { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ] }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "The output of a local shell tool call.", "title": "Local shell call output" }, "OpenAI.ItemResourceMcpApprovalRequest": { "type": "object", "required": [ "type", "id", "server_label", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "mcp_approval_request" ], "description": "The type of the item. Always `mcp_approval_request`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the approval request." }, "server_label": { "type": "string", "description": "The label of the MCP server making the request." }, "name": { "type": "string", "description": "The name of the tool to run." }, "arguments": { "type": "string", "description": "A JSON string of arguments for the tool." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A request for human approval of a tool invocation.", "title": "MCP approval request" }, "OpenAI.ItemResourceMcpApprovalResponseResource": { "type": "object", "required": [ "type", "id", "approval_request_id", "approve" ], "properties": { "type": { "type": "string", "enum": [ "mcp_approval_response" ], "description": "The type of the item. Always `mcp_approval_response`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the approval response" }, "approval_request_id": { "type": "string", "description": "The ID of the approval request being answered." }, "approve": { "type": "boolean", "description": "Whether the request was approved." }, "reason": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A response to an MCP approval request.", "title": "MCP approval response" }, "OpenAI.ItemResourceMcpListTools": { "type": "object", "required": [ "type", "id", "server_label", "tools" ], "properties": { "type": { "type": "string", "enum": [ "mcp_list_tools" ], "description": "The type of the item. Always `mcp_list_tools`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the list." }, "server_label": { "type": "string", "description": "The label of the MCP server." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.MCPListToolsTool" }, "description": "The tools available on the server." 
}, "error": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A list of tools available on an MCP server.", "title": "MCP list tools" }, "OpenAI.ItemResourceMcpToolCall": { "type": "object", "required": [ "type", "id", "server_label", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "mcp_call" ], "description": "The type of the item. Always `mcp_call`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the tool call." }, "server_label": { "type": "string", "description": "The label of the MCP server running the tool." }, "name": { "type": "string", "description": "The name of the tool that was run." }, "arguments": { "type": "string", "description": "A JSON string of the arguments passed to the tool." }, "output": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "error": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.MCPToolCallStatus" } ], "description": "The status of the tool call. One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`." }, "approval_request_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "An invocation of a tool on an MCP server.", "title": "MCP tool call" }, "OpenAI.ItemResourceOutputMessage": { "type": "object", "required": [ "id", "type", "role", "content", "status" ], "properties": { "id": { "type": "string", "description": "The unique ID of the output message.", "x-stainless-go-json": "omitzero" }, "type": { "type": "string", "enum": [ "output_message" ], "description": "The type of the output message. Always `output_message`.", "x-stainless-const": true }, "role": { "type": "string", "enum": [ "assistant" ], "description": "The role of the output message. 
Always `assistant`.", "x-stainless-const": true }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.OutputMessageContent" }, "description": "The content of the output message." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the message input. One of `in_progress`, `completed`, or\n `incomplete`. Populated when input items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "An output message from the model.", "title": "Output message" }, "OpenAI.ItemResourceType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "message", "output_message", "file_search_call", "computer_call", "computer_call_output", "web_search_call", "function_call", "function_call_output", "image_generation_call", "code_interpreter_call", "local_shell_call", "local_shell_call_output", "shell_call", "shell_call_output", "apply_patch_call", "apply_patch_call_output", "mcp_list_tools", "mcp_approval_request", "mcp_approval_response", "mcp_call" ] } ] }, "OpenAI.ItemResourceWebSearchToolCall": { "type": "object", "required": [ "id", "type", "status", "action" ], "properties": { "id": { "type": "string", "description": "The unique ID of the web search tool call." }, "type": { "type": "string", "enum": [ "web_search_call" ], "description": "The type of the web search tool call. Always `web_search_call`.", "x-stainless-const": true }, "status": { "type": "string", "enum": [ "in_progress", "searching", "completed", "failed" ], "description": "The status of the web search tool call." 
}, "action": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.WebSearchActionSearch" }, { "$ref": "#/components/schemas/OpenAI.WebSearchActionOpenPage" }, { "$ref": "#/components/schemas/OpenAI.WebSearchActionFind" } ], "description": "An object describing the specific action taken in this web search call.\n Includes details on how the model used the web (search, open_page, find)." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "The results of a web search tool call. See the\n[web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.", "title": "Web search tool call" }, "OpenAI.KeyPressAction": { "type": "object", "required": [ "type", "keys" ], "properties": { "type": { "type": "string", "enum": [ "keypress" ], "description": "Specifies the event type. For a keypress action, this property is always set to `keypress`.", "x-stainless-const": true, "default": "keypress" }, "keys": { "type": "array", "items": { "type": "string" }, "description": "The combination of keys the model is requesting to be pressed. This is an array of strings, each representing a key." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A collection of keypresses the model would like to perform.", "title": "KeyPress" }, "OpenAI.ListBatchesResponse": { "type": "object", "required": [ "data", "has_more", "object" ], "properties": { "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Batch" } }, "first_id": { "type": "string" }, "last_id": { "type": "string" }, "has_more": { "type": "boolean" }, "object": { "type": "string", "enum": [ "list" ], "x-stainless-const": true } } }, "OpenAI.ListFilesResponse": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.OpenAIFile" } }, "first_id": { "type": "string" }, "last_id": { "type": "string" }, "has_more": { "type": "boolean" } } }, "OpenAI.ListFineTuningCheckpointPermissionResponse": { "type": "object", "required": [ "data", "object", "has_more" ], "properties": { "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FineTuningCheckpointPermission" } }, "object": { "type": "string", "enum": [ "list" ], "x-stainless-const": true }, "first_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "last_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "has_more": { "type": "boolean" } } }, "OpenAI.ListFineTuningJobCheckpointsResponse": { "type": "object", "required": [ "data", "object", "has_more" ], "properties": { "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FineTuningJobCheckpoint" } }, "object": { "type": "string", "enum": [ "list" ], "x-stainless-const": true }, "first_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "last_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "has_more": { "type": "boolean" } } }, "OpenAI.ListFineTuningJobEventsResponse": { "type": "object", "required": [ 
"data", "object", "has_more" ], "properties": { "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FineTuningJobEvent" } }, "object": { "type": "string", "enum": [ "list" ], "x-stainless-const": true }, "has_more": { "type": "boolean" } } }, "OpenAI.ListMessagesResponse": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.MessageObject" } }, "first_id": { "type": "string" }, "last_id": { "type": "string" }, "has_more": { "type": "boolean" } } }, "OpenAI.ListModelsResponse": { "type": "object", "required": [ "object", "data" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "x-stainless-const": true }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Model" } } } }, "OpenAI.ListPaginatedFineTuningJobsResponse": { "type": "object", "required": [ "data", "has_more", "object" ], "properties": { "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } }, "has_more": { "type": "boolean" }, "object": { "type": "string", "enum": [ "list" ], "x-stainless-const": true } } }, "OpenAI.ListRunStepsResponse": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.RunStepObject" } }, "first_id": { "type": "string" }, "last_id": { "type": "string" }, "has_more": { "type": "boolean" } } }, "OpenAI.ListRunsResponse": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.RunObject" } }, "first_id": { "type": "string" }, "last_id": { "type": "string" }, "has_more": { "type": "boolean" } } }, 
"OpenAI.ListVectorStoreFilesResponse": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileObject" } }, "first_id": { "type": "string" }, "last_id": { "type": "string" }, "has_more": { "type": "boolean" } } }, "OpenAI.ListVectorStoresResponse": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.VectorStoreObject" } }, "first_id": { "type": "string" }, "last_id": { "type": "string" }, "has_more": { "type": "boolean" } } }, "OpenAI.LocalShellCallStatus": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ] }, "OpenAI.LocalShellExecAction": { "type": "object", "required": [ "type", "command", "env" ], "properties": { "type": { "type": "string", "enum": [ "exec" ], "description": "The type of the local shell action. Always `exec`.", "x-stainless-const": true, "default": "exec" }, "command": { "type": "array", "items": { "type": "string" }, "description": "The command to run." }, "timeout_ms": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "working_directory": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "env": { "type": "object", "unevaluatedProperties": { "type": "string" }, "description": "Environment variables to set for the command.", "x-oaiTypeLabel": "map" }, "user": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "description": "Execute a shell command on the server.", "title": "Local shell exec action" }, "OpenAI.LocalShellToolParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "local_shell" ], "description": "The type of the local shell tool. 
Always `local_shell`.", "x-stainless-const": true, "default": "local_shell" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A tool that allows the model to execute shell commands in a local environment.", "title": "Local shell tool" }, "OpenAI.LogProb": { "type": "object", "required": [ "token", "logprob", "bytes", "top_logprobs" ], "properties": { "token": { "type": "string" }, "logprob": { "type": "number" }, "bytes": { "type": "array", "items": { "type": "integer" } }, "top_logprobs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.TopLogProb" } } }, "description": "The log probability of a token.", "title": "Log probability" }, "OpenAI.MCPListToolsTool": { "type": "object", "required": [ "name", "input_schema" ], "properties": { "name": { "type": "string", "description": "The name of the tool." }, "description": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "input_schema": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.MCPListToolsToolInputSchema" } ], "description": "The JSON schema describing the tool's input." }, "annotations": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.MCPListToolsToolAnnotations" }, { "type": "null" } ] } }, "description": "A tool available on an MCP server.", "title": "MCP list tools tool" }, "OpenAI.MCPListToolsToolAnnotations": { "type": "object" }, "OpenAI.MCPListToolsToolInputSchema": { "type": "object" }, "OpenAI.MCPTool": { "type": "object", "required": [ "type", "server_label" ], "properties": { "type": { "type": "string", "enum": [ "mcp" ], "description": "The type of the MCP tool. Always `mcp`.", "x-stainless-const": true }, "server_label": { "type": "string", "description": "A label for this MCP server, used to identify it in tool calls." }, "server_url": { "type": "string", "format": "uri", "description": "The URL for the MCP server. One of `server_url` or `connector_id` must be\n provided." 
}, "connector_id": { "type": "string", "enum": [ "connector_dropbox", "connector_gmail", "connector_googlecalendar", "connector_googledrive", "connector_microsoftteams", "connector_outlookcalendar", "connector_outlookemail", "connector_sharepoint" ], "description": "Identifier for service connectors, like those available in ChatGPT. One of\n `server_url` or `connector_id` must be provided. Learn more about service\n connectors [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).\n Currently supported `connector_id` values are:\n - Dropbox: `connector_dropbox`\n - Gmail: `connector_gmail`\n - Google Calendar: `connector_googlecalendar`\n - Google Drive: `connector_googledrive`\n - Microsoft Teams: `connector_microsoftteams`\n - Outlook Calendar: `connector_outlookcalendar`\n - Outlook Email: `connector_outlookemail`\n - SharePoint: `connector_sharepoint`" }, "authorization": { "type": "string", "description": "An OAuth access token that can be used with a remote MCP server, either\n with a custom MCP server URL or a service connector. Your application\n must handle the OAuth authorization flow and provide the token here." }, "server_description": { "type": "string", "description": "Optional description of the MCP server, used to provide more context." }, "headers": { "anyOf": [ { "type": "object", "unevaluatedProperties": { "type": "string" } }, { "type": "null" } ] }, "allowed_tools": { "anyOf": [ { "type": "array", "items": { "type": "string" } }, { "$ref": "#/components/schemas/OpenAI.MCPToolFilter" }, { "type": "null" } ] }, "require_approval": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.MCPToolRequireApproval" }, { "type": "string", "enum": [ "always", "never" ] }, { "type": "null" } ], "default": "always" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "Give the model access to additional tools via remote Model Context Protocol\n(MCP) servers. 
[Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).", "title": "MCP tool" }, "OpenAI.MCPToolCallStatus": { "type": "string", "enum": [ "in_progress", "completed", "incomplete", "calling", "failed" ] }, "OpenAI.MCPToolFilter": { "type": "object", "properties": { "tool_names": { "type": "array", "items": { "type": "string" }, "description": "List of allowed tool names.", "title": "MCP allowed tools" }, "read_only": { "type": "boolean", "description": "Indicates whether or not a tool modifies data or is read-only. If an\n MCP server is [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),\n it will match this filter." } }, "description": "A filter object to specify which tools are allowed.", "title": "MCP tool filter" }, "OpenAI.MCPToolRequireApproval": { "type": "object", "properties": { "always": { "$ref": "#/components/schemas/OpenAI.MCPToolFilter" }, "never": { "$ref": "#/components/schemas/OpenAI.MCPToolFilter" } } }, "OpenAI.MessageContent": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.MessageContentType" } }, "discriminator": { "propertyName": "type", "mapping": { "image_url": "#/components/schemas/OpenAI.MessageContentImageUrlObject", "text": "#/components/schemas/OpenAI.MessageContentTextObject", "refusal": "#/components/schemas/OpenAI.MessageContentRefusalObject" } } }, "OpenAI.MessageContentImageFileObject": { "type": "object", "required": [ "type", "image_file" ], "properties": { "type": { "type": "string", "enum": [ "image_file" ], "description": "Always `image_file`.", "x-stainless-const": true }, "image_file": { "$ref": "#/components/schemas/OpenAI.MessageContentImageFileObjectImageFile" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.MessageContent" } ], "description": "References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.", 
"title": "Image file" }, "OpenAI.MessageContentImageFileObjectImageFile": { "type": "object", "required": [ "file_id" ], "properties": { "file_id": { "type": "string" }, "detail": { "type": "string", "enum": [ "auto", "low", "high" ], "default": "auto" } } }, "OpenAI.MessageContentImageUrlObject": { "type": "object", "required": [ "type", "image_url" ], "properties": { "type": { "type": "string", "enum": [ "image_url" ], "description": "The type of the content part.", "x-stainless-const": true }, "image_url": { "$ref": "#/components/schemas/OpenAI.MessageContentImageUrlObjectImageUrl" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.MessageContent" } ], "description": "References an image URL in the content of a message.", "title": "Image URL" }, "OpenAI.MessageContentImageUrlObjectImageUrl": { "type": "object", "required": [ "url" ], "properties": { "url": { "type": "string", "format": "uri" }, "detail": { "type": "string", "enum": [ "auto", "low", "high" ], "default": "auto" } } }, "OpenAI.MessageContentRefusalObject": { "type": "object", "required": [ "type", "refusal" ], "properties": { "type": { "type": "string", "enum": [ "refusal" ], "description": "Always `refusal`.", "x-stainless-const": true }, "refusal": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.MessageContent" } ], "description": "The refusal content generated by the assistant.", "title": "Refusal" }, "OpenAI.MessageContentTextAnnotationsFileCitationObject": { "type": "object", "required": [ "type", "text", "file_citation", "start_index", "end_index" ], "properties": { "type": { "type": "string", "enum": [ "file_citation" ], "description": "Always `file_citation`.", "x-stainless-const": true }, "text": { "type": "string", "description": "The text in the message content that needs to be replaced." 
}, "file_citation": { "$ref": "#/components/schemas/OpenAI.MessageContentTextAnnotationsFileCitationObjectFileCitation" }, "start_index": { "type": "integer", "minimum": 0 }, "end_index": { "type": "integer", "minimum": 0 } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.TextAnnotation" } ], "description": "A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the \"file_search\" tool to search files.", "title": "File citation" }, "OpenAI.MessageContentTextAnnotationsFileCitationObjectFileCitation": { "type": "object", "required": [ "file_id" ], "properties": { "file_id": { "type": "string" } } }, "OpenAI.MessageContentTextAnnotationsFilePathObject": { "type": "object", "required": [ "type", "text", "file_path", "start_index", "end_index" ], "properties": { "type": { "type": "string", "enum": [ "file_path" ], "description": "Always `file_path`.", "x-stainless-const": true }, "text": { "type": "string", "description": "The text in the message content that needs to be replaced." 
}, "file_path": { "$ref": "#/components/schemas/OpenAI.MessageContentTextAnnotationsFilePathObjectFilePath" }, "start_index": { "type": "integer", "minimum": 0 }, "end_index": { "type": "integer", "minimum": 0 } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.TextAnnotation" } ], "description": "A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file.", "title": "File path" }, "OpenAI.MessageContentTextAnnotationsFilePathObjectFilePath": { "type": "object", "required": [ "file_id" ], "properties": { "file_id": { "type": "string" } } }, "OpenAI.MessageContentTextObject": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "Always `text`.", "x-stainless-const": true }, "text": { "$ref": "#/components/schemas/OpenAI.MessageContentTextObjectText" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.MessageContent" } ], "description": "The text content that is part of a message.", "title": "Text" }, "OpenAI.MessageContentTextObjectText": { "type": "object", "required": [ "value", "annotations" ], "properties": { "value": { "type": "string" }, "annotations": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.TextAnnotation" } } } }, "OpenAI.MessageContentType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "image_file", "image_url", "text", "refusal" ] } ] }, "OpenAI.MessageObject": { "type": "object", "required": [ "id", "object", "created_at", "thread_id", "status", "incomplete_details", "completed_at", "incomplete_at", "role", "content", "assistant_id", "run_id", "attachments", "metadata" ], "properties": { "id": { "type": "string", "description": "The identifier, which can be referenced in API endpoints." 
}, "object": { "type": "string", "enum": [ "thread.message" ], "description": "The object type, which is always `thread.message`.", "x-stainless-const": true }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the message was created." }, "thread_id": { "type": "string", "description": "The [thread](https://platform.openai.com/docs/api-reference/threads) ID that this message belongs to." }, "status": { "type": "string", "enum": [ "in_progress", "incomplete", "completed" ], "description": "The status of the message, which can be either `in_progress`, `incomplete`, or `completed`." }, "incomplete_details": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.MessageObjectIncompleteDetails" }, { "type": "null" } ] }, "completed_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ] }, "incomplete_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ] }, "role": { "type": "string", "enum": [ "user", "assistant" ], "description": "The entity that produced the message. One of `user` or `assistant`." }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.MessageContent" }, "description": "The content of the message in array of text and/or images." 
}, "assistant_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "run_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "attachments": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.MessageObjectAttachments" } }, { "type": "null" } ] }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } }, "description": "Represents a message within a [thread](https://platform.openai.com/docs/api-reference/threads).", "title": "The message object", "x-oaiMeta": { "name": "The message object", "beta": true, "example": "{\n \"id\": \"msg_abc123\",\n \"object\": \"thread.message\",\n \"created_at\": 1698983503,\n \"thread_id\": \"thread_abc123\",\n \"role\": \"assistant\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": {\n \"value\": \"Hi! How can I help you today?\",\n \"annotations\": []\n }\n }\n ],\n \"assistant_id\": \"asst_abc123\",\n \"run_id\": \"run_abc123\",\n \"attachments\": [],\n \"metadata\": {}\n}\n" } }, "OpenAI.MessageObjectAttachments": { "type": "object", "properties": { "file_id": { "type": "string" }, "tools": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.AssistantToolsCode" }, { "$ref": "#/components/schemas/OpenAI.AssistantToolsFileSearchTypeOnly" } ] } } } }, "OpenAI.MessageObjectIncompleteDetails": { "type": "object", "required": [ "reason" ], "properties": { "reason": { "type": "string", "enum": [ "content_filter", "max_tokens", "run_cancelled", "run_expired", "run_failed" ] } } }, "OpenAI.MessageRequestContentTextObject": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "Always `text`.", "x-stainless-const": true }, "text": { "type": "string", "description": "Text content to be sent to the model" } }, "description": "The text content that is part of a message.", "title": "Text" }, "OpenAI.MessageRole": { "type": "string", "enum": 
[ "unknown", "user", "assistant", "system", "critic", "discriminator", "developer", "tool" ] }, "OpenAI.MessageStatus": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ] }, "OpenAI.Metadata": { "type": "object", "unevaluatedProperties": { "type": "string" }, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "OpenAI.Model": { "type": "object", "required": [ "id", "created", "object", "owned_by" ], "properties": { "id": { "type": "string", "description": "The model identifier, which can be referenced in the API endpoints." }, "created": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) when the model was created." }, "object": { "type": "string", "enum": [ "model" ], "description": "The object type, which is always \"model\".", "x-stainless-const": true }, "owned_by": { "type": "string", "description": "The organization that owns the model." 
} }, "description": "Describes an OpenAI model offering that can be used with the API.", "title": "Model", "x-oaiMeta": { "name": "The model object", "example": "{\n \"id\": \"VAR_chat_model_id\",\n \"object\": \"model\",\n \"created\": 1686935002,\n \"owned_by\": \"openai\"\n}\n" } }, "OpenAI.ModifyMessageRequest": { "type": "object", "properties": { "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } } }, "OpenAI.ModifyRunRequest": { "type": "object", "properties": { "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } } }, "OpenAI.ModifyThreadRequest": { "type": "object", "properties": { "tool_resources": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ModifyThreadRequestToolResources" }, { "type": "null" } ] }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } } }, "OpenAI.ModifyThreadRequestToolResources": { "type": "object", "properties": { "code_interpreter": { "$ref": "#/components/schemas/OpenAI.ModifyThreadRequestToolResourcesCodeInterpreter" }, "file_search": { "$ref": "#/components/schemas/OpenAI.ModifyThreadRequestToolResourcesFileSearch" } } }, "OpenAI.ModifyThreadRequestToolResourcesCodeInterpreter": { "type": "object", "properties": { "file_ids": { "type": "array", "items": { "type": "string" }, "maxItems": 20 } } }, "OpenAI.ModifyThreadRequestToolResourcesFileSearch": { "type": "object", "properties": { "vector_store_ids": { "type": "array", "items": { "type": "string" }, "maxItems": 1 } } }, "OpenAI.Move": { "type": "object", "required": [ "type", "x", "y" ], "properties": { "type": { "type": "string", "enum": [ "move" ], "description": "Specifies the event type. For a move action, this property is\n always set to `move`.", "x-stainless-const": true, "default": "move" }, "x": { "type": "integer", "description": "The x-coordinate to move to." 
}, "y": { "type": "integer", "description": "The y-coordinate to move to." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A mouse move action.", "title": "Move" }, "OpenAI.NoiseReductionType": { "type": "string", "enum": [ "near_field", "far_field" ], "description": "Type of noise reduction. `near_field` is for close-talking microphones such as headphones, `far_field` is for far-field microphones such as laptop or conference room microphones." }, "OpenAI.OpenAIFile": { "type": "object", "required": [ "id", "bytes", "created_at", "filename", "object", "purpose", "status" ], "properties": { "id": { "type": "string", "description": "The file identifier, which can be referenced in the API endpoints." }, "bytes": { "type": "integer", "description": "The size of the file, in bytes." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the file was created." }, "expires_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the file will expire." }, "filename": { "type": "string", "description": "The name of the file." }, "object": { "type": "string", "enum": [ "file" ], "description": "The object type, which is always `file`.", "x-stainless-const": true }, "status_details": { "type": "string", "description": "Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`.", "deprecated": true }, "purpose": { "type": "string", "enum": [ "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "evals" ], "description": "The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune` and `fine-tune-results`." 
}, "status": { "type": "string", "enum": [ "uploaded", "pending", "running", "processed", "error", "deleting", "deleted" ] } }, "description": "The `File` object represents a document that has been uploaded to OpenAI.", "title": "OpenAIFile", "x-oaiMeta": { "name": "The file object", "example": "{\n \"id\": \"file-abc123\",\n \"object\": \"file\",\n \"bytes\": 120000,\n \"created_at\": 1677610602,\n \"expires_at\": 1680202602,\n \"filename\": \"salesOverview.pdf\",\n \"purpose\": \"assistants\"\n}\n" } }, "OpenAI.OtherChunkingStrategyResponseParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "other" ], "description": "Always `other`.", "x-stainless-const": true } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyResponse" } ], "description": "This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API.", "title": "Other Chunking Strategy" }, "OpenAI.OutputContent": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.OutputContentType" } }, "discriminator": { "propertyName": "type", "mapping": { "output_text": "#/components/schemas/OpenAI.OutputContentOutputTextContent", "refusal": "#/components/schemas/OpenAI.OutputContentRefusalContent" } } }, "OpenAI.OutputContentOutputTextContent": { "type": "object", "required": [ "type", "text", "annotations" ], "properties": { "type": { "type": "string", "enum": [ "output_text" ], "description": "The type of the output text. Always `output_text`.", "x-stainless-const": true, "default": "output_text" }, "text": { "type": "string", "description": "The text output from the model." }, "annotations": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Annotation" }, "description": "The annotations of the text output." 
}, "logprobs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.LogProb" } } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputContent" } ], "description": "A text output from the model.", "title": "Output text" }, "OpenAI.OutputContentRefusalContent": { "type": "object", "required": [ "type", "refusal" ], "properties": { "type": { "type": "string", "enum": [ "refusal" ], "description": "The type of the refusal. Always `refusal`.", "x-stainless-const": true, "default": "refusal" }, "refusal": { "type": "string", "description": "The refusal explanation from the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputContent" } ], "description": "A refusal from the model.", "title": "Refusal" }, "OpenAI.OutputContentType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "output_text", "refusal", "reasoning_text" ] } ] }, "OpenAI.OutputItem": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.OutputItemType" } }, "discriminator": { "propertyName": "type", "mapping": { "output_message": "#/components/schemas/OpenAI.OutputItemOutputMessage", "file_search_call": "#/components/schemas/OpenAI.OutputItemFileSearchToolCall", "function_call": "#/components/schemas/OpenAI.OutputItemFunctionToolCall", "web_search_call": "#/components/schemas/OpenAI.OutputItemWebSearchToolCall", "computer_call": "#/components/schemas/OpenAI.OutputItemComputerToolCall", "reasoning": "#/components/schemas/OpenAI.OutputItemReasoningItem", "compaction": "#/components/schemas/OpenAI.OutputItemCompactionBody", "image_generation_call": "#/components/schemas/OpenAI.OutputItemImageGenToolCall", "code_interpreter_call": "#/components/schemas/OpenAI.OutputItemCodeInterpreterToolCall", "local_shell_call": "#/components/schemas/OpenAI.OutputItemLocalShellToolCall", "shell_call": "#/components/schemas/OpenAI.OutputItemFunctionShellCall", "shell_call_output": 
"#/components/schemas/OpenAI.OutputItemFunctionShellCallOutput", "apply_patch_call": "#/components/schemas/OpenAI.OutputItemApplyPatchToolCall", "apply_patch_call_output": "#/components/schemas/OpenAI.OutputItemApplyPatchToolCallOutput", "mcp_call": "#/components/schemas/OpenAI.OutputItemMcpToolCall", "mcp_list_tools": "#/components/schemas/OpenAI.OutputItemMcpListTools", "mcp_approval_request": "#/components/schemas/OpenAI.OutputItemMcpApprovalRequest", "custom_tool_call": "#/components/schemas/OpenAI.OutputItemCustomToolCall" } } }, "OpenAI.OutputItemApplyPatchToolCall": { "type": "object", "required": [ "type", "id", "call_id", "status", "operation" ], "properties": { "type": { "type": "string", "enum": [ "apply_patch_call" ], "description": "The type of the item. Always `apply_patch_call`.", "x-stainless-const": true, "default": "apply_patch_call" }, "id": { "type": "string", "description": "The unique ID of the apply patch tool call. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the apply patch tool call generated by the model." }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchCallStatus" } ], "description": "The status of the apply patch tool call. One of `in_progress` or `completed`." }, "operation": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchFileOperation" } ], "description": "One of the create_file, delete_file, or update_file operations applied via apply_patch." }, "created_by": { "type": "string", "description": "The ID of the entity that created this tool call." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "A tool call that applies file diffs by creating, deleting, or updating files.", "title": "Apply patch tool call" }, "OpenAI.OutputItemApplyPatchToolCallOutput": { "type": "object", "required": [ "type", "id", "call_id", "status" ], "properties": { "type": { "type": "string", "enum": [ "apply_patch_call_output" ], "description": "The type of the item. Always `apply_patch_call_output`.", "x-stainless-const": true, "default": "apply_patch_call_output" }, "id": { "type": "string", "description": "The unique ID of the apply patch tool call output. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the apply patch tool call generated by the model." }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ApplyPatchCallOutputStatus" } ], "description": "The status of the apply patch tool call output. One of `completed` or `failed`." }, "output": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "created_by": { "type": "string", "description": "The ID of the entity that created this tool call output." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "The output emitted by an apply patch tool call.", "title": "Apply patch tool call output" }, "OpenAI.OutputItemCodeInterpreterToolCall": { "type": "object", "required": [ "type", "id", "status", "container_id", "code", "outputs" ], "properties": { "type": { "type": "string", "enum": [ "code_interpreter_call" ], "description": "The type of the code interpreter tool call. Always `code_interpreter_call`.", "x-stainless-const": true, "default": "code_interpreter_call" }, "id": { "type": "string", "description": "The unique ID of the code interpreter tool call." 
}, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete", "interpreting", "failed" ], "description": "The status of the code interpreter tool call. Valid values are `in_progress`, `completed`, `incomplete`, `interpreting`, and `failed`." }, "container_id": { "type": "string", "description": "The ID of the container used to run the code." }, "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "outputs": { "anyOf": [ { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.CodeInterpreterOutputLogs" }, { "$ref": "#/components/schemas/OpenAI.CodeInterpreterOutputImage" } ] } }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "A tool call to run code.", "title": "Code interpreter tool call" }, "OpenAI.OutputItemCompactionBody": { "type": "object", "required": [ "type", "id", "encrypted_content" ], "properties": { "type": { "type": "string", "enum": [ "compaction" ], "description": "The type of the item. Always `compaction`.", "x-stainless-const": true, "default": "compaction" }, "id": { "type": "string", "description": "The unique ID of the compaction item." }, "encrypted_content": { "type": "string", "description": "The encrypted content that was produced by compaction." }, "created_by": { "type": "string", "description": "The identifier of the actor that created the item." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "A compaction item generated by the [`v1/responses/compact` API](https://platform.openai.com/docs/api-reference/responses/compact).", "title": "Compaction item" }, "OpenAI.OutputItemComputerToolCall": { "type": "object", "required": [ "type", "id", "call_id", "action", "pending_safety_checks", "status" ], "properties": { "type": { "type": "string", "enum": [ "computer_call" ], "description": "The type of the computer call. Always `computer_call`.",
"default": "computer_call" }, "id": { "type": "string", "description": "The unique ID of the computer call." }, "call_id": { "type": "string", "description": "An identifier used when responding to the tool call with output." }, "action": { "$ref": "#/components/schemas/OpenAI.ComputerAction" }, "pending_safety_checks": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ComputerCallSafetyCheckParam" }, "description": "The pending safety checks for the computer call." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n `incomplete`. Populated when items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "A tool call to a computer use tool. See the\n[computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.", "title": "Computer tool call" }, "OpenAI.OutputItemCustomToolCall": { "type": "object", "required": [ "type", "call_id", "name", "input" ], "properties": { "type": { "type": "string", "enum": [ "custom_tool_call" ], "description": "The type of the custom tool call. Always `custom_tool_call`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the custom tool call in the OpenAI platform." }, "call_id": { "type": "string", "description": "An identifier used to map this custom tool call to a tool call output." }, "name": { "type": "string", "description": "The name of the custom tool being called." }, "input": { "type": "string", "description": "The input for the custom tool call generated by the model." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "A call to a custom tool created by the model.", "title": "Custom tool call" }, "OpenAI.OutputItemFileSearchToolCall": { "type": "object", "required": [ "id", "type", "status", "queries" ], "properties": { "id": { "type": "string", "description": "The unique ID of the file search tool call." }, "type": { "type": "string", "enum": [ "file_search_call" ], "description": "The type of the file search tool call. Always `file_search_call`.", "x-stainless-const": true }, "status": { "type": "string", "enum": [ "in_progress", "searching", "completed", "incomplete", "failed" ], "description": "The status of the file search tool call. One of `in_progress`,\n `searching`, `completed`, `incomplete`, or `failed`." }, "queries": { "type": "array", "items": { "type": "string" }, "description": "The queries used to search for files." }, "results": { "anyOf": [ { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FileSearchToolCallResults" } }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "The results of a file search tool call. See the\n[file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.", "title": "File search tool call" }, "OpenAI.OutputItemFunctionShellCall": { "type": "object", "required": [ "type", "id", "call_id", "action", "status" ], "properties": { "type": { "type": "string", "enum": [ "shell_call" ], "description": "The type of the item. Always `shell_call`.", "x-stainless-const": true, "default": "shell_call" }, "id": { "type": "string", "description": "The unique ID of the shell tool call. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the shell tool call generated by the model." 
}, "action": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.FunctionShellAction" } ], "description": "The shell commands and limits that describe how to run the tool call." }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.LocalShellCallStatus" } ], "description": "The status of the shell call. One of `in_progress`, `completed`, or `incomplete`." }, "created_by": { "type": "string", "description": "The ID of the entity that created this tool call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "A tool call that executes one or more shell commands in a managed environment.", "title": "Shell tool call" }, "OpenAI.OutputItemFunctionShellCallOutput": { "type": "object", "required": [ "type", "id", "call_id", "output", "max_output_length" ], "properties": { "type": { "type": "string", "enum": [ "shell_call_output" ], "description": "The type of the shell call output. Always `shell_call_output`.", "x-stainless-const": true, "default": "shell_call_output" }, "id": { "type": "string", "description": "The unique ID of the shell call output. Populated when this item is returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the shell tool call generated by the model." }, "output": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FunctionShellCallOutputContent" }, "description": "An array of shell call output contents" }, "max_output_length": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "created_by": { "type": "string", "description": "The identifier of the actor that created the item." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "The output of a shell tool call that was emitted.", "title": "Shell call output" }, "OpenAI.OutputItemFunctionToolCall": { "type": "object", "required": [ "type", "call_id", "name", "arguments" ], "properties": { "id": { "type": "string", "description": "The unique ID of the function tool call." }, "type": { "type": "string", "enum": [ "function_call" ], "description": "The type of the function tool call. Always `function_call`.", "x-stainless-const": true }, "call_id": { "type": "string", "description": "The unique ID of the function tool call generated by the model." }, "name": { "type": "string", "description": "The name of the function to run." }, "arguments": { "type": "string", "description": "A JSON string of the arguments to pass to the function." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n `incomplete`. Populated when items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "A tool call to run a function. See the\n[function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.", "title": "Function tool call" }, "OpenAI.OutputItemImageGenToolCall": { "type": "object", "required": [ "type", "id", "status", "result" ], "properties": { "type": { "type": "string", "enum": [ "image_generation_call" ], "description": "The type of the image generation call. Always `image_generation_call`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the image generation call." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "generating", "failed" ], "description": "The status of the image generation call." 
}, "result": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "An image generation request made by the model.", "title": "Image generation call" }, "OpenAI.OutputItemLocalShellToolCall": { "type": "object", "required": [ "type", "id", "call_id", "action", "status" ], "properties": { "type": { "type": "string", "enum": [ "local_shell_call" ], "description": "The type of the local shell call. Always `local_shell_call`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the local shell call." }, "call_id": { "type": "string", "description": "The unique ID of the local shell tool call generated by the model." }, "action": { "$ref": "#/components/schemas/OpenAI.LocalShellExecAction" }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the local shell call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "A tool call to run a command on the local shell.", "title": "Local shell call" }, "OpenAI.OutputItemMcpApprovalRequest": { "type": "object", "required": [ "type", "id", "server_label", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "mcp_approval_request" ], "description": "The type of the item. Always `mcp_approval_request`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the approval request." }, "server_label": { "type": "string", "description": "The label of the MCP server making the request." }, "name": { "type": "string", "description": "The name of the tool to run." }, "arguments": { "type": "string", "description": "A JSON string of arguments for the tool." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "A request for human approval of a tool invocation.", "title": "MCP approval request" }, "OpenAI.OutputItemMcpListTools": { "type": "object", "required": [ "type", "id", "server_label", "tools" ], "properties": { "type": { "type": "string", "enum": [ "mcp_list_tools" ], "description": "The type of the item. Always `mcp_list_tools`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the list." }, "server_label": { "type": "string", "description": "The label of the MCP server." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.MCPListToolsTool" }, "description": "The tools available on the server." }, "error": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "A list of tools available on an MCP server.", "title": "MCP list tools" }, "OpenAI.OutputItemMcpToolCall": { "type": "object", "required": [ "type", "id", "server_label", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "mcp_call" ], "description": "The type of the item. Always `mcp_call`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique ID of the tool call." }, "server_label": { "type": "string", "description": "The label of the MCP server running the tool." }, "name": { "type": "string", "description": "The name of the tool that was run." }, "arguments": { "type": "string", "description": "A JSON string of the arguments passed to the tool." }, "output": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "error": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "status": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.MCPToolCallStatus" } ], "description": "The status of the tool call. One of `in_progress`, `completed`, `incomplete`, `calling`, or `failed`." 
}, "approval_request_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "An invocation of a tool on an MCP server.", "title": "MCP tool call" }, "OpenAI.OutputItemOutputMessage": { "type": "object", "required": [ "id", "type", "role", "content", "status" ], "properties": { "id": { "type": "string", "description": "The unique ID of the output message.", "x-stainless-go-json": "omitzero" }, "type": { "type": "string", "enum": [ "output_message" ], "description": "The type of the output message. Always `output_message`.", "x-stainless-const": true }, "role": { "type": "string", "enum": [ "assistant" ], "description": "The role of the output message. Always `assistant`.", "x-stainless-const": true }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.OutputMessageContent" }, "description": "The content of the output message." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the message input. One of `in_progress`, `completed`, or\n `incomplete`. Populated when input items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "An output message from the model.", "title": "Output message" }, "OpenAI.OutputItemReasoningItem": { "type": "object", "required": [ "type", "id", "summary" ], "properties": { "type": { "type": "string", "enum": [ "reasoning" ], "description": "The type of the object. Always `reasoning`.", "x-stainless-const": true }, "id": { "type": "string", "description": "The unique identifier of the reasoning content." }, "encrypted_content": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "summary": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Summary" }, "description": "Reasoning summary content." 
}, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ReasoningTextContent" }, "description": "Reasoning text content." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n `incomplete`. Populated when items are returned via API." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "A description of the chain of thought used by a reasoning model while generating\na response. Be sure to include these items in your `input` to the Responses API\nfor subsequent turns of a conversation if you are manually\n[managing context](https://platform.openai.com/docs/guides/conversation-state).", "title": "Reasoning" }, "OpenAI.OutputItemType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "output_message", "file_search_call", "function_call", "web_search_call", "computer_call", "reasoning", "compaction", "image_generation_call", "code_interpreter_call", "local_shell_call", "shell_call", "shell_call_output", "apply_patch_call", "apply_patch_call_output", "mcp_call", "mcp_list_tools", "mcp_approval_request", "custom_tool_call" ] } ] }, "OpenAI.OutputItemWebSearchToolCall": { "type": "object", "required": [ "id", "type", "status", "action" ], "properties": { "id": { "type": "string", "description": "The unique ID of the web search tool call." }, "type": { "type": "string", "enum": [ "web_search_call" ], "description": "The type of the web search tool call. Always `web_search_call`.", "x-stainless-const": true }, "status": { "type": "string", "enum": [ "in_progress", "searching", "completed", "failed" ], "description": "The status of the web search tool call." 
}, "action": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.WebSearchActionSearch" }, { "$ref": "#/components/schemas/OpenAI.WebSearchActionOpenPage" }, { "$ref": "#/components/schemas/OpenAI.WebSearchActionFind" } ], "description": "An object describing the specific action taken in this web search call.\n Includes details on how the model used the web (search, open_page, find)." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "The results of a web search tool call. See the\n[web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.", "title": "Web search tool call" }, "OpenAI.OutputMessageContent": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.OutputMessageContentType" } }, "discriminator": { "propertyName": "type", "mapping": { "output_text": "#/components/schemas/OpenAI.OutputMessageContentOutputTextContent", "refusal": "#/components/schemas/OpenAI.OutputMessageContentRefusalContent" } } }, "OpenAI.OutputMessageContentOutputTextContent": { "type": "object", "required": [ "type", "text", "annotations" ], "properties": { "type": { "type": "string", "enum": [ "output_text" ], "description": "The type of the output text. Always `output_text`.", "x-stainless-const": true, "default": "output_text" }, "text": { "type": "string", "description": "The text output from the model." }, "annotations": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Annotation" }, "description": "The annotations of the text output." 
}, "logprobs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.LogProb" } } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputMessageContent" } ], "description": "A text output from the model.", "title": "Output text" }, "OpenAI.OutputMessageContentRefusalContent": { "type": "object", "required": [ "type", "refusal" ], "properties": { "type": { "type": "string", "enum": [ "refusal" ], "description": "The type of the refusal. Always `refusal`.", "x-stainless-const": true, "default": "refusal" }, "refusal": { "type": "string", "description": "The refusal explanation from the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputMessageContent" } ], "description": "A refusal from the model.", "title": "Refusal" }, "OpenAI.OutputMessageContentType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "output_text", "refusal" ] } ] }, "OpenAI.OutputTextContent": { "type": "object", "required": [ "type", "text", "annotations" ], "properties": { "type": { "type": "string", "enum": [ "output_text" ], "description": "The type of the output text. Always `output_text`.", "x-stainless-const": true, "default": "output_text" }, "text": { "type": "string", "description": "The text output from the model." }, "annotations": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Annotation" }, "description": "The annotations of the text output." }, "logprobs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.LogProb" } } }, "description": "A text output from the model.", "title": "Output text" }, "OpenAI.ParallelToolCalls": { "type": "boolean", "description": "Whether to enable [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) during tool use." 
}, "OpenAI.PredictionContent": { "type": "object", "required": [ "type", "content" ], "properties": { "type": { "type": "string", "enum": [ "content" ], "description": "The type of the predicted content you want to provide. This type is\n currently always `content`.", "x-stainless-const": true }, "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartText" } } ], "description": "The content that should be matched when generating a model response.\n If generated tokens would match this content, the entire model response\n can be returned much more quickly." } }, "description": "Static predicted output content, such as the content of a text file that is\nbeing regenerated.", "title": "Static Content" }, "OpenAI.Prompt": { "type": "object", "required": [ "id" ], "properties": { "id": { "type": "string", "description": "The unique identifier of the prompt template to use." }, "version": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "variables": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsePromptVariables" }, { "type": "null" } ] } }, "description": "Reference to a prompt template and its variables.\n[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts)." }, "OpenAI.RankerVersionType": { "type": "string", "enum": [ "auto", "default-2024-11-15" ] }, "OpenAI.RankingOptions": { "type": "object", "properties": { "ranker": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RankerVersionType" } ], "description": "The ranker to use for the file search." }, "score_threshold": { "type": "number", "description": "The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will attempt to return only the most relevant results, but may return fewer results." 
}, "hybrid_search": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.HybridSearchOptions" } ], "description": "Weights that control how reciprocal rank fusion balances semantic embedding matches versus sparse keyword matches when hybrid search is enabled." } } }, "OpenAI.RealtimeAudioFormats": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.RealtimeAudioFormatsType" } }, "discriminator": { "propertyName": "type", "mapping": { "audio/pcm": "#/components/schemas/OpenAI.RealtimeAudioFormatsAudioPcm", "audio/pcmu": "#/components/schemas/OpenAI.RealtimeAudioFormatsAudioPcmu", "audio/pcma": "#/components/schemas/OpenAI.RealtimeAudioFormatsAudioPcma" } } }, "OpenAI.RealtimeAudioFormatsAudioPcm": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "audio/pcm" ] }, "rate": { "type": "number", "enum": [ 24000 ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeAudioFormats" } ] }, "OpenAI.RealtimeAudioFormatsAudioPcma": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "audio/pcma" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeAudioFormats" } ] }, "OpenAI.RealtimeAudioFormatsAudioPcmu": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "audio/pcmu" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeAudioFormats" } ] }, "OpenAI.RealtimeAudioFormatsType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "audio/pcm", "audio/pcmu", "audio/pcma" ] } ] }, "OpenAI.RealtimeCallCreateRequest": { "type": "object", "properties": { "sdp": { "type": "string", "description": "WebRTC Session Description Protocol (SDP) offer generated by the caller." 
}, "session": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestGA" } ], "description": "Optional session configuration to apply before the realtime session is\n created. Use the same parameters you would send in a [`create client secret`](https://platform.openai.com/docs/api-reference/realtime-sessions/create-realtime-client-secret)\n request." } }, "required": [ "sdp" ] }, "OpenAI.RealtimeCallReferRequest": { "type": "object", "required": [ "target_uri" ], "properties": { "target_uri": { "type": "string", "description": "URI that should appear in the SIP Refer-To header. Supports values like\n `tel:+14155550123` or `sip:agent\\@example.com`." } }, "description": "Parameters required to transfer a SIP call to a new destination using the\nRealtime API.", "title": "Realtime call refer request" }, "OpenAI.RealtimeCallRejectRequest": { "type": "object", "properties": { "status_code": { "type": "integer", "description": "SIP response code to send back to the caller. Defaults to `603` (Decline)\n when omitted." } }, "description": "Parameters used to decline an incoming SIP call handled by the Realtime API.", "title": "Realtime call reject request" }, "OpenAI.RealtimeCreateClientSecretRequest": { "type": "object", "properties": { "expires_after": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeCreateClientSecretRequestExpiresAfter" } ], "description": "Configuration for the client secret expiration. Expiration refers to the time after which\n a client secret will no longer be valid for creating sessions. The session itself may\n continue after that time once started. A secret can be used to create multiple sessions\n until it expires.", "title": "Client secret expiration" }, "session": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestUnion" } ], "description": "Session configuration to use for the client secret. Choose either a realtime\n session or a transcription session." 
} }, "description": "Create a session and client secret for the Realtime API. The request can specify\neither a realtime or a transcription session configuration.\n[Learn more about the Realtime API](https://platform.openai.com/docs/guides/realtime).", "title": "Realtime client secret creation request" }, "OpenAI.RealtimeCreateClientSecretRequestExpiresAfter": { "type": "object", "properties": { "anchor": { "type": "string", "enum": [ "created_at" ], "x-stainless-const": true, "default": "created_at" }, "seconds": { "type": "integer", "minimum": 10, "maximum": 7200, "default": 600 } } }, "OpenAI.RealtimeCreateClientSecretResponse": { "type": "object", "required": [ "value", "expires_at", "session" ], "properties": { "value": { "type": "string", "description": "The generated client secret value." }, "expires_at": { "type": "integer", "format": "unixtime", "description": "Expiration timestamp for the client secret, in seconds since epoch." }, "session": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateResponseUnion" } ], "description": "The session configuration for either a realtime or transcription session." 
} }, "description": "Response from creating a session and client secret for the Realtime API.", "title": "Realtime session and client secret", "x-oaiMeta": { "name": "Session response object", "group": "realtime", "example": "{\n \"value\": \"ek_68af296e8e408191a1120ab6383263c2\",\n \"expires_at\": 1756310470,\n \"session\": {\n \"type\": \"realtime\",\n \"object\": \"realtime.session\",\n \"id\": \"sess_C9CiUVUzUzYIssh3ELY1d\",\n \"model\": \"gpt-realtime-2025-08-25\",\n \"output_modalities\": [\n \"audio\"\n ],\n \"instructions\": \"You are a friendly assistant.\",\n \"tools\": [],\n \"tool_choice\": \"auto\",\n \"max_output_tokens\": \"inf\",\n \"tracing\": null,\n \"truncation\": \"auto\",\n \"prompt\": null,\n \"expires_at\": 0,\n \"audio\": {\n \"input\": {\n \"format\": {\n \"type\": \"audio/pcm\",\n \"rate\": 24000\n },\n \"transcription\": null,\n \"noise_reduction\": null,\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 200,\n \"idle_timeout_ms\": null,\n \"create_response\": true,\n \"interrupt_response\": true\n }\n },\n \"output\": {\n \"format\": {\n \"type\": \"audio/pcm\",\n \"rate\": 24000\n },\n \"voice\": \"alloy\",\n \"speed\": 1.0\n }\n },\n \"include\": null\n }\n}\n" } }, "OpenAI.RealtimeFunctionTool": { "type": "object", "properties": { "type": { "type": "string", "enum": [ "function" ], "description": "The type of the tool, i.e. `function`.", "x-stainless-const": true }, "name": { "type": "string", "description": "The name of the function." }, "description": { "type": "string", "description": "The description of the function, including guidance on when and how\n to call it, and guidance about what to tell the user when calling\n (if anything)." }, "parameters": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeFunctionToolParameters" } ], "description": "Parameters of the function in JSON Schema." 
} }, "title": "Function tool" }, "OpenAI.RealtimeFunctionToolParameters": { "type": "object" }, "OpenAI.RealtimeSessionCreateRequest": { "type": "object", "required": [ "client_secret", "type" ], "properties": { "client_secret": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestClientSecret" } ], "description": "Ephemeral key returned by the API." }, "modalities": { "type": "array", "items": { "type": "string", "enum": [ "text", "audio" ] }, "description": "The set of modalities the model can respond with. To disable audio,\n set this to [\"text\"].", "default": [ "text", "audio" ] }, "instructions": { "type": "string", "description": "The default system instructions (i.e. system message) prepended to model calls. This field allows the client to guide the model on desired responses. The model can be instructed on response content and format, (e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good responses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The instructions are not guaranteed to be followed by the model, but they provide guidance to the model on the desired behavior.\n Note that the server sets default instructions which will be used if this field is not set and are visible in the `session.created` event at the start of the session." }, "voice": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.VoiceIdsShared" } ], "description": "The voice the model uses to respond. Supported built-in voices are `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and `cedar`. Voice cannot be changed during the session once the model has responded with audio at least once." }, "input_audio_format": { "type": "string", "description": "The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`." }, "output_audio_format": { "type": "string", "description": "The format of output audio. 
Options are `pcm16`, `g711_ulaw`, or `g711_alaw`." }, "input_audio_transcription": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestInputAudioTranscription" } ], "description": "Configuration for input audio transcription, defaults to off and can be\n set to `null` to turn off once on. Input audio transcription is not native\n to the model, since the model consumes audio directly. Transcription runs\n asynchronously and should be treated as rough guidance\n rather than the representation understood by the model." }, "speed": { "type": "number", "minimum": 0.25, "maximum": 1.5, "description": "The speed of the model's spoken response. 1.0 is the default speed. 0.25 is\n the minimum speed. 1.5 is the maximum speed. This value can only be changed\n in between model turns, not while a response is in progress.", "default": 1 }, "tracing": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "object", "properties": { "workflow_name": { "type": "string" }, "group_id": { "type": "string" }, "metadata": { "type": "object", "unevaluatedProperties": {} } } } ], "description": "Configuration options for tracing. Set to null to disable tracing. Once\n tracing is enabled for a session, the configuration cannot be modified.\n `auto` will create a trace for the session with default values for the\n workflow name, group id, and metadata.", "title": "Tracing Configuration", "default": "auto" }, "turn_detection": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestTurnDetection" } ], "description": "Configuration for turn detection. Can be set to `null` to turn off. Server\n VAD means that the model will detect the start and end of speech based on\n audio volume and respond at the end of user speech." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestTools" }, "description": "Tools (functions) available to the model." 
}, "tool_choice": { "type": "string", "description": "How the model chooses tools. Options are `auto`, `none`, `required`, or\n specify a function." }, "temperature": { "type": "number", "description": "Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8." }, "max_response_output_tokens": { "anyOf": [ { "type": "integer" }, { "type": "string", "enum": [ "inf" ] } ], "description": "Maximum number of output tokens for a single assistant response,\n inclusive of tool calls. Provide an integer between 1 and 4096 to\n limit output tokens, or `inf` for the maximum available tokens for a\n given model. Defaults to `inf`." }, "truncation": { "$ref": "#/components/schemas/OpenAI.RealtimeTruncation" }, "prompt": { "$ref": "#/components/schemas/OpenAI.Prompt" }, "type": { "type": "string", "enum": [ "realtime" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestUnion" } ], "description": "A new Realtime session configuration, with an ephemeral key. 
Default TTL\nfor keys is one minute.", "x-oaiMeta": { "name": "The session object", "group": "realtime", "example": "{\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"model\": \"gpt-realtime-2025-08-25\",\n \"modalities\": [\"audio\", \"text\"],\n \"instructions\": \"You are a friendly assistant.\",\n \"voice\": \"alloy\",\n \"input_audio_format\": \"pcm16\",\n \"output_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"whisper-1\"\n },\n \"turn_detection\": null,\n \"tools\": [],\n \"tool_choice\": \"none\",\n \"temperature\": 0.7,\n \"speed\": 1.1,\n \"tracing\": \"auto\",\n \"max_response_output_tokens\": 200,\n \"truncation\": \"auto\",\n \"prompt\": null,\n \"client_secret\": {\n \"value\": \"ek_abc123\",\n \"expires_at\": 1234567890\n }\n}\n" } }, "OpenAI.RealtimeSessionCreateRequestClientSecret": { "type": "object", "required": [ "value", "expires_at" ], "properties": { "value": { "type": "string" }, "expires_at": { "type": "integer", "format": "unixtime" } } }, "OpenAI.RealtimeSessionCreateRequestGA": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "realtime" ], "description": "The type of session to create. Always `realtime` for the Realtime API.", "x-stainless-const": true }, "output_modalities": { "type": "array", "items": { "type": "string", "enum": [ "text", "audio" ] }, "description": "The set of modalities the model can respond with. It defaults to `[\"audio\"]`, indicating\n that the model will respond with audio plus a transcript. `[\"text\"]` can be used to make\n the model respond with text only. It is not possible to request both `text` and `audio` at the same time.", "default": [ "audio" ] }, "model": { "type": "string", "description": "The Realtime model used for this session." }, "instructions": { "type": "string", "description": "The default system instructions (i.e. system message) prepended to model calls. 
This field allows the client to guide the model on desired responses. The model can be instructed on response content and format, (e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good responses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion into your voice\", \"laugh frequently\"). The instructions are not guaranteed to be followed by the model, but they provide guidance to the model on the desired behavior.\n Note that the server sets default instructions which will be used if this field is not set and are visible in the `session.created` event at the start of the session." }, "audio": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestGAAudio" } ], "description": "Configuration for input and output audio." }, "include": { "type": "array", "items": { "type": "string", "enum": [ "item.input_audio_transcription.logprobs" ] }, "description": "Additional fields to include in server outputs.\n `item.input_audio_transcription.logprobs`: Include logprobs for input audio transcription." }, "tracing": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestGATracing" }, { "type": "null" } ], "description": "Realtime API can write session traces to the [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once\n tracing is enabled for a session, the configuration cannot be modified.\n `auto` will create a trace for the session with default values for the\n workflow name, group id, and metadata.", "title": "Tracing Configuration", "default": "auto" }, "tools": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeFunctionTool" }, { "$ref": "#/components/schemas/OpenAI.MCPTool" } ] }, "description": "Tools available to the model." 
}, "tool_choice": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceOptions" }, { "$ref": "#/components/schemas/OpenAI.ToolChoiceFunction" }, { "$ref": "#/components/schemas/OpenAI.ToolChoiceMCP" } ], "description": "How the model chooses tools. Provide one of the string modes or force a specific\n function/MCP tool.", "default": "auto" }, "max_output_tokens": { "anyOf": [ { "type": "integer" }, { "type": "string", "enum": [ "inf" ] } ], "description": "Maximum number of output tokens for a single assistant response,\n inclusive of tool calls. Provide an integer between 1 and 4096 to\n limit output tokens, or `inf` for the maximum available tokens for a\n given model. Defaults to `inf`." }, "truncation": { "$ref": "#/components/schemas/OpenAI.RealtimeTruncation" }, "prompt": { "$ref": "#/components/schemas/OpenAI.Prompt" } }, "description": "Realtime session object configuration.", "title": "Realtime session configuration" }, "OpenAI.RealtimeSessionCreateRequestGAAudio": { "type": "object", "properties": { "input": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestGAAudioInput" }, "output": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestGAAudioOutput" } } }, "OpenAI.RealtimeSessionCreateRequestGAAudioInput": { "type": "object", "properties": { "format": { "$ref": "#/components/schemas/OpenAI.RealtimeAudioFormats" }, "transcription": { "$ref": "#/components/schemas/OpenAI.AudioTranscription" }, "noise_reduction": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestGAAudioInputNoiseReduction" }, "turn_detection": { "$ref": "#/components/schemas/OpenAI.RealtimeTurnDetection" } } }, "OpenAI.RealtimeSessionCreateRequestGAAudioInputNoiseReduction": { "type": "object", "properties": { "type": { "$ref": "#/components/schemas/OpenAI.NoiseReductionType" } } }, "OpenAI.RealtimeSessionCreateRequestGAAudioOutput": { "type": "object", "properties": { "format": { "$ref": 
"#/components/schemas/OpenAI.RealtimeAudioFormats" }, "voice": { "$ref": "#/components/schemas/OpenAI.VoiceIdsShared" }, "speed": { "type": "number", "minimum": 0.25, "maximum": 1.5, "default": 1 } } }, "OpenAI.RealtimeSessionCreateRequestGATracing": { "type": "object", "properties": { "workflow_name": { "type": "string" }, "group_id": { "type": "string" }, "metadata": { "type": "object", "unevaluatedProperties": {} } } }, "OpenAI.RealtimeSessionCreateRequestInputAudioTranscription": { "type": "object", "properties": { "model": { "type": "string" } } }, "OpenAI.RealtimeSessionCreateRequestTools": { "type": "object", "properties": { "type": { "type": "string", "enum": [ "function" ], "x-stainless-const": true }, "name": { "type": "string" }, "description": { "type": "string" }, "parameters": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestToolsParameters" } } }, "OpenAI.RealtimeSessionCreateRequestToolsParameters": { "type": "object" }, "OpenAI.RealtimeSessionCreateRequestTurnDetection": { "type": "object", "properties": { "type": { "type": "string" }, "threshold": { "type": "number" }, "prefix_padding_ms": { "type": "integer" }, "silence_duration_ms": { "type": "integer" } } }, "OpenAI.RealtimeSessionCreateRequestUnion": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestUnionType" } }, "discriminator": { "propertyName": "type", "mapping": { "realtime": "#/components/schemas/OpenAI.RealtimeSessionCreateRequest", "transcription": "#/components/schemas/OpenAI.RealtimeTranscriptionSessionCreateRequest" } } }, "OpenAI.RealtimeSessionCreateRequestUnionType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "realtime", "transcription" ] } ] }, "OpenAI.RealtimeSessionCreateResponse": { "type": "object", "required": [ "type" ], "properties": { "id": { "type": "string", "description": "Unique identifier for the session that looks like 
`sess_1234567890abcdef`." }, "object": { "type": "string", "description": "The object type. Always `realtime.session`." }, "expires_at": { "type": "integer", "format": "unixtime", "description": "Expiration timestamp for the session, in seconds since epoch." }, "include": { "type": "array", "items": { "type": "string", "enum": [ "item.input_audio_transcription.logprobs" ] }, "description": "Additional fields to include in server outputs.\n - `item.input_audio_transcription.logprobs`: Include logprobs for input audio transcription." }, "model": { "type": "string", "description": "The Realtime model used for this session." }, "output_modalities": { "type": "array", "items": { "type": "string", "enum": [ "text", "audio" ] }, "description": "The set of modalities the model can respond with. To disable audio,\n set this to [\"text\"]." }, "instructions": { "type": "string", "description": "The default system instructions (i.e. system message) prepended to model\n calls. This field allows the client to guide the model on desired\n responses. The model can be instructed on response content and format,\n (e.g. \"be extremely succinct\", \"act friendly\", \"here are examples of good\n responses\") and on audio behavior (e.g. \"talk quickly\", \"inject emotion\n into your voice\", \"laugh frequently\"). The instructions are not guaranteed\n to be followed by the model, but they provide guidance to the model on the\n desired behavior.\n Note that the server sets default instructions which will be used if this\n field is not set and are visible in the `session.created` event at the\n start of the session." }, "audio": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateResponseAudio" } ], "description": "Configuration for input and output audio for the session." 
}, "tracing": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "object", "properties": { "workflow_name": { "type": "string" }, "group_id": { "type": "string" }, "metadata": { "type": "object", "unevaluatedProperties": {} } } } ], "description": "Configuration options for tracing. Set to null to disable tracing. Once\n tracing is enabled for a session, the configuration cannot be modified.\n `auto` will create a trace for the session with default values for the\n workflow name, group id, and metadata.", "title": "Tracing Configuration", "default": "auto" }, "turn_detection": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateResponseTurnDetection" } ], "description": "Configuration for turn detection. Can be set to `null` to turn off. Server\n VAD means that the model will detect the start and end of speech based on\n audio volume and respond at the end of user speech." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.RealtimeFunctionTool" }, "description": "Tools (functions) available to the model." }, "tool_choice": { "type": "string", "description": "How the model chooses tools. Options are `auto`, `none`, `required`, or\n specify a function." }, "max_output_tokens": { "anyOf": [ { "type": "integer" }, { "type": "string", "enum": [ "inf" ] } ], "description": "Maximum number of output tokens for a single assistant response,\n inclusive of tool calls. Provide an integer between 1 and 4096 to\n limit output tokens, or `inf` for the maximum available tokens for a\n given model. Defaults to `inf`." 
}, "type": { "type": "string", "enum": [ "realtime" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateResponseUnion" } ], "description": "A Realtime session configuration object.", "title": "Realtime session configuration object", "x-oaiMeta": { "name": "The session object", "group": "realtime", "example": "{\n \"id\": \"sess_001\",\n \"object\": \"realtime.session\",\n \"expires_at\": 1742188264,\n \"model\": \"gpt-realtime\",\n \"output_modalities\": [\"audio\"],\n \"instructions\": \"You are a friendly assistant.\",\n \"tools\": [],\n \"tool_choice\": \"none\",\n \"max_output_tokens\": \"inf\",\n \"tracing\": \"auto\",\n \"truncation\": \"auto\",\n \"prompt\": null,\n \"audio\": {\n \"input\": {\n \"format\": {\n \"type\": \"audio/pcm\",\n \"rate\": 24000\n },\n \"transcription\": { \"model\": \"whisper-1\" },\n \"noise_reduction\": null,\n \"turn_detection\": null\n },\n \"output\": {\n \"format\": {\n \"type\": \"audio/pcm\",\n \"rate\": 24000\n },\n \"voice\": \"alloy\",\n \"speed\": 1.0\n }\n }\n}\n" } }, "OpenAI.RealtimeSessionCreateResponseAudio": { "type": "object", "properties": { "input": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateResponseAudioInput" }, "output": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateResponseAudioOutput" } } }, "OpenAI.RealtimeSessionCreateResponseAudioInput": { "type": "object", "properties": { "format": { "$ref": "#/components/schemas/OpenAI.RealtimeAudioFormats" }, "transcription": { "$ref": "#/components/schemas/OpenAI.AudioTranscription" }, "noise_reduction": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateResponseAudioInputNoiseReduction" }, "turn_detection": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateResponseAudioInputTurnDetection" } } }, "OpenAI.RealtimeSessionCreateResponseAudioInputNoiseReduction": { "type": "object", "properties": { "type": { "$ref": "#/components/schemas/OpenAI.NoiseReductionType" } } }, 
"OpenAI.RealtimeSessionCreateResponseAudioInputTurnDetection": { "type": "object", "properties": { "type": { "type": "string" }, "threshold": { "type": "number" }, "prefix_padding_ms": { "type": "integer" }, "silence_duration_ms": { "type": "integer" } } }, "OpenAI.RealtimeSessionCreateResponseAudioOutput": { "type": "object", "properties": { "format": { "$ref": "#/components/schemas/OpenAI.RealtimeAudioFormats" }, "voice": { "$ref": "#/components/schemas/OpenAI.VoiceIdsShared" }, "speed": { "type": "number" } } }, "OpenAI.RealtimeSessionCreateResponseTurnDetection": { "type": "object", "properties": { "type": { "type": "string" }, "threshold": { "type": "number" }, "prefix_padding_ms": { "type": "integer" }, "silence_duration_ms": { "type": "integer" } } }, "OpenAI.RealtimeSessionCreateResponseUnion": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateResponseUnionType" } }, "discriminator": { "propertyName": "type", "mapping": { "realtime": "#/components/schemas/OpenAI.RealtimeSessionCreateResponse", "transcription": "#/components/schemas/OpenAI.RealtimeTranscriptionSessionCreateResponse" } } }, "OpenAI.RealtimeSessionCreateResponseUnionType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "realtime", "transcription" ] } ] }, "OpenAI.RealtimeTranscriptionSessionCreateRequest": { "type": "object", "required": [ "type" ], "properties": { "turn_detection": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeTranscriptionSessionCreateRequestTurnDetection" } ], "description": "Configuration for turn detection. Can be set to `null` to turn off. Server VAD means that the model will detect the start and end of speech based on audio volume and respond at the end of user speech." 
}, "input_audio_noise_reduction": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeTranscriptionSessionCreateRequestInputAudioNoiseReduction" } ], "description": "Configuration for input audio noise reduction. This can be set to `null` to turn off.\n Noise reduction filters audio added to the input audio buffer before it is sent to VAD and the model.\n Filtering the audio can improve VAD and turn detection accuracy (reducing false positives) and model performance by improving perception of the input audio." }, "input_audio_format": { "type": "string", "enum": [ "pcm16", "g711_ulaw", "g711_alaw" ], "description": "The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.\n For `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate,\n single channel (mono), and little-endian byte order.", "default": "pcm16" }, "input_audio_transcription": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.AudioTranscription" } ], "description": "Configuration for input audio transcription. The client can optionally set the language and prompt for transcription, these offer additional guidance to the transcription service." }, "include": { "type": "array", "items": { "type": "string", "enum": [ "item.input_audio_transcription.logprobs" ] }, "description": "The set of items to include in the transcription. 
Current available items are:\n `item.input_audio_transcription.logprobs`" }, "type": { "type": "string", "enum": [ "transcription" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateRequestUnion" } ], "description": "Realtime transcription session object configuration.", "title": "Realtime transcription session configuration" }, "OpenAI.RealtimeTranscriptionSessionCreateRequestInputAudioNoiseReduction": { "type": "object", "properties": { "type": { "$ref": "#/components/schemas/OpenAI.NoiseReductionType" } } }, "OpenAI.RealtimeTranscriptionSessionCreateRequestTurnDetection": { "type": "object", "properties": { "type": { "type": "string", "enum": [ "server_vad" ] }, "threshold": { "type": "number" }, "prefix_padding_ms": { "type": "integer" }, "silence_duration_ms": { "type": "integer" } } }, "OpenAI.RealtimeTranscriptionSessionCreateResponse": { "type": "object", "required": [ "client_secret", "type" ], "properties": { "client_secret": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeTranscriptionSessionCreateResponseClientSecret" } ], "description": "Ephemeral key returned by the API. Only present when the session is\n created on the server via REST API." }, "modalities": { "type": "array", "items": { "type": "string", "enum": [ "text", "audio" ] }, "description": "The set of modalities the model can respond with. To disable audio,\n set this to [\"text\"]." }, "input_audio_format": { "type": "string", "description": "The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`." }, "input_audio_transcription": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.AudioTranscription" } ], "description": "Configuration of the transcription model." }, "turn_detection": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeTranscriptionSessionCreateResponseTurnDetection" } ], "description": "Configuration for turn detection. Can be set to `null` to turn off. 
Server\n VAD means that the model will detect the start and end of speech based on\n audio volume and respond at the end of user speech." }, "type": { "type": "string", "enum": [ "transcription" ], "x-stainless-const": true } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.RealtimeSessionCreateResponseUnion" } ], "description": "A new Realtime transcription session configuration.\nWhen a session is created on the server via REST API, the session object\nalso contains an ephemeral key. Default TTL for keys is 10 minutes. This\nproperty is not present when a session is updated via the WebSocket API.", "x-oaiMeta": { "name": "The transcription session object", "group": "realtime", "example": "{\n \"id\": \"sess_BBwZc7cFV3XizEyKGDCGL\",\n \"object\": \"realtime.transcription_session\",\n \"expires_at\": 1742188264,\n \"modalities\": [\"audio\", \"text\"],\n \"turn_detection\": {\n \"type\": \"server_vad\",\n \"threshold\": 0.5,\n \"prefix_padding_ms\": 300,\n \"silence_duration_ms\": 200\n },\n \"input_audio_format\": \"pcm16\",\n \"input_audio_transcription\": {\n \"model\": \"gpt-4o-transcribe\",\n \"language\": null,\n \"prompt\": \"\"\n },\n \"client_secret\": null\n}\n" } }, "OpenAI.RealtimeTranscriptionSessionCreateResponseClientSecret": { "type": "object", "required": [ "value", "expires_at" ], "properties": { "value": { "type": "string" }, "expires_at": { "type": "integer", "format": "unixtime" } } }, "OpenAI.RealtimeTranscriptionSessionCreateResponseTurnDetection": { "type": "object", "properties": { "type": { "type": "string" }, "threshold": { "type": "number" }, "prefix_padding_ms": { "type": "integer" }, "silence_duration_ms": { "type": "integer" } } }, "OpenAI.RealtimeTruncation": { "anyOf": [ { "type": "string", "enum": [ "auto", "disabled" ] }, { "type": "object", "properties": { "type": { "type": "string", "enum": [ "retention_ratio" ], "x-stainless-const": true }, "retention_ratio": { "type": "number", "minimum": 0, "maximum": 1 }, "token_limits": 
{ "$ref": "#/components/schemas/OpenAI.TokenLimits" } }, "required": [ "type", "retention_ratio" ] } ], "description": "When the number of tokens in a conversation exceeds the model's input token limit, the conversation will be truncated, meaning messages (starting from the oldest) will not be included in the model's context. A 32k context model with 4,096 max output tokens can only include 28,224 tokens in the context before truncation occurs.\nClients can configure truncation behavior to truncate with a lower max token limit, which is an effective way to control token usage and cost.\nTruncation will reduce the number of cached tokens on the next turn (busting the cache), since messages are dropped from the beginning of the context. However, clients can also configure truncation to retain messages up to a fraction of the maximum context size, which will reduce the need for future truncations and thus improve the cache rate.\nTruncation can be disabled entirely, which means the server will never truncate but would instead return an error if the conversation exceeds the model's input token limit.", "title": "Realtime Truncation Controls" }, "OpenAI.RealtimeTurnDetection": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.RealtimeTurnDetectionType" } }, "discriminator": { "propertyName": "type" } }, "OpenAI.RealtimeTurnDetectionType": { "type": "string" }, "OpenAI.Reasoning": { "type": "object", "properties": { "effort": { "$ref": "#/components/schemas/OpenAI.ReasoningEffort" }, "summary": { "anyOf": [ { "type": "string", "enum": [ "auto", "concise", "detailed" ] }, { "type": "null" } ] }, "generate_summary": { "anyOf": [ { "type": "string", "enum": [ "auto", "concise", "detailed" ] }, { "type": "null" } ] } }, "description": "**gpt-5 and o-series models only**\nConfiguration options for\n[reasoning models](https://platform.openai.com/docs/guides/reasoning).", "title": "Reasoning" }, "OpenAI.ReasoningEffort": { 
"anyOf": [ { "type": "string", "enum": [ "none", "minimal", "low", "medium", "high", "xhigh" ] }, { "type": "null" } ], "description": "Constrains effort on reasoning for\n[reasoning models](https://platform.openai.com/docs/guides/reasoning).\nCurrently supported values are `none`, `minimal`, `low`, `medium`, `high`, and `xhigh`. Reducing\nreasoning effort can result in faster responses and fewer tokens used\non reasoning in a response.\n- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool calls are supported for all reasoning values in gpt-5.1.\n- All models before `gpt-5.1` default to `medium` reasoning effort, and do not support `none`.\n- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.\n- `xhigh` is supported for all models after `gpt-5.1-codex-max`." }, "OpenAI.ReasoningTextContent": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "reasoning_text" ], "description": "The type of the reasoning text. Always `reasoning_text`.", "x-stainless-const": true, "default": "reasoning_text" }, "text": { "type": "string", "description": "The reasoning text from the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputContent" } ], "description": "Reasoning text from the model.", "title": "ReasoningTextContent" }, "OpenAI.RefusalContent": { "type": "object", "required": [ "type", "refusal" ], "properties": { "type": { "type": "string", "enum": [ "refusal" ], "description": "The type of the refusal. Always `refusal`.", "x-stainless-const": true, "default": "refusal" }, "refusal": { "type": "string", "description": "The refusal explanation from the model." 
} }, "description": "A refusal from the model.", "title": "Refusal" }, "OpenAI.Response": { "type": "object", "required": [ "id", "object", "created_at", "error", "incomplete_details", "output", "instructions", "parallel_tool_calls", "content_filters" ], "properties": { "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "top_logprobs": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "top_p": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "default": 1 }, "user": { "type": "string", "description": "This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use `prompt_cache_key` instead to maintain caching optimizations.\n A stable identifier for your end-users.\n Used to boost cache hit rates by better bucketing similar requests and to help OpenAI detect and prevent abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).", "deprecated": true }, "safety_identifier": { "type": "string", "description": "A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies.\n The IDs should be a string that uniquely identifies each user. We recommend hashing their username or email address, in order to avoid sending us any identifying information. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers)." }, "prompt_cache_key": { "type": "string", "description": "Used by OpenAI to cache responses for similar requests to optimize your cache hit rates. Replaces the `user` field. [Learn more](https://platform.openai.com/docs/guides/prompt-caching)." 
}, "prompt_cache_retention": { "anyOf": [ { "type": "string", "enum": [ "in-memory", "24h" ] }, { "type": "null" } ] }, "previous_response_id": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "model": { "type": "string", "description": "Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI\n offers a wide range of models with different capabilities, performance\n characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)\n to browse and compare available models." }, "reasoning": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Reasoning" }, { "type": "null" } ] }, "background": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ] }, "max_output_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "max_tool_calls": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "text": { "$ref": "#/components/schemas/OpenAI.ResponseTextParam" }, "tools": { "$ref": "#/components/schemas/OpenAI.ToolsArray" }, "tool_choice": { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" }, "prompt": { "$ref": "#/components/schemas/OpenAI.Prompt" }, "truncation": { "anyOf": [ { "type": "string", "enum": [ "auto", "disabled" ] }, { "type": "null" } ], "default": "disabled" }, "id": { "type": "string", "description": "Unique identifier for this Response." }, "object": { "type": "string", "enum": [ "response" ], "description": "The object type of this resource - always set to `response`.", "x-stainless-const": true }, "status": { "type": "string", "enum": [ "completed", "failed", "in_progress", "cancelled", "queued", "incomplete" ], "description": "The status of the response generation. One of `completed`, `failed`,\n `in_progress`, `cancelled`, `queued`, or `incomplete`." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) of when this Response was created." 
}, "completed_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ], "description": "Unix timestamp (in seconds) of when this Response was completed." }, "error": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseError" }, { "type": "null" } ] }, "incomplete_details": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseIncompleteDetails" }, { "type": "null" } ] }, "output": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.OutputItem" }, "description": "An array of content items generated by the model.\n - The length and order of items in the `output` array is dependent\n on the model's response.\n - Rather than accessing the first item in the `output` array and\n assuming it's an `assistant` message with the content generated by\n the model, you might consider using the `output_text` property where\n supported in SDKs." }, "instructions": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.InputItem" } }, { "type": "null" } ] }, "output_text": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "x-stainless-skip": true }, "usage": { "$ref": "#/components/schemas/OpenAI.ResponseUsage" }, "parallel_tool_calls": { "type": "boolean", "description": "Whether to allow the model to run tool calls in parallel.", "default": true }, "conversation": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ConversationReference" }, { "type": "null" } ] }, "content_filters": { "type": "array", "items": { "$ref": "#/components/schemas/AzureContentFilterForResponsesAPI" }, "description": "The content filter results from RAI." } }, "title": "The response object" }, "OpenAI.ResponseAudioDeltaEvent": { "type": "object", "required": [ "type", "sequence_number", "delta" ], "properties": { "type": { "type": "string", "enum": [ "response.audio.delta" ], "description": "The type of the event. 
Always `response.audio.delta`.", "x-stainless-const": true }, "sequence_number": { "type": "integer", "description": "A sequence number for this chunk of the stream response." }, "delta": { "type": "string", "contentEncoding": "base64", "description": "A chunk of Base64 encoded response audio bytes." } }, "description": "Emitted when there is a partial audio response.", "x-oaiMeta": { "name": "response.audio.delta", "group": "responses", "example": "{\n \"type\": \"response.audio.delta\",\n \"response_id\": \"resp_123\",\n \"delta\": \"base64encoded...\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseAudioTranscriptDeltaEvent": { "type": "object", "required": [ "type", "delta", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.audio.transcript.delta" ], "description": "The type of the event. Always `response.audio.transcript.delta`.", "x-stainless-const": true }, "delta": { "type": "string", "description": "The partial transcript of the audio response." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." } }, "description": "Emitted when there is a partial transcript of audio.", "x-oaiMeta": { "name": "response.audio.transcript.delta", "group": "responses", "example": "{\n \"type\": \"response.audio.transcript.delta\",\n \"response_id\": \"resp_123\",\n \"delta\": \" ... partial transcript ... \",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseCodeInterpreterCallCodeDeltaEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "delta", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.code_interpreter_call_code.delta" ], "description": "The type of the event. Always `response.code_interpreter_call_code.delta`.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item in the response for which the code is being streamed." 
}, "item_id": { "type": "string", "description": "The unique identifier of the code interpreter tool call item." }, "delta": { "type": "string", "description": "The partial code snippet being streamed by the code interpreter." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event, used to order streaming events." } }, "description": "Emitted when a partial code snippet is streamed by the code interpreter.", "x-oaiMeta": { "name": "response.code_interpreter_call_code.delta", "group": "responses", "example": "{\n \"type\": \"response.code_interpreter_call_code.delta\",\n \"output_index\": 0,\n \"item_id\": \"ci_12345\",\n \"delta\": \"print('Hello, world')\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseCodeInterpreterCallInProgressEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.code_interpreter_call.in_progress" ], "description": "The type of the event. Always `response.code_interpreter_call.in_progress`.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item in the response for which the code interpreter call is in progress." }, "item_id": { "type": "string", "description": "The unique identifier of the code interpreter tool call item." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event, used to order streaming events." 
} }, "description": "Emitted when a code interpreter call is in progress.", "x-oaiMeta": { "name": "response.code_interpreter_call.in_progress", "group": "responses", "example": "{\n \"type\": \"response.code_interpreter_call.in_progress\",\n \"output_index\": 0,\n \"item_id\": \"ci_12345\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseCodeInterpreterCallInterpretingEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.code_interpreter_call.interpreting" ], "description": "The type of the event. Always `response.code_interpreter_call.interpreting`.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item in the response for which the code interpreter is interpreting code." }, "item_id": { "type": "string", "description": "The unique identifier of the code interpreter tool call item." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event, used to order streaming events." } }, "description": "Emitted when the code interpreter is actively interpreting the code snippet.", "x-oaiMeta": { "name": "response.code_interpreter_call.interpreting", "group": "responses", "example": "{\n \"type\": \"response.code_interpreter_call.interpreting\",\n \"output_index\": 4,\n \"item_id\": \"ci_12345\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseContentPartAddedEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "part", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.content_part.added" ], "description": "The type of the event. Always `response.content_part.added`.", "x-stainless-const": true }, "item_id": { "type": "string", "description": "The ID of the output item that the content part was added to." 
}, "output_index": { "type": "integer", "description": "The index of the output item that the content part was added to." }, "content_index": { "type": "integer", "description": "The index of the content part that was added." }, "part": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputContent" } ], "description": "The content part that was added." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." } }, "description": "Emitted when a new content part is added.", "x-oaiMeta": { "name": "response.content_part.added", "group": "responses", "example": "{\n \"type\": \"response.content_part.added\",\n \"item_id\": \"msg_123\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"part\": {\n \"type\": \"output_text\",\n \"text\": \"\",\n \"annotations\": []\n },\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseCreatedEvent": { "type": "object", "required": [ "type", "response", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.created" ], "description": "The type of the event. Always `response.created`.", "x-stainless-const": true }, "response": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Response" } ], "description": "The response that was created." }, "sequence_number": { "type": "integer", "description": "The sequence number for this event." 
} }, "description": "An event that is emitted when a response is created.", "x-oaiMeta": { "name": "response.created", "group": "responses", "example": "{\n \"type\": \"response.created\",\n \"response\": {\n \"id\": \"resp_67ccfcdd16748190a91872c75d38539e09e4d4aac714747c\",\n \"object\": \"response\",\n \"created_at\": 1741487325,\n \"status\": \"in_progress\",\n \"completed_at\": null,\n \"error\": null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"model\": \"gpt-4o-2024-08-06\",\n \"output\": [],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": null,\n \"reasoning\": {\n \"effort\": null,\n \"summary\": null\n },\n \"store\": true,\n \"temperature\": 1,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n }\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_p\": 1,\n \"truncation\": \"disabled\",\n \"usage\": null,\n \"user\": null,\n \"metadata\": {}\n },\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseCustomToolCallInputDeltaEvent": { "type": "object", "required": [ "type", "sequence_number", "output_index", "item_id", "delta" ], "properties": { "type": { "type": "string", "enum": [ "response.custom_tool_call_input.delta" ], "description": "The event type identifier.", "x-stainless-const": true }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." }, "output_index": { "type": "integer", "description": "The index of the output this delta applies to." }, "item_id": { "type": "string", "description": "Unique identifier for the API item associated with this event." }, "delta": { "type": "string", "description": "The incremental input data (delta) for the custom tool call." 
} }, "description": "Event representing a delta (partial update) to the input of a custom tool call.", "title": "ResponseCustomToolCallInputDelta", "x-oaiMeta": { "name": "response.custom_tool_call_input.delta", "group": "responses", "example": "{\n \"type\": \"response.custom_tool_call_input.delta\",\n \"output_index\": 0,\n \"item_id\": \"ctc_1234567890abcdef\",\n \"delta\": \"partial input text\"\n}\n" } }, "OpenAI.ResponseError": { "type": "object", "required": [ "code", "message" ], "properties": { "code": { "$ref": "#/components/schemas/OpenAI.ResponseErrorCode" }, "message": { "type": "string", "description": "A human-readable description of the error." } }, "description": "An error object returned when the model fails to generate a Response." }, "OpenAI.ResponseErrorCode": { "type": "string", "enum": [ "server_error", "rate_limit_exceeded", "invalid_prompt", "vector_store_timeout", "invalid_image", "invalid_image_format", "invalid_base64_image", "invalid_image_url", "image_too_large", "image_too_small", "image_parse_error", "image_content_policy_violation", "invalid_image_mode", "image_file_too_large", "unsupported_image_media_type", "empty_image_file", "failed_to_download_image", "image_file_not_found" ], "description": "The error code for the response." }, "OpenAI.ResponseErrorEvent": { "type": "object", "required": [ "type", "code", "message", "param", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "error" ], "description": "The type of the event. Always `error`.", "x-stainless-const": true }, "code": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "message": { "type": "string", "description": "The error message." }, "param": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." 
} }, "description": "Emitted when an error occurs.", "x-oaiMeta": { "name": "error", "group": "responses", "example": "{\n \"type\": \"error\",\n \"code\": \"ERR_SOMETHING\",\n \"message\": \"Something went wrong\",\n \"param\": null,\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseFailedEvent": { "type": "object", "required": [ "type", "sequence_number", "response" ], "properties": { "type": { "type": "string", "enum": [ "response.failed" ], "description": "The type of the event. Always `response.failed`.", "x-stainless-const": true }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." }, "response": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Response" } ], "description": "The response that failed." } }, "description": "An event that is emitted when a response fails.", "x-oaiMeta": { "name": "response.failed", "group": "responses", "example": "{\n \"type\": \"response.failed\",\n \"response\": {\n \"id\": \"resp_123\",\n \"object\": \"response\",\n \"created_at\": 1740855869,\n \"status\": \"failed\",\n \"completed_at\": null,\n \"error\": {\n \"code\": \"server_error\",\n \"message\": \"The model failed to generate a response.\"\n },\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"output\": [],\n \"previous_response_id\": null,\n \"reasoning_effort\": null,\n \"store\": false,\n \"temperature\": 1,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n }\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_p\": 1,\n \"truncation\": \"disabled\",\n \"usage\": null,\n \"user\": null,\n \"metadata\": {}\n }\n}\n" } }, "OpenAI.ResponseFileSearchCallInProgressEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.file_search_call.in_progress" ], "description": "The type of the event. 
Always `response.file_search_call.in_progress`.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item that the file search call is initiated." }, "item_id": { "type": "string", "description": "The ID of the output item that the file search call is initiated." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." } }, "description": "Emitted when a file search call is initiated.", "x-oaiMeta": { "name": "response.file_search_call.in_progress", "group": "responses", "example": "{\n \"type\": \"response.file_search_call.in_progress\",\n \"output_index\": 0,\n \"item_id\": \"fs_123\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseFileSearchCallSearchingEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.file_search_call.searching" ], "description": "The type of the event. Always `response.file_search_call.searching`.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item that the file search call is searching." }, "item_id": { "type": "string", "description": "The ID of the output item that the file search call is initiated." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." } }, "description": "Emitted when a file search is currently searching.", "x-oaiMeta": { "name": "response.file_search_call.searching", "group": "responses", "example": "{\n \"type\": \"response.file_search_call.searching\",\n \"output_index\": 0,\n \"item_id\": \"fs_123\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseFormatJsonObject": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "json_object" ], "description": "The type of response format being defined. 
Always `json_object`.", "x-stainless-const": true } }, "description": "JSON object response format. An older method of generating JSON responses.\nUsing `json_schema` is recommended for models that support it. Note that the\nmodel will not generate JSON without a system or user message instructing it\nto do so.", "title": "JSON object" }, "OpenAI.ResponseFormatJsonSchema": { "type": "object", "required": [ "type", "json_schema" ], "properties": { "type": { "type": "string", "enum": [ "json_schema" ], "description": "The type of response format being defined. Always `json_schema`.", "x-stainless-const": true }, "json_schema": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseFormatJsonSchemaJsonSchema" } ], "description": "Structured Outputs configuration options, including a JSON Schema.", "title": "JSON schema" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.CreateChatCompletionRequestResponseFormat" } ], "description": "JSON Schema response format. Used to generate structured JSON responses.\nLearn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).", "title": "JSON schema" }, "OpenAI.ResponseFormatJsonSchemaJsonSchema": { "type": "object", "required": [ "name" ], "properties": { "description": { "type": "string" }, "name": { "type": "string" }, "schema": { "$ref": "#/components/schemas/OpenAI.ResponseFormatJsonSchemaSchema" }, "strict": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ] } } }, "OpenAI.ResponseFormatJsonSchemaSchema": { "type": "object", "unevaluatedProperties": {}, "description": "The schema for the response format, described as a JSON Schema object.\nLearn how to build JSON schemas [here](https://json-schema.org/).", "title": "JSON schema" }, "OpenAI.ResponseFormatText": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "The type of response format being defined. 
Always `text`.", "x-stainless-const": true } }, "description": "Default response format. Used to generate text responses.", "title": "Text" }, "OpenAI.ResponseFunctionCallArgumentsDeltaEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "sequence_number", "delta" ], "properties": { "type": { "type": "string", "enum": [ "response.function_call_arguments.delta" ], "description": "The type of the event. Always `response.function_call_arguments.delta`.", "x-stainless-const": true }, "item_id": { "type": "string", "description": "The ID of the output item that the function-call arguments delta is added to." }, "output_index": { "type": "integer", "description": "The index of the output item that the function-call arguments delta is added to." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." }, "delta": { "type": "string", "description": "The function-call arguments delta that is added." } }, "description": "Emitted when there is a partial function-call arguments delta.", "x-oaiMeta": { "name": "response.function_call_arguments.delta", "group": "responses", "example": "{\n \"type\": \"response.function_call_arguments.delta\",\n \"item_id\": \"item-abc\",\n \"output_index\": 0,\n \"delta\": \"{ \\\"arg\\\":\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseImageGenCallGeneratingEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.image_generation_call.generating" ], "description": "The type of the event. Always 'response.image_generation_call.generating'.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the image generation item being processed." 
}, "sequence_number": { "type": "integer", "description": "The sequence number of the image generation item being processed." } }, "description": "Emitted when an image generation tool call is actively generating an image (intermediate state).", "title": "ResponseImageGenCallGeneratingEvent", "x-oaiMeta": { "name": "response.image_generation_call.generating", "group": "responses", "example": "{\n \"type\": \"response.image_generation_call.generating\",\n \"output_index\": 0,\n \"item_id\": \"item-123\",\n \"sequence_number\": 0\n}\n" } }, "OpenAI.ResponseImageGenCallInProgressEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.image_generation_call.in_progress" ], "description": "The type of the event. Always 'response.image_generation_call.in_progress'.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the image generation item being processed." }, "sequence_number": { "type": "integer", "description": "The sequence number of the image generation item being processed." } }, "description": "Emitted when an image generation tool call is in progress.", "title": "ResponseImageGenCallInProgressEvent", "x-oaiMeta": { "name": "response.image_generation_call.in_progress", "group": "responses", "example": "{\n \"type\": \"response.image_generation_call.in_progress\",\n \"output_index\": 0,\n \"item_id\": \"item-123\",\n \"sequence_number\": 0\n}\n" } }, "OpenAI.ResponseImageGenCallPartialImageEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "sequence_number", "partial_image_index", "partial_image_b64" ], "properties": { "type": { "type": "string", "enum": [ "response.image_generation_call.partial_image" ], "description": "The type of the event. 
Always 'response.image_generation_call.partial_image'.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the image generation item being processed." }, "sequence_number": { "type": "integer", "description": "The sequence number of the image generation item being processed." }, "partial_image_index": { "type": "integer", "description": "0-based index for the partial image (backend is 1-based, but this is 0-based for the user)." }, "partial_image_b64": { "type": "string", "description": "Base64-encoded partial image data, suitable for rendering as an image." } }, "description": "Emitted when a partial image is available during image generation streaming.", "title": "ResponseImageGenCallPartialImageEvent", "x-oaiMeta": { "name": "response.image_generation_call.partial_image", "group": "responses", "example": "{\n \"type\": \"response.image_generation_call.partial_image\",\n \"output_index\": 0,\n \"item_id\": \"item-123\",\n \"sequence_number\": 0,\n \"partial_image_index\": 0,\n \"partial_image_b64\": \"...\"\n}\n" } }, "OpenAI.ResponseInProgressEvent": { "type": "object", "required": [ "type", "response", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.in_progress" ], "description": "The type of the event. Always `response.in_progress`.", "x-stainless-const": true }, "response": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Response" } ], "description": "The response that is in progress." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." 
} }, "description": "Emitted when the response is in progress.", "x-oaiMeta": { "name": "response.in_progress", "group": "responses", "example": "{\n \"type\": \"response.in_progress\",\n \"response\": {\n \"id\": \"resp_67ccfcdd16748190a91872c75d38539e09e4d4aac714747c\",\n \"object\": \"response\",\n \"created_at\": 1741487325,\n \"status\": \"in_progress\",\n \"completed_at\": null,\n \"error\": null,\n \"incomplete_details\": null,\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"model\": \"gpt-4o-2024-08-06\",\n \"output\": [],\n \"parallel_tool_calls\": true,\n \"previous_response_id\": null,\n \"reasoning\": {\n \"effort\": null,\n \"summary\": null\n },\n \"store\": true,\n \"temperature\": 1,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n }\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_p\": 1,\n \"truncation\": \"disabled\",\n \"usage\": null,\n \"user\": null,\n \"metadata\": {}\n },\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseIncompleteDetails": { "type": "object", "properties": { "reason": { "type": "string", "enum": [ "max_output_tokens", "content_filter" ] } } }, "OpenAI.ResponseIncompleteEvent": { "type": "object", "required": [ "type", "response", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.incomplete" ], "description": "The type of the event. Always `response.incomplete`.", "x-stainless-const": true }, "response": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Response" } ], "description": "The response that was incomplete." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." 
} }, "description": "An event that is emitted when a response finishes as incomplete.", "x-oaiMeta": { "name": "response.incomplete", "group": "responses", "example": "{\n \"type\": \"response.incomplete\",\n \"response\": {\n \"id\": \"resp_123\",\n \"object\": \"response\",\n \"created_at\": 1740855869,\n \"status\": \"incomplete\",\n \"completed_at\": null,\n \"error\": null,\n \"incomplete_details\": {\n \"reason\": \"max_tokens\"\n },\n \"instructions\": null,\n \"max_output_tokens\": null,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n \"output\": [],\n \"previous_response_id\": null,\n \"reasoning_effort\": null,\n \"store\": false,\n \"temperature\": 1,\n \"text\": {\n \"format\": {\n \"type\": \"text\"\n }\n },\n \"tool_choice\": \"auto\",\n \"tools\": [],\n \"top_p\": 1,\n \"truncation\": \"disabled\",\n \"usage\": null,\n \"user\": null,\n \"metadata\": {}\n },\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseItemList": { "type": "object", "required": [ "object", "data", "has_more", "first_id", "last_id" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of object returned, must be `list`.", "x-stainless-const": true }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemResource" }, "description": "A list of items used to generate this response." }, "has_more": { "type": "boolean", "description": "Whether there are more items available." }, "first_id": { "type": "string", "description": "The ID of the first item in the list." }, "last_id": { "type": "string", "description": "The ID of the last item in the list." 
} }, "description": "A list of Response items.", "x-oaiMeta": { "name": "The input item list", "group": "responses", "example": "{\n \"object\": \"list\",\n \"data\": [\n {\n \"id\": \"msg_abc123\",\n \"type\": \"message\",\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"input_text\",\n \"text\": \"Tell me a three sentence bedtime story about a unicorn.\"\n }\n ]\n }\n ],\n \"first_id\": \"msg_abc123\",\n \"last_id\": \"msg_abc123\",\n \"has_more\": false\n}\n" } }, "OpenAI.ResponseLogProb": { "type": "object", "required": [ "token", "logprob" ], "properties": { "token": { "type": "string", "description": "A possible text token." }, "logprob": { "type": "number", "description": "The log probability of this token." }, "top_logprobs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ResponseLogProbTopLogprobs" }, "description": "The log probability of the top 20 most likely tokens." } }, "description": "A logprob is the logarithmic probability that the model assigns to producing\na particular token at a given position in the sequence. Less-negative (higher)\nlogprob values indicate greater model confidence in that token choice." }, "OpenAI.ResponseLogProbTopLogprobs": { "type": "object", "properties": { "token": { "type": "string" }, "logprob": { "type": "number" } } }, "OpenAI.ResponseMCPCallArgumentsDeltaEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "delta", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_call_arguments.delta" ], "description": "The type of the event. Always 'response.mcp_call_arguments.delta'.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the MCP tool call item being processed." 
}, "delta": { "type": "string", "description": "A JSON string containing the partial update to the arguments for the MCP tool call." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." } }, "description": "Emitted when there is a delta (partial update) to the arguments of an MCP tool call.", "title": "ResponseMCPCallArgumentsDeltaEvent", "x-oaiMeta": { "name": "response.mcp_call_arguments.delta", "group": "responses", "example": "{\n \"type\": \"response.mcp_call_arguments.delta\",\n \"output_index\": 0,\n \"item_id\": \"item-abc\",\n \"delta\": \"{\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseMCPCallFailedEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_call.failed" ], "description": "The type of the event. Always 'response.mcp_call.failed'.", "x-stainless-const": true }, "item_id": { "type": "string", "description": "The ID of the MCP tool call item that failed." }, "output_index": { "type": "integer", "description": "The index of the output item that failed." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." } }, "description": "Emitted when an MCP tool call has failed.", "title": "ResponseMCPCallFailedEvent", "x-oaiMeta": { "name": "response.mcp_call.failed", "group": "responses", "example": "{\n \"type\": \"response.mcp_call.failed\",\n \"sequence_number\": 1,\n \"item_id\": \"mcp_682d437d90a88191bf88cd03aae0c3e503937d5f622d7a90\",\n \"output_index\": 0\n}\n" } }, "OpenAI.ResponseMCPCallInProgressEvent": { "type": "object", "required": [ "type", "sequence_number", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_call.in_progress" ], "description": "The type of the event. 
Always 'response.mcp_call.in_progress'.", "x-stainless-const": true }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." }, "output_index": { "type": "integer", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the MCP tool call item being processed." } }, "description": "Emitted when an MCP tool call is in progress.", "title": "ResponseMCPCallInProgressEvent", "x-oaiMeta": { "name": "response.mcp_call.in_progress", "group": "responses", "example": "{\n \"type\": \"response.mcp_call.in_progress\",\n \"sequence_number\": 1,\n \"output_index\": 0,\n \"item_id\": \"mcp_682d437d90a88191bf88cd03aae0c3e503937d5f622d7a90\"\n}\n" } }, "OpenAI.ResponseMCPListToolsFailedEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_list_tools.failed" ], "description": "The type of the event. Always 'response.mcp_list_tools.failed'.", "x-stainless-const": true }, "item_id": { "type": "string", "description": "The ID of the MCP tool call item that failed." }, "output_index": { "type": "integer", "description": "The index of the output item that failed." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." 
} }, "description": "Emitted when the attempt to list available MCP tools has failed.", "title": "ResponseMCPListToolsFailedEvent", "x-oaiMeta": { "name": "response.mcp_list_tools.failed", "group": "responses", "example": "{\n \"type\": \"response.mcp_list_tools.failed\",\n \"sequence_number\": 1,\n \"output_index\": 0,\n \"item_id\": \"mcpl_682d4379df088191886b70f4ec39f90403937d5f622d7a90\"\n}\n" } }, "OpenAI.ResponseMCPListToolsInProgressEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_list_tools.in_progress" ], "description": "The type of the event. Always 'response.mcp_list_tools.in_progress'.", "x-stainless-const": true }, "item_id": { "type": "string", "description": "The ID of the MCP tool call item that is being processed." }, "output_index": { "type": "integer", "description": "The index of the output item that is being processed." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." } }, "description": "Emitted when the system is in the process of retrieving the list of available MCP tools.", "title": "ResponseMCPListToolsInProgressEvent", "x-oaiMeta": { "name": "response.mcp_list_tools.in_progress", "group": "responses", "example": "{\n \"type\": \"response.mcp_list_tools.in_progress\",\n \"sequence_number\": 1,\n \"output_index\": 0,\n \"item_id\": \"mcpl_682d4379df088191886b70f4ec39f90403937d5f622d7a90\"\n}\n" } }, "OpenAI.ResponseModalities": { "anyOf": [ { "type": "array", "items": { "type": "string", "enum": [ "text", "audio" ] } }, { "type": "null" } ], "description": "Output types that you would like the model to generate.\nMost models are capable of generating text, which is the default:\n`[\"text\"]`\nThe `gpt-4o-audio-preview` model can also be used to\n[generate audio](https://platform.openai.com/docs/guides/audio). 
To request that this model generate\nboth text and audio responses, you can use:\n`[\"text\", \"audio\"]`" }, "OpenAI.ResponseOutputItemAddedEvent": { "type": "object", "required": [ "type", "output_index", "sequence_number", "item" ], "properties": { "type": { "type": "string", "enum": [ "response.output_item.added" ], "description": "The type of the event. Always `response.output_item.added`.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item that was added." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." }, "item": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.OutputItem" } ], "description": "The output item that was added." } }, "description": "Emitted when a new output item is added.", "x-oaiMeta": { "name": "response.output_item.added", "group": "responses", "example": "{\n \"type\": \"response.output_item.added\",\n \"output_index\": 0,\n \"item\": {\n \"id\": \"msg_123\",\n \"status\": \"in_progress\",\n \"type\": \"message\",\n \"role\": \"assistant\",\n \"content\": []\n },\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseOutputTextAnnotationAddedEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "annotation_index", "sequence_number", "annotation" ], "properties": { "type": { "type": "string", "enum": [ "response.output_text.annotation.added" ], "description": "The type of the event. Always 'response.output_text.annotation.added'.", "x-stainless-const": true }, "item_id": { "type": "string", "description": "The unique identifier of the item to which the annotation is being added." }, "output_index": { "type": "integer", "description": "The index of the output item in the response's output array." }, "content_index": { "type": "integer", "description": "The index of the content part within the output item." 
}, "annotation_index": { "type": "integer", "description": "The index of the annotation within the content part." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." }, "annotation": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Annotation" } ], "description": "The annotation object being added. (See annotation schema for details.)" } }, "description": "Emitted when an annotation is added to output text content.", "title": "ResponseOutputTextAnnotationAddedEvent", "x-oaiMeta": { "name": "response.output_text.annotation.added", "group": "responses", "example": "{\n \"type\": \"response.output_text.annotation.added\",\n \"item_id\": \"item-abc\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"annotation_index\": 0,\n \"annotation\": {\n \"type\": \"text_annotation\",\n \"text\": \"This is a test annotation\",\n \"start\": 0,\n \"end\": 10\n },\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponsePromptVariables": { "type": "object", "unevaluatedProperties": { "anyOf": [ { "type": "string" }, { "$ref": "#/components/schemas/OpenAI.InputTextContent" }, { "$ref": "#/components/schemas/OpenAI.InputImageContent" }, { "$ref": "#/components/schemas/OpenAI.InputFileContent" } ] }, "description": "Optional map of values to substitute in for variables in your\nprompt. The substitution values can either be strings, or other\nResponse input types like images or files.", "title": "Prompt Variables", "x-oaiExpandable": true, "x-oaiTypeLabel": "map" }, "OpenAI.ResponseQueuedEvent": { "type": "object", "required": [ "type", "response", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.queued" ], "description": "The type of the event. Always 'response.queued'.", "x-stainless-const": true }, "response": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Response" } ], "description": "The full response object that is queued." 
}, "sequence_number": { "type": "integer", "description": "The sequence number for this event." } }, "description": "Emitted when a response is queued and waiting to be processed.", "title": "ResponseQueuedEvent", "x-oaiMeta": { "name": "response.queued", "group": "responses", "example": "{\n \"type\": \"response.queued\",\n \"response\": {\n \"id\": \"res_123\",\n \"status\": \"queued\",\n \"created_at\": \"2021-01-01T00:00:00Z\",\n \"updated_at\": \"2021-01-01T00:00:00Z\"\n },\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseReasoningSummaryPartAddedEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "summary_index", "sequence_number", "part" ], "properties": { "type": { "type": "string", "enum": [ "response.reasoning_summary_part.added" ], "description": "The type of the event. Always `response.reasoning_summary_part.added`.", "x-stainless-const": true }, "item_id": { "type": "string", "description": "The ID of the item this summary part is associated with." }, "output_index": { "type": "integer", "description": "The index of the output item this summary part is associated with." }, "summary_index": { "type": "integer", "description": "The index of the summary part within the reasoning summary." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." }, "part": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseReasoningSummaryPartAddedEventPart" } ], "description": "The summary part that was added." 
} }, "description": "Emitted when a new reasoning summary part is added.", "x-oaiMeta": { "name": "response.reasoning_summary_part.added", "group": "responses", "example": "{\n \"type\": \"response.reasoning_summary_part.added\",\n \"item_id\": \"rs_6806bfca0b2481918a5748308061a2600d3ce51bdffd5476\",\n \"output_index\": 0,\n \"summary_index\": 0,\n \"part\": {\n \"type\": \"summary_text\",\n \"text\": \"\"\n },\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseReasoningSummaryPartAddedEventPart": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "summary_text" ], "x-stainless-const": true }, "text": { "type": "string" } } }, "OpenAI.ResponseReasoningSummaryTextDeltaEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "summary_index", "delta", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.reasoning_summary_text.delta" ], "description": "The type of the event. Always `response.reasoning_summary_text.delta`.", "x-stainless-const": true }, "item_id": { "type": "string", "description": "The ID of the item this summary text delta is associated with." }, "output_index": { "type": "integer", "description": "The index of the output item this summary text delta is associated with." }, "summary_index": { "type": "integer", "description": "The index of the summary part within the reasoning summary." }, "delta": { "type": "string", "description": "The text delta that was added to the summary." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." 
} }, "description": "Emitted when a delta is added to a reasoning summary text.", "x-oaiMeta": { "name": "response.reasoning_summary_text.delta", "group": "responses", "example": "{\n \"type\": \"response.reasoning_summary_text.delta\",\n \"item_id\": \"rs_6806bfca0b2481918a5748308061a2600d3ce51bdffd5476\",\n \"output_index\": 0,\n \"summary_index\": 0,\n \"delta\": \"**Responding to a greeting**\\n\\nThe user just said, \\\"Hello!\\\" So, it seems I need to engage. I'll greet them back and offer help since they're looking to chat. I could say something like, \\\"Hello! How can I assist you today?\\\" That feels friendly and open. They didn't ask a specific question, so this approach will work well for starting a conversation. Let's see where it goes from there!\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseReasoningTextDeltaEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "delta", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.reasoning_text.delta" ], "description": "The type of the event. Always `response.reasoning_text.delta`.", "x-stainless-const": true }, "item_id": { "type": "string", "description": "The ID of the item this reasoning text delta is associated with." }, "output_index": { "type": "integer", "description": "The index of the output item this reasoning text delta is associated with." }, "content_index": { "type": "integer", "description": "The index of the reasoning content part this delta is associated with." }, "delta": { "type": "string", "description": "The text delta that was added to the reasoning content." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." 
} }, "description": "Emitted when a delta is added to a reasoning text.", "x-oaiMeta": { "name": "response.reasoning_text.delta", "group": "responses", "example": "{\n \"type\": \"response.reasoning_text.delta\",\n \"item_id\": \"rs_123\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"delta\": \"The\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseRefusalDeltaEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "delta", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.refusal.delta" ], "description": "The type of the event. Always `response.refusal.delta`.", "x-stainless-const": true }, "item_id": { "type": "string", "description": "The ID of the output item that the refusal text is added to." }, "output_index": { "type": "integer", "description": "The index of the output item that the refusal text is added to." }, "content_index": { "type": "integer", "description": "The index of the content part that the refusal text is added to." }, "delta": { "type": "string", "description": "The refusal text that is added." }, "sequence_number": { "type": "integer", "description": "The sequence number of this event." } }, "description": "Emitted when there is a partial refusal text.", "x-oaiMeta": { "name": "response.refusal.delta", "group": "responses", "example": "{\n \"type\": \"response.refusal.delta\",\n \"item_id\": \"msg_123\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"delta\": \"refusal text so far\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseStreamOptions": { "type": "object", "properties": { "include_obfuscation": { "type": "boolean", "description": "When true, stream obfuscation will be enabled. 
Stream obfuscation adds\n random characters to an `obfuscation` field on streaming delta events to\n normalize payload sizes as a mitigation to certain side-channel attacks.\n These obfuscation fields are included by default, but add a small amount\n of overhead to the data stream. You can set `include_obfuscation` to\n false to optimize for bandwidth if you trust the network links between\n your application and the OpenAI API." } }, "description": "Options for streaming responses. Only set this when you set `stream: true`." }, "OpenAI.ResponseTextDeltaEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "delta", "sequence_number", "logprobs" ], "properties": { "type": { "type": "string", "enum": [ "response.output_text.delta" ], "description": "The type of the event. Always `response.output_text.delta`.", "x-stainless-const": true }, "item_id": { "type": "string", "description": "The ID of the output item that the text delta was added to." }, "output_index": { "type": "integer", "description": "The index of the output item that the text delta was added to." }, "content_index": { "type": "integer", "description": "The index of the content part that the text delta was added to." }, "delta": { "type": "string", "description": "The text delta that was added." }, "sequence_number": { "type": "integer", "description": "The sequence number for this event." }, "logprobs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ResponseLogProb" }, "description": "The log probabilities of the tokens in the delta." 
} }, "description": "Emitted when there is an additional text delta.", "x-oaiMeta": { "name": "response.output_text.delta", "group": "responses", "example": "{\n \"type\": \"response.output_text.delta\",\n \"item_id\": \"msg_123\",\n \"output_index\": 0,\n \"content_index\": 0,\n \"delta\": \"In\",\n \"sequence_number\": 1\n}\n" } }, "OpenAI.ResponseTextParam": { "type": "object", "properties": { "format": { "$ref": "#/components/schemas/OpenAI.TextResponseFormatConfiguration" }, "verbosity": { "$ref": "#/components/schemas/OpenAI.Verbosity" } }, "description": "Configuration options for a text response from the model. Can be plain\ntext or structured JSON data. Learn more:\n- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)\n- [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)" }, "OpenAI.ResponseUsage": { "type": "object", "required": [ "input_tokens", "input_tokens_details", "output_tokens", "output_tokens_details", "total_tokens" ], "properties": { "input_tokens": { "type": "integer", "description": "The number of input tokens." }, "input_tokens_details": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseUsageInputTokensDetails" } ], "description": "A detailed breakdown of the input tokens." }, "output_tokens": { "type": "integer", "description": "The number of output tokens." }, "output_tokens_details": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseUsageOutputTokensDetails" } ], "description": "A detailed breakdown of the output tokens." }, "total_tokens": { "type": "integer", "description": "The total number of tokens used." } }, "description": "Represents token usage details including input tokens, output tokens,\na breakdown of output tokens, and the total tokens used." 
}, "OpenAI.ResponseUsageInputTokensDetails": { "type": "object", "required": [ "cached_tokens" ], "properties": { "cached_tokens": { "type": "integer" } } }, "OpenAI.ResponseUsageOutputTokensDetails": { "type": "object", "required": [ "reasoning_tokens" ], "properties": { "reasoning_tokens": { "type": "integer" } } }, "OpenAI.ResponseWebSearchCallInProgressEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.web_search_call.in_progress" ], "description": "The type of the event. Always `response.web_search_call.in_progress`.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item that the web search call is associated with." }, "item_id": { "type": "string", "description": "Unique ID for the output item associated with the web search call." }, "sequence_number": { "type": "integer", "description": "The sequence number of the web search call being processed." } }, "description": "Note: web_search is not yet available via Azure OpenAI.", "x-oaiMeta": { "name": "response.web_search_call.in_progress", "group": "responses", "example": "{\n \"type\": \"response.web_search_call.in_progress\",\n \"output_index\": 0,\n \"item_id\": \"ws_123\",\n \"sequence_number\": 0\n}\n" } }, "OpenAI.ResponseWebSearchCallSearchingEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "sequence_number" ], "properties": { "type": { "type": "string", "enum": [ "response.web_search_call.searching" ], "description": "The type of the event. Always `response.web_search_call.searching`.", "x-stainless-const": true }, "output_index": { "type": "integer", "description": "The index of the output item that the web search call is associated with." }, "item_id": { "type": "string", "description": "Unique ID for the output item associated with the web search call." 
}, "sequence_number": { "type": "integer", "description": "The sequence number of the web search call being processed." } }, "description": "Note: web_search is not yet available via Azure OpenAI.", "x-oaiMeta": { "name": "response.web_search_call.searching", "group": "responses", "example": "{\n \"type\": \"response.web_search_call.searching\",\n \"output_index\": 0,\n \"item_id\": \"ws_123\",\n \"sequence_number\": 0\n}\n" } }, "OpenAI.RunCompletionUsage": { "type": "object", "required": [ "completion_tokens", "prompt_tokens", "total_tokens" ], "properties": { "completion_tokens": { "type": "integer", "description": "Number of completion tokens used over the course of the run." }, "prompt_tokens": { "type": "integer", "description": "Number of prompt tokens used over the course of the run." }, "total_tokens": { "type": "integer", "description": "Total number of tokens used (prompt + completion)." } }, "description": "Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.)." }, "OpenAI.RunGraderRequest": { "type": "object", "required": [ "grader", "model_sample" ], "properties": { "grader": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.GraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.GraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.GraderPython" }, { "$ref": "#/components/schemas/OpenAI.GraderScoreModel" }, { "$ref": "#/components/schemas/OpenAI.GraderMulti" }, { "$ref": "#/components/schemas/GraderEndpoint" } ], "description": "The grader used for the fine-tuning job." }, "item": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RunGraderRequestItem" } ], "description": "The dataset item provided to the grader. This will be used to populate\n the `item` namespace. See [the guide](https://platform.openai.com/docs/guides/graders) for more details." }, "model_sample": { "type": "string", "description": "The model sample to be evaluated. 
This value will be used to populate\n the `sample` namespace. See [the guide](https://platform.openai.com/docs/guides/graders) for more details.\n The `output_json` variable will be populated if the model sample is a\n valid JSON string." } }, "title": "RunGraderRequest" }, "OpenAI.RunGraderRequestItem": { "type": "object" }, "OpenAI.RunGraderResponse": { "type": "object", "required": [ "reward", "metadata", "sub_rewards", "model_grader_token_usage_per_model" ], "properties": { "reward": { "type": "number" }, "metadata": { "$ref": "#/components/schemas/OpenAI.RunGraderResponseMetadata" }, "sub_rewards": { "type": "object", "unevaluatedProperties": {} }, "model_grader_token_usage_per_model": { "type": "object", "unevaluatedProperties": {} } } }, "OpenAI.RunGraderResponseMetadata": { "type": "object", "required": [ "name", "type", "errors", "execution_time", "scores", "token_usage", "sampled_model_name" ], "properties": { "name": { "type": "string" }, "type": { "type": "string" }, "errors": { "$ref": "#/components/schemas/OpenAI.RunGraderResponseMetadataErrors" }, "execution_time": { "type": "number" }, "scores": { "type": "object", "unevaluatedProperties": {} }, "token_usage": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] }, "sampled_model_name": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } } }, "OpenAI.RunGraderResponseMetadataErrors": { "type": "object", "required": [ "formula_parse_error", "sample_parse_error", "truncated_observation_error", "unresponsive_reward_error", "invalid_variable_error", "other_error", "python_grader_server_error", "python_grader_server_error_type", "python_grader_runtime_error", "python_grader_runtime_error_details", "model_grader_server_error", "model_grader_refusal_error", "model_grader_parse_error", "model_grader_server_error_details" ], "properties": { "formula_parse_error": { "type": "boolean" }, "sample_parse_error": { "type": "boolean" }, "truncated_observation_error": { "type": "boolean" }, 
"unresponsive_reward_error": { "type": "boolean" }, "invalid_variable_error": { "type": "boolean" }, "other_error": { "type": "boolean" }, "python_grader_server_error": { "type": "boolean" }, "python_grader_server_error_type": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "python_grader_runtime_error": { "type": "boolean" }, "python_grader_runtime_error_details": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "model_grader_server_error": { "type": "boolean" }, "model_grader_refusal_error": { "type": "boolean" }, "model_grader_parse_error": { "type": "boolean" }, "model_grader_server_error_details": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } } }, "OpenAI.RunObject": { "type": "object", "required": [ "id", "object", "created_at", "thread_id", "assistant_id", "status", "required_action", "last_error", "expires_at", "started_at", "cancelled_at", "failed_at", "completed_at", "incomplete_details", "model", "instructions", "tools", "metadata", "usage", "max_prompt_tokens", "max_completion_tokens", "truncation_strategy", "tool_choice", "parallel_tool_calls", "response_format" ], "properties": { "id": { "type": "string", "description": "The identifier, which can be referenced in API endpoints." }, "object": { "type": "string", "enum": [ "thread.run" ], "description": "The object type, which is always `thread.run`.", "x-stainless-const": true }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the run was created." }, "thread_id": { "type": "string", "description": "The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was executed on as a part of this run." }, "assistant_id": { "type": "string", "description": "The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for execution of this run." 
}, "status": { "$ref": "#/components/schemas/OpenAI.RunStatus" }, "required_action": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.RunObjectRequiredAction" }, { "type": "null" } ], "description": "Details on the action required to continue the run. Will be `null` if no action is required." }, "last_error": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.RunObjectLastError" }, { "type": "null" } ], "description": "The last error associated with this run. Will be `null` if there are no errors." }, "expires_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ], "description": "The Unix timestamp (in seconds) for when the run will expire." }, "started_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ], "description": "The Unix timestamp (in seconds) for when the run was started." }, "cancelled_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ], "description": "The Unix timestamp (in seconds) for when the run was cancelled." }, "failed_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ], "description": "The Unix timestamp (in seconds) for when the run failed." }, "completed_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ], "description": "The Unix timestamp (in seconds) for when the run was completed." }, "incomplete_details": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.RunObjectIncompleteDetails" }, { "type": "null" } ], "description": "Details on why the run is incomplete. Will be `null` if the run is not incomplete." 
}, "model": { "type": "string", "description": "The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run." }, "instructions": { "type": "string", "description": "The instructions that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.AssistantTool" }, "maxItems": 20, "description": "The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run.", "default": [] }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "usage": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.RunCompletionUsage" }, { "type": "null" } ] }, "temperature": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "description": "The sampling temperature used for this run. If not set, defaults to 1." }, "top_p": { "anyOf": [ { "type": "number" }, { "type": "null" } ], "description": "The nucleus sampling value used for this run. If not set, defaults to 1." }, "max_prompt_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 256, "description": "The maximum number of prompt tokens specified to have been used over the course of the run." }, "max_completion_tokens": { "anyOf": [ { "type": "integer" }, { "type": "null" } ], "minimum": 256, "description": "The maximum number of completion tokens specified to have been used over the course of the run." 
}, "truncation_strategy": { "$ref": "#/components/schemas/OpenAI.TruncationObject" }, "tool_choice": { "$ref": "#/components/schemas/OpenAI.AssistantsApiToolChoiceOption" }, "parallel_tool_calls": { "$ref": "#/components/schemas/OpenAI.ParallelToolCalls" }, "response_format": { "$ref": "#/components/schemas/OpenAI.AssistantsApiResponseFormatOption" } }, "description": "Represents an execution run on a [thread](https://platform.openai.com/docs/api-reference/threads).", "title": "A run on a thread", "x-oaiMeta": { "name": "The run object", "beta": true, "example": "{\n \"id\": \"run_abc123\",\n \"object\": \"thread.run\",\n \"created_at\": 1698107661,\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"status\": \"completed\",\n \"started_at\": 1699073476,\n \"expires_at\": null,\n \"cancelled_at\": null,\n \"failed_at\": null,\n \"completed_at\": 1699073498,\n \"last_error\": null,\n \"model\": \"gpt-4o\",\n \"instructions\": null,\n \"tools\": [{\"type\": \"file_search\"}, {\"type\": \"code_interpreter\"}],\n \"metadata\": {},\n \"incomplete_details\": null,\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n },\n \"temperature\": 1.0,\n \"top_p\": 1.0,\n \"max_prompt_tokens\": 1000,\n \"max_completion_tokens\": 1000,\n \"truncation_strategy\": {\n \"type\": \"auto\",\n \"last_messages\": null\n },\n \"response_format\": \"auto\",\n \"tool_choice\": \"auto\",\n \"parallel_tool_calls\": true\n}\n" } }, "OpenAI.RunObjectIncompleteDetails": { "type": "object", "properties": { "reason": { "type": "string", "enum": [ "max_completion_tokens", "max_prompt_tokens" ] } } }, "OpenAI.RunObjectLastError": { "type": "object", "required": [ "code", "message" ], "properties": { "code": { "type": "string", "enum": [ "server_error", "rate_limit_exceeded", "invalid_prompt" ] }, "message": { "type": "string" } } }, "OpenAI.RunObjectRequiredAction": { "type": "object", "required": [ "type", "submit_tool_outputs" ], 
"properties": { "type": { "type": "string", "enum": [ "submit_tool_outputs" ], "x-stainless-const": true }, "submit_tool_outputs": { "$ref": "#/components/schemas/OpenAI.RunObjectRequiredActionSubmitToolOutputs" } } }, "OpenAI.RunObjectRequiredActionSubmitToolOutputs": { "type": "object", "required": [ "tool_calls" ], "properties": { "tool_calls": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.RunToolCallObject" } } } }, "OpenAI.RunStatus": { "type": "string", "enum": [ "queued", "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", "incomplete", "expired" ], "description": "The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, `incomplete`, or `expired`." }, "OpenAI.RunStepCompletionUsage": { "type": "object", "required": [ "completion_tokens", "prompt_tokens", "total_tokens" ], "properties": { "completion_tokens": { "type": "integer", "description": "Number of completion tokens used over the course of the run step." }, "prompt_tokens": { "type": "integer", "description": "Number of prompt tokens used over the course of the run step." }, "total_tokens": { "type": "integer", "description": "Total number of tokens used (prompt + completion)." } }, "description": "Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`." 
}, "OpenAI.RunStepDetailsMessageCreationObject": { "type": "object", "required": [ "type", "message_creation" ], "properties": { "type": { "type": "string", "enum": [ "message_creation" ], "description": "Always `message_creation`.", "x-stainless-const": true }, "message_creation": { "$ref": "#/components/schemas/OpenAI.RunStepDetailsMessageCreationObjectMessageCreation" } }, "description": "Details of the message creation by the run step.", "title": "Message creation" }, "OpenAI.RunStepDetailsMessageCreationObjectMessageCreation": { "type": "object", "required": [ "message_id" ], "properties": { "message_id": { "type": "string" } } }, "OpenAI.RunStepDetailsToolCall": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCallType" } }, "discriminator": { "propertyName": "type", "mapping": { "code_interpreter": "#/components/schemas/OpenAI.RunStepDetailsToolCallsCodeObject", "file_search": "#/components/schemas/OpenAI.RunStepDetailsToolCallsFileSearchObject", "function": "#/components/schemas/OpenAI.RunStepDetailsToolCallsFunctionObject" } } }, "OpenAI.RunStepDetailsToolCallType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "code_interpreter", "file_search", "function" ] } ] }, "OpenAI.RunStepDetailsToolCallsCodeObject": { "type": "object", "required": [ "id", "type", "code_interpreter" ], "properties": { "id": { "type": "string", "description": "The ID of the tool call." }, "type": { "type": "string", "enum": [ "code_interpreter" ], "description": "The type of tool call. This is always going to be `code_interpreter` for this type of tool call.", "x-stainless-const": true }, "code_interpreter": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCallsCodeObjectCodeInterpreter" } ], "description": "The Code Interpreter tool call definition." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCall" } ], "description": "Details of the Code Interpreter tool call the run step was involved in.", "title": "Code Interpreter tool call" }, "OpenAI.RunStepDetailsToolCallsCodeObjectCodeInterpreter": { "type": "object", "required": [ "input", "outputs" ], "properties": { "input": { "type": "string" }, "outputs": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCallsCodeOutputLogsObject" }, { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCallsCodeOutputImageObject" } ] } } } }, "OpenAI.RunStepDetailsToolCallsCodeOutputImageObject": { "type": "object", "required": [ "type", "image" ], "properties": { "type": { "type": "string", "enum": [ "image" ], "description": "Always `image`.", "x-stainless-const": true }, "image": { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCallsCodeOutputImageObjectImage" } }, "title": "Code Interpreter image output", "x-stainless-naming": { "java": { "type_name": "ImageOutput" }, "kotlin": { "type_name": "ImageOutput" } } }, "OpenAI.RunStepDetailsToolCallsCodeOutputImageObjectImage": { "type": "object", "required": [ "file_id" ], "properties": { "file_id": { "type": "string" } } }, "OpenAI.RunStepDetailsToolCallsCodeOutputLogsObject": { "type": "object", "required": [ "type", "logs" ], "properties": { "type": { "type": "string", "enum": [ "logs" ], "description": "Always `logs`.", "x-stainless-const": true }, "logs": { "type": "string", "description": "The text output from the Code Interpreter tool call." 
} }, "description": "Text output from the Code Interpreter tool call as part of a run step.", "title": "Code Interpreter log output", "x-stainless-naming": { "java": { "type_name": "LogsOutput" }, "kotlin": { "type_name": "LogsOutput" } } }, "OpenAI.RunStepDetailsToolCallsFileSearchObject": { "type": "object", "required": [ "id", "type", "file_search" ], "properties": { "id": { "type": "string", "description": "The ID of the tool call object." }, "type": { "type": "string", "enum": [ "file_search" ], "description": "The type of tool call. This is always going to be `file_search` for this type of tool call.", "x-stainless-const": true }, "file_search": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCallsFileSearchObjectFileSearch" } ], "description": "For now, this is always going to be an empty object.", "x-oaiTypeLabel": "map" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCall" } ], "title": "File search tool call" }, "OpenAI.RunStepDetailsToolCallsFileSearchObjectFileSearch": { "type": "object", "properties": { "ranking_options": { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCallsFileSearchRankingOptionsObject" }, "results": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCallsFileSearchResultObject" } } } }, "OpenAI.RunStepDetailsToolCallsFileSearchRankingOptionsObject": { "type": "object", "required": [ "ranker", "score_threshold" ], "properties": { "ranker": { "$ref": "#/components/schemas/OpenAI.FileSearchRanker" }, "score_threshold": { "type": "number", "minimum": 0, "maximum": 1, "description": "The score threshold for the file search. All values must be a floating point number between 0 and 1." 
} }, "description": "The ranking options for the file search.", "title": "File search tool call ranking options" }, "OpenAI.RunStepDetailsToolCallsFileSearchResultObject": { "type": "object", "required": [ "file_id", "file_name", "score" ], "properties": { "file_id": { "type": "string", "description": "The ID of the file that result was found in." }, "file_name": { "type": "string", "description": "The name of the file that result was found in." }, "score": { "type": "number", "minimum": 0, "maximum": 1, "description": "The score of the result. All values must be a floating point number between 0 and 1." }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCallsFileSearchResultObjectContent" }, "description": "The content of the result that was found. The content is only included if requested via the include query parameter." } }, "description": "A result instance of the file search.", "title": "File search tool call result", "x-oaiTypeLabel": "map" }, "OpenAI.RunStepDetailsToolCallsFileSearchResultObjectContent": { "type": "object", "properties": { "type": { "type": "string", "enum": [ "text" ], "x-stainless-const": true }, "text": { "type": "string" } } }, "OpenAI.RunStepDetailsToolCallsFunctionObject": { "type": "object", "required": [ "id", "type", "function" ], "properties": { "id": { "type": "string", "description": "The ID of the tool call object." }, "type": { "type": "string", "enum": [ "function" ], "description": "The type of tool call. This is always going to be `function` for this type of tool call.", "x-stainless-const": true }, "function": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCallsFunctionObjectFunction" } ], "description": "The definition of the function that was called." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCall" } ], "title": "Function tool call" }, "OpenAI.RunStepDetailsToolCallsFunctionObjectFunction": { "type": "object", "required": [ "name", "arguments", "output" ], "properties": { "name": { "type": "string" }, "arguments": { "type": "string" }, "output": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } } }, "OpenAI.RunStepDetailsToolCallsObject": { "type": "object", "required": [ "type", "tool_calls" ], "properties": { "type": { "type": "string", "enum": [ "tool_calls" ], "description": "Always `tool_calls`.", "x-stainless-const": true }, "tool_calls": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCall" }, "description": "An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`." } }, "description": "Details of the tool call.", "title": "Tool calls" }, "OpenAI.RunStepObject": { "type": "object", "required": [ "id", "object", "created_at", "assistant_id", "thread_id", "run_id", "type", "status", "step_details", "last_error", "expired_at", "cancelled_at", "failed_at", "completed_at", "metadata", "usage" ], "properties": { "id": { "type": "string", "description": "The identifier of the run step, which can be referenced in API endpoints." }, "object": { "type": "string", "enum": [ "thread.run.step" ], "description": "The object type, which is always `thread.run.step`.", "x-stainless-const": true }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the run step was created." }, "assistant_id": { "type": "string", "description": "The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) associated with the run step." 
}, "thread_id": { "type": "string", "description": "The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was run." }, "run_id": { "type": "string", "description": "The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that this run step is a part of." }, "type": { "type": "string", "enum": [ "message_creation", "tool_calls" ], "description": "The type of run step, which can be either `message_creation` or `tool_calls`." }, "status": { "type": "string", "enum": [ "in_progress", "cancelled", "failed", "completed", "expired" ], "description": "The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`." }, "step_details": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.RunStepDetailsMessageCreationObject" }, { "$ref": "#/components/schemas/OpenAI.RunStepDetailsToolCallsObject" } ], "description": "The details of the run step." }, "last_error": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.RunStepObjectLastError" }, { "type": "null" } ] }, "expired_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ] }, "cancelled_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ] }, "failed_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ] }, "completed_at": { "anyOf": [ { "type": "integer", "format": "unixtime" }, { "type": "null" } ] }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] }, "usage": { "$ref": "#/components/schemas/OpenAI.RunStepCompletionUsage" } }, "description": "Represents a step in execution of a run.", "title": "Run steps", "x-oaiMeta": { "name": "The run step object", "beta": true, "example": "{\n \"id\": \"step_abc123\",\n 
\"object\": \"thread.run.step\",\n \"created_at\": 1699063291,\n \"run_id\": \"run_abc123\",\n \"assistant_id\": \"asst_abc123\",\n \"thread_id\": \"thread_abc123\",\n \"type\": \"message_creation\",\n \"status\": \"completed\",\n \"cancelled_at\": null,\n \"completed_at\": 1699063291,\n \"expired_at\": null,\n \"failed_at\": null,\n \"last_error\": null,\n \"step_details\": {\n \"type\": \"message_creation\",\n \"message_creation\": {\n \"message_id\": \"msg_abc123\"\n }\n },\n \"usage\": {\n \"prompt_tokens\": 123,\n \"completion_tokens\": 456,\n \"total_tokens\": 579\n }\n}\n" } }, "OpenAI.RunStepObjectLastError": { "type": "object", "required": [ "code", "message" ], "properties": { "code": { "type": "string", "enum": [ "server_error", "rate_limit_exceeded" ] }, "message": { "type": "string" } } }, "OpenAI.RunToolCallObject": { "type": "object", "required": [ "id", "type", "function" ], "properties": { "id": { "type": "string", "description": "The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) endpoint." }, "type": { "type": "string", "enum": [ "function" ], "description": "The type of tool call the output is required for. For now, this is always `function`.", "x-stainless-const": true }, "function": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RunToolCallObjectFunction" } ], "description": "The function definition." } }, "description": "Tool call objects" }, "OpenAI.RunToolCallObjectFunction": { "type": "object", "required": [ "name", "arguments" ], "properties": { "name": { "type": "string" }, "arguments": { "type": "string" } } }, "OpenAI.Screenshot": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "screenshot" ], "description": "Specifies the event type. 
For a screenshot action, this property is\n always set to `screenshot`.", "x-stainless-const": true, "default": "screenshot" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A screenshot action.", "title": "Screenshot" }, "OpenAI.Scroll": { "type": "object", "required": [ "type", "x", "y", "scroll_x", "scroll_y" ], "properties": { "type": { "type": "string", "enum": [ "scroll" ], "description": "Specifies the event type. For a scroll action, this property is\n always set to `scroll`.", "x-stainless-const": true, "default": "scroll" }, "x": { "type": "integer", "description": "The x-coordinate where the scroll occurred." }, "y": { "type": "integer", "description": "The y-coordinate where the scroll occurred." }, "scroll_x": { "type": "integer", "description": "The horizontal scroll distance." }, "scroll_y": { "type": "integer", "description": "The vertical scroll distance." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A scroll action.", "title": "Scroll" }, "OpenAI.SearchContextSize": { "type": "string", "enum": [ "low", "medium", "high" ] }, "OpenAI.SpecificApplyPatchParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "apply_patch" ], "description": "The tool to call. Always `apply_patch`.", "x-stainless-const": true, "default": "apply_patch" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Forces the model to call the apply_patch tool when executing a tool call.", "title": "Specific apply patch tool choice" }, "OpenAI.SpecificFunctionShellParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "shell" ], "description": "The tool to call. 
Always `shell`.", "x-stainless-const": true, "default": "shell" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Forces the model to call the shell tool when a tool call is required.", "title": "Specific shell tool choice" }, "OpenAI.StaticChunkingStrategy": { "type": "object", "required": [ "max_chunk_size_tokens", "chunk_overlap_tokens" ], "properties": { "max_chunk_size_tokens": { "type": "integer", "minimum": 100, "maximum": 4096, "description": "The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`." }, "chunk_overlap_tokens": { "type": "integer", "description": "The number of tokens that overlap between chunks. The default value is `400`.\n Note that the overlap must not exceed half of `max_chunk_size_tokens`." } } }, "OpenAI.StaticChunkingStrategyRequestParam": { "type": "object", "required": [ "type", "static" ], "properties": { "type": { "type": "string", "enum": [ "static" ], "description": "Always `static`.", "x-stainless-const": true }, "static": { "$ref": "#/components/schemas/OpenAI.StaticChunkingStrategy" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyRequestParam" } ], "description": "Customize your own chunking strategy by setting chunk size and chunk overlap.", "title": "Static Chunking Strategy" }, "OpenAI.StaticChunkingStrategyResponseParam": { "type": "object", "required": [ "type", "static" ], "properties": { "type": { "type": "string", "enum": [ "static" ], "description": "Always `static`.", "x-stainless-const": true }, "static": { "$ref": "#/components/schemas/OpenAI.StaticChunkingStrategy" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyResponse" } ], "title": "Static Chunking Strategy" }, "OpenAI.StopConfiguration": { "anyOf": [ { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, { "type": "array", "items": { "type": "string" } }, { "type": "null" } ], "description": 
"Not supported with latest reasoning models `o3` and `o4-mini`.\nUp to 4 sequences where the API will stop generating further tokens. The\nreturned text will not contain the stop sequence." }, "OpenAI.SubmitToolOutputsRunRequest": { "type": "object", "required": [ "tool_outputs" ], "properties": { "tool_outputs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.SubmitToolOutputsRunRequestToolOutputs" }, "description": "A list of tools for which the outputs are being submitted." }, "stream": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ] } } }, "OpenAI.SubmitToolOutputsRunRequestToolOutputs": { "type": "object", "properties": { "tool_call_id": { "type": "string" }, "output": { "type": "string" } } }, "OpenAI.Summary": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "summary_text" ], "description": "The type of the object. Always `summary_text`.", "x-stainless-const": true, "default": "summary_text" }, "text": { "type": "string", "description": "A summary of the reasoning output from the model so far." } }, "description": "A summary text from the model.", "title": "Summary text" }, "OpenAI.SummaryTextContent": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "summary_text" ], "description": "The type of the object. Always `summary_text`.", "x-stainless-const": true, "default": "summary_text" }, "text": { "type": "string", "description": "A summary of the reasoning output from the model so far." 
} }, "description": "A summary text from the model.", "title": "Summary text" }, "OpenAI.TextAnnotation": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.TextAnnotationType" } }, "discriminator": { "propertyName": "type", "mapping": { "file_citation": "#/components/schemas/OpenAI.MessageContentTextAnnotationsFileCitationObject", "file_path": "#/components/schemas/OpenAI.MessageContentTextAnnotationsFilePathObject" } } }, "OpenAI.TextAnnotationType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "file_citation", "file_path" ] } ] }, "OpenAI.TextContent": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "x-stainless-const": true, "default": "text" }, "text": { "type": "string" } }, "description": "A text content.", "title": "Text Content" }, "OpenAI.TextResponseFormatConfiguration": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.TextResponseFormatConfigurationType" } }, "discriminator": { "propertyName": "type", "mapping": { "json_schema": "#/components/schemas/OpenAI.TextResponseFormatJsonSchema", "text": "#/components/schemas/OpenAI.TextResponseFormatConfigurationResponseFormatText", "json_object": "#/components/schemas/OpenAI.TextResponseFormatConfigurationResponseFormatJsonObject" } }, "description": "An object specifying the format that the model must output.\nConfiguring `{ \"type\": \"json_schema\" }` enables Structured Outputs,\nwhich ensures the model will match your supplied JSON schema. Learn more in the\n[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).\nThe default format is `{ \"type\": \"text\" }` with no additional options.\n**Not recommended for gpt-4o and newer models:**\nSetting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\nensures the message the model generates is valid JSON. 
Using `json_schema`\nis preferred for models that support it." }, "OpenAI.TextResponseFormatConfigurationResponseFormatJsonObject": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "json_object" ], "description": "The type of response format being defined. Always `json_object`.", "x-stainless-const": true } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.TextResponseFormatConfiguration" } ], "description": "JSON object response format. An older method of generating JSON responses.\nUsing `json_schema` is recommended for models that support it. Note that the\nmodel will not generate JSON without a system or user message instructing it\nto do so.", "title": "JSON object" }, "OpenAI.TextResponseFormatConfigurationResponseFormatText": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "The type of response format being defined. Always `text`.", "x-stainless-const": true } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.TextResponseFormatConfiguration" } ], "description": "Default response format. Used to generate text responses.", "title": "Text" }, "OpenAI.TextResponseFormatConfigurationType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "text", "json_schema", "json_object" ] } ] }, "OpenAI.TextResponseFormatJsonSchema": { "type": "object", "required": [ "type", "name", "schema" ], "properties": { "type": { "type": "string", "enum": [ "json_schema" ], "description": "The type of response format being defined. Always `json_schema`.", "x-stainless-const": true }, "description": { "type": "string", "description": "A description of what the response format is for, used by the model to\n determine how to respond in the format." }, "name": { "type": "string", "description": "The name of the response format. Must be a-z, A-Z, 0-9, or contain\n underscores and dashes, with a maximum length of 64." 
}, "schema": { "$ref": "#/components/schemas/OpenAI.ResponseFormatJsonSchemaSchema" }, "strict": { "anyOf": [ { "type": "boolean" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.TextResponseFormatConfiguration" } ], "description": "JSON Schema response format. Used to generate structured JSON responses.\nLearn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).", "title": "JSON schema" }, "OpenAI.ThreadObject": { "type": "object", "required": [ "id", "object", "created_at", "tool_resources", "metadata" ], "properties": { "id": { "type": "string", "description": "The identifier, which can be referenced in API endpoints." }, "object": { "type": "string", "enum": [ "thread" ], "description": "The object type, which is always `thread`.", "x-stainless-const": true }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the thread was created." }, "tool_resources": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ThreadObjectToolResources" }, { "type": "null" } ] }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } }, "description": "Represents a thread that contains [messages](https://platform.openai.com/docs/api-reference/messages).", "title": "Thread", "x-oaiMeta": { "name": "The thread object", "beta": true, "example": "{\n \"id\": \"thread_abc123\",\n \"object\": \"thread\",\n \"created_at\": 1698107661,\n \"metadata\": {}\n}\n" } }, "OpenAI.ThreadObjectToolResources": { "type": "object", "properties": { "code_interpreter": { "$ref": "#/components/schemas/OpenAI.ThreadObjectToolResourcesCodeInterpreter" }, "file_search": { "$ref": "#/components/schemas/OpenAI.ThreadObjectToolResourcesFileSearch" } } }, "OpenAI.ThreadObjectToolResourcesCodeInterpreter": { "type": "object", "properties": { "file_ids": { "type": "array", "items": { "type": "string" }, "maxItems": 20 } } }, 
"OpenAI.ThreadObjectToolResourcesFileSearch": { "type": "object", "properties": { "vector_store_ids": { "type": "array", "items": { "type": "string" }, "maxItems": 1 } } }, "OpenAI.TokenLimits": { "type": "object", "properties": { "post_instructions": { "type": "integer", "minimum": 0 } } }, "OpenAI.Tool": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ToolType" } }, "discriminator": { "propertyName": "type", "mapping": { "code_interpreter": "#/components/schemas/OpenAI.CodeInterpreterTool", "function": "#/components/schemas/OpenAI.FunctionTool", "file_search": "#/components/schemas/OpenAI.FileSearchTool", "computer_use_preview": "#/components/schemas/OpenAI.ComputerUsePreviewTool", "web_search": "#/components/schemas/OpenAI.WebSearchTool", "mcp": "#/components/schemas/OpenAI.MCPTool", "image_generation": "#/components/schemas/OpenAI.ImageGenTool", "local_shell": "#/components/schemas/OpenAI.LocalShellToolParam", "shell": "#/components/schemas/OpenAI.FunctionShellToolParam", "custom": "#/components/schemas/OpenAI.CustomToolParam", "web_search_preview": "#/components/schemas/OpenAI.WebSearchPreviewTool", "apply_patch": "#/components/schemas/OpenAI.ApplyPatchToolParam" } }, "description": "A tool that can be used to generate a response." }, "OpenAI.ToolChoiceAllowed": { "type": "object", "required": [ "type", "mode", "tools" ], "properties": { "type": { "type": "string", "enum": [ "allowed_tools" ], "description": "Allowed tool configuration type. Always `allowed_tools`.", "x-stainless-const": true }, "mode": { "type": "string", "enum": [ "auto", "required" ], "description": "Constrains the tools available to the model to a pre-defined set.\n `auto` allows the model to pick from among the allowed tools and generate a\n message.\n `required` requires the model to call one or more of the allowed tools." 
}, "tools": { "type": "array", "items": { "type": "object", "unevaluatedProperties": {} }, "description": "A list of tool definitions that the model should be allowed to call.\n For the Responses API, the list of tool definitions might look like:\n ```json\n [\n { \"type\": \"function\", \"name\": \"get_weather\" },\n { \"type\": \"mcp\", \"server_label\": \"deepwiki\" },\n { \"type\": \"image_generation\" }\n ]\n ```" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Constrains the tools available to the model to a pre-defined set.", "title": "Allowed tools" }, "OpenAI.ToolChoiceCodeInterpreter": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "code_interpreter" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Indicates that the model should use a built-in tool to generate a response.\n[Learn more about built-in tools](https://platform.openai.com/docs/guides/tools)." }, "OpenAI.ToolChoiceComputerUsePreview": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "computer_use_preview" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Indicates that the model should use a built-in tool to generate a response.\n[Learn more about built-in tools](https://platform.openai.com/docs/guides/tools)." }, "OpenAI.ToolChoiceCustom": { "type": "object", "required": [ "type", "name" ], "properties": { "type": { "type": "string", "enum": [ "custom" ], "description": "For custom tool calling, the type is always `custom`.", "x-stainless-const": true }, "name": { "type": "string", "description": "The name of the custom tool to call." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Use this option to force the model to call a specific custom tool.", "title": "Custom tool" }, "OpenAI.ToolChoiceFileSearch": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "file_search" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Indicates that the model should use a built-in tool to generate a response.\n[Learn more about built-in tools](https://platform.openai.com/docs/guides/tools)." }, "OpenAI.ToolChoiceFunction": { "type": "object", "required": [ "type", "name" ], "properties": { "type": { "type": "string", "enum": [ "function" ], "description": "For function calling, the type is always `function`.", "x-stainless-const": true }, "name": { "type": "string", "description": "The name of the function to call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Use this option to force the model to call a specific function.", "title": "Function tool" }, "OpenAI.ToolChoiceImageGeneration": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "image_generation" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Indicates that the model should use a built-in tool to generate a response.\n[Learn more about built-in tools](https://platform.openai.com/docs/guides/tools)." }, "OpenAI.ToolChoiceMCP": { "type": "object", "required": [ "type", "server_label" ], "properties": { "type": { "type": "string", "enum": [ "mcp" ], "description": "For MCP tools, the type is always `mcp`.", "x-stainless-const": true }, "server_label": { "type": "string", "description": "The label of the MCP server to use." 
}, "name": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Use this option to force the model to call a specific tool on a remote MCP server.", "title": "MCP tool" }, "OpenAI.ToolChoiceOptions": { "type": "string", "enum": [ "none", "auto", "required" ], "description": "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or\nmore tools.\n`required` means the model must call one or more tools.", "title": "Tool choice mode" }, "OpenAI.ToolChoiceParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ToolChoiceParamType" } }, "discriminator": { "propertyName": "type", "mapping": { "allowed_tools": "#/components/schemas/OpenAI.ToolChoiceAllowed", "mcp": "#/components/schemas/OpenAI.ToolChoiceMCP", "custom": "#/components/schemas/OpenAI.ToolChoiceCustom", "apply_patch": "#/components/schemas/OpenAI.SpecificApplyPatchParam", "shell": "#/components/schemas/OpenAI.SpecificFunctionShellParam", "file_search": "#/components/schemas/OpenAI.ToolChoiceFileSearch", "web_search_preview": "#/components/schemas/OpenAI.ToolChoiceWebSearchPreview", "computer_use_preview": "#/components/schemas/OpenAI.ToolChoiceComputerUsePreview", "web_search_preview_2025_03_11": "#/components/schemas/OpenAI.ToolChoiceWebSearchPreview20250311", "image_generation": "#/components/schemas/OpenAI.ToolChoiceImageGeneration", "code_interpreter": "#/components/schemas/OpenAI.ToolChoiceCodeInterpreter" } }, "description": "How the model should select which tool (or tools) to use when generating\na response. See the `tools` parameter to see how to specify which tools\nthe model can call." 
}, "OpenAI.ToolChoiceParamType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "allowed_tools", "function", "mcp", "custom", "apply_patch", "shell", "file_search", "web_search_preview", "computer_use_preview", "web_search_preview_2025_03_11", "image_generation", "code_interpreter" ] } ] }, "OpenAI.ToolChoiceWebSearchPreview": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "web_search_preview" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Note: web_search is not yet available via Azure OpenAI." }, "OpenAI.ToolChoiceWebSearchPreview20250311": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "web_search_preview_2025_03_11" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceParam" } ], "description": "Indicates that the model should use a built-in tool to generate a response.\n[Learn more about built-in tools](https://platform.openai.com/docs/guides/tools)." }, "OpenAI.ToolType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "function", "file_search", "computer_use_preview", "web_search", "mcp", "code_interpreter", "image_generation", "local_shell", "shell", "custom", "web_search_preview", "apply_patch" ] } ] }, "OpenAI.ToolsArray": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Tool" }, "description": "An array of tools the model may call while generating a response. You\ncan specify which tool to use by setting the `tool_choice` parameter.\nWe support the following categories of tools:\n- **Built-in tools**: Tools that are provided by OpenAI that extend the\nmodel's capabilities, like [web search](https://platform.openai.com/docs/guides/tools-web-search)\nor [file search](https://platform.openai.com/docs/guides/tools-file-search). 
Learn more about\n[built-in tools](https://platform.openai.com/docs/guides/tools).\n- **MCP Tools**: Integrations with third-party systems via custom MCP servers\nor predefined connectors such as Google Drive and SharePoint. Learn more about\n[MCP Tools](https://platform.openai.com/docs/guides/tools-connectors-mcp).\n- **Function calls (custom tools)**: Functions that are defined by you,\nenabling the model to call your own code with strongly typed arguments\nand outputs. Learn more about\n[function calling](https://platform.openai.com/docs/guides/function-calling). You can also use\ncustom tools to call your own code." }, "OpenAI.TopLogProb": { "type": "object", "required": [ "token", "logprob", "bytes" ], "properties": { "token": { "type": "string" }, "logprob": { "type": "number" }, "bytes": { "type": "array", "items": { "type": "integer" } } }, "description": "The top log probability of a token.", "title": "Top log probability" }, "OpenAI.TranscriptionSegment": { "type": "object", "required": [ "id", "seek", "start", "end", "text", "tokens", "temperature", "avg_logprob", "compression_ratio", "no_speech_prob" ], "properties": { "id": { "type": "integer", "description": "Unique identifier of the segment." }, "seek": { "type": "integer", "description": "Seek offset of the segment." }, "start": { "type": "number", "format": "float", "description": "Start time of the segment in seconds." }, "end": { "type": "number", "format": "float", "description": "End time of the segment in seconds." }, "text": { "type": "string", "description": "Text content of the segment." }, "tokens": { "type": "array", "items": { "type": "integer" }, "description": "Array of token IDs for the text content." }, "temperature": { "type": "number", "format": "float", "description": "Temperature parameter used for generating the segment." }, "avg_logprob": { "type": "number", "format": "float", "description": "Average logprob of the segment. 
If the value is lower than -1, consider the logprobs failed." }, "compression_ratio": { "type": "number", "format": "float", "description": "Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed." }, "no_speech_prob": { "type": "number", "format": "float", "description": "Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent." } } }, "OpenAI.TranscriptionWord": { "type": "object", "required": [ "word", "start", "end" ], "properties": { "word": { "type": "string", "description": "The text content of the word." }, "start": { "type": "number", "format": "float", "description": "Start time of the word in seconds." }, "end": { "type": "number", "format": "float", "description": "End time of the word in seconds." } } }, "OpenAI.TruncationObject": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "auto", "last_messages" ], "description": "The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`." }, "last_messages": { "anyOf": [ { "type": "integer" }, { "type": "null" } ] } }, "description": "Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run.", "title": "Thread Truncation Controls" }, "OpenAI.Type": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "type" ], "description": "Specifies the event type. For a type action, this property is\n always set to `type`.", "x-stainless-const": true, "default": "type" }, "text": { "type": "string", "description": "The text to type." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "An action to type in text.", "title": "Type" }, "OpenAI.UpdateConversationBody": { "type": "object", "required": [ "metadata" ], "properties": { "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ], "description": "Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and querying for objects via API or the dashboard.\n Keys are strings with a maximum length of 64 characters. Values are strings with a maximum length of 512 characters." } } }, "OpenAI.UpdateVectorStoreFileAttributesRequest": { "type": "object", "required": [ "attributes" ], "properties": { "attributes": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" }, { "type": "null" } ] } }, "x-oaiMeta": { "name": "Update vector store file attributes request" } }, "OpenAI.UpdateVectorStoreRequest": { "type": "object", "properties": { "name": { "anyOf": [ { "type": "string" }, { "type": "null" } ], "description": "The name of the vector store." }, "expires_after": { "$ref": "#/components/schemas/OpenAI.VectorStoreExpirationAfter" }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } } }, "OpenAI.UrlCitationBody": { "type": "object", "required": [ "type", "url", "start_index", "end_index", "title" ], "properties": { "type": { "type": "string", "enum": [ "url_citation" ], "description": "The type of the URL citation. Always `url_citation`.", "x-stainless-const": true, "default": "url_citation" }, "url": { "type": "string", "format": "uri", "description": "The URL of the web resource." }, "start_index": { "type": "integer", "description": "The index of the first character of the URL citation in the message." 
}, "end_index": { "type": "integer", "description": "The index of the last character of the URL citation in the message." }, "title": { "type": "string", "description": "The title of the web resource." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Annotation" } ], "description": "A citation for a web resource used to generate a model response.", "title": "URL citation" }, "OpenAI.ValidateGraderResponse": { "type": "object", "properties": { "grader": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.GraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.GraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.GraderPython" }, { "$ref": "#/components/schemas/OpenAI.GraderScoreModel" }, { "$ref": "#/components/schemas/OpenAI.GraderMulti" }, { "$ref": "#/components/schemas/GraderEndpoint" } ], "description": "The grader used for the fine-tuning job." } }, "title": "ValidateGraderResponse" }, "OpenAI.VectorStoreExpirationAfter": { "type": "object", "required": [ "anchor", "days" ], "properties": { "anchor": { "type": "string", "enum": [ "last_active_at" ], "description": "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`.", "x-stainless-const": true }, "days": { "type": "integer", "minimum": 1, "maximum": 365, "description": "The number of days after the anchor time that the vector store will expire." } }, "description": "The expiration policy for a vector store.", "title": "Vector store expiration policy" }, "OpenAI.VectorStoreFileAttributes": { "type": "object", "unevaluatedProperties": { "anyOf": [ { "type": "string" }, { "type": "number" }, { "type": "boolean" } ] }, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard. Keys are strings\nwith a maximum length of 64 characters. 
Values are strings with a maximum\nlength of 512 characters, booleans, or numbers.", "x-oaiTypeLabel": "map" }, "OpenAI.VectorStoreFileBatchObject": { "type": "object", "required": [ "id", "object", "created_at", "vector_store_id", "status", "file_counts" ], "properties": { "id": { "type": "string", "description": "The identifier, which can be referenced in API endpoints." }, "object": { "type": "string", "enum": [ "vector_store.files_batch" ], "description": "The object type, which is always `vector_store.files_batch`.", "x-stainless-const": true }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the vector store files batch was created." }, "vector_store_id": { "type": "string", "description": "The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "cancelled", "failed" ], "description": "The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`."
}, "file_counts": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileBatchObjectFileCounts" } }, "description": "A batch of files attached to a vector store.", "title": "Vector store file batch", "x-oaiMeta": { "name": "The vector store files batch object", "beta": true, "example": "{\n \"id\": \"vsfb_123\",\n \"object\": \"vector_store.files_batch\",\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"failed\": 0,\n \"cancelled\": 0,\n \"total\": 100\n }\n}\n" } }, "OpenAI.VectorStoreFileBatchObjectFileCounts": { "type": "object", "required": [ "in_progress", "completed", "failed", "cancelled", "total" ], "properties": { "in_progress": { "type": "integer" }, "completed": { "type": "integer" }, "failed": { "type": "integer" }, "cancelled": { "type": "integer" }, "total": { "type": "integer" } } }, "OpenAI.VectorStoreFileObject": { "type": "object", "required": [ "id", "object", "usage_bytes", "created_at", "vector_store_id", "status", "last_error" ], "properties": { "id": { "type": "string", "description": "The identifier, which can be referenced in API endpoints." }, "object": { "type": "string", "enum": [ "vector_store.file" ], "description": "The object type, which is always `vector_store.file`.", "x-stainless-const": true }, "usage_bytes": { "type": "integer", "description": "The total vector store usage in bytes. Note that this may be different from the original file size." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the vector store file was created." }, "vector_store_id": { "type": "string", "description": "The ID of the [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to." 
}, "status": { "type": "string", "enum": [ "in_progress", "completed", "cancelled", "failed" ], "description": "The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use." }, "last_error": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreFileObjectLastError" }, { "type": "null" } ] }, "chunking_strategy": { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyResponse" }, "attributes": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" }, { "type": "null" } ] } }, "description": "A list of files attached to a vector store.", "title": "Vector store files", "x-oaiMeta": { "name": "The vector store file object", "beta": true, "example": "{\n \"id\": \"file-abc123\",\n \"object\": \"vector_store.file\",\n \"usage_bytes\": 1234,\n \"created_at\": 1698107661,\n \"vector_store_id\": \"vs_abc123\",\n \"status\": \"completed\",\n \"last_error\": null,\n \"chunking_strategy\": {\n \"type\": \"static\",\n \"static\": {\n \"max_chunk_size_tokens\": 800,\n \"chunk_overlap_tokens\": 400\n }\n }\n}\n" } }, "OpenAI.VectorStoreFileObjectLastError": { "type": "object", "required": [ "code", "message" ], "properties": { "code": { "type": "string", "enum": [ "server_error", "unsupported_file", "invalid_file" ] }, "message": { "type": "string" } } }, "OpenAI.VectorStoreObject": { "type": "object", "required": [ "id", "object", "created_at", "name", "usage_bytes", "file_counts", "status", "last_active_at", "metadata" ], "properties": { "id": { "type": "string", "description": "The identifier, which can be referenced in API endpoints." 
}, "object": { "type": "string", "enum": [ "vector_store" ], "description": "The object type, which is always `vector_store`.", "x-stainless-const": true }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the vector store was created." }, "name": { "type": "string", "description": "The name of the vector store." }, "usage_bytes": { "type": "integer", "description": "The total number of bytes used by the files in the vector store." }, "file_counts": { "$ref": "#/components/schemas/OpenAI.VectorStoreObjectFileCounts" }, "status": { "type": "string", "enum": [ "expired", "in_progress", "completed" ], "description": "The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use." }, "expires_after": { "$ref": "#/components/schemas/OpenAI.VectorStoreExpirationAfter" }, "expires_at": { "anyOf": [ { "type": "string", "format": "date-time" }, { "type": "null" } ], "type": "integer", "format": "unixTimestamp" }, "last_active_at": { "anyOf": [ { "type": "string", "format": "date-time" }, { "type": "null" } ], "type": "integer", "format": "unixTimestamp" }, "metadata": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.Metadata" }, { "type": "null" } ] } }, "description": "A vector store is a collection of processed files can be used by the `file_search` tool.", "title": "Vector store", "x-oaiMeta": { "name": "The vector store object", "example": "{\n \"id\": \"vs_123\",\n \"object\": \"vector_store\",\n \"created_at\": 1698107661,\n \"usage_bytes\": 123456,\n \"last_active_at\": 1698107661,\n \"name\": \"my_vector_store\",\n \"status\": \"completed\",\n \"file_counts\": {\n \"in_progress\": 0,\n \"completed\": 100,\n \"cancelled\": 0,\n \"failed\": 0,\n \"total\": 100\n },\n \"last_used_at\": 1698107661\n}\n" } }, "OpenAI.VectorStoreObjectFileCounts": { "type": "object", "required": [ "in_progress", 
"completed", "failed", "cancelled", "total" ], "properties": { "in_progress": { "type": "integer" }, "completed": { "type": "integer" }, "failed": { "type": "integer" }, "cancelled": { "type": "integer" }, "total": { "type": "integer" } } }, "OpenAI.VectorStoreSearchRequest": { "type": "object", "required": [ "query" ], "properties": { "query": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "type": "string" } } ], "description": "A query string for a search" }, "rewrite_query": { "type": "boolean", "description": "Whether to rewrite the natural language query for vector search." }, "max_num_results": { "type": "integer", "minimum": 1, "maximum": 50, "description": "The maximum number of results to return. This number should be between 1 and 50 inclusive.", "default": 10 }, "filters": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ComparisonFilter" }, { "$ref": "#/components/schemas/OpenAI.CompoundFilter" } ], "description": "A filter to apply based on file attributes." }, "ranking_options": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreSearchRequestRankingOptions" } ], "description": "Ranking options for search." } }, "x-oaiMeta": { "name": "Vector store search request" } }, "OpenAI.VectorStoreSearchRequestRankingOptions": { "type": "object", "properties": { "ranker": { "type": "string", "enum": [ "none", "auto", "default-2024-11-15" ], "default": "auto" }, "score_threshold": { "type": "number", "minimum": 0, "maximum": 1 } } }, "OpenAI.VectorStoreSearchResultContentObject": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "The type of content." }, "text": { "type": "string", "description": "The text content returned from search." 
} }, "x-oaiMeta": { "name": "Vector store search result content object" } }, "OpenAI.VectorStoreSearchResultItem": { "type": "object", "required": [ "file_id", "filename", "score", "attributes", "content" ], "properties": { "file_id": { "type": "string", "description": "The ID of the vector store file." }, "filename": { "type": "string", "description": "The name of the vector store file." }, "score": { "type": "number", "minimum": 0, "maximum": 1, "description": "The similarity score for the result." }, "attributes": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" }, { "type": "null" } ] }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.VectorStoreSearchResultContentObject" }, "description": "Content chunks from the file." } }, "x-oaiMeta": { "name": "Vector store search result item" } }, "OpenAI.VectorStoreSearchResultsPage": { "type": "object", "required": [ "object", "search_query", "data", "has_more", "next_page" ], "properties": { "object": { "type": "string", "enum": [ "vector_store.search_results.page" ], "description": "The object type, which is always `vector_store.search_results.page`", "x-stainless-const": true }, "search_query": { "type": "array", "items": { "type": "string" } }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.VectorStoreSearchResultItem" }, "description": "The list of search result items." }, "has_more": { "type": "boolean", "description": "Indicates if there are more results to fetch." }, "next_page": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "x-oaiMeta": { "name": "Vector store search results page" } }, "OpenAI.Verbosity": { "anyOf": [ { "type": "string", "enum": [ "low", "medium", "high" ] }, { "type": "null" } ], "description": "Constrains the verbosity of the model's response. 
Lower values will result in\nmore concise responses, while higher values will result in more verbose responses.\nCurrently supported values are `low`, `medium`, and `high`." }, "OpenAI.VoiceIdsShared": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse", "marin", "cedar" ] } ] }, "OpenAI.Wait": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "wait" ], "description": "Specifies the event type. For a wait action, this property is\n always set to `wait`.", "x-stainless-const": true, "default": "wait" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A wait action.", "title": "Wait" }, "OpenAI.WebSearchActionFind": { "type": "object", "required": [ "type", "url", "pattern" ], "properties": { "type": { "type": "string", "enum": [ "find_in_page" ], "description": "The action type.", "x-stainless-const": true }, "url": { "type": "string", "format": "uri", "description": "The URL of the page searched for the pattern." }, "pattern": { "type": "string", "description": "The pattern or text to search for within the page." } }, "description": "Action type \"find\": Searches for a pattern within a loaded page.", "title": "Find action" }, "OpenAI.WebSearchActionOpenPage": { "type": "object", "required": [ "type", "url" ], "properties": { "type": { "type": "string", "enum": [ "open_page" ], "description": "The action type.", "x-stainless-const": true }, "url": { "type": "string", "format": "uri", "description": "The URL opened by the model." 
} }, "description": "Action type \"open_page\" - Opens a specific URL from search results.", "title": "Open page action" }, "OpenAI.WebSearchActionSearch": { "type": "object", "required": [ "type", "query" ], "properties": { "type": { "type": "string", "enum": [ "search" ], "description": "The action type.", "x-stainless-const": true }, "query": { "type": "string", "description": "[DEPRECATED] The search query.", "deprecated": true }, "queries": { "type": "array", "items": { "type": "string" }, "description": "The search queries.", "title": "Search queries" }, "sources": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.WebSearchActionSearchSources" }, "description": "The sources used in the search.", "title": "Web search sources" } }, "description": "Action type \"search\" - Performs a web search query.", "title": "Search action" }, "OpenAI.WebSearchActionSearchSources": { "type": "object", "required": [ "type", "url" ], "properties": { "type": { "type": "string", "enum": [ "url" ], "x-stainless-const": true }, "url": { "type": "string" } } }, "OpenAI.WebSearchApproximateLocation": { "type": "object", "properties": { "type": { "type": "string", "enum": [ "approximate" ], "description": "The type of location approximation. Always `approximate`.", "x-stainless-const": true, "default": "approximate" }, "country": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "region": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "city": { "anyOf": [ { "type": "string" }, { "type": "null" } ] }, "timezone": { "anyOf": [ { "type": "string" }, { "type": "null" } ] } }, "description": "The approximate location of the user.", "title": "Web search approximate location" }, "OpenAI.WebSearchPreviewTool": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "web_search_preview" ], "description": "The type of the web search tool. 
One of `web_search_preview` or `web_search_preview_2025_03_11`.", "x-stainless-const": true, "default": "web_search_preview" }, "user_location": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ApproximateLocation" }, { "type": "null" } ] }, "search_context_size": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.SearchContextSize" } ], "description": "High level guidance for the amount of context window space to use for the search. One of `low`, `medium`, or `high`. `medium` is the default." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "Note: web_search is not yet available via Azure OpenAI.", "title": "Web search preview" }, "OpenAI.WebSearchTool": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "web_search" ], "description": "The type of the web search tool. One of `web_search` or `web_search_2025_08_26`.", "default": "web_search" }, "filters": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.WebSearchToolFilters" }, { "type": "null" } ] }, "user_location": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.WebSearchApproximateLocation" }, { "type": "null" } ] }, "search_context_size": { "type": "string", "enum": [ "low", "medium", "high" ], "description": "High level guidance for the amount of context window space to use for the search. One of `low`, `medium`, or `high`. 
`medium` is the default.", "default": "medium" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "Note: web_search is not yet available via Azure OpenAI.", "title": "Web search" }, "OpenAI.WebSearchToolFilters": { "type": "object", "properties": { "allowed_domains": { "anyOf": [ { "type": "array", "items": { "type": "string" } }, { "type": "null" } ] } } }, "Order": { "type": "string", "enum": [ "asc", "desc" ] }, "ResponseFormatJSONSchemaRequest": { "type": "object", "required": [ "type", "json_schema" ], "properties": { "type": { "type": "string", "enum": [ "json_schema" ], "description": "Type of response format" }, "json_schema": { "type": "object", "unevaluatedProperties": {}, "description": "JSON Schema for the response format" } } }, "SpeechGenerationResponse": { "type": "object", "required": [ "audio" ], "properties": { "audio": { "type": "string", "contentEncoding": "base64", "description": "The generated audio, generated in the requested audio output format." } }, "description": "A representation of a response for a text-to-speech operation." }, "SpeechGenerationResponseFormat": { "oneOf": [ { "type": "string" }, { "type": "string", "enum": [ "mp3", "opus", "aac", "flac", "wav", "pcm" ] } ], "description": "The supported audio output formats for text-to-speech." }, "SpeechVoice": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "alloy", "echo", "fable", "onyx", "nova", "shimmer" ] } ], "description": "The available voices for text-to-speech." }, "VideoContent": { "type": "object", "required": [ "content" ], "properties": { "content": { "type": "string", "contentEncoding": "base64" } } }, "VideoContentVariant": { "anyOf": [ { "type": "string", "enum": [ "video", "thumbnail", "spritesheet" ] }, { "type": "string" } ], "description": "Selectable asset variants for downloaded content." 
}, "VideoIdParameter": { "type": "object", "required": [ "video-id" ], "properties": { "video-id": { "type": "string", "description": "The ID of the video to use for the Azure OpenAI request." } } }, "VideoList": { "type": "object", "required": [ "object", "data", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ] }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/VideoResource" }, "description": "The list of video generation jobs." }, "has_more": { "type": "boolean", "description": "A flag indicating whether there are more jobs available after the list." }, "first_id": { "type": "string", "description": "The ID of the first video in the current page, if available." }, "last_id": { "type": "string", "description": "The ID of the last video in the current page, if available." } }, "description": "A list of video generation jobs." }, "VideoResource": { "type": "object", "required": [ "id", "object", "model", "status", "progress", "created_at", "size", "seconds" ], "properties": { "id": { "type": "string", "description": "Unique identifier for the video job." }, "object": { "type": "string", "description": "The object type, which is always `video`." }, "model": { "type": "string", "description": "The video generation model deployment that produced the job." }, "status": { "allOf": [ { "$ref": "#/components/schemas/VideoStatus" } ], "description": "Current lifecycle status of the video job." }, "progress": { "type": "integer", "format": "int32", "description": "Approximate completion percentage for the generation task." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (seconds) for when the job was created." }, "completed_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (seconds) for when the job completed, if finished." 
}, "expires_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (seconds) for when the video generation expires (and will be deleted)." }, "size": { "allOf": [ { "$ref": "#/components/schemas/VideoSize" } ], "description": "The resolution of the generated video." }, "seconds": { "allOf": [ { "$ref": "#/components/schemas/VideoSeconds" } ], "description": "Duration of the generated clip in seconds." }, "remixed_from_video_id": { "type": "string", "description": "Identifier of the source video if this video is a remix." }, "error": { "allOf": [ { "$ref": "#/components/schemas/Error" } ], "description": "Error payload that explains why generation failed, if applicable." } }, "description": "Structured information describing a generated video job." }, "VideoSeconds": { "anyOf": [ { "type": "string", "enum": [ "4", "8", "12" ] }, { "type": "string" } ], "description": "Supported clip durations, measured in seconds." }, "VideoSize": { "anyOf": [ { "type": "string", "enum": [ "720x1280", "1280x720", "1024x1792", "1792x1024" ] }, { "type": "string" } ], "description": "Output dimensions formatted as `{width}x{height}`." }, "VideoStatus": { "anyOf": [ { "type": "string", "enum": [ "queued", "in_progress", "completed", "failed" ] }, { "type": "string" } ], "description": "Lifecycle state of a generated video." 
} }, "securitySchemes": { "ApiKeyAuth": { "type": "apiKey", "in": "header", "name": "api-key" }, "ApiKeyAuth_": { "type": "apiKey", "in": "header", "name": "authorization" }, "OAuth2Auth": { "type": "oauth2", "flows": { "implicit": { "authorizationUrl": "https://login.microsoftonline.com/common/oauth2/v2.0/authorize", "scopes": { "https://cognitiveservices.azure.com/.default": "" } } } } } }, "servers": [ { "url": "{endpoint}/openai/v1", "description": "Azure AI Foundry Models APIs", "variables": { "endpoint": { "default": "", "description": "A supported Azure AI Foundry Models APIs endpoint, including protocol and hostname.\nFor example:\nhttps://westus.api.cognitive.microsoft.com)." } } } ] }