{ "openapi": "3.0.0", "info": { "title": "Azure AI Foundry Models Service", "license": { "name": "MIT", "url": "https://github.com/openai/openai-openapi/blob/master/LICENSE" }, "version": "v1" }, "tags": [ { "name": "Chat" }, { "name": "Containers" }, { "name": "Embeddings" }, { "name": "Evals" }, { "name": "Files" }, { "name": "Fine-tuning" }, { "name": "Models" }, { "name": "Responses" }, { "name": "Vector Stores" } ], "paths": { "/chat/completions": { "post": { "operationId": "createChatCompletion", "description": "Creates a chat completion.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureCreateChatCompletionResponse" } }, "text/event-stream": { "schema": { "$ref": "#/components/schemas/AzureCreateChatCompletionStreamResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Chat" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureCreateChatCompletionRequest" } } } }, "x-ms-examples": { "Create a chat completion": { "$ref": "./examples/chat_completions.yaml" } } } }, "/containers": { "get": { "operationId": "ListContainers", "parameters": [ { "name": 
"api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerListResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Containers" ] }, "post": { "operationId": "CreateContainer", "parameters": [ { "name": "api-version", "in": "query",
"required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Containers" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateContainerBody" } } } } } }, "/containers/{container_id}": { "get": { "operationId": "RetrieveContainer", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, 
"description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Containers" ] }, "delete": { "operationId": "DeleteContainer", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "id", "object", "deleted" ], "properties": { "id": { "type": "string" }, "object": { "type": "string", "enum": [ "container.deleted" ] }, "deleted": { "type": "boolean", "enum": [ true ] } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Containers" ] } }, "/containers/{container_id}/files": { "post": { "operationId": "CreateContainerFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "schema": { "type": 
"string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerFileResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Containers" ], "requestBody": { "required": true, "content": { "multipart/form-data": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateContainerFileBodyMultiPart" } } } } }, "get": { "operationId": "ListContainerFiles", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerFileListResource" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Containers" ] } }, "/containers/{container_id}/files/{file_id}": { "get": { "operationId": "RetrieveContainerFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ContainerFileResource" } } } }, "default": { "description": "An unexpected error response.", "headers": {
"apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Containers" ] }, "delete": { "operationId": "DeleteContainerFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "id", "object", "deleted" ], "properties": { "id": { "type": "string" }, "object": { "type": "string", "enum": [ "container.file.deleted" ] }, "deleted": { "type": "boolean", "enum": [ true ] } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Containers" ] } }, "/containers/{container_id}/files/{file_id}/content": { "get": { "operationId": "RetrieveContainerFileContent", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { 
"$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "container_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/octet-stream": { "schema": { "type": "string", "format": "binary" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Containers" ] } }, "/embeddings": { "post": { "operationId": "createEmbedding", "summary": "Creates an embedding vector representing the input text.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateEmbeddingResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": 
"#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Embeddings" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureCreateEmbeddingRequest" } } } }, "x-ms-examples": { "Create an embedding request": { "$ref": "./examples/embeddings.yaml" } } } }, "/evals": { "get": { "operationId": "listEvals", "summary": "List evaluations for a project.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "after", "in": "query", "required": false, "description": "Identifier for the last eval from the previous pagination request.", "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of evals to be returned in a single pagination response.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order for evals by timestamp. Use `asc` for ascending order or\n`desc` for descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ], "default": "asc" }, "explode": false }, { "name": "order_by", "in": "query", "required": false, "description": "Evals can be ordered by creation time or last updated time. 
Use\n`created_at` for creation time or `updated_at` for last updated\ntime.", "schema": { "type": "string", "enum": [ "created_at", "updated_at" ], "default": "created_at" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalList" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ] }, "post": { "operationId": "createEval", "description": "Create the structure of an evaluation that can be used to test a model's\nperformance.\n\nAn evaluation is a set of testing criteria and a datasource. After\ncreating an evaluation, you can run it on different models and model\nparameters. 
We support several types of graders and datasources.\n\nFor more information, see the [Evals guide](/docs/guides/evals).\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.Eval" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "type": "object", "properties": { "statusCode": { "type": "number", "enum": [ 201 ] }, "name": { "type": "string", "description": "The name of the evaluation." }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "description": "Set of 16 key-value pairs that can be attached to an object. 
This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "data_source_config": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalDataSourceConfigParams" } ], "description": "The configuration for the data source used for the evaluation runs. Dictates the schema of the data used in the evaluation." }, "testing_criteria": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalGraderParams" }, "description": "A list of graders for all eval runs in this group. Graders can reference variables in the data source using double curly braces notation, like `{{item.variable_name}}`. To reference the model's output, use the `sample` namespace (ie, `{{sample.output_text}}`)." } }, "required": [ "statusCode", "data_source_config", "testing_criteria" ] } } } } } }, "/evals/{eval_id}": { "get": { "operationId": "getEval", "summary": "Retrieve an evaluation by its ID.", "description": "Retrieves an evaluation by its ID.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { 
"required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.Eval" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ] }, "post": { "operationId": "updateEval", "description": "Update select, mutable properties of a specified evaluation.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.Eval" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": 
"#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "type": "object", "properties": { "name": { "type": "string" }, "metadata": { "$ref": "#/components/schemas/OpenAI.MetadataPropertyForRequest" } } } } } } }, "delete": { "operationId": "deleteEval", "description": "Delete a specified evaluation.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "object", "deleted", "eval_id" ], "properties": { "object": { "type": "string", "enum": [ "eval.deleted" ] }, "deleted": { "type": "boolean" }, "eval_id": { "type": "string" } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ] } }, "/evals/{eval_id}/runs": { "get": { 
"operationId": "getEvalRuns", "summary": "Retrieve a list of runs for a specified evaluation.", "description": "Retrieve a list of runs for a specified evaluation.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "after", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "schema": { "type": "string", "enum": [ "asc", "desc" ], "default": "asc" }, "explode": false }, { "name": "status", "in": "query", "required": false, "schema": { "type": "string", "enum": [ "queued", "in_progress", "completed", "canceled", "failed" ] }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRunList" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref":
"#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ] }, "post": { "operationId": "createEvalRun", "description": "Create a new evaluation run, beginning the grading process.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "201": { "description": "The request has succeeded and a new resource has been created as a result.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRun" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateEvalRunRequest" } } } } } }, "/evals/{eval_id}/runs/{run_id}": { "get": { "operationId": "getEvalRun", "description": "Retrieve a specific evaluation run by its ID.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { 
"name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRun" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ] }, "post": { "operationId": "cancelEvalRun", "description": "Cancel a specific evaluation run by its ID.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 
'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRun" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ] }, "delete": { "operationId": "deleteEvalRun", "description": "Delete a specific evaluation run by its ID.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request 
ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "object", "deleted", "eval_run_id" ], "properties": { "object": { "type": "string", "enum": [ "eval_run.deleted" ] }, "deleted": { "type": "boolean" }, "eval_run_id": { "type": "string" } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ] } }, "/evals/{eval_id}/runs/{run_id}/output_items": { "get": { "operationId": "getEvalRunOutputItems", "description": "Get a list of output items for a specified evaluation run.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "after", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "status", "in": "query", "required": false, "schema": { "type": "string", 
"enum": [ "fail", "pass" ] }, "explode": false }, { "name": "order", "in": "query", "required": false, "schema": { "type": "string", "enum": [ "asc", "desc" ], "default": "asc" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRunOutputItemList" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ] } }, "/evals/{eval_id}/runs/{run_id}/output_items/{output_item_id}": { "get": { "operationId": "getEvalRunOutputItem", "description": "Retrieve a specific output item from an evaluation run by its ID.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-evals", "in": "header", "required": true, "description": "Enables access to AOAI Evals, a preview feature.\nThis feature requires the 'aoai-evals' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "eval_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "run_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "output_item_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { 
"description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.EvalRunOutputItem" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Evals" ] } }, "/files": { "post": { "operationId": "createFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureOpenAIFile" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Files" ], "requestBody": { "required": true, "content": { "multipart/form-data": { "schema": { "$ref": "#/components/schemas/AzureCreateFileRequestMultiPart" } } } }, "x-ms-examples": { "Create a file request": { "$ref": "./examples/files.yaml" } } }, "get": { "operationId": "listFiles", "parameters": [ { "name": 
"api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "purpose", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureListFilesResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Files" ] } }, "/files/{file_id}": { "get": { "operationId": "retrieveFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file to use for this request.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureOpenAIFile" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { 
"required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Files" ] }, "delete": { "operationId": "deleteFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file to use for this request.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.DeleteFileResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Files" ] } }, "/files/{file_id}/content": { "get": { "operationId": "downloadFile", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file to use for this request.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request 
has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/octet-stream": { "schema": { "type": "string", "format": "binary" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Files" ] } }, "/fine_tuning/alpha/graders/run": { "post": { "operationId": "runGrader", "summary": "Run a grader.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RunGraderResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.RunGraderRequest" } } } } } }, "/fine_tuning/alpha/graders/validate": { "post": { "operationId": "validateGrader", "summary": "Validate a grader.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not 
otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ValidateGraderResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ValidateGraderRequest" } } } } } }, "/fine_tuning/jobs": { "post": { "operationId": "createFineTuningJob", "summary": "Creates a fine-tuning job which begins the process of creating a new model from a given dataset.\n\nResponse includes details of the enqueued job including job status and the names of the fine-tuned models once complete.\n\n[Learn more about fine-tuning](/docs/guides/fine-tuning)", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": 
{ "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateFineTuningJobRequest" } } } } }, "get": { "operationId": "listPaginatedFineTuningJobs", "summary": "List your organization's fine-tuning jobs", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "after", "in": "query", "required": false, "description": "Identifier for the last job from the previous pagination request.", "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "description": "Number of fine-tuning jobs to retrieve.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListPaginatedFineTuningJobsResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}": { "get": { "operationId": "retrieveFineTuningJob", "summary": "Get info about a fine-tuning job.\n\n[Learn more about 
fine-tuning](/docs/guides/fine-tuning)", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/cancel": { "post": { "operationId": "cancelFineTuningJob", "summary": "Immediately cancel a fine-tune job.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job to cancel.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } 
}, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints": { "get": { "operationId": "listFineTuningJobCheckpoints", "summary": "List the checkpoints for a fine-tuning job.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job to get checkpoints for.", "schema": { "type": "string" } }, { "name": "after", "in": "query", "required": false, "description": "Identifier for the last checkpoint ID from the previous pagination request.", "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "description": "Number of checkpoints to retrieve.", "schema": { "type": "integer", "format": "int32", "default": 10 }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListFineTuningJobCheckpointsResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used 
for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints/{fine_tuning_checkpoint_id}/copy": { "post": { "operationId": "FineTuning_CopyCheckpoint", "description": "Creates a copy of a fine-tuning checkpoint at the given destination account and region.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-copy-ft-checkpoints", "in": "header", "required": true, "description": "Enables access to checkpoint copy operations for models, an AOAI preview feature.\nThis feature requires the 'aoai-copy-ft-checkpoints' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "accept", "in": "header", "required": true, "schema": { "type": "string", "enum": [ "application/json" ] } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "fine_tuning_checkpoint_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CopyModelResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } 
} }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CopyModelRequest" } } } } }, "get": { "operationId": "FineTuning_GetCheckpoint", "description": "Gets the status of a fine-tuning checkpoint copy.\n\nNOTE: This Azure OpenAI API is in preview and subject to change.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "aoai-copy-ft-checkpoints", "in": "header", "required": true, "description": "Enables access to checkpoint copy operations for models, an AOAI preview feature.\nThis feature requires the 'aoai-copy-ft-checkpoints' header to be set to 'preview'.", "schema": { "type": "string", "enum": [ "preview" ] } }, { "name": "accept", "in": "header", "required": true, "schema": { "type": "string", "enum": [ "application/json" ] } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "fine_tuning_checkpoint_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CopyModelResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { 
"$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/events": { "get": { "operationId": "listFineTuningEvents", "summary": "Get status updates for a fine-tuning job.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job to get events for.", "schema": { "type": "string" } }, { "name": "after", "in": "query", "required": false, "description": "Identifier for the last event from the previous pagination request.", "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "description": "Number of events to retrieve.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListFineTuningJobEventsResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/pause": { "post": { "operationId": "pauseFineTuningJob", "summary": "Pause a fine-tune job.", "parameters": [ { "name": "api-version", "in": "query", "required": 
false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job to pause.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ] } }, "/fine_tuning/jobs/{fine_tuning_job_id}/resume": { "post": { "operationId": "resumeFineTuningJob", "summary": "Resume a paused fine-tune job.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "fine_tuning_job_id", "in": "path", "required": true, "description": "The ID of the fine-tuning job to resume.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } 
} } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Fine-tuning" ] } }, "/models": { "get": { "operationId": "listModels", "summary": "Lists the currently available models, and provides basic information about each one such as the\nowner and availability.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListModelsResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Models" ] } }, "/models/{model}": { "get": { "operationId": "retrieveModel", "summary": "Retrieves a model instance, providing basic information about the model such as the owner and\npermissioning.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", 
"default": "v1" } }, { "name": "model", "in": "path", "required": true, "description": "The ID of the model to use for this request.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.Model" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Models" ] } }, "/responses": { "post": { "operationId": "createResponse", "description": "Creates a model response.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureResponse" } }, "text/event-stream": { "schema": { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, 
"tags": [ "Responses" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureCreateResponse" } } } }, "x-ms-examples": { "Create a response request": { "$ref": "./examples/responses.yaml" } } } }, "/responses/{response_id}": { "get": { "operationId": "getResponse", "description": "Retrieves a model response with the given ID.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "response_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "include_obfuscation", "in": "query", "required": false, "description": "When true, stream obfuscation will be enabled. Stream obfuscation adds random characters to an `obfuscation` field on streaming delta events to normalize payload sizes as a mitigation to certain side-channel attacks. These obfuscation fields are included by default, but add a small amount of overhead to the data stream. 
You can set `include_obfuscation` to false to optimize for bandwidth if you trust the network links between your application and the OpenAI API.", "schema": { "type": "boolean", "default": true }, "explode": false }, { "name": "include[]", "in": "query", "required": false, "schema": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Includable" }, "default": [] } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Responses" ] }, "delete": { "operationId": "deleteResponse", "description": "Deletes a response by ID.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "response_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "type": "object", "required": [ "object", "id", "deleted" ], "properties": { "object": { "type": "string", "enum": [ "response.deleted" ] }, "id": { "type": "string" }, "deleted": { 
"type": "boolean", "enum": [ true ] } } } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Responses" ] } }, "/responses/{response_id}/input_items": { "get": { "operationId": "listInputItems", "description": "Returns a list of input items for a given response.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "response_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "description": "A cursor for use in pagination. 
`before` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include before=obj_foo in order to fetch the previous page of the list.", "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ResponseItemList" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Responses" ] } }, "/vector_stores": { "get": { "operationId": "listVectorStores", "summary": "Returns a list of vector stores.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and `desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "description": "A cursor for use in pagination. `before` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include before=obj_foo in order to fetch the previous page of the list.", "schema": { "type": "string" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListVectorStoresResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ] }, "post": { "operationId": "createVectorStore", "summary": "Creates a vector store.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": 
"#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateVectorStoreRequest" } } } }, "x-ms-examples": { "Create a vector store request": { "$ref": "./examples/vector_stores.yaml" } } } }, "/vector_stores/{vector_store_id}": { "get": { "operationId": "getVectorStore", "summary": "Retrieves a vector store.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store to retrieve.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreObject" } } } }, "default": { "description": "An unexpected error response.", 
"headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ] }, "post": { "operationId": "modifyVectorStore", "summary": "Modifies a vector store.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store to modify.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.UpdateVectorStoreRequest" } } } } }, "delete": { "operationId": "deleteVectorStore", "summary": "Delete a vector store.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": 
"#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store to delete.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.DeleteVectorStoreResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ] } }, "/vector_stores/{vector_store_id}/file_batches": { "post": { "operationId": "createVectorStoreFileBatch", "summary": "Create a vector store file batch.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store for which to create a file batch.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileBatchObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { 
"required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateVectorStoreFileBatchRequest" } } } } } }, "/vector_stores/{vector_store_id}/file_batches/{batch_id}": { "get": { "operationId": "getVectorStoreFileBatch", "summary": "Retrieves a vector store file batch.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the file batch belongs to.", "schema": { "type": "string" } }, { "name": "batch_id", "in": "path", "required": true, "description": "The ID of the file batch being retrieved.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileBatchObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ] } }, "/vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel": { "post": { 
"operationId": "cancelVectorStoreFileBatch", "summary": "Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the file batch belongs to.", "schema": { "type": "string" } }, { "name": "batch_id", "in": "path", "required": true, "description": "The ID of the file batch to cancel.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileBatchObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ] } }, "/vector_stores/{vector_store_id}/file_batches/{batch_id}/files": { "get": { "operationId": "listFilesInVectorStoreBatch", "summary": "Returns a list of vector store files in a batch.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": 
"vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the file batch belongs to.", "schema": { "type": "string" } }, { "name": "batch_id", "in": "path", "required": true, "description": "The ID of the file batch that the files belong to.", "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "description": "A cursor for use in pagination. `before` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include before=obj_foo in order to fetch the previous page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "filter", "in": "query", "required": false, "description": "Filter by file status. 
One of `in_progress`, `completed`, `failed`, `cancelled`.", "schema": { "$ref": "#/components/schemas/OpenAI.ListVectorStoreFilesFilter" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListVectorStoreFilesResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ] } }, "/vector_stores/{vector_store_id}/files": { "get": { "operationId": "listVectorStoreFiles", "summary": "Returns a list of vector store files.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the files belong to.", "schema": { "type": "string" } }, { "name": "limit", "in": "query", "required": false, "description": "A limit on the number of objects to be returned. Limit can range between 1 and 100, and the\ndefault is 20.", "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "description": "Sort order by the `created_at` timestamp of the objects. 
`asc` for ascending order and `desc`\nfor descending order.", "schema": { "type": "string", "enum": [ "asc", "desc" ] }, "explode": false }, { "name": "after", "in": "query", "required": false, "description": "A cursor for use in pagination. `after` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include after=obj_foo in order to fetch the next page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "before", "in": "query", "required": false, "description": "A cursor for use in pagination. `before` is an object ID that defines your place in the list.\nFor instance, if you make a list request and receive 100 objects, ending with obj_foo, your\nsubsequent call can include before=obj_foo in order to fetch the previous page of the list.", "schema": { "type": "string" }, "explode": false }, { "name": "filter", "in": "query", "required": false, "description": "Filter by file status. 
One of `in_progress`, `completed`, `failed`, `cancelled`.", "schema": { "$ref": "#/components/schemas/OpenAI.ListVectorStoreFilesFilter" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.ListVectorStoreFilesResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ] }, "post": { "operationId": "createVectorStoreFile", "summary": "Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object).", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store for which to create a File.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, 
"description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.CreateVectorStoreFileRequest" } } } } } }, "/vector_stores/{vector_store_id}/files/{file_id}": { "get": { "operationId": "getVectorStoreFile", "summary": "Retrieves a vector store file.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the file belongs to.", "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file being retrieved.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ] }, "post": { "operationId": "updateVectorStoreFileAttributes", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": 
"The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileObject" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.UpdateVectorStoreFileAttributesRequest" } } } } }, "delete": { "operationId": "deleteVectorStoreFile", "summary": "Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. 
To delete the file, use the [delete file](/docs/api-reference/files/delete) endpoint.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "vector_store_id", "in": "path", "required": true, "description": "The ID of the vector store that the file belongs to.", "schema": { "type": "string" } }, { "name": "file_id", "in": "path", "required": true, "description": "The ID of the file to delete.", "schema": { "type": "string" } } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/OpenAI.DeleteVectorStoreFileResponse" } } } }, "default": { "description": "An unexpected error response.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Vector Stores" ] } }, "/videos": { "post": { "operationId": "Videos_Create", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } } ], "description": "Creates a new video generation job.", "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": 
{ "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/VideoResource" } } } }, "default": { "description": "An unexpected error response.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Videos" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CreateVideoBody" } }, "multipart/form-data": { "schema": { "$ref": "#/components/schemas/CreateVideoBodyWithInputReference" }, "encoding": { "seconds": { "contentType": "text/plain" }, "size": { "contentType": "text/plain" }, "input_reference": { "contentType": "*/*" } } } } } }, "get": { "operationId": "Videos_List", "description": "Lists video generation jobs.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "name": "after", "in": "query", "required": false, "schema": { "type": "string" }, "explode": false }, { "name": "limit", "in": "query", "required": false, "schema": { "type": "integer", "format": "int32", "default": 20 }, "explode": false }, { "name": "order", "in": "query", "required": false, "schema": { "$ref": "#/components/schemas/Order", "default": "desc" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/VideoList" } } } }, "default": { "description": "An unexpected error response.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Videos" ] } 
}, "/videos/{video-id}": { "get": { "operationId": "Videos_Get", "description": "Retrieves properties of a video generation job.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "$ref": "#/components/parameters/VideoIdParameter" } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/VideoResource" } } } }, "default": { "description": "An unexpected error response.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Videos" ] }, "delete": { "operationId": "Videos_Delete", "description": "Deletes a video generation job.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "$ref": "#/components/parameters/VideoIdParameter" } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/DeletedVideoResource" } } } }, "default": { "description": "An unexpected error response.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Videos" ] } }, 
"/videos/{video-id}/content": { "get": { "operationId": "Videos_RetrieveContent", "description": "Retrieves a thumbnail of the generated video content.", "parameters": [ { "name": "api-version", "in": "query", "required": false, "description": "The explicit Azure AI Foundry Models API version to use for this request.\n`v1` if not otherwise specified.", "schema": { "$ref": "#/components/schemas/AzureAIFoundryModelsApiVersion", "default": "v1" } }, { "$ref": "#/components/parameters/VideoIdParameter" }, { "name": "variant", "in": "query", "required": false, "schema": { "$ref": "#/components/schemas/VideoContentVariant", "default": "video" }, "explode": false } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } }, "content-length": { "required": true, "schema": { "type": "integer" } } }, "content": { "image/jpg": { "schema": { "type": "string", "format": "binary" } }, "image/webp": { "schema": { "type": "string", "format": "binary" } }, "video/mp4": { "schema": { "type": "string", "format": "binary" } } } }, "default": { "description": "An unexpected error response.", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Videos" ] } }, "/videos/{video-id}/remix": { "post": { "operationId": "Videos_Remix", "description": "Use an existing generated video and change it with a prompt.", "parameters": [ { "$ref": "#/components/parameters/VideoIdParameter" } ], "responses": { "200": { "description": "The request has succeeded.", "headers": { "apim-request-id": { "required": false, "description": "A request ID used for troubleshooting purposes.", "schema": { "type": "string" } } }, "content": { "application/json": { "schema": { "$ref": "#/components/schemas/VideoResource" } } } }, "default": { "description": "An unexpected error response.", 
"content": { "application/json": { "schema": { "$ref": "#/components/schemas/AzureErrorResponse" } } } } }, "tags": [ "Videos" ], "requestBody": { "required": true, "content": { "application/json": { "schema": { "type": "object", "properties": { "prompt": { "type": "string", "minLength": 1 } }, "required": [ "prompt" ] } } } } } } }, "security": [ { "ApiKeyAuth": [] }, { "ApiKeyAuth_": [] }, { "OAuth2Auth": [ "https://cognitiveservices.azure.com/.default" ] } ], "components": { "parameters": { "VideoIdParameter": { "name": "video-id", "in": "path", "required": true, "description": "The ID of the video to use for the Azure OpenAI request.", "schema": { "type": "string" } } }, "schemas": { "AzureAIFoundryModelsApiVersion": { "type": "string", "enum": [ "v1", "preview" ] }, "AzureChatCompletionResponseMessage": { "type": "object", "required": [ "content", "refusal", "role" ], "properties": { "content": { "type": "string", "nullable": true, "description": "The contents of the message." }, "refusal": { "type": "string", "nullable": true, "description": "The refusal message generated by the model." }, "tool_calls": { "$ref": "#/components/schemas/ChatCompletionMessageToolCallsItem", "readOnly": true }, "annotations": { "type": "array", "items": { "type": "object", "properties": { "type": { "type": "string", "enum": [ "url_citation" ], "description": "The type of the URL citation. Always `url_citation`." }, "url_citation": { "type": "object", "properties": { "end_index": { "type": "integer", "format": "int32", "description": "The index of the last character of the URL citation in the message." }, "start_index": { "type": "integer", "format": "int32", "description": "The index of the first character of the URL citation in the message." }, "url": { "type": "string", "format": "uri", "description": "The URL of the web resource." }, "title": { "type": "string", "description": "The title of the web resource." 
} }, "required": [ "end_index", "start_index", "url", "title" ], "description": "A URL citation when using web search." } }, "required": [ "type", "url_citation" ] }, "description": "Annotations for the message, when applicable, as when using the\n[web search tool](/docs/guides/tools-web-search?api-mode=chat)." }, "role": { "type": "string", "enum": [ "assistant" ], "description": "The role of the author of this message." }, "function_call": { "type": "object", "properties": { "name": { "type": "string" }, "arguments": { "type": "string" } }, "required": [ "name", "arguments" ], "description": "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.", "deprecated": true }, "audio": { "type": "object", "properties": { "id": { "type": "string", "description": "Unique identifier for this audio response." }, "expires_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when this audio response will\nno longer be accessible on the server for use in multi-turn\nconversations." }, "data": { "type": "string", "format": "base64", "description": "Base64 encoded audio bytes generated by the model, in the format\nspecified in the request." }, "transcript": { "type": "string", "description": "Transcript of the audio generated by the model." } }, "required": [ "id", "expires_at", "data", "transcript" ], "nullable": true, "description": "If the audio output modality is requested, this object contains data\nabout the audio response from the model. [Learn more](/docs/guides/audio)." }, "context": { "allOf": [ { "$ref": "#/components/schemas/AzureChatMessageContext" } ], "description": "The Azure-specific context information associated with the chat completion response message." }, "reasoning_content": { "type": "string", "description": "An Azure-specific extension property containing generated reasoning content from supported models." 
} }, "description": "The extended response model component for chat completion response messages on the Azure OpenAI service.\nThis model adds support for chat message context, used by the On Your Data feature for intent, citations, and other\ninformation related to retrieval-augmented generation performed." }, "AzureChatCompletionStreamResponseDelta": { "type": "object", "properties": { "audio": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageAudioChunk" } ], "description": "Response audio associated with the streaming chat delta payload." }, "content": { "type": "string", "nullable": true, "description": "The contents of the chunk message." }, "function_call": { "type": "object", "properties": { "name": { "type": "string" }, "arguments": { "type": "string" } }, "description": "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.", "deprecated": true }, "tool_calls": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageToolCallChunk" }, "readOnly": true }, "role": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRole" } ], "description": "The role of the author of this message." }, "refusal": { "type": "string", "nullable": true, "description": "The refusal message generated by the model." }, "context": { "allOf": [ { "$ref": "#/components/schemas/AzureChatMessageContext" } ], "description": "The Azure-specific context information associated with the chat completion response message." }, "reasoning_content": { "type": "string", "description": "An Azure-specific extension property containing generated reasoning content from supported models." 
} }, "description": "The extended response model for a streaming chat response message on the Azure OpenAI service.\nThis model adds support for chat message context, used by the On Your Data feature for intent, citations, and other\ninformation related to retrieval-augmented generation performed." }, "AzureChatDataSource": { "type": "object", "required": [ "type" ], "properties": { "type": { "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceType" } ], "description": "The differentiating type identifier for the data source." } }, "discriminator": { "propertyName": "type", "mapping": { "azure_search": "#/components/schemas/AzureSearchChatDataSource", "azure_cosmos_db": "#/components/schemas/AzureCosmosDBChatDataSource", "elasticsearch": "#/components/schemas/ElasticsearchChatDataSource", "pinecone": "#/components/schemas/PineconeChatDataSource", "mongo_db": "#/components/schemas/MongoDBChatDataSource" } }, "description": "A representation of configuration data for a single Azure OpenAI chat data source.\nThis will be used by a chat completions request that should use Azure OpenAI chat extensions to augment the\nresponse behavior.\nThe use of this configuration is compatible only with Azure OpenAI." 
}, "AzureChatDataSourceAccessTokenAuthenticationOptions": { "type": "object", "required": [ "type", "access_token" ], "properties": { "type": { "type": "string", "enum": [ "access_token" ] }, "access_token": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceAuthenticationOptions" } ] }, "AzureChatDataSourceApiKeyAuthenticationOptions": { "type": "object", "required": [ "type", "key" ], "properties": { "type": { "type": "string", "enum": [ "api_key" ] }, "key": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceAuthenticationOptions" } ] }, "AzureChatDataSourceAuthenticationOptions": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/AzureChatDataSourceAuthenticationOptionsType" } }, "discriminator": { "propertyName": "type", "mapping": { "system_assigned_managed_identity": "#/components/schemas/AzureChatDataSourceSystemAssignedManagedIdentityAuthenticationOptions", "user_assigned_managed_identity": "#/components/schemas/AzureChatDataSourceUserAssignedManagedIdentityAuthenticationOptions", "access_token": "#/components/schemas/AzureChatDataSourceAccessTokenAuthenticationOptions", "connection_string": "#/components/schemas/AzureChatDataSourceConnectionStringAuthenticationOptions", "key_and_key_id": "#/components/schemas/AzureChatDataSourceKeyAndKeyIdAuthenticationOptions", "encoded_api_key": "#/components/schemas/AzureChatDataSourceEncodedApiKeyAuthenticationOptions", "username_and_password": "#/components/schemas/AzureChatDataSourceUsernameAndPasswordAuthenticationOptions" } } }, "AzureChatDataSourceAuthenticationOptionsType": { "type": "string", "enum": [ "api_key", "username_and_password", "connection_string", "key_and_key_id", "encoded_api_key", "access_token", "system_assigned_managed_identity", "user_assigned_managed_identity" ] }, "AzureChatDataSourceConnectionStringAuthenticationOptions": { "type": "object", "required": [ "type", 
"connection_string" ], "properties": { "type": { "type": "string", "enum": [ "connection_string" ] }, "connection_string": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceAuthenticationOptions" } ] }, "AzureChatDataSourceDeploymentNameVectorizationSource": { "type": "object", "required": [ "type", "deployment_name" ], "properties": { "type": { "type": "string", "enum": [ "deployment_name" ], "description": "The type identifier, always 'deployment_name' for this vectorization source type." }, "deployment_name": { "type": "string", "description": "The embedding model deployment to use for vectorization. This deployment must exist within the same Azure OpenAI\nresource as the model deployment being used for chat completions." }, "dimensions": { "type": "integer", "format": "int32", "description": "The number of dimensions to request on embeddings.\nOnly supported in 'text-embedding-3' and later models." } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceVectorizationSource" } ], "description": "Represents a vectorization source that makes internal service calls against an Azure OpenAI embedding model\ndeployment. In contrast with the endpoint-based vectorization source, a deployment-name-based vectorization source\nmust be part of the same Azure OpenAI resource but can be used even in private networks." }, "AzureChatDataSourceEncodedApiKeyAuthenticationOptions": { "type": "object", "required": [ "type", "encoded_api_key" ], "properties": { "type": { "type": "string", "enum": [ "encoded_api_key" ] }, "encoded_api_key": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceAuthenticationOptions" } ] }, "AzureChatDataSourceEndpointVectorizationSource": { "type": "object", "required": [ "type", "endpoint", "authentication" ], "properties": { "type": { "type": "string", "enum": [ "endpoint" ], "description": "The type identifier, always 'endpoint' for this vectorization source type." 
}, "endpoint": { "type": "string", "format": "uri", "description": "Specifies the resource endpoint URL from which embeddings should be retrieved.\nIt should be in the format of:\nhttps://YOUR_RESOURCE_NAME.openai.azure.com/openai/deployments/YOUR_DEPLOYMENT_NAME/embeddings.\nThe api-version query parameter is not allowed." }, "authentication": { "anyOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceApiKeyAuthenticationOptions" }, { "$ref": "#/components/schemas/AzureChatDataSourceAccessTokenAuthenticationOptions" } ], "description": "The authentication mechanism to use with the endpoint-based vectorization source.\nEndpoint authentication supports API key and access token mechanisms." }, "dimensions": { "type": "integer", "format": "int32", "description": "The number of dimensions to request on embeddings.\nOnly supported in 'text-embedding-3' and later models." } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceVectorizationSource" } ], "description": "Represents a vectorization source that makes public service calls against an Azure OpenAI embedding model deployment." }, "AzureChatDataSourceIntegratedVectorizationSource": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "integrated" ], "description": "The type identifier, always 'integrated' for this vectorization source type." } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceVectorizationSource" } ], "description": "Represents an integrated vectorization source as defined within the supporting search resource." 
}, "AzureChatDataSourceKeyAndKeyIdAuthenticationOptions": { "type": "object", "required": [ "type", "key", "key_id" ], "properties": { "type": { "type": "string", "enum": [ "key_and_key_id" ] }, "key": { "type": "string" }, "key_id": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceAuthenticationOptions" } ] }, "AzureChatDataSourceModelIdVectorizationSource": { "type": "object", "required": [ "type", "model_id" ], "properties": { "type": { "type": "string", "enum": [ "model_id" ], "description": "The type identifier, always 'model_id' for this vectorization source type." }, "model_id": { "type": "string", "description": "The embedding model build ID to use for vectorization." } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceVectorizationSource" } ], "description": "Represents a vectorization source that makes service calls based on a search service model ID.\nThis source type is currently only supported by Elasticsearch." }, "AzureChatDataSourceSystemAssignedManagedIdentityAuthenticationOptions": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "system_assigned_managed_identity" ] } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceAuthenticationOptions" } ] }, "AzureChatDataSourceType": { "type": "string", "enum": [ "azure_search", "azure_cosmos_db", "elasticsearch", "pinecone", "mongo_db" ] }, "AzureChatDataSourceUserAssignedManagedIdentityAuthenticationOptions": { "type": "object", "required": [ "type", "managed_identity_resource_id" ], "properties": { "type": { "type": "string", "enum": [ "user_assigned_managed_identity" ] }, "managed_identity_resource_id": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceAuthenticationOptions" } ] }, "AzureChatDataSourceUsernameAndPasswordAuthenticationOptions": { "type": "object", "required": [ "type", "username", "password" ], "properties": { "type": { "type": "string", 
"enum": [ "username_and_password" ] }, "username": { "type": "string" }, "password": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceAuthenticationOptions" } ] }, "AzureChatDataSourceVectorizationSource": { "type": "object", "required": [ "type" ], "properties": { "type": { "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceVectorizationSourceType" } ], "description": "The differentiating identifier for the concrete vectorization source." } }, "discriminator": { "propertyName": "type", "mapping": { "deployment_name": "#/components/schemas/AzureChatDataSourceDeploymentNameVectorizationSource", "integrated": "#/components/schemas/AzureChatDataSourceIntegratedVectorizationSource", "model_id": "#/components/schemas/AzureChatDataSourceModelIdVectorizationSource" } }, "description": "A representation of a data vectorization source usable as an embedding resource with a data source." }, "AzureChatDataSourceVectorizationSourceType": { "type": "string", "enum": [ "endpoint", "deployment_name", "model_id", "integrated" ] }, "AzureChatMessageContext": { "type": "object", "properties": { "intent": { "type": "string", "description": "The detected intent from the chat history, which is used to carry conversation context between interactions" }, "citations": { "type": "array", "items": { "type": "object", "properties": { "content": { "type": "string", "description": "The content of the citation." }, "title": { "type": "string", "description": "The title for the citation." }, "url": { "type": "string", "description": "The URL of the citation." }, "filepath": { "type": "string", "description": "The file path for the citation." }, "chunk_id": { "type": "string", "description": "The chunk ID for the citation." }, "rerank_score": { "type": "number", "format": "double", "description": "The rerank score for the retrieval." } }, "required": [ "content" ] }, "description": "The citations produced by the data retrieval." 
}, "all_retrieved_documents": { "type": "object", "properties": { "content": { "type": "string", "description": "The content of the citation." }, "title": { "type": "string", "description": "The title for the citation." }, "url": { "type": "string", "description": "The URL of the citation." }, "filepath": { "type": "string", "description": "The file path for the citation." }, "chunk_id": { "type": "string", "description": "The chunk ID for the citation." }, "rerank_score": { "type": "number", "format": "double", "description": "The rerank score for the retrieval." }, "search_queries": { "type": "array", "items": { "type": "string" }, "description": "The search queries executed to retrieve documents." }, "data_source_index": { "type": "integer", "format": "int32", "description": "The index of the data source used for retrieval." }, "original_search_score": { "type": "number", "format": "double", "description": "The original search score for the retrieval." }, "filter_reason": { "type": "string", "enum": [ "score", "rerank" ], "description": "If applicable, an indication of why the document was filtered." } }, "required": [ "content", "search_queries", "data_source_index" ], "description": "Summary information about documents retrieved by the data retrieval operation." } }, "description": "An additional property, added to chat completion response messages, produced by the Azure OpenAI service when using\nextension behavior. This includes intent and citation information from the On Your Data feature." }, "AzureContentFilterBlocklistResult": { "type": "object", "required": [ "filtered" ], "properties": { "filtered": { "type": "boolean", "description": "A value indicating whether any of the detailed blocklists resulted in a filtering action." }, "details": { "type": "array", "items": { "type": "object", "properties": { "filtered": { "type": "boolean", "description": "A value indicating whether the blocklist produced a filtering action." 
}, "id": { "type": "string", "description": "The ID of the custom blocklist evaluated." } }, "required": [ "filtered", "id" ] }, "description": "The pairs of individual blocklist IDs and whether they resulted in a filtering action." } }, "description": "A collection of true/false filtering results for configured custom blocklists." }, "AzureContentFilterCompletionTextSpan": { "type": "object", "required": [ "completion_start_offset", "completion_end_offset" ], "properties": { "completion_start_offset": { "type": "integer", "format": "int32", "description": "Offset of the UTF32 code point which begins the span." }, "completion_end_offset": { "type": "integer", "format": "int32", "description": "Offset of the first UTF32 code point which is excluded from the span. This field is always equal to completion_start_offset for empty spans. This field is always larger than completion_start_offset for non-empty spans." } }, "description": "A representation of a span of completion text as used by Azure OpenAI content filter results." }, "AzureContentFilterCompletionTextSpanDetectionResult": { "type": "object", "required": [ "filtered", "detected", "details" ], "properties": { "filtered": { "type": "boolean", "description": "Whether the content detection resulted in a content filtering action." }, "detected": { "type": "boolean", "description": "Whether the labeled content category was detected in the content." }, "details": { "type": "array", "items": { "$ref": "#/components/schemas/AzureContentFilterCompletionTextSpan" }, "description": "Detailed information about the detected completion text spans." } } }, "AzureContentFilterCustomTopicResult": { "type": "object", "required": [ "filtered" ], "properties": { "filtered": { "type": "boolean", "description": "A value indicating whether any of the detailed topics resulted in a filtering action." 
}, "details": { "type": "array", "items": { "type": "object", "properties": { "detected": { "type": "boolean", "description": "A value indicating whether the topic is detected." }, "id": { "type": "string", "description": "The ID of the custom topic evaluated." } }, "required": [ "detected", "id" ] }, "description": "The pairs of individual topic IDs and whether they are detected." } }, "description": "A collection of true/false filtering results for configured custom topics." }, "AzureContentFilterDetectionResult": { "type": "object", "required": [ "filtered", "detected" ], "properties": { "filtered": { "type": "boolean", "description": "Whether the content detection resulted in a content filtering action." }, "detected": { "type": "boolean", "description": "Whether the labeled content category was detected in the content." } }, "description": "A labeled content filter result item that indicates whether the content was detected and whether the content was\nfiltered." }, "AzureContentFilterPersonallyIdentifiableInformationResult": { "type": "object", "properties": { "redacted_text": { "type": "string", "description": "The redacted text with PII information removed or masked." }, "sub_categories": { "type": "array", "items": { "$ref": "#/components/schemas/AzurePiiSubCategoryResult" }, "description": "Detailed results for individual PIIHarmSubCategory(s)." } }, "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A content filter detection result for Personally Identifiable Information that includes harm extensions." 
}, "AzureContentFilterResultForChoice": { "type": "object", "properties": { "sexual": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to anatomical organs and genitals, romantic relationships, acts\nportrayed in erotic or affectionate terms, pregnancy, physical sexual acts, including those portrayed as an\nassault or a forced sexual violent act against one's will, prostitution, pornography, and abuse." }, "hate": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that can refer to any content that attacks or uses pejorative or discriminatory\nlanguage with reference to a person or identity group based on certain differentiating attributes of these groups\nincluding but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation,\nreligion, immigration status, ability status, personal appearance, and body size." }, "violence": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to physical actions intended to hurt, injure, damage, or kill\nsomeone or something; describes weapons, guns and related entities, such as manufactures, associations,\nlegislation, and so on." }, "self_harm": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that describes language related to physical actions intended to purposely hurt, injure,\ndamage one's body or kill oneself." }, "profanity": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that identifies whether crude, vulgar, or otherwise objection language is present in the\ncontent." 
}, "custom_blocklists": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterBlocklistResult" } ], "description": "A collection of binary filtering outcomes for configured custom blocklists." }, "custom_topics": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterCustomTopicResult" } ], "description": "A collection of binary filtering outcomes for configured custom topics." }, "error": { "type": "object", "properties": { "code": { "type": "integer", "format": "int32", "description": "A distinct, machine-readable code associated with the error." }, "message": { "type": "string", "description": "A human-readable message associated with the error." } }, "required": [ "code", "message" ], "description": "If present, details about an error that prevented content filtering from completing its evaluation." }, "protected_material_text": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that describes a match against text protected under copyright or other status." }, "protected_material_code": { "type": "object", "properties": { "filtered": { "type": "boolean", "description": "Whether the content detection resulted in a content filtering action." }, "detected": { "type": "boolean", "description": "Whether the labeled content category was detected in the content." }, "citation": { "type": "object", "properties": { "license": { "type": "string", "description": "The name or identifier of the license associated with the detection." }, "URL": { "type": "string", "format": "uri", "description": "The URL associated with the license." } }, "description": "If available, the citation details describing the associated license and its location." } }, "required": [ "filtered", "detected" ], "description": "A detection result that describes a match against licensed code or other protected source material." 
}, "ungrounded_material": { "$ref": "#/components/schemas/AzureContentFilterCompletionTextSpanDetectionResult" }, "personally_identifiable_information": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterPersonallyIdentifiableInformationResult" } ], "description": "A detection result that describes matches against Personal Identifiable Information with configurable subcategories." } }, "description": "A content filter result for a single response item produced by a generative AI system." }, "AzureContentFilterResultForPrompt": { "type": "object", "properties": { "prompt_index": { "type": "integer", "format": "int32", "description": "The index of the input prompt associated with the accompanying content filter result categories." }, "content_filter_results": { "type": "object", "properties": { "sexual": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to anatomical organs and genitals, romantic relationships, acts\nportrayed in erotic or affectionate terms, pregnancy, physical sexual acts, including those portrayed as an\nassault or a forced sexual violent act against one's will, prostitution, pornography, and abuse." }, "hate": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that can refer to any content that attacks or uses pejorative or discriminatory\nlanguage with reference to a person or identity group based on certain differentiating attributes of these groups\nincluding but not limited to race, ethnicity, nationality, gender identity and expression, sexual orientation,\nreligion, immigration status, ability status, personal appearance, and body size." 
}, "violence": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category for language related to physical actions intended to hurt, injure, damage, or kill\nsomeone or something; describes weapons, guns and related entities, such as manufactures, associations,\nlegislation, and so on." }, "self_harm": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterSeverityResult" } ], "description": "A content filter category that describes language related to physical actions intended to purposely hurt, injure,\ndamage one's body or kill oneself." }, "profanity": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that identifies whether crude, vulgar, or otherwise objection language is present in the\ncontent." }, "custom_blocklists": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterBlocklistResult" } ], "description": "A collection of binary filtering outcomes for configured custom blocklists." }, "custom_topics": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterCustomTopicResult" } ], "description": "A collection of binary filtering outcomes for configured custom topics." }, "error": { "type": "object", "properties": { "code": { "type": "integer", "format": "int32", "description": "A distinct, machine-readable code associated with the error." }, "message": { "type": "string", "description": "A human-readable message associated with the error." } }, "required": [ "code", "message" ], "description": "If present, details about an error that prevented content filtering from completing its evaluation." }, "jailbreak": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that describes user prompt injection attacks, where malicious users deliberately exploit\nsystem vulnerabilities to elicit unauthorized behavior from the LLM. 
This could lead to inappropriate content\ngeneration or violations of system-imposed restrictions." }, "indirect_attack": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterDetectionResult" } ], "description": "A detection result that describes attacks on systems powered by Generative AI models that can happen every time\nan application processes information that wasn’t directly authored by either the developer of the application or\nthe user." } }, "required": [ "jailbreak", "indirect_attack" ], "description": "The content filter category details for the result." } }, "description": "A content filter result associated with a single input prompt item into a generative AI system." }, "AzureContentFilterSeverityResult": { "type": "object", "required": [ "filtered", "severity" ], "properties": { "filtered": { "type": "boolean", "description": "Whether the content severity resulted in a content filtering action." }, "severity": { "type": "string", "enum": [ "safe", "low", "medium", "high" ], "description": "The labeled severity of the content." } }, "description": "A labeled content filter result item that indicates whether the content was filtered and what the qualitative\nseverity level of the content was, as evaluated against content filter configuration for the category." }, "AzureCosmosDBChatDataSource": { "type": "object", "required": [ "type", "parameters" ], "properties": { "type": { "type": "string", "enum": [ "azure_cosmos_db" ], "description": "The discriminated type identifier, which is always 'azure_cosmos_db'." }, "parameters": { "type": "object", "properties": { "top_n_documents": { "type": "integer", "format": "int32", "description": "The configured number of documents to feature in the query." }, "in_scope": { "type": "boolean", "description": "Whether queries should be restricted to use of the indexed data." 
}, "strictness": { "type": "integer", "format": "int32", "minimum": 1, "maximum": 5, "description": "The configured strictness of the search relevance filtering.\nHigher strictness will increase precision but lower recall of the answer." }, "max_search_queries": { "type": "integer", "format": "int32", "description": "The maximum number of rewritten queries that should be sent to the search provider for a single user message.\nBy default, the system will make an automatic determination." }, "allow_partial_result": { "type": "boolean", "description": "If set to true, the system will allow partial search results to be used and the request will fail if all\npartial queries fail. If not specified or specified as false, the request will fail if any search query fails.", "default": false }, "include_contexts": { "type": "array", "items": { "type": "string", "enum": [ "citations", "intent", "all_retrieved_documents" ] }, "maxItems": 3, "description": "The output context properties to include on the response.\nBy default, citations and intent will be requested.", "default": [ "citations", "intent" ] }, "container_name": { "type": "string" }, "database_name": { "type": "string" }, "embedding_dependency": { "$ref": "#/components/schemas/AzureChatDataSourceVectorizationSource" }, "index_name": { "type": "string" }, "authentication": { "$ref": "#/components/schemas/AzureChatDataSourceConnectionStringAuthenticationOptions" }, "fields_mapping": { "type": "object", "properties": { "content_fields": { "type": "array", "items": { "type": "string" } }, "vector_fields": { "type": "array", "items": { "type": "string" } }, "title_field": { "type": "string" }, "url_field": { "type": "string" }, "filepath_field": { "type": "string" }, "content_fields_separator": { "type": "string" } }, "required": [ "content_fields", "vector_fields" ] } }, "required": [ "container_name", "database_name", "embedding_dependency", "index_name", "authentication", "fields_mapping" ], "description": "The 
parameter information to control the use of the Azure CosmosDB data source." } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSource" } ], "description": "Represents a data source configuration that will use an Azure CosmosDB resource." }, "AzureCreateChatCompletionRequest": { "type": "object", "required": [ "messages", "model" ], "properties": { "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "temperature": { "type": "number", "format": "float", "nullable": true, "minimum": 0, "maximum": 2, "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both.", "default": 1 }, "top_p": { "type": "number", "format": "float", "nullable": true, "minimum": 0, "maximum": 1, "description": "An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability\nmass. So 0.1 means only the tokens comprising the top 10% probability mass\nare considered.\n\nWe generally recommend altering this or `temperature` but not both.", "default": 1 }, "user": { "type": "string", "description": "A unique identifier representing your end-user, which can help to\nmonitor and detect abuse." 
}, "top_logprobs": { "type": "integer", "format": "int32", "minimum": 0, "maximum": 20, "description": "An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability." }, "messages": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" }, "minItems": 1, "description": "A list of messages comprising the conversation so far. Depending on the\nmodel you use, different message types (modalities) are supported,\nlike text, images, and audio." }, "modalities": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/ResponseModalities" } ], "nullable": true }, "reasoning_effort": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ReasoningEffort" } ], "nullable": true, "default": "medium" }, "max_completion_tokens": { "type": "integer", "format": "int32", "nullable": true, "description": "An upper bound for the number of tokens that can be generated for a\ncompletion, including visible output tokens and reasoning tokens." }, "frequency_penalty": { "type": "number", "format": "float", "nullable": true, "minimum": -2, "maximum": 2, "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on\ntheir existing frequency in the text so far, decreasing the model's\nlikelihood to repeat the same line verbatim.", "default": 0 }, "presence_penalty": { "type": "number", "format": "float", "nullable": true, "minimum": -2, "maximum": 2, "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on\nwhether they appear in the text so far, increasing the model's likelihood\nto talk about new topics.", "default": 0 }, "response_format": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseFormat" } ], "description": "An object specifying the format that the model must output.\n\nSetting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables\nStructured Outputs which ensures the model will match your supplied JSON\nschema.\n\nSetting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures\nthe message the model generates is valid JSON.\n\n**Important:** when using JSON mode, you **must** also instruct the model\nto produce JSON yourself via a system or user message. Without this, the\nmodel may generate an unending stream of whitespace until the generation\nreaches the token limit, resulting in a long-running and seemingly \"stuck\"\nrequest. Also note that the message content may be partially cut off if\n`finish_reason=\"length\"`, which indicates the generation exceeded\n`max_tokens` or the conversation exceeded the max context length." }, "audio": { "type": "object", "properties": { "voice": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.VoiceIdsShared" } ], "description": "The voice the model uses to respond. Supported voices are\n`alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`." }, "format": { "type": "string", "enum": [ "wav", "aac", "mp3", "flac", "opus", "pcm16" ], "description": "Specifies the output audio format. Must be one of `wav`, `aac`, `mp3`, `flac`,\n`opus`, or `pcm16`." } }, "required": [ "voice", "format" ], "nullable": true, "description": "Parameters for audio output. Required when audio output is requested with\n`modalities: [\"audio\"]`." 
}, "store": { "type": "boolean", "nullable": true, "description": "Whether or not to store the output of this chat completion request for\nuse in model distillation or evals products.", "default": false }, "stream": { "type": "boolean", "nullable": true, "description": "If set to true, the model response data will be streamed to the client\nas it is generated using server-sent events.", "default": false }, "stop": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.StopConfiguration" } ], "nullable": true, "default": null }, "logit_bias": { "type": "object", "additionalProperties": { "type": "integer", "format": "int32" }, "nullable": true, "description": "Modify the likelihood of specified tokens appearing in the completion.\n\nAccepts a JSON object that maps tokens (specified by their token ID in the\ntokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling.\nThe exact effect will vary per model, but values between -1 and 1 should\ndecrease or increase likelihood of selection; values like -100 or 100\nshould result in a ban or exclusive selection of the relevant token.", "x-oaiTypeLabel": "map", "default": null }, "logprobs": { "type": "boolean", "nullable": true, "description": "Whether to return log probabilities of the output tokens or not. 
If true,\nreturns the log probabilities of each output token returned in the\n`content` of `message`.", "default": false }, "max_tokens": { "type": "integer", "format": "int32", "nullable": true, "description": "The maximum number of tokens that can be generated in the chat completion.\nThis value can be used to control costs for text generated via API.\n\nThis value is now deprecated in favor of `max_completion_tokens`, and is\nnot compatible with o1 series models.", "deprecated": true }, "n": { "type": "integer", "format": "int32", "nullable": true, "minimum": 1, "maximum": 128, "description": "How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.", "default": 1 }, "prediction": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatOutputPrediction" } ], "nullable": true, "description": "Configuration for a predicted output, which can greatly improve\nresponse times when large parts of the model response are known\nahead of time. This is most common when you are regenerating a\nfile with only minor changes to most of the content." }, "seed": { "type": "integer", "format": "int64", "nullable": true, "minimum": -9223372036854775808, "maximum": 9223372036854775807, "description": "This feature is in Beta.\nIf specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.\nDeterminism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend." 
}, "stream_options": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionStreamOptions" } ], "nullable": true, "default": null }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTool" }, "description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported." }, "tool_choice": { "$ref": "#/components/schemas/OpenAI.ChatCompletionToolChoiceOption" }, "parallel_tool_calls": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ParallelToolCalls" } ], "default": true }, "function_call": { "anyOf": [ { "type": "string", "enum": [ "none", "auto" ] }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionFunctionCallOption" } ], "description": "Deprecated in favor of `tool_choice`.\n\nControls which (if any) function is called by the model.\n\n`none` means the model will not call a function and instead generates a\nmessage.\n\n`auto` means the model can pick between generating a message or calling a\nfunction.\n\nSpecifying a particular function via `{\"name\": \"my_function\"}` forces the\nmodel to call that function.\n\n`none` is the default when no functions are present. `auto` is the default\nif functions are present.", "deprecated": true }, "functions": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionFunctions" }, "minItems": 1, "maxItems": 128, "description": "Deprecated in favor of `tools`.\n\nA list of functions the model may generate JSON inputs for.", "deprecated": true }, "model": { "type": "string", "description": "The model deployment identifier to use for the chat completion request." }, "data_sources": { "type": "array", "items": { "$ref": "#/components/schemas/AzureChatDataSource" }, "description": "The data sources to use for the On Your Data feature, exclusive to Azure OpenAI." 
}, "user_security_context": { "$ref": "#/components/schemas/AzureUserSecurityContext" } }, "description": "The extended request model for chat completions against the Azure OpenAI service.\nThis adds the ability to provide data sources for the On Your Data feature." }, "AzureCreateChatCompletionResponse": { "type": "object", "required": [ "id", "created", "model", "object", "choices" ], "properties": { "id": { "type": "string", "description": "A unique identifier for the chat completion." }, "created": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) of when the chat completion was created." }, "model": { "type": "string", "description": "The model used for the chat completion." }, "system_fingerprint": { "type": "string", "description": "This fingerprint represents the backend configuration that the model runs with.\n\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism." }, "object": { "type": "string", "enum": [ "chat.completion" ], "description": "The object type, which is always `chat.completion`." }, "usage": { "$ref": "#/components/schemas/OpenAI.CompletionUsage" }, "choices": { "type": "array", "items": { "type": "object", "properties": { "finish_reason": { "type": "string", "enum": [ "stop", "length", "tool_calls", "content_filter", "function_call" ], "description": "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function." }, "index": { "type": "integer", "format": "int32", "description": "The index of the choice in the list of choices." 
}, "logprobs": { "type": "object", "properties": { "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTokenLogprob" }, "nullable": true, "description": "A list of message content tokens with log probability information.", "readOnly": true }, "refusal": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTokenLogprob" }, "nullable": true, "description": "A list of message refusal tokens with log probability information.", "readOnly": true } }, "required": [ "content", "refusal" ], "nullable": true, "description": "Log probability information for the choice." }, "content_filter_results": { "$ref": "#/components/schemas/AzureContentFilterResultForChoice" }, "message": { "allOf": [ { "$ref": "#/components/schemas/AzureChatCompletionResponseMessage" } ], "description": "The chat completion response message." } }, "required": [ "finish_reason", "index", "logprobs", "message" ] } }, "prompt_filter_results": { "type": "array", "items": { "type": "object", "properties": { "prompt_index": { "type": "integer", "format": "int32", "description": "The index of the input prompt that this content filter result corresponds to." }, "content_filter_results": { "allOf": [ { "$ref": "#/components/schemas/AzureContentFilterResultForPrompt" } ], "description": "The content filter results associated with the indexed input prompt." } }, "required": [ "prompt_index", "content_filter_results" ] }, "description": "The Responsible AI content filter annotations associated with prompt inputs into chat completions." } }, "description": "The extended top-level chat completion response model for the Azure OpenAI service.\nThis model adds Responsible AI content filter annotations for prompt input." }, "AzureCreateChatCompletionStreamResponse": { "type": "object", "required": [ "id", "choices", "created", "model", "object" ], "properties": { "id": { "type": "string", "description": "A unique identifier for the chat completion. 
Each chunk has the same ID." }, "choices": { "type": "array", "items": { "type": "object", "properties": { "delta": { "$ref": "#/components/schemas/OpenAI.ChatCompletionStreamResponseDelta" }, "logprobs": { "type": "object", "properties": { "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTokenLogprob" }, "nullable": true, "description": "A list of message content tokens with log probability information.", "readOnly": true }, "refusal": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTokenLogprob" }, "nullable": true, "description": "A list of message refusal tokens with log probability information.", "readOnly": true } }, "required": [ "content", "refusal" ], "nullable": true, "description": "Log probability information for the choice." }, "finish_reason": { "type": "string", "enum": [ "stop", "length", "tool_calls", "content_filter", "function_call" ], "nullable": true, "description": "The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,\n`length` if the maximum number of tokens specified in the request was reached,\n`content_filter` if content was omitted due to a flag from our content filters,\n`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function." }, "index": { "type": "integer", "format": "int32", "description": "The index of the choice in the list of choices." } }, "required": [ "delta", "finish_reason", "index" ] }, "description": "A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the\nlast chunk if you set `stream_options: {\"include_usage\": true}`." }, "created": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp." 
}, "model": { "type": "string", "description": "The model to generate the completion." }, "system_fingerprint": { "type": "string", "description": "This fingerprint represents the backend configuration that the model runs with.\nCan be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism." }, "object": { "type": "string", "enum": [ "chat.completion.chunk" ], "description": "The object type, which is always `chat.completion.chunk`." }, "usage": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.CompletionUsage" } ], "nullable": true, "description": "An optional field that will only be present when you set\n`stream_options: {\"include_usage\": true}` in your request. When present, it\ncontains a null value **except for the last chunk** which contains the\ntoken usage statistics for the entire request.\n\n**NOTE:** If the stream is interrupted or cancelled, you may not\nreceive the final usage chunk which contains the total token usage for\nthe request." }, "delta": { "$ref": "#/components/schemas/AzureChatCompletionStreamResponseDelta" }, "content_filter_results": { "$ref": "#/components/schemas/AzureContentFilterResultForChoice" } } }, "AzureCreateEmbeddingRequest": { "type": "object", "required": [ "model", "input" ], "properties": { "model": { "type": "string", "description": "The model to use for the embedding request." }, "input": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "type": "string" } }, { "type": "array", "items": { "type": "integer", "format": "int32" } }, { "type": "array", "items": { "type": "array", "items": { "type": "integer", "format": "int32" } } } ], "description": "Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. 
The input must not exceed the max input tokens for the model (8192 tokens for all embedding models), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. In addition to the per-input token limit, all embedding models enforce a maximum of 300,000 tokens summed across all inputs in a single request." }, "encoding_format": { "type": "string", "enum": [ "float", "base64" ], "description": "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/).", "default": "float" }, "dimensions": { "type": "integer", "format": "int32", "minimum": 1, "description": "The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models." }, "user": { "type": "string", "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids)." } } }, "AzureCreateFileRequestMultiPart": { "type": "object", "required": [ "file", "expires_after", "purpose" ], "properties": { "file": { "type": "string", "format": "binary" }, "expires_after": { "type": "object", "properties": { "seconds": { "type": "integer", "format": "int32" }, "anchor": { "$ref": "#/components/schemas/AzureFileExpiryAnchor" } }, "required": [ "seconds", "anchor" ] }, "purpose": { "type": "string", "enum": [ "assistants", "batch", "fine-tune", "evals" ], "description": "The intended purpose of the uploaded file. 
One of: - `assistants`: Used in the Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for fine-tuning - `evals`: Used for eval data sets" } } }, "AzureCreateResponse": { "type": "object", "required": [ "model" ], "properties": { "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "temperature": { "type": "number", "format": "float", "nullable": true, "minimum": 0, "maximum": 2, "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both.", "default": 1 }, "top_p": { "type": "number", "format": "float", "nullable": true, "minimum": 0, "maximum": 1, "description": "An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability\nmass. So 0.1 means only the tokens comprising the top 10% probability mass\nare considered.\n\nWe generally recommend altering this or `temperature` but not both.", "default": 1 }, "user": { "type": "string", "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids)." }, "top_logprobs": { "type": "integer", "format": "int32", "minimum": 0, "maximum": 20, "description": "An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability." 
}, "previous_response_id": { "type": "string", "nullable": true, "description": "The unique ID of the previous response to the model. Use this to\ncreate multi-turn conversations. Learn more about\n[conversation state](/docs/guides/conversation-state)." }, "reasoning": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.Reasoning" } ], "nullable": true }, "background": { "type": "boolean", "nullable": true, "description": "Whether to run the model response in the background.\n[Learn more](/docs/guides/background).", "default": false }, "max_output_tokens": { "type": "integer", "format": "int32", "nullable": true, "description": "An upper bound for the number of tokens that can be generated for a response, including visible output tokens and [reasoning tokens](/docs/guides/reasoning)." }, "max_tool_calls": { "type": "integer", "format": "int32", "nullable": true, "description": "The maximum number of total calls to built-in tools that can be processed in a response. This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored." }, "text": { "type": "object", "properties": { "format": { "$ref": "#/components/schemas/OpenAI.ResponseTextFormatConfiguration" } }, "description": "Configuration options for a text response from the model. Can be plain\ntext or structured JSON data. Learn more:\n- [Text inputs and outputs](/docs/guides/text)\n- [Structured Outputs](/docs/guides/structured-outputs)" }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Tool" }, "description": "An array of tools the model may call while generating a response. 
You \ncan specify which tool to use by setting the `tool_choice` parameter.\n\nThe two categories of tools you can provide the model are:\n\n- **Built-in tools**: Tools that are provided by OpenAI that extend the\n model's capabilities, like file search.\n- **Function calls (custom tools)**: Functions that are defined by you,\n enabling the model to call your own code." }, "tool_choice": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceOptions" }, { "$ref": "#/components/schemas/OpenAI.ToolChoiceObject" } ], "description": "How the model should select which tool (or tools) to use when generating\na response. See the `tools` parameter to see how to specify which tools\nthe model can call." }, "prompt": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.Prompt" } ], "nullable": true }, "truncation": { "type": "string", "enum": [ "auto", "disabled" ], "nullable": true, "description": "The truncation strategy to use for the model response.\n- `auto`: If the context of this response and previous ones exceeds\n the model's context window size, the model will truncate the\n response to fit the context window by dropping input items in the\n middle of the conversation.\n- `disabled` (default): If a model response will exceed the context window\n size for a model, the request will fail with a 400 error.", "default": "disabled" }, "input": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ImplicitUserMessage" }, { "$ref": "#/components/schemas/OpenAI.ItemParam" } ] } } ], "description": "Text, image, or file inputs to the model, used to generate a response.\n\nLearn more:\n- [Text inputs and outputs](/docs/guides/text)\n- [Image inputs](/docs/guides/images)\n- [File inputs](/docs/guides/pdf-files)\n- [Conversation state](/docs/guides/conversation-state)\n- [Function calling](/docs/guides/function-calling)" }, "include": { "type": "array", "items": { "$ref": 
"#/components/schemas/OpenAI.Includable" }, "nullable": true, "description": "Specify additional output data to include in the model response. Currently\nsupported values are:\n- `code_interpreter_call.outputs`: Includes the outputs of python code execution\n in code interpreter tool call items.\n- `computer_call_output.output.image_url`: Include image urls from the computer call output.\n- `file_search_call.results`: Include the search results of\n the file search tool call.\n- `message.input_image.image_url`: Include image urls from the input message.\n- `message.output_text.logprobs`: Include logprobs with assistant messages.\n- `reasoning.encrypted_content`: Includes an encrypted version of reasoning\n tokens in reasoning item outputs. This enables reasoning items to be used in\n multi-turn conversations when using the Responses API statelessly (like\n when the `store` parameter is set to `false`, or when an organization is\n enrolled in the zero data retention program)." }, "parallel_tool_calls": { "type": "boolean", "nullable": true, "description": "Whether to allow the model to run tool calls in parallel.", "default": true }, "store": { "type": "boolean", "nullable": true, "description": "Whether to store the generated model response for later retrieval via\nAPI.", "default": true }, "instructions": { "type": "string", "nullable": true, "description": "A system (or developer) message inserted into the model's context.\n\nWhen using along with `previous_response_id`, the instructions from a previous\nresponse will not be carried over to the next response. This makes it simple\nto swap out system (or developer) messages in new responses." 
}, "stream": { "type": "boolean", "nullable": true, "description": "If set to true, the model response data will be streamed to the client\nas it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\nSee the [Streaming section below](/docs/api-reference/responses-streaming)\nfor more information.", "default": false }, "model": { "type": "string", "description": "The model deployment to use for the creation of this response." } } }, "AzureErrorResponse": { "type": "object", "properties": { "error": { "type": "object", "properties": { "code": { "type": "string", "description": "The distinct, machine-generated identifier for the error." }, "message": { "type": "string", "description": "A human-readable message associated with the error." }, "param": { "type": "string", "description": "If applicable, the request input parameter associated with the error" }, "type": { "type": "string", "enum": [ "error" ], "description": "The object type, always 'error.'" }, "inner_error": {} }, "description": "The error details." } } }, "AzureEvalAPICompletionsSamplingParams": { "type": "object", "properties": { "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionTool" } }, "parallel_tool_calls": { "type": "boolean" }, "response_format": { "$ref": "#/components/schemas/OpenAI.ResponseTextFormatConfiguration" } }, "allOf": [ { "$ref": "#/components/schemas/AzureEvalAPIModelSamplingParams" } ] }, "AzureEvalAPIModelSamplingParams": { "type": "object", "properties": { "seed": { "type": "integer", "format": "int32", "description": "A seed value to initialize the randomness during sampling." }, "temperature": { "type": "number", "format": "float", "description": "A higher temperature increases randomness in the outputs." }, "max_tokens": { "type": "integer", "format": "int32", "description": "The maximum number of tokens in the generated output." 
}, "top_p": { "type": "number", "format": "float", "description": "An alternative to temperature for nucleus sampling; 1.0 includes all tokens." }, "reasoning_effort": { "type": "string", "enum": [ "low", "medium", "high" ], "description": "Controls the level of reasoning effort applied during generation." } } }, "AzureEvalAPIResponseSamplingParams": { "type": "object", "properties": { "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Tool" } }, "parallel_tool_calls": { "type": "boolean" }, "response_format": { "$ref": "#/components/schemas/OpenAI.ResponseTextFormatConfiguration" } }, "allOf": [ { "$ref": "#/components/schemas/AzureEvalAPIModelSamplingParams" } ] }, "AzureFileExpiryAnchor": { "type": "string", "enum": [ "created_at" ] }, "AzureFineTuneReinforcementMethod": { "type": "object", "required": [ "grader" ], "properties": { "grader": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.GraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.GraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.GraderScoreModel" }, { "$ref": "#/components/schemas/OpenAI.GraderMulti" } ] }, "response_format": { "allOf": [ { "$ref": "#/components/schemas/ResponseFormatJSONSchemaRequest" } ], "description": "Response format to be used while sampling during RFT training" }, "hyperparameters": { "$ref": "#/components/schemas/OpenAI.FineTuneReinforcementHyperparameters" } } }, "AzureListFilesResponse": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ] }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/AzureOpenAIFile" } }, "first_id": { "type": "string" }, "last_id": { "type": "string" }, "has_more": { "type": "boolean" } } }, "AzureOpenAIFile": { "type": "object", "required": [ "id", "bytes", "created_at", "filename", "object", "purpose", "status" ], "properties": { "id": { "type": "string", "description": "The file 
identifier, which can be referenced in the API endpoints." }, "bytes": { "type": "integer", "format": "int64", "nullable": true, "description": "The size of the file, in bytes." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the file was created." }, "expires_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the file will expire." }, "filename": { "type": "string", "description": "The name of the file." }, "object": { "type": "string", "enum": [ "file" ], "description": "The object type, which is always `file`." }, "status_details": { "type": "string", "description": "Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`.", "deprecated": true }, "purpose": { "type": "string", "enum": [ "assistants", "assistants_output", "batch", "batch_output", "fine-tune", "fine-tune-results", "evals" ], "description": "The intended purpose of the file. Supported values are `assistants`, `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results` and `evals`." }, "status": { "type": "string", "enum": [ "uploaded", "pending", "running", "processed", "error", "deleting", "deleted" ] } } }, "AzurePiiSubCategoryResult": { "type": "object", "required": [ "sub_category", "filtered", "detected", "redacted" ], "properties": { "sub_category": { "type": "string", "description": "The PIIHarmSubCategory that was evaluated." }, "filtered": { "type": "boolean", "description": "Whether the content detection resulted in a content filtering action for this subcategory." }, "detected": { "type": "boolean", "description": "Whether the labeled content subcategory was detected in the content." }, "redacted": { "type": "boolean", "description": "Whether the content was redacted for this subcategory." } }, "description": "Result details for individual PIIHarmSubCategory(s)." 
}, "AzureResponse": { "type": "object", "required": [ "metadata", "temperature", "top_p", "user", "id", "object", "created_at", "error", "incomplete_details", "output", "instructions", "parallel_tool_calls", "model" ], "properties": { "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "nullable": true, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "temperature": { "type": "number", "format": "float", "nullable": true, "minimum": 0, "maximum": 2, "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both." }, "top_p": { "type": "number", "format": "float", "nullable": true, "minimum": 0, "maximum": 1, "description": "An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability\nmass. So 0.1 means only the tokens comprising the top 10% probability mass\nare considered.\n\nWe generally recommend altering this or `temperature` but not both." }, "user": { "type": "string", "nullable": true, "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids)." }, "top_logprobs": { "type": "integer", "format": "int32", "nullable": true, "description": "An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability." 
}, "previous_response_id": { "type": "string", "nullable": true, "description": "The unique ID of the previous response to the model. Use this to\ncreate multi-turn conversations. Learn more about\n[conversation state](/docs/guides/conversation-state)." }, "reasoning": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.Reasoning" } ], "nullable": true }, "background": { "type": "boolean", "nullable": true, "description": "Whether to run the model response in the background.\n[Learn more](/docs/guides/background).", "default": false }, "max_output_tokens": { "type": "integer", "format": "int32", "nullable": true, "description": "An upper bound for the number of tokens that can be generated for a response, including visible output tokens and [reasoning tokens](/docs/guides/reasoning)." }, "max_tool_calls": { "type": "integer", "format": "int32", "nullable": true, "description": "The maximum number of total calls to built-in tools that can be processed in a response. This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored." }, "text": { "type": "object", "properties": { "format": { "$ref": "#/components/schemas/OpenAI.ResponseTextFormatConfiguration" } }, "description": "Configuration options for a text response from the model. Can be plain\ntext or structured JSON data. Learn more:\n- [Text inputs and outputs](/docs/guides/text)\n- [Structured Outputs](/docs/guides/structured-outputs)" }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Tool" }, "description": "An array of tools the model may call while generating a response. 
You\ncan specify which tool to use by setting the `tool_choice` parameter.\n\nThe two categories of tools you can provide the model are:\n\n- **Built-in tools**: Tools that are provided by OpenAI that extend the\n model's capabilities, like [web search](/docs/guides/tools-web-search)\n or [file search](/docs/guides/tools-file-search). Learn more about\n [built-in tools](/docs/guides/tools).\n- **Function calls (custom tools)**: Functions that are defined by you,\n enabling the model to call your own code. Learn more about\n [function calling](/docs/guides/function-calling)." }, "tool_choice": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceOptions" }, { "$ref": "#/components/schemas/OpenAI.ToolChoiceObject" } ], "description": "How the model should select which tool (or tools) to use when generating\na response. See the `tools` parameter to see how to specify which tools\nthe model can call." }, "prompt": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.Prompt" } ], "nullable": true }, "truncation": { "type": "string", "enum": [ "auto", "disabled" ], "nullable": true, "description": "The truncation strategy to use for the model response.\n- `auto`: If the context of this response and previous ones exceeds\n the model's context window size, the model will truncate the\n response to fit the context window by dropping input items in the\n middle of the conversation.\n- `disabled` (default): If a model response will exceed the context window\n size for a model, the request will fail with a 400 error.", "default": "disabled" }, "id": { "type": "string", "description": "Unique identifier for this Response." }, "object": { "type": "string", "enum": [ "response" ], "description": "The object type of this resource - always set to `response`." }, "status": { "type": "string", "enum": [ "completed", "failed", "in_progress", "cancelled", "queued", "incomplete" ], "description": "The status of the response generation. 
One of `completed`, `failed`,\n`in_progress`, `cancelled`, `queued`, or `incomplete`." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) of when this Response was created." }, "error": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseError" } ], "nullable": true }, "incomplete_details": { "type": "object", "properties": { "reason": { "type": "string", "enum": [ "max_output_tokens", "content_filter" ], "description": "The reason why the response is incomplete." } }, "nullable": true, "description": "Details about why the response is incomplete." }, "output": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemResource" }, "description": "An array of content items generated by the model.\n\n- The length and order of items in the `output` array is dependent\n on the model's response.\n- Rather than accessing the first item in the `output` array and\n assuming it's an `assistant` message with the content generated by\n the model, you might consider using the `output_text` property where\n supported in SDKs." }, "instructions": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemParam" } } ], "nullable": true, "description": "A system (or developer) message inserted into the model's context.\n\nWhen using along with `previous_response_id`, the instructions from a previous\nresponse will not be carried over to the next response. This makes it simple\nto swap out system (or developer) messages in new responses." }, "output_text": { "type": "string", "nullable": true, "description": "SDK-only convenience property that contains the aggregated text output\nfrom all `output_text` items in the `output` array, if any are present.\nSupported in the Python and JavaScript SDKs." 
}, "usage": { "$ref": "#/components/schemas/OpenAI.ResponseUsage" }, "parallel_tool_calls": { "type": "boolean", "description": "Whether to allow the model to run tool calls in parallel.", "default": true }, "model": { "type": "string", "description": "The model used to generate this response." } } }, "AzureSearchChatDataSource": { "type": "object", "required": [ "type", "parameters" ], "properties": { "type": { "type": "string", "enum": [ "azure_search" ], "description": "The discriminated type identifier, which is always 'azure_search'." }, "parameters": { "type": "object", "properties": { "top_n_documents": { "type": "integer", "format": "int32", "description": "The configured number of documents to feature in the query." }, "in_scope": { "type": "boolean", "description": "Whether queries should be restricted to use of the indexed data." }, "strictness": { "type": "integer", "format": "int32", "minimum": 1, "maximum": 5, "description": "The configured strictness of the search relevance filtering.\nHigher strictness will increase precision but lower recall of the answer." }, "max_search_queries": { "type": "integer", "format": "int32", "description": "The maximum number of rewritten queries that should be sent to the search provider for a single user message.\nBy default, the system will make an automatic determination." }, "allow_partial_result": { "type": "boolean", "description": "If set to true, the system will allow partial search results to be used and the request will fail if all\npartial queries fail. 
If not specified or specified as false, the request will fail if any search query fails.", "default": false }, "include_contexts": { "type": "array", "items": { "type": "string", "enum": [ "citations", "intent", "all_retrieved_documents" ] }, "maxItems": 3, "description": "The output context properties to include on the response.\nBy default, citations and intent will be requested.", "default": [ "citations", "intent" ] }, "endpoint": { "type": "string", "format": "uri", "description": "The absolute endpoint path for the Azure Search resource to use." }, "index_name": { "type": "string", "description": "The name of the index to use, as specified in the Azure Search resource." }, "authentication": { "anyOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceApiKeyAuthenticationOptions" }, { "$ref": "#/components/schemas/AzureChatDataSourceSystemAssignedManagedIdentityAuthenticationOptions" }, { "$ref": "#/components/schemas/AzureChatDataSourceUserAssignedManagedIdentityAuthenticationOptions" }, { "$ref": "#/components/schemas/AzureChatDataSourceAccessTokenAuthenticationOptions" } ], "description": "The authentication mechanism to use with Azure Search." }, "fields_mapping": { "type": "object", "properties": { "title_field": { "type": "string", "description": "The name of the index field to use as a title." }, "url_field": { "type": "string", "description": "The name of the index field to use as a URL." }, "filepath_field": { "type": "string", "description": "The name of the index field to use as a filepath." }, "content_fields": { "type": "array", "items": { "type": "string" }, "description": "The names of index fields that should be treated as content." }, "content_fields_separator": { "type": "string", "description": "The separator pattern that content fields should use." }, "vector_fields": { "type": "array", "items": { "type": "string" }, "description": "The names of fields that represent vector data." 
}, "image_vector_fields": { "type": "array", "items": { "type": "string" }, "description": "The names of fields that represent image vector data." } }, "description": "The field mappings to use with the Azure Search resource." }, "query_type": { "type": "string", "enum": [ "simple", "semantic", "vector", "vector_simple_hybrid", "vector_semantic_hybrid" ], "description": "The query type for the Azure Search resource to use." }, "semantic_configuration": { "type": "string", "description": "Additional semantic configuration for the query." }, "filter": { "type": "string", "description": "A filter to apply to the search." }, "embedding_dependency": { "anyOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceEndpointVectorizationSource" }, { "$ref": "#/components/schemas/AzureChatDataSourceDeploymentNameVectorizationSource" }, { "$ref": "#/components/schemas/AzureChatDataSourceIntegratedVectorizationSource" } ], "description": "The vectorization source to use with Azure Search.\nSupported sources for Azure Search include endpoint, deployment name, and integrated." } }, "required": [ "endpoint", "index_name", "authentication" ], "description": "The parameter information to control the use of the Azure Search data source." } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSource" } ], "description": "Represents a data source configuration that will use an Azure Search resource." }, "AzureUserSecurityContext": { "type": "object", "properties": { "application_name": { "type": "string", "description": "The name of the application. Sensitive personal information should not be included in this field." }, "end_user_id": { "type": "string", "description": "This identifier is the Microsoft Entra ID (formerly Azure Active Directory) user object ID used to authenticate end-users within the generative AI application. Sensitive personal information should not be included in this field." 
}, "end_user_tenant_id": { "type": "string", "description": "The Microsoft 365 tenant ID the end user belongs to. It's required when the generative AI application is multitenant." }, "source_ip": { "type": "string", "description": "Captures the original client's IP address." } }, "description": "User security context contains several parameters that describe the application itself, and the end user that interacts with the application. These fields assist your security operations teams to investigate and mitigate security incidents by providing a comprehensive approach to protecting your AI applications. [Learn more](https://aka.ms/TP4AI/Documentation/EndUserContext) about protecting AI applications using Microsoft Defender for Cloud." }, "ChatCompletionMessageToolCallsItem": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageToolCall" }, "description": "The tool calls generated by the model, such as function calls." }, "CopiedAccountDetails": { "type": "object", "required": [ "destinationResourceId", "region", "status" ], "properties": { "destinationResourceId": { "type": "string", "description": "The ID of the destination resource where the model was copied to." }, "region": { "type": "string", "description": "The region where the model was copied to." }, "status": { "type": "string", "enum": [ "Completed", "Failed", "InProgress" ], "description": "The status of the copy operation." } } }, "CopyModelRequest": { "type": "object", "required": [ "destinationResourceId", "region" ], "properties": { "destinationResourceId": { "type": "string", "description": "The ID of the destination Resource to copy." }, "region": { "type": "string", "description": "The region to copy the model to." } } }, "CopyModelResponse": { "type": "object", "required": [ "checkpointedModelName", "fineTuningJobId", "copiedAccountDetails" ], "properties": { "checkpointedModelName": { "type": "string", "description": "The ID of the copied model." 
}, "fineTuningJobId": { "type": "string", "description": "The ID of the fine-tuning job that the checkpoint was copied from." }, "copiedAccountDetails": { "type": "array", "items": { "$ref": "#/components/schemas/CopiedAccountDetails" }, "description": "The ID of the destination resource id where it was copied" } } }, "ElasticsearchChatDataSource": { "type": "object", "required": [ "type", "parameters" ], "properties": { "type": { "type": "string", "enum": [ "elasticsearch" ], "description": "The discriminated type identifier, which is always 'elasticsearch'." }, "parameters": { "type": "object", "properties": { "top_n_documents": { "type": "integer", "format": "int32", "description": "The configured number of documents to feature in the query." }, "in_scope": { "type": "boolean", "description": "Whether queries should be restricted to use of the indexed data." }, "strictness": { "type": "integer", "format": "int32", "minimum": 1, "maximum": 5, "description": "The configured strictness of the search relevance filtering.\nHigher strictness will increase precision but lower recall of the answer." }, "max_search_queries": { "type": "integer", "format": "int32", "description": "The maximum number of rewritten queries that should be sent to the search provider for a single user message.\nBy default, the system will make an automatic determination." }, "allow_partial_result": { "type": "boolean", "description": "If set to true, the system will allow partial search results to be used and the request will fail if all\npartial queries fail. 
If not specified or specified as false, the request will fail if any search query fails.", "default": false }, "include_contexts": { "type": "array", "items": { "type": "string", "enum": [ "citations", "intent", "all_retrieved_documents" ] }, "maxItems": 3, "description": "The output context properties to include on the response.\nBy default, citations and intent will be requested.", "default": [ "citations", "intent" ] }, "endpoint": { "type": "string", "format": "uri" }, "index_name": { "type": "string" }, "authentication": { "anyOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceKeyAndKeyIdAuthenticationOptions" }, { "$ref": "#/components/schemas/AzureChatDataSourceEncodedApiKeyAuthenticationOptions" } ] }, "fields_mapping": { "type": "object", "properties": { "title_field": { "type": "string" }, "url_field": { "type": "string" }, "filepath_field": { "type": "string" }, "content_fields": { "type": "array", "items": { "type": "string" } }, "content_fields_separator": { "type": "string" }, "vector_fields": { "type": "array", "items": { "type": "string" } } } }, "query_type": { "type": "string", "enum": [ "simple", "vector" ] }, "embedding_dependency": { "$ref": "#/components/schemas/AzureChatDataSourceVectorizationSource" } }, "required": [ "endpoint", "index_name", "authentication" ], "description": "The parameter information to control the use of the Elasticsearch data source." } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSource" } ] }, "MongoDBChatDataSource": { "type": "object", "required": [ "type", "parameters" ], "properties": { "type": { "type": "string", "enum": [ "mongo_db" ], "description": "The discriminated type identifier, which is always 'mongo_db'." }, "parameters": { "type": "object", "properties": { "top_n_documents": { "type": "integer", "format": "int32", "description": "The configured number of documents to feature in the query." 
}, "in_scope": { "type": "boolean", "description": "Whether queries should be restricted to use of the indexed data." }, "strictness": { "type": "integer", "format": "int32", "minimum": 1, "maximum": 5, "description": "The configured strictness of the search relevance filtering.\nHigher strictness will increase precision but lower recall of the answer." }, "max_search_queries": { "type": "integer", "format": "int32", "description": "The maximum number of rewritten queries that should be sent to the search provider for a single user message.\nBy default, the system will make an automatic determination." }, "allow_partial_result": { "type": "boolean", "description": "If set to true, the system will allow partial search results to be used and the request will fail if all\npartial queries fail. If not specified or specified as false, the request will fail if any search query fails.", "default": false }, "include_contexts": { "type": "array", "items": { "type": "string", "enum": [ "citations", "intent", "all_retrieved_documents" ] }, "maxItems": 3, "description": "The output context properties to include on the response.\nBy default, citations and intent will be requested.", "default": [ "citations", "intent" ] }, "endpoint": { "type": "string", "description": "The name of the MongoDB cluster endpoint." }, "database_name": { "type": "string", "description": "The name of the MongoDB database." }, "collection_name": { "type": "string", "description": "The name of the MongoDB collection." }, "app_name": { "type": "string", "description": "The name of the MongoDB application." }, "index_name": { "type": "string", "description": "The name of the MongoDB index." }, "authentication": { "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceUsernameAndPasswordAuthenticationOptions" } ], "description": "The authentication mechanism to use with Pinecone.\nSupported authentication mechanisms for Pinecone include: username and password." 
}, "embedding_dependency": { "anyOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceEndpointVectorizationSource" }, { "$ref": "#/components/schemas/AzureChatDataSourceDeploymentNameVectorizationSource" } ], "description": "The vectorization source to use as an embedding dependency for the MongoDB data source.\nSupported vectorization sources for MongoDB include: endpoint, deployment name." }, "fields_mapping": { "type": "object", "properties": { "content_fields": { "type": "array", "items": { "type": "string" } }, "vector_fields": { "type": "array", "items": { "type": "string" } }, "title_field": { "type": "string" }, "url_field": { "type": "string" }, "filepath_field": { "type": "string" }, "content_fields_separator": { "type": "string" } }, "required": [ "content_fields", "vector_fields" ], "description": "Field mappings to apply to data used by the MongoDB data source.\nNote that content and vector field mappings are required for MongoDB." } }, "required": [ "endpoint", "database_name", "collection_name", "app_name", "index_name", "authentication", "embedding_dependency", "fields_mapping" ], "description": "The parameter information to control the use of the MongoDB data source." } }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSource" } ] }, "OpenAI.Annotation": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.AnnotationType" } }, "discriminator": { "propertyName": "type", "mapping": { "file_citation": "#/components/schemas/OpenAI.AnnotationFileCitation", "url_citation": "#/components/schemas/OpenAI.AnnotationUrlCitation", "file_path": "#/components/schemas/OpenAI.AnnotationFilePath" } } }, "OpenAI.AnnotationFileCitation": { "type": "object", "required": [ "type", "file_id", "index", "filename" ], "properties": { "type": { "type": "string", "enum": [ "file_citation" ], "description": "The type of the file citation. Always `file_citation`." 
}, "file_id": { "type": "string", "description": "The ID of the file." }, "index": { "type": "integer", "format": "int32", "description": "The index of the file in the list of files." }, "filename": { "type": "string", "description": "The filename of the file cited." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Annotation" } ], "description": "A citation to a file." }, "OpenAI.AnnotationFilePath": { "type": "object", "required": [ "type", "file_id", "index" ], "properties": { "type": { "type": "string", "enum": [ "file_path" ], "description": "The type of the file path. Always `file_path`." }, "file_id": { "type": "string", "description": "The ID of the file." }, "index": { "type": "integer", "format": "int32", "description": "The index of the file in the list of files." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Annotation" } ], "description": "A path to a file." }, "OpenAI.AnnotationType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "file_citation", "url_citation", "file_path", "container_file_citation" ] } ] }, "OpenAI.AnnotationUrlCitation": { "type": "object", "required": [ "type", "url", "start_index", "end_index", "title" ], "properties": { "type": { "type": "string", "enum": [ "url_citation" ], "description": "The type of the URL citation. Always `url_citation`." }, "url": { "type": "string", "format": "uri", "description": "The URL of the web resource." }, "start_index": { "type": "integer", "format": "int32", "description": "The index of the first character of the URL citation in the message." }, "end_index": { "type": "integer", "format": "int32", "description": "The index of the last character of the URL citation in the message." }, "title": { "type": "string", "description": "The title of the web resource." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Annotation" } ], "description": "A citation for a web resource used to generate a model response." 
}, "OpenAI.ApproximateLocation": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "approximate" ] }, "country": { "type": "string", "nullable": true }, "region": { "type": "string", "nullable": true }, "city": { "type": "string", "nullable": true }, "timezone": { "type": "string", "nullable": true } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Location" } ] }, "OpenAI.AutoChunkingStrategyRequestParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "auto" ], "description": "Always `auto`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyRequestParam" } ], "description": "The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`." }, "OpenAI.ChatCompletionFunctionCallOption": { "type": "object", "required": [ "name" ], "properties": { "name": { "type": "string", "description": "The name of the function to call." } }, "description": "Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function." }, "OpenAI.ChatCompletionFunctions": { "type": "object", "required": [ "name" ], "properties": { "description": { "type": "string", "description": "A description of what the function does, used by the model to choose when and how to call the function." }, "name": { "type": "string", "description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64." }, "parameters": { "description": "The parameters the functions accepts, described as a JSON Schema object.\nSee the [JSON Schema reference](https://json-schema.org/understanding-json-schema/)\nfor documentation about the format.\n\nOmitting `parameters` defines a function with an empty parameter list." 
} }, "deprecated": true }, "OpenAI.ChatCompletionMessageAudioChunk": { "type": "object", "properties": { "id": { "type": "string" }, "transcript": { "type": "string" }, "data": { "type": "string", "format": "base64" }, "expires_at": { "type": "integer", "format": "unixtime" } } }, "OpenAI.ChatCompletionMessageToolCall": { "type": "object", "required": [ "id", "type", "function" ], "properties": { "id": { "type": "string", "description": "The ID of the tool call." }, "type": { "type": "string", "enum": [ "function" ], "description": "The type of the tool. Currently, only `function` is supported." }, "function": { "type": "object", "properties": { "name": { "type": "string", "description": "The name of the function to call." }, "arguments": { "type": "string", "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function." } }, "required": [ "name", "arguments" ], "description": "The function that the model called." } } }, "OpenAI.ChatCompletionMessageToolCallChunk": { "type": "object", "required": [ "index" ], "properties": { "index": { "type": "integer", "format": "int32" }, "id": { "type": "string", "description": "The ID of the tool call." }, "type": { "type": "string", "enum": [ "function" ], "description": "The type of the tool. Currently, only `function` is supported." }, "function": { "type": "object", "properties": { "name": { "type": "string", "description": "The name of the function to call." }, "arguments": { "type": "string", "description": "The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function." 
} } } } }, "OpenAI.ChatCompletionNamedToolChoice": { "type": "object", "required": [ "type", "function" ], "properties": { "type": { "type": "string", "enum": [ "function" ], "description": "The type of the tool. Currently, only `function` is supported." }, "function": { "type": "object", "properties": { "name": { "type": "string", "description": "The name of the function to call." } }, "required": [ "name" ] } }, "description": "Specifies a tool the model should use. Use to force the model to call a specific function." }, "OpenAI.ChatCompletionRequestAssistantMessage": { "type": "object", "required": [ "role" ], "properties": { "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestAssistantMessageContentPart" } } ], "nullable": true, "description": "The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified." }, "refusal": { "type": "string", "nullable": true, "description": "The refusal message by the assistant." }, "role": { "type": "string", "enum": [ "assistant" ], "description": "The role of the messages author, in this case `assistant`." }, "name": { "type": "string", "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." }, "audio": { "type": "object", "properties": { "id": { "type": "string", "description": "Unique identifier for a previous audio response from the model." } }, "required": [ "id" ], "nullable": true, "description": "Data about a previous audio response from the model." }, "tool_calls": { "$ref": "#/components/schemas/ChatCompletionMessageToolCallsItem" }, "function_call": { "type": "object", "properties": { "name": { "type": "string" }, "arguments": { "type": "string" } }, "required": [ "name", "arguments" ], "nullable": true, "description": "Deprecated and replaced by `tool_calls`. 
The name and arguments of a function that should be called, as generated by the model.", "deprecated": true } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ], "description": "Messages sent by the model in response to user messages." }, "OpenAI.ChatCompletionRequestAssistantMessageContentPart": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartText" }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartRefusal" } ] }, "OpenAI.ChatCompletionRequestDeveloperMessage": { "type": "object", "required": [ "content", "role" ], "properties": { "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartText" } } ], "description": "The contents of the developer message." }, "role": { "type": "string", "enum": [ "developer" ], "description": "The role of the messages author, in this case `developer`." }, "name": { "type": "string", "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ], "description": "Developer-provided instructions that the model should follow, regardless of\nmessages sent by the user. With o1 models and newer, `developer` messages\nreplace the previous `system` messages." }, "OpenAI.ChatCompletionRequestFunctionMessage": { "type": "object", "required": [ "role", "content", "name" ], "properties": { "role": { "type": "string", "enum": [ "function" ], "description": "The role of the messages author, in this case `function`." }, "content": { "type": "string", "nullable": true, "description": "The contents of the function message." }, "name": { "type": "string", "description": "The name of the function to call." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ], "deprecated": true }, "OpenAI.ChatCompletionRequestMessage": { "type": "object", "required": [ "role" ], "properties": { "role": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRole" } ], "description": "The role of the author of this message." }, "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPart" } } ], "nullable": true, "description": "The content of the message. Valid content part types vary per message role." } }, "discriminator": { "propertyName": "role", "mapping": { "system": "#/components/schemas/OpenAI.ChatCompletionRequestSystemMessage", "developer": "#/components/schemas/OpenAI.ChatCompletionRequestDeveloperMessage", "user": "#/components/schemas/OpenAI.ChatCompletionRequestUserMessage", "assistant": "#/components/schemas/OpenAI.ChatCompletionRequestAssistantMessage", "tool": "#/components/schemas/OpenAI.ChatCompletionRequestToolMessage", "function": "#/components/schemas/OpenAI.ChatCompletionRequestFunctionMessage" } } }, "OpenAI.ChatCompletionRequestMessageContentPart": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartType" } }, "discriminator": { "propertyName": "type", "mapping": { "text": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartText", "image_url": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartImage", "refusal": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartRefusal", "file": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartFile", "input_audio": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartAudio" } } }, "OpenAI.ChatCompletionRequestMessageContentPartAudio": { "type": "object", "required": [ "type", "input_audio" ], "properties": { 
"type": { "type": "string", "enum": [ "input_audio" ], "description": "The type of the content part. Always `input_audio`." }, "input_audio": { "type": "object", "properties": { "data": { "type": "string", "format": "base64", "description": "Base64 encoded audio data." }, "format": { "type": "string", "enum": [ "wav", "mp3" ], "description": "The format of the encoded audio data. Currently supports \"wav\" and \"mp3\"." } }, "required": [ "data", "format" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPart" } ], "description": "Learn about [audio inputs](/docs/guides/audio)." }, "OpenAI.ChatCompletionRequestMessageContentPartFile": { "type": "object", "required": [ "type", "file" ], "properties": { "type": { "type": "string", "enum": [ "file" ], "description": "The type of the content part. Always `file`." }, "file": { "type": "object", "properties": { "filename": { "type": "string", "description": "The name of the file, used when passing the file to the model as a\nstring." }, "file_data": { "type": "string", "description": "The base64 encoded file data, used when passing the file to the model\nas a string." }, "file_id": { "type": "string", "description": "The ID of an uploaded file to use as input." } } } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPart" } ], "description": "Learn about [file inputs](/docs/guides/text) for text generation." }, "OpenAI.ChatCompletionRequestMessageContentPartImage": { "type": "object", "required": [ "type", "image_url" ], "properties": { "type": { "type": "string", "enum": [ "image_url" ], "description": "The type of the content part." }, "image_url": { "type": "object", "properties": { "url": { "type": "string", "format": "uri", "description": "Either a URL of the image or the base64 encoded image data." }, "detail": { "type": "string", "enum": [ "auto", "low", "high" ], "description": "Specifies the detail level of the image. 
Learn more in the [Vision guide](/docs/guides/vision#low-or-high-fidelity-image-understanding).", "default": "auto" } }, "required": [ "url" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPart" } ], "description": "Learn about [image inputs](/docs/guides/vision)." }, "OpenAI.ChatCompletionRequestMessageContentPartRefusal": { "type": "object", "required": [ "type", "refusal" ], "properties": { "type": { "type": "string", "enum": [ "refusal" ], "description": "The type of the content part." }, "refusal": { "type": "string", "description": "The refusal message generated by the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPart" } ] }, "OpenAI.ChatCompletionRequestMessageContentPartText": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "The type of the content part." }, "text": { "type": "string", "description": "The text content." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPart" } ], "description": "Learn about [text inputs](/docs/guides/text-generation)." }, "OpenAI.ChatCompletionRequestMessageContentPartType": { "type": "string", "enum": [ "text", "file", "input_audio", "image_url", "refusal" ] }, "OpenAI.ChatCompletionRequestSystemMessage": { "type": "object", "required": [ "content", "role" ], "properties": { "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestSystemMessageContentPart" } } ], "description": "The contents of the system message." }, "role": { "type": "string", "enum": [ "system" ], "description": "The role of the messages author, in this case `system`." }, "name": { "type": "string", "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ], "description": "Developer-provided instructions that the model should follow, regardless of\nmessages sent by the user. With o1 models and newer, use `developer` messages\nfor this purpose instead." }, "OpenAI.ChatCompletionRequestSystemMessageContentPart": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartText" }, "OpenAI.ChatCompletionRequestToolMessage": { "type": "object", "required": [ "role", "content", "tool_call_id" ], "properties": { "role": { "type": "string", "enum": [ "tool" ], "description": "The role of the messages author, in this case `tool`." }, "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestToolMessageContentPart" } } ], "description": "The contents of the tool message." }, "tool_call_id": { "type": "string", "description": "Tool call that this message is responding to." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ] }, "OpenAI.ChatCompletionRequestToolMessageContentPart": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartText" }, "OpenAI.ChatCompletionRequestUserMessage": { "type": "object", "required": [ "content", "role" ], "properties": { "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestUserMessageContentPart" } } ], "description": "The contents of the user message." }, "role": { "type": "string", "enum": [ "user" ], "description": "The role of the messages author, in this case `user`." }, "name": { "type": "string", "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessage" } ], "description": "Messages sent by an end user, containing prompts or additional context\ninformation." }, "OpenAI.ChatCompletionRequestUserMessageContentPart": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartText" }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartImage" }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartAudio" }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartFile" } ] }, "OpenAI.ChatCompletionRole": { "type": "string", "enum": [ "system", "developer", "user", "assistant", "tool", "function" ], "description": "The role of the author of a message" }, "OpenAI.ChatCompletionStreamOptions": { "type": "object", "properties": { "include_usage": { "type": "boolean", "description": "If set, an additional chunk will be streamed before the `data: [DONE]`\nmessage. The `usage` field on this chunk shows the token usage statistics\nfor the entire request, and the `choices` field will always be an empty\narray.\n\nAll other chunks will also include a `usage` field, but with a null\nvalue. **NOTE:** If the stream is interrupted, you may not receive the\nfinal usage chunk which contains the total token usage for the request." } }, "description": "Options for streaming response. Only set this when you set `stream: true`." }, "OpenAI.ChatCompletionStreamResponseDelta": { "type": "object", "properties": { "audio": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageAudioChunk" } ], "description": "Response audio associated with the streaming chat delta payload." }, "content": { "type": "string", "nullable": true, "description": "The contents of the chunk message." 
}, "function_call": { "type": "object", "properties": { "name": { "type": "string" }, "arguments": { "type": "string" } }, "description": "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.", "deprecated": true }, "tool_calls": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionMessageToolCallChunk" }, "readOnly": true }, "role": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatCompletionRole" } ], "description": "The role of the author of this message." }, "refusal": { "type": "string", "nullable": true, "description": "The refusal message generated by the model." } }, "description": "A chat completion delta generated by streamed model responses." }, "OpenAI.ChatCompletionTokenLogprob": { "type": "object", "required": [ "token", "logprob", "bytes", "top_logprobs" ], "properties": { "token": { "type": "string", "description": "The token." }, "logprob": { "type": "number", "format": "float", "description": "The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely." }, "bytes": { "type": "array", "items": { "type": "integer", "format": "int32" }, "nullable": true, "description": "A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token." }, "top_logprobs": { "type": "array", "items": { "type": "object", "properties": { "token": { "type": "string", "description": "The token." }, "logprob": { "type": "number", "format": "float", "description": "The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely." 
}, "bytes": { "type": "array", "items": { "type": "integer", "format": "int32" }, "nullable": true, "description": "A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token." } }, "required": [ "token", "logprob", "bytes" ] }, "description": "List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned." } } }, "OpenAI.ChatCompletionTool": { "type": "object", "required": [ "type", "function" ], "properties": { "type": { "type": "string", "enum": [ "function" ], "description": "The type of the tool. Currently, only `function` is supported." }, "function": { "$ref": "#/components/schemas/OpenAI.FunctionObject" } } }, "OpenAI.ChatCompletionToolChoiceOption": { "anyOf": [ { "type": "string", "enum": [ "none", "auto", "required" ] }, { "$ref": "#/components/schemas/OpenAI.ChatCompletionNamedToolChoice" } ], "description": "Controls which (if any) tool is called by the model.\n`none` means the model will not call any tool and instead generates a message.\n`auto` means the model can pick between generating a message or calling one or more tools.\n`required` means the model must call one or more tools.\nSpecifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool.\n\n`none` is the default when no tools are present. `auto` is the default if tools are present." 
}, "OpenAI.ChatOutputPrediction": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ChatOutputPredictionType" } }, "discriminator": { "propertyName": "type", "mapping": { "content": "#/components/schemas/OpenAI.ChatOutputPredictionContent" } }, "description": "Base representation of predicted output from a model." }, "OpenAI.ChatOutputPredictionContent": { "type": "object", "required": [ "type", "content" ], "properties": { "type": { "type": "string", "enum": [ "content" ], "description": "The type of the predicted content you want to provide. This type is\ncurrently always `content`." }, "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ChatCompletionRequestMessageContentPartText" } } ], "description": "The content that should be matched when generating a model response.\nIf generated tokens would match this content, the entire model response\ncan be returned much more quickly." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChatOutputPrediction" } ], "description": "Static predicted output content, such as the content of a text file that is\nbeing regenerated." }, "OpenAI.ChatOutputPredictionType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "content" ] } ] }, "OpenAI.ChunkingStrategyRequestParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "auto", "static" ], "description": "The type of chunking strategy." } }, "discriminator": { "propertyName": "type", "mapping": { "static": "#/components/schemas/OpenAI.StaticChunkingStrategyRequestParam" } }, "description": "The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy." 
}, "OpenAI.ChunkingStrategyResponseParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "static", "other" ] } }, "discriminator": { "propertyName": "type", "mapping": { "other": "#/components/schemas/OpenAI.OtherChunkingStrategyResponseParam", "static": "#/components/schemas/OpenAI.StaticChunkingStrategyResponseParam" } } }, "OpenAI.CodeInterpreterOutput": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.CodeInterpreterOutputType" } }, "discriminator": { "propertyName": "type", "mapping": { "image": "#/components/schemas/OpenAI.CodeInterpreterOutputImage", "logs": "#/components/schemas/OpenAI.CodeInterpreterOutputLogs" } } }, "OpenAI.CodeInterpreterOutputImage": { "type": "object", "required": [ "type", "url" ], "properties": { "type": { "type": "string", "enum": [ "image" ], "description": "The type of the output. Always 'image'." }, "url": { "type": "string", "format": "uri", "description": "The URL of the image output from the code interpreter." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.CodeInterpreterOutput" } ], "description": "The image output from the code interpreter." }, "OpenAI.CodeInterpreterOutputLogs": { "type": "object", "required": [ "type", "logs" ], "properties": { "type": { "type": "string", "enum": [ "logs" ], "description": "The type of the output. Always 'logs'." }, "logs": { "type": "string", "description": "The logs output from the code interpreter." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.CodeInterpreterOutput" } ], "description": "The logs output from the code interpreter." }, "OpenAI.CodeInterpreterOutputType": { "type": "string", "enum": [ "logs", "image" ] }, "OpenAI.CodeInterpreterTool": { "type": "object", "required": [ "type", "container" ], "properties": { "type": { "type": "string", "enum": [ "code_interpreter" ], "description": "The type of the code interpreter tool. Always `code_interpreter`." 
}, "container": { "anyOf": [ { "type": "string" }, { "$ref": "#/components/schemas/OpenAI.CodeInterpreterToolAuto" } ], "description": "The code interpreter container. Can be a container ID or an object that\nspecifies uploaded file IDs to make available to your code." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A tool that runs Python code to help generate a response to a prompt." }, "OpenAI.CodeInterpreterToolAuto": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "auto" ], "description": "Always `auto`." }, "file_ids": { "type": "array", "items": { "type": "string" }, "description": "An optional list of uploaded files to make available to your code." } }, "description": "Configuration for a code interpreter container. Optionally specify the IDs\nof the files to run the code on." }, "OpenAI.CodeInterpreterToolCallItemParam": { "type": "object", "required": [ "type", "container_id", "code", "outputs" ], "properties": { "type": { "type": "string", "enum": [ "code_interpreter_call" ] }, "container_id": { "type": "string", "description": "The ID of the container used to run the code." }, "code": { "type": "string", "nullable": true, "description": "The code to run, or null if not available." }, "outputs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CodeInterpreterOutput" }, "nullable": true, "description": "The outputs generated by the code interpreter, such as logs or images.\nCan be null if no outputs are available." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "A tool call to run code.\n" }, "OpenAI.CodeInterpreterToolCallItemResource": { "type": "object", "required": [ "type", "status", "container_id", "code", "outputs" ], "properties": { "type": { "type": "string", "enum": [ "code_interpreter_call" ] }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete", "interpreting", "failed" ] }, "container_id": { "type": "string", "description": "The ID of the container used to run the code." }, "code": { "type": "string", "nullable": true, "description": "The code to run, or null if not available." }, "outputs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CodeInterpreterOutput" }, "nullable": true, "description": "The outputs generated by the code interpreter, such as logs or images.\nCan be null if no outputs are available." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A tool call to run code.\n" }, "OpenAI.ComparisonFilter": { "type": "object", "required": [ "type", "key", "value" ], "properties": { "type": { "type": "string", "enum": [ "eq", "ne", "gt", "gte", "lt", "lte" ], "description": "Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.\n- `eq`: equals\n- `ne`: not equal\n- `gt`: greater than\n- `gte`: greater than or equal\n- `lt`: less than\n- `lte`: less than or equal", "default": "eq" }, "key": { "type": "string", "description": "The key to compare against the value." }, "value": { "anyOf": [ { "type": "string" }, { "type": "number", "format": "float" }, { "type": "boolean" } ], "description": "The value to compare against the attribute key; supports string, number, or boolean types." } }, "description": "A filter used to compare a specified attribute key to a given value using a defined comparison operation." 
}, "OpenAI.CompletionUsage": { "type": "object", "required": [ "completion_tokens", "prompt_tokens", "total_tokens" ], "properties": { "completion_tokens": { "type": "integer", "format": "int32", "description": "Number of tokens in the generated completion.", "default": 0 }, "prompt_tokens": { "type": "integer", "format": "int32", "description": "Number of tokens in the prompt.", "default": 0 }, "total_tokens": { "type": "integer", "format": "int32", "description": "Total number of tokens used in the request (prompt + completion).", "default": 0 }, "completion_tokens_details": { "type": "object", "properties": { "accepted_prediction_tokens": { "type": "integer", "format": "int32", "description": "When using Predicted Outputs, the number of tokens in the\nprediction that appeared in the completion.", "default": 0 }, "audio_tokens": { "type": "integer", "format": "int32", "description": "Audio output tokens generated by the model.", "default": 0 }, "reasoning_tokens": { "type": "integer", "format": "int32", "description": "Tokens generated by the model for reasoning.", "default": 0 }, "rejected_prediction_tokens": { "type": "integer", "format": "int32", "description": "When using Predicted Outputs, the number of tokens in the\nprediction that did not appear in the completion. However, like\nreasoning tokens, these tokens are still counted in the total\ncompletion tokens for purposes of billing, output, and context window\nlimits.", "default": 0 } }, "description": "Breakdown of tokens used in a completion." }, "prompt_tokens_details": { "type": "object", "properties": { "audio_tokens": { "type": "integer", "format": "int32", "description": "Audio input tokens present in the prompt.", "default": 0 }, "cached_tokens": { "type": "integer", "format": "int32", "description": "Cached tokens present in the prompt.", "default": 0 } }, "description": "Breakdown of tokens used in the prompt." } }, "description": "Usage statistics for the completion request." 
}, "OpenAI.CompoundFilter": { "type": "object", "required": [ "type", "filters" ], "properties": { "type": { "type": "string", "enum": [ "and", "or" ], "description": "Type of operation: `and` or `or`." }, "filters": { "type": "array", "items": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ComparisonFilter" }, { "$ref": "#/components/schemas/OpenAI.CompoundFilter" } ] }, "description": "Array of filters to combine. Items can be `ComparisonFilter` or `CompoundFilter`." } }, "description": "Combine multiple filters using `and` or `or`." }, "OpenAI.ComputerAction": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ComputerActionType" } }, "discriminator": { "propertyName": "type", "mapping": { "click": "#/components/schemas/OpenAI.ComputerActionClick", "double_click": "#/components/schemas/OpenAI.ComputerActionDoubleClick", "drag": "#/components/schemas/OpenAI.ComputerActionDrag", "move": "#/components/schemas/OpenAI.ComputerActionMove", "screenshot": "#/components/schemas/OpenAI.ComputerActionScreenshot", "scroll": "#/components/schemas/OpenAI.ComputerActionScroll", "type": "#/components/schemas/OpenAI.ComputerActionTypeKeys", "wait": "#/components/schemas/OpenAI.ComputerActionWait", "keypress": "#/components/schemas/OpenAI.ComputerActionKeyPress" } } }, "OpenAI.ComputerActionClick": { "type": "object", "required": [ "type", "button", "x", "y" ], "properties": { "type": { "type": "string", "enum": [ "click" ], "description": "Specifies the event type. For a click action, this property is\nalways set to `click`." }, "button": { "type": "string", "enum": [ "left", "right", "wheel", "back", "forward" ], "description": "Indicates which mouse button was pressed during the click. One of `left`, `right`, `wheel`, `back`, or `forward`." }, "x": { "type": "integer", "format": "int32", "description": "The x-coordinate where the click occurred." 
}, "y": { "type": "integer", "format": "int32", "description": "The y-coordinate where the click occurred." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A click action." }, "OpenAI.ComputerActionDoubleClick": { "type": "object", "required": [ "type", "x", "y" ], "properties": { "type": { "type": "string", "enum": [ "double_click" ], "description": "Specifies the event type. For a double click action, this property is\nalways set to `double_click`." }, "x": { "type": "integer", "format": "int32", "description": "The x-coordinate where the double click occurred." }, "y": { "type": "integer", "format": "int32", "description": "The y-coordinate where the double click occurred." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A double click action." }, "OpenAI.ComputerActionDrag": { "type": "object", "required": [ "type", "path" ], "properties": { "type": { "type": "string", "enum": [ "drag" ], "description": "Specifies the event type. For a drag action, this property is\nalways set to `drag`." }, "path": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Coordinate" }, "description": "An array of coordinates representing the path of the drag action. Coordinates will appear as an array\nof objects, eg\n```\n[\n { x: 100, y: 200 },\n { x: 200, y: 300 }\n]\n```" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A drag action." }, "OpenAI.ComputerActionKeyPress": { "type": "object", "required": [ "type", "keys" ], "properties": { "type": { "type": "string", "enum": [ "keypress" ], "description": "Specifies the event type. For a keypress action, this property is\nalways set to `keypress`." }, "keys": { "type": "array", "items": { "type": "string" }, "description": "The combination of keys the model is requesting to be pressed. This is an\narray of strings, each representing a key." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A collection of keypresses the model would like to perform." }, "OpenAI.ComputerActionMove": { "type": "object", "required": [ "type", "x", "y" ], "properties": { "type": { "type": "string", "enum": [ "move" ], "description": "Specifies the event type. For a move action, this property is\nalways set to `move`." }, "x": { "type": "integer", "format": "int32", "description": "The x-coordinate to move to." }, "y": { "type": "integer", "format": "int32", "description": "The y-coordinate to move to." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A mouse move action." }, "OpenAI.ComputerActionScreenshot": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "screenshot" ], "description": "Specifies the event type. For a screenshot action, this property is\nalways set to `screenshot`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A screenshot action." }, "OpenAI.ComputerActionScroll": { "type": "object", "required": [ "type", "x", "y", "scroll_x", "scroll_y" ], "properties": { "type": { "type": "string", "enum": [ "scroll" ], "description": "Specifies the event type. For a scroll action, this property is\nalways set to `scroll`." }, "x": { "type": "integer", "format": "int32", "description": "The x-coordinate where the scroll occurred." }, "y": { "type": "integer", "format": "int32", "description": "The y-coordinate where the scroll occurred." }, "scroll_x": { "type": "integer", "format": "int32", "description": "The horizontal scroll distance." }, "scroll_y": { "type": "integer", "format": "int32", "description": "The vertical scroll distance." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A scroll action." 
}, "OpenAI.ComputerActionType": { "type": "string", "enum": [ "screenshot", "click", "double_click", "scroll", "type", "wait", "keypress", "drag", "move" ] }, "OpenAI.ComputerActionTypeKeys": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "type" ], "description": "Specifies the event type. For a type action, this property is\nalways set to `type`." }, "text": { "type": "string", "description": "The text to type." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "An action to type in text." }, "OpenAI.ComputerActionWait": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "wait" ], "description": "Specifies the event type. For a wait action, this property is\nalways set to `wait`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerAction" } ], "description": "A wait action." }, "OpenAI.ComputerToolCallItemParam": { "type": "object", "required": [ "type", "call_id", "action", "pending_safety_checks" ], "properties": { "type": { "type": "string", "enum": [ "computer_call" ] }, "call_id": { "type": "string", "description": "An identifier used when responding to the tool call with output." }, "action": { "$ref": "#/components/schemas/OpenAI.ComputerAction" }, "pending_safety_checks": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ComputerToolCallSafetyCheck" }, "description": "The pending safety checks for the computer call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "A tool call to a computer use tool. 
See the\n[computer use guide](/docs/guides/tools-computer-use) for more information.\n" }, "OpenAI.ComputerToolCallItemResource": { "type": "object", "required": [ "type", "status", "call_id", "action", "pending_safety_checks" ], "properties": { "type": { "type": "string", "enum": [ "computer_call" ] }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n`incomplete`. Populated when items are returned via API." }, "call_id": { "type": "string", "description": "An identifier used when responding to the tool call with output." }, "action": { "$ref": "#/components/schemas/OpenAI.ComputerAction" }, "pending_safety_checks": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ComputerToolCallSafetyCheck" }, "description": "The pending safety checks for the computer call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A tool call to a computer use tool. 
See the\n[computer use guide](/docs/guides/tools-computer-use) for more information.\n" }, "OpenAI.ComputerToolCallOutputItemOutput": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ComputerToolCallOutputItemOutputType" } }, "discriminator": { "propertyName": "type", "mapping": { "computer_screenshot": "#/components/schemas/OpenAI.ComputerToolCallOutputItemOutputComputerScreenshot" } } }, "OpenAI.ComputerToolCallOutputItemOutputComputerScreenshot": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "computer_screenshot" ] }, "image_url": { "type": "string" }, "file_id": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ComputerToolCallOutputItemOutput" } ] }, "OpenAI.ComputerToolCallOutputItemOutputType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "computer_screenshot" ] } ], "description": "A computer screenshot image used with the computer use tool." }, "OpenAI.ComputerToolCallOutputItemParam": { "type": "object", "required": [ "type", "call_id", "output" ], "properties": { "type": { "type": "string", "enum": [ "computer_call_output" ] }, "call_id": { "type": "string", "description": "The ID of the computer tool call that produced the output." }, "acknowledged_safety_checks": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ComputerToolCallSafetyCheck" }, "description": "The safety checks reported by the API that have been acknowledged by the\ndeveloper." 
}, "output": { "$ref": "#/components/schemas/OpenAI.ComputerToolCallOutputItemOutput" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "The output of a computer tool call.\n" }, "OpenAI.ComputerToolCallOutputItemResource": { "type": "object", "required": [ "type", "status", "call_id", "output" ], "properties": { "type": { "type": "string", "enum": [ "computer_call_output" ] }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n`incomplete`. Populated when items are returned via API." }, "call_id": { "type": "string", "description": "The ID of the computer tool call that produced the output." }, "acknowledged_safety_checks": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ComputerToolCallSafetyCheck" }, "description": "The safety checks reported by the API that have been acknowledged by the\ndeveloper." }, "output": { "$ref": "#/components/schemas/OpenAI.ComputerToolCallOutputItemOutput" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "The output of a computer tool call.\n" }, "OpenAI.ComputerToolCallSafetyCheck": { "type": "object", "required": [ "id", "code", "message" ], "properties": { "id": { "type": "string", "description": "The ID of the pending safety check." }, "code": { "type": "string", "description": "The type of the pending safety check." }, "message": { "type": "string", "description": "Details about the pending safety check." } }, "description": "A pending safety check for the computer call." }, "OpenAI.ComputerUsePreviewTool": { "type": "object", "required": [ "type", "environment", "display_width", "display_height" ], "properties": { "type": { "type": "string", "enum": [ "computer_use_preview" ], "description": "The type of the computer use tool. Always `computer_use_preview`." 
}, "environment": { "type": "string", "enum": [ "windows", "mac", "linux", "ubuntu", "browser" ], "description": "The type of computer environment to control." }, "display_width": { "type": "integer", "format": "int32", "description": "The width of the computer display." }, "display_height": { "type": "integer", "format": "int32", "description": "The height of the computer display." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A tool that controls a virtual computer. Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use)." }, "OpenAI.ContainerFileListResource": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of object returned, must be 'list'." }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ContainerFileResource" }, "description": "A list of container files." }, "first_id": { "type": "string", "description": "The ID of the first file in the list." }, "last_id": { "type": "string", "description": "The ID of the last file in the list." }, "has_more": { "type": "boolean", "description": "Whether there are more files available." } } }, "OpenAI.ContainerFileResource": { "type": "object", "required": [ "id", "object", "container_id", "created_at", "bytes", "path", "source" ], "properties": { "id": { "type": "string", "description": "Unique identifier for the file." }, "object": { "type": "string", "description": "The type of this object (`container.file`)." }, "container_id": { "type": "string", "description": "The container this file belongs to." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) when the file was created." }, "bytes": { "type": "integer", "format": "int32", "description": "Size of the file in bytes." 
}, "path": { "type": "string", "description": "Path of the file in the container." }, "source": { "type": "string", "description": "Source of the file (e.g., `user`, `assistant`)." } } }, "OpenAI.ContainerListResource": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of object returned, must be 'list'." }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ContainerResource" }, "description": "A list of containers." }, "first_id": { "type": "string", "description": "The ID of the first container in the list." }, "last_id": { "type": "string", "description": "The ID of the last container in the list." }, "has_more": { "type": "boolean", "description": "Whether there are more containers available." } } }, "OpenAI.ContainerResource": { "type": "object", "required": [ "id", "object", "name", "created_at", "status" ], "properties": { "id": { "type": "string", "description": "Unique identifier for the container." }, "object": { "type": "string", "description": "The type of this object." }, "name": { "type": "string", "description": "Name of the container." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) when the container was created." }, "status": { "type": "string", "description": "Status of the container (e.g., active, deleted)." }, "expires_after": { "type": "object", "properties": { "anchor": { "type": "string", "enum": [ "last_active_at" ], "description": "The reference point for the expiration." }, "minutes": { "type": "integer", "format": "int32", "description": "The number of minutes after the anchor before the container expires." } }, "description": "The container will expire after this time period.\nThe anchor is the reference point for the expiration.\nThe minutes is the number of minutes after the anchor before the container expires." 
} } }, "OpenAI.Coordinate": { "type": "object", "required": [ "x", "y" ], "properties": { "x": { "type": "integer", "format": "int32", "description": "The x-coordinate." }, "y": { "type": "integer", "format": "int32", "description": "The y-coordinate." } }, "description": "An x/y coordinate pair, e.g. `{ x: 100, y: 200 }`." }, "OpenAI.CreateContainerBody": { "type": "object", "required": [ "name" ], "properties": { "name": { "type": "string", "description": "Name of the container to create." }, "file_ids": { "type": "array", "items": { "type": "string" }, "description": "IDs of files to copy to the container." }, "expires_after": { "type": "object", "properties": { "anchor": { "type": "string", "enum": [ "last_active_at" ], "description": "Time anchor for the expiration time. Currently only 'last_active_at' is supported." }, "minutes": { "type": "integer", "format": "int32" } }, "required": [ "anchor", "minutes" ], "description": "Container expiration time in seconds relative to the 'anchor' time." } } }, "OpenAI.CreateContainerFileBodyMultiPart": { "type": "object", "properties": { "file_id": { "type": "string", "description": "Name of the file to create." }, "file": { "type": "string", "format": "binary" } } }, "OpenAI.CreateEmbeddingResponse": { "type": "object", "required": [ "data", "model", "object", "usage" ], "properties": { "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Embedding" }, "description": "The list of embeddings generated by the model." }, "model": { "type": "string", "description": "The name of the model used to generate the embedding." }, "object": { "type": "string", "enum": [ "list" ], "description": "The object type, which is always \"list\"." }, "usage": { "type": "object", "properties": { "prompt_tokens": { "type": "integer", "format": "int32", "description": "The number of tokens used by the prompt." 
}, "total_tokens": { "type": "integer", "format": "int32", "description": "The total number of tokens used by the request." } }, "required": [ "prompt_tokens", "total_tokens" ], "description": "The usage information for the request." } } }, "OpenAI.CreateEvalItem": { "anyOf": [ { "type": "object", "properties": { "role": { "type": "string", "description": "The role of the message (e.g. \"system\", \"assistant\", \"user\")." }, "content": { "type": "string", "description": "The content of the message." } }, "required": [ "role", "content" ] }, { "$ref": "#/components/schemas/OpenAI.EvalItem" } ], "description": "A chat message that makes up the prompt or context. May include variable references to the `item` namespace, ie {{item.name}}." }, "OpenAI.CreateEvalRunRequest": { "type": "object", "required": [ "data_source" ], "properties": { "name": { "type": "string", "description": "The name of the run." }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "data_source": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataSourceParams" } ], "description": "Details about the run's data source." } } }, "OpenAI.CreateFineTuningJobRequest": { "type": "object", "required": [ "model", "training_file" ], "properties": { "model": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "babbage-002", "davinci-002", "gpt-3.5-turbo", "gpt-4o-mini" ] } ], "description": "The name of the model to fine-tune. 
You can select one of the\n[supported models](/docs/guides/fine-tuning#which-models-can-be-fine-tuned).", "x-oaiTypeLabel": "string" }, "training_file": { "type": "string", "description": "The ID of an uploaded file that contains training data.\n\nSee [upload file](/docs/api-reference/files/create) for how to upload a file.\n\nYour dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`.\n\nThe contents of the file should differ depending on if the model uses the [chat](/docs/api-reference/fine-tuning/chat-input), [completions](/docs/api-reference/fine-tuning/completions-input) format, or if the fine-tuning method uses the [preference](/docs/api-reference/fine-tuning/preference-input) format.\n\nSee the [fine-tuning guide](/docs/guides/model-optimization) for more details." }, "hyperparameters": { "type": "object", "properties": { "batch_size": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "description": "Number of examples in each batch. A larger batch size means that model parameters\nare updated less frequently, but with lower variance.", "default": "auto" }, "learning_rate_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number", "format": "float" } ], "description": "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid\noverfitting.", "default": "auto" }, "n_epochs": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "description": "The number of epochs to train the model for. 
An epoch refers to one full cycle\nthrough the training dataset.", "default": "auto" } }, "description": "The hyperparameters used for the fine-tuning job.\nThis value is now deprecated in favor of `method`, and should be passed in under the `method` parameter.", "deprecated": true }, "suffix": { "type": "string", "nullable": true, "minLength": 1, "maxLength": 64, "description": "A string of up to 64 characters that will be added to your fine-tuned model name.\n\nFor example, a `suffix` of \"custom-model-name\" would produce a model name like `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.", "default": null }, "validation_file": { "type": "string", "nullable": true, "description": "The ID of an uploaded file that contains validation data.\n\nIf you provide this file, the data is used to generate validation\nmetrics periodically during fine-tuning. These metrics can be viewed in\nthe fine-tuning results file.\nThe same data should not be present in both train and validation files.\n\nYour dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`.\n\nSee the [fine-tuning guide](/docs/guides/model-optimization) for more details." }, "integrations": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CreateFineTuningJobRequestIntegration" }, "nullable": true, "description": "A list of integrations to enable for your fine-tuning job." }, "seed": { "type": "integer", "format": "int32", "nullable": true, "minimum": 0, "maximum": 2147483647, "description": "The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases.\nIf a seed is not specified, one will be generated for you." }, "method": { "$ref": "#/components/schemas/OpenAI.FineTuneMethod" }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "description": "Set of 16 key-value pairs that can be attached to an object. 
This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" } } }, "OpenAI.CreateFineTuningJobRequestIntegration": { "type": "object", "required": [ "type" ], "properties": { "type": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "wandb" ] } ] } }, "discriminator": { "propertyName": "type", "mapping": { "wandb": "#/components/schemas/OpenAI.CreateFineTuningJobRequestWandbIntegration" } } }, "OpenAI.CreateFineTuningJobRequestWandbIntegration": { "type": "object", "required": [ "type", "wandb" ], "properties": { "type": { "type": "string", "enum": [ "wandb" ] }, "wandb": { "type": "object", "properties": { "project": { "type": "string" }, "name": { "type": "string", "nullable": true }, "entity": { "type": "string", "nullable": true }, "tags": { "type": "array", "items": { "type": "string" } } }, "required": [ "project" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.CreateFineTuningJobRequestIntegration" } ] }, "OpenAI.CreateVectorStoreFileBatchRequest": { "type": "object", "required": [ "file_ids" ], "properties": { "file_ids": { "type": "array", "items": { "type": "string" }, "minItems": 1, "maxItems": 500, "description": "A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files." 
}, "chunking_strategy": { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyRequestParam" }, "attributes": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" } ], "nullable": true } } }, "OpenAI.CreateVectorStoreFileRequest": { "type": "object", "required": [ "file_id" ], "properties": { "file_id": { "type": "string", "description": "A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files." }, "chunking_strategy": { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyRequestParam" }, "attributes": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" } ], "nullable": true } } }, "OpenAI.CreateVectorStoreRequest": { "type": "object", "properties": { "file_ids": { "type": "array", "items": { "type": "string" }, "maxItems": 500, "description": "A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files." }, "name": { "type": "string", "description": "The name of the vector store." }, "expires_after": { "$ref": "#/components/schemas/OpenAI.VectorStoreExpirationAfter" }, "chunking_strategy": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.AutoChunkingStrategyRequestParam" }, { "$ref": "#/components/schemas/OpenAI.StaticChunkingStrategyRequestParam" } ], "description": "The chunking strategy used to chunk the file(s). If not set, will use the `auto` strategy. Only applicable if `file_ids` is non-empty." }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. 
Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" } } }, "OpenAI.DeleteFileResponse": { "type": "object", "required": [ "id", "object", "deleted" ], "properties": { "id": { "type": "string" }, "object": { "type": "string", "enum": [ "file" ] }, "deleted": { "type": "boolean" } } }, "OpenAI.DeleteVectorStoreFileResponse": { "type": "object", "required": [ "id", "deleted", "object" ], "properties": { "id": { "type": "string" }, "deleted": { "type": "boolean" }, "object": { "type": "string", "enum": [ "vector_store.file.deleted" ] } } }, "OpenAI.DeleteVectorStoreResponse": { "type": "object", "required": [ "id", "deleted", "object" ], "properties": { "id": { "type": "string" }, "deleted": { "type": "boolean" }, "object": { "type": "string", "enum": [ "vector_store.deleted" ] } } }, "OpenAI.Embedding": { "type": "object", "required": [ "index", "embedding", "object" ], "properties": { "index": { "type": "integer", "format": "int32", "description": "The index of the embedding in the list of embeddings." }, "embedding": { "anyOf": [ { "type": "array", "items": { "type": "number" } }, { "type": "string" } ], "description": "The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings)." }, "object": { "type": "string", "enum": [ "embedding" ], "description": "The object type, which is always \"embedding\"." } }, "description": "Represents an embedding vector returned by embedding endpoint." }, "OpenAI.Eval": { "type": "object", "required": [ "object", "id", "name", "data_source_config", "testing_criteria", "created_at", "metadata" ], "properties": { "object": { "type": "string", "enum": [ "eval" ], "description": "The object type.", "default": "eval" }, "id": { "type": "string", "description": "Unique identifier for the evaluation." }, "name": { "type": "string", "description": "The name of the evaluation." 
}, "data_source_config": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalDataSourceConfigResource" } ], "description": "Configuration of data sources used in runs of the evaluation." }, "testing_criteria": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalGraderResource" }, "nullable": true, "description": "A list of testing criteria.", "default": null }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the eval was created." }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "nullable": true, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" } }, "description": "An Eval object with a data source config and testing criteria.\nAn Eval represents a task to be done for your LLM integration.\nLike:\n- Improve the quality of my chatbot\n- See how well my chatbot handles customer support\n- Check if o4-mini is better at my usecase than gpt-4o" }, "OpenAI.EvalApiError": { "type": "object", "required": [ "code", "message" ], "properties": { "code": { "type": "string", "description": "The error code." }, "message": { "type": "string", "description": "The error message." } }, "description": "An object representing an error response from the Eval API." }, "OpenAI.EvalCompletionsRunDataSourceParams": { "type": "object", "required": [ "type", "source" ], "properties": { "type": { "type": "string", "enum": [ "completions" ], "description": "The type of run data source. Always `completions`." 
}, "input_messages": { "anyOf": [ { "type": "object", "properties": { "type": { "type": "string", "enum": [ "template" ], "description": "The type of input messages. Always `template`." }, "template": { "type": "array", "items": { "anyOf": [ {}, { "$ref": "#/components/schemas/OpenAI.EvalItem" } ] }, "description": "A list of chat messages forming the prompt or context. May include variable references to the `item` namespace, ie {{item.name}}." } }, "required": [ "type", "template" ] }, { "type": "object", "properties": { "type": { "type": "string", "enum": [ "item_reference" ], "description": "The type of input messages. Always `item_reference`." }, "item_reference": { "type": "string", "description": "A reference to a variable in the `item` namespace. Ie, \"item.input_trajectory\"" } }, "required": [ "type", "item_reference" ] } ], "description": "Used when sampling from a model. Dictates the structure of the messages passed into the model. Can either be a reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template with variable references to the `item` namespace." }, "sampling_params": { "$ref": "#/components/schemas/AzureEvalAPICompletionsSamplingParams" }, "model": { "type": "string", "description": "The name of the model to use for generating completions (e.g. \"o3-mini\")." }, "source": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunFileContentDataContentSource" }, { "$ref": "#/components/schemas/OpenAI.EvalRunFileIdDataContentSource" }, { "$ref": "#/components/schemas/OpenAI.EvalRunStoredCompletionsDataContentSource" } ], "description": "Determines what populates the `item` namespace in this run's data source." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataSourceParams" } ], "description": "A CompletionsRunDataSource object describing a model sampling configuration." 
}, "OpenAI.EvalCustomDataSourceConfigParams": { "type": "object", "required": [ "type", "item_schema" ], "properties": { "type": { "type": "string", "enum": [ "custom" ], "description": "The type of data source. Always `custom`." }, "item_schema": { "type": "object", "additionalProperties": {}, "description": "The json schema for each row in the data source." }, "include_sample_schema": { "type": "boolean", "description": "Whether the eval should expect you to populate the sample namespace (ie, by generating responses off of your data source)", "default": false } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalDataSourceConfigParams" } ], "description": "A CustomDataSourceConfig object that defines the schema for the data source used for the evaluation runs.\nThis schema is used to define the shape of the data that will be:\n- Used to define your testing criteria and\n- What data is required when creating a run" }, "OpenAI.EvalCustomDataSourceConfigResource": { "type": "object", "required": [ "type", "schema" ], "properties": { "type": { "type": "string", "enum": [ "custom" ], "description": "The type of data source. Always `custom`." }, "schema": { "type": "object", "additionalProperties": {}, "description": "The json schema for the run data source items.\nLearn how to build JSON schemas [here](https://json-schema.org/)." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalDataSourceConfigResource" } ], "description": "A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces.\nThe response schema defines the shape of the data that will be:\n- Used to define your testing criteria and\n- What data is required when creating a run" }, "OpenAI.EvalDataSourceConfigParams": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.EvalDataSourceConfigType" } }, "discriminator": { "propertyName": "type", "mapping": { "custom": "#/components/schemas/OpenAI.EvalCustomDataSourceConfigParams", "logs": "#/components/schemas/OpenAI.EvalLogsDataSourceConfigParams", "stored_completions": "#/components/schemas/OpenAI.EvalStoredCompletionsDataSourceConfigParams" } } }, "OpenAI.EvalDataSourceConfigResource": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.EvalDataSourceConfigType" } }, "discriminator": { "propertyName": "type", "mapping": { "custom": "#/components/schemas/OpenAI.EvalCustomDataSourceConfigResource", "stored_completions": "#/components/schemas/OpenAI.EvalStoredCompletionsDataSourceConfigResource", "logs": "#/components/schemas/OpenAI.EvalLogsDataSourceConfigResource" } } }, "OpenAI.EvalDataSourceConfigType": { "type": "string", "enum": [ "custom", "logs", "stored_completions" ] }, "OpenAI.EvalGraderLabelModelParams": { "type": "object", "required": [ "type", "name", "model", "input", "labels", "passing_labels" ], "properties": { "type": { "type": "string", "enum": [ "label_model" ], "description": "The object type, which is always `label_model`." }, "name": { "type": "string", "description": "The name of the grader." }, "model": { "type": "string", "description": "The model to use for the evaluation. Must support structured outputs." 
}, "input": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.CreateEvalItem" }, "description": "A list of chat messages forming the prompt or context. May include variable references to the `item` namespace, ie {{item.name}}." }, "labels": { "type": "array", "items": { "type": "string" }, "description": "The labels to classify to each item in the evaluation." }, "passing_labels": { "type": "array", "items": { "type": "string" }, "description": "The labels that indicate a passing result. Must be a subset of labels." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalGraderParams" } ], "description": "A LabelModelGrader object which uses a model to assign labels to each item\nin the evaluation." }, "OpenAI.EvalGraderLabelModelResource": { "type": "object", "required": [ "type", "name", "model", "input", "labels", "passing_labels" ], "properties": { "type": { "type": "string", "enum": [ "label_model" ], "description": "The object type, which is always `label_model`." }, "name": { "type": "string", "description": "The name of the grader." }, "model": { "type": "string", "description": "The model to use for the evaluation. Must support structured outputs." }, "input": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalItem" } }, "labels": { "type": "array", "items": { "type": "string" }, "description": "The labels to assign to each item in the evaluation." }, "passing_labels": { "type": "array", "items": { "type": "string" }, "description": "The labels that indicate a passing result. Must be a subset of labels." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalGraderResource" } ] }, "OpenAI.EvalGraderParams": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.GraderType" } }, "discriminator": { "propertyName": "type", "mapping": { "label_model": "#/components/schemas/OpenAI.EvalGraderLabelModelParams", "string_check": "#/components/schemas/OpenAI.EvalGraderStringCheckParams", "text_similarity": "#/components/schemas/OpenAI.EvalGraderTextSimilarityParams", "python": "#/components/schemas/OpenAI.EvalGraderPythonParams", "score_model": "#/components/schemas/OpenAI.EvalGraderScoreModelParams" } } }, "OpenAI.EvalGraderPythonParams": { "type": "object", "required": [ "type", "name", "source" ], "properties": { "type": { "type": "string", "enum": [ "python" ], "description": "The object type, which is always `python`." }, "name": { "type": "string", "description": "The name of the grader." }, "source": { "type": "string", "description": "The source code of the python script." }, "image_tag": { "type": "string", "description": "The image tag to use for the python script." }, "pass_threshold": { "type": "number", "format": "float", "description": "The threshold for the score." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalGraderParams" } ] }, "OpenAI.EvalGraderPythonResource": { "type": "object", "required": [ "type", "name", "source" ], "properties": { "type": { "type": "string", "enum": [ "python" ], "description": "The object type, which is always `python`." }, "name": { "type": "string", "description": "The name of the grader." }, "source": { "type": "string", "description": "The source code of the python script." }, "image_tag": { "type": "string", "description": "The image tag to use for the python script." }, "pass_threshold": { "type": "number", "format": "float", "description": "The threshold for the score." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalGraderResource" } ] }, "OpenAI.EvalGraderResource": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.GraderType" } }, "discriminator": { "propertyName": "type", "mapping": { "label_model": "#/components/schemas/OpenAI.EvalGraderLabelModelResource", "text_similarity": "#/components/schemas/OpenAI.EvalGraderTextSimilarityResource", "python": "#/components/schemas/OpenAI.EvalGraderPythonResource", "score_model": "#/components/schemas/OpenAI.EvalGraderScoreModelResource" } } }, "OpenAI.EvalGraderScoreModelParams": { "type": "object", "required": [ "type", "name", "model", "input" ], "properties": { "type": { "type": "string", "enum": [ "score_model" ], "description": "The object type, which is always `score_model`." }, "name": { "type": "string", "description": "The name of the grader." }, "model": { "type": "string", "description": "The model to use for the evaluation." }, "sampling_params": { "description": "The sampling parameters for the model." }, "input": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalItem" }, "description": "The input text. This may include template strings." }, "range": { "type": "array", "items": { "type": "number", "format": "float" }, "description": "The range of the score. Defaults to `[0, 1]`." }, "pass_threshold": { "type": "number", "format": "float", "description": "The threshold for the score." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalGraderParams" } ] }, "OpenAI.EvalGraderScoreModelResource": { "type": "object", "required": [ "type", "name", "model", "input" ], "properties": { "type": { "type": "string", "enum": [ "score_model" ], "description": "The object type, which is always `score_model`." }, "name": { "type": "string", "description": "The name of the grader." }, "model": { "type": "string", "description": "The model to use for the evaluation." 
}, "sampling_params": { "description": "The sampling parameters for the model." }, "input": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalItem" }, "description": "The input text. This may include template strings." }, "range": { "type": "array", "items": { "type": "number", "format": "float" }, "description": "The range of the score. Defaults to `[0, 1]`." }, "pass_threshold": { "type": "number", "format": "float", "description": "The threshold for the score." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalGraderResource" } ] }, "OpenAI.EvalGraderStringCheckParams": { "type": "object", "required": [ "type", "name", "input", "reference", "operation" ], "properties": { "type": { "type": "string", "enum": [ "string_check" ], "description": "The object type, which is always `string_check`." }, "name": { "type": "string", "description": "The name of the grader." }, "input": { "type": "string", "description": "The input text. This may include template strings." }, "reference": { "type": "string", "description": "The reference text. This may include template strings." }, "operation": { "type": "string", "enum": [ "eq", "ne", "like", "ilike" ], "description": "The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalGraderParams" } ] }, "OpenAI.EvalGraderTextSimilarityParams": { "type": "object", "required": [ "type", "name", "input", "reference", "evaluation_metric", "pass_threshold" ], "properties": { "type": { "type": "string", "enum": [ "text_similarity" ], "description": "The type of grader." }, "name": { "type": "string", "description": "The name of the grader." }, "input": { "type": "string", "description": "The text being graded." }, "reference": { "type": "string", "description": "The text being graded against." 
}, "evaluation_metric": { "type": "string", "enum": [ "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l" ], "description": "The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`." }, "pass_threshold": { "type": "number", "format": "float", "description": "The threshold for the score." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalGraderParams" } ] }, "OpenAI.EvalGraderTextSimilarityResource": { "type": "object", "required": [ "type", "name", "input", "reference", "evaluation_metric", "pass_threshold" ], "properties": { "type": { "type": "string", "enum": [ "text_similarity" ], "description": "The type of grader." }, "name": { "type": "string", "description": "The name of the grader." }, "input": { "type": "string", "description": "The text being graded." }, "reference": { "type": "string", "description": "The text being graded against." }, "evaluation_metric": { "type": "string", "enum": [ "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l" ], "description": "The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`." }, "pass_threshold": { "type": "number", "format": "float", "description": "The threshold for the score." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalGraderResource" } ] }, "OpenAI.EvalItem": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "enum": [ "user", "assistant", "system", "developer" ], "description": "The role of the message input. One of `user`, `assistant`, `system`, or\n`developer`." }, "content": { "anyOf": [ { "type": "string" }, { "$ref": "#/components/schemas/OpenAI.EvalItemContent" } ], "description": "Text inputs to the model - can contain template strings." 
}, "type": { "type": "string", "enum": [ "message" ], "description": "The type of the message input. Always `message`." } }, "description": "A message input to the model with a role indicating instruction following\nhierarchy. Instructions given with the `developer` or `system` role take\nprecedence over instructions given with the `user` role. Messages with the\n`assistant` role are presumed to have been generated by the model in previous\ninteractions." }, "OpenAI.EvalItemContent": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.EvalItemContentType" } }, "discriminator": { "propertyName": "type", "mapping": { "input_text": "#/components/schemas/OpenAI.EvalItemContentInputText", "output_text": "#/components/schemas/OpenAI.EvalItemContentOutputText" } } }, "OpenAI.EvalItemContentInputText": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "input_text" ] }, "text": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalItemContent" } ] }, "OpenAI.EvalItemContentOutputText": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "output_text" ] }, "text": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalItemContent" } ] }, "OpenAI.EvalItemContentType": { "type": "string", "enum": [ "input_text", "output_text" ] }, "OpenAI.EvalJsonlRunDataSourceParams": { "type": "object", "required": [ "type", "source" ], "properties": { "type": { "type": "string", "enum": [ "jsonl" ], "description": "The type of data source. Always `jsonl`." }, "source": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunFileContentDataContentSource" }, { "$ref": "#/components/schemas/OpenAI.EvalRunFileIdDataContentSource" } ], "description": "Determines what populates the `item` namespace in the data source." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataSourceParams" } ], "description": "A JsonlRunDataSource object with that specifies a JSONL file that matches the eval" }, "OpenAI.EvalList": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of this object. It is always set to \"list\".", "default": "list" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Eval" }, "description": "An array of eval objects." }, "first_id": { "type": "string", "description": "The identifier of the first eval in the data array." }, "last_id": { "type": "string", "description": "The identifier of the last eval in the data array." }, "has_more": { "type": "boolean", "description": "Indicates whether there are more evals available." } }, "description": "An object representing a list of evals." }, "OpenAI.EvalLogsDataSourceConfigParams": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "logs" ], "description": "The type of data source. Always `logs`." }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalDataSourceConfigParams" } ], "description": "A data source config which specifies the metadata property of your logs query.\nThis is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc." 
}, "OpenAI.EvalLogsDataSourceConfigResource": { "type": "object", "required": [ "type", "metadata", "schema" ], "properties": { "type": { "type": "string", "enum": [ "logs" ], "description": "The type of data source. Always `logs`." }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "nullable": true, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "schema": { "type": "object", "additionalProperties": {}, "description": "The json schema for the run data source items.\nLearn how to build JSON schemas [here](https://json-schema.org/)." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalDataSourceConfigResource" } ], "description": "A LogsDataSourceConfig which specifies the metadata property of your logs query.\nThis is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc.\nThe schema returned by this data source config is used to defined what variables are available in your evals.\n`item` and `sample` are both defined when using this data source config." }, "OpenAI.EvalResponsesRunDataSourceParams": { "type": "object", "required": [ "type", "source" ], "properties": { "type": { "type": "string", "enum": [ "responses" ], "description": "The type of run data source. Always `responses`." }, "input_messages": { "anyOf": [ { "type": "object", "properties": { "type": { "type": "string", "enum": [ "template" ], "description": "The type of input messages. Always `template`." }, "template": { "type": "array", "items": { "anyOf": [ { "type": "object", "properties": { "role": { "type": "string", "description": "The role of the message (e.g. \"system\", \"assistant\", \"user\")." 
}, "content": { "type": "string", "description": "The content of the message." } }, "required": [ "role", "content" ] }, { "$ref": "#/components/schemas/OpenAI.EvalItem" } ] }, "description": "A list of chat messages forming the prompt or context. May include variable references to the `item` namespace, ie {{item.name}}." } }, "required": [ "type", "template" ] }, { "type": "object", "properties": { "type": { "type": "string", "enum": [ "item_reference" ], "description": "The type of input messages. Always `item_reference`." }, "item_reference": { "type": "string", "description": "A reference to a variable in the `item` namespace. Ie, \"item.name\"" } }, "required": [ "type", "item_reference" ] } ], "description": "Used when sampling from a model. Dictates the structure of the messages passed into the model. Can either be a reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template with variable references to the `item` namespace." }, "sampling_params": { "$ref": "#/components/schemas/AzureEvalAPIResponseSamplingParams" }, "model": { "type": "string", "description": "The name of the model to use for generating completions (e.g. \"o3-mini\")." }, "source": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunFileContentDataContentSource" }, { "$ref": "#/components/schemas/OpenAI.EvalRunFileIdDataContentSource" }, { "$ref": "#/components/schemas/OpenAI.EvalRunResponsesDataContentSource" } ], "description": "Determines what populates the `item` namespace in this run's data source." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataSourceParams" } ], "description": "A ResponsesRunDataSource object describing a model sampling configuration." 
}, "OpenAI.EvalRun": { "type": "object", "required": [ "object", "id", "eval_id", "status", "model", "name", "created_at", "report_url", "result_counts", "per_model_usage", "per_testing_criteria_results", "data_source", "metadata", "error" ], "properties": { "object": { "type": "string", "enum": [ "eval.run" ], "description": "The type of the object. Always \"eval.run\".", "default": "eval.run" }, "id": { "type": "string", "description": "Unique identifier for the evaluation run." }, "eval_id": { "type": "string", "description": "The identifier of the associated evaluation." }, "status": { "type": "string", "description": "The status of the evaluation run." }, "model": { "type": "string", "description": "The model that is evaluated, if applicable." }, "name": { "type": "string", "description": "The name of the evaluation run." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) when the evaluation run was created." }, "report_url": { "type": "string", "description": "The URL to the rendered evaluation run report on the UI dashboard." }, "result_counts": { "type": "object", "properties": { "total": { "type": "integer", "format": "int32", "description": "Total number of executed output items." }, "errored": { "type": "integer", "format": "int32", "description": "Number of output items that resulted in an error." }, "failed": { "type": "integer", "format": "int32", "description": "Number of output items that failed to pass the evaluation." }, "passed": { "type": "integer", "format": "int32", "description": "Number of output items that passed the evaluation." } }, "required": [ "total", "errored", "failed", "passed" ], "description": "Counters summarizing the outcomes of the evaluation run." }, "per_model_usage": { "type": "array", "items": { "type": "object", "properties": { "model_name": { "type": "string", "description": "The name of the model." 
}, "invocation_count": { "type": "integer", "format": "int32", "description": "The number of invocations." }, "prompt_tokens": { "type": "integer", "format": "int32", "description": "The number of prompt tokens used." }, "completion_tokens": { "type": "integer", "format": "int32", "description": "The number of completion tokens generated." }, "total_tokens": { "type": "integer", "format": "int32", "description": "The total number of tokens used." }, "cached_tokens": { "type": "integer", "format": "int32", "description": "The number of tokens retrieved from cache." } }, "required": [ "model_name", "invocation_count", "prompt_tokens", "completion_tokens", "total_tokens", "cached_tokens" ] }, "description": "Usage statistics for each model during the evaluation run." }, "per_testing_criteria_results": { "type": "array", "items": { "type": "object", "properties": { "testing_criteria": { "type": "string", "description": "A description of the testing criteria." }, "passed": { "type": "integer", "format": "int32", "description": "Number of tests passed for this criteria." }, "failed": { "type": "integer", "format": "int32", "description": "Number of tests failed for this criteria." } }, "required": [ "testing_criteria", "passed", "failed" ] }, "description": "Results per testing criteria applied during the evaluation run." }, "data_source": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataSourceResource" } ], "description": "Information about the run's data source." }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "nullable": true, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. 
Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "error": { "$ref": "#/components/schemas/OpenAI.EvalApiError" } }, "description": "A schema representing an evaluation run." }, "OpenAI.EvalRunDataContentSource": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.EvalRunDataContentSourceType" } }, "discriminator": { "propertyName": "type", "mapping": { "file_id": "#/components/schemas/OpenAI.EvalRunFileIdDataContentSource", "stored_completions": "#/components/schemas/OpenAI.EvalRunStoredCompletionsDataContentSource", "responses": "#/components/schemas/OpenAI.EvalRunResponsesDataContentSource" } } }, "OpenAI.EvalRunDataContentSourceType": { "type": "string", "enum": [ "file_id", "file_content", "stored_completions", "responses" ] }, "OpenAI.EvalRunDataSourceCompletionsResource": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "completions" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataSourceResource" } ] }, "OpenAI.EvalRunDataSourceJsonlResource": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "jsonl" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataSourceResource" } ] }, "OpenAI.EvalRunDataSourceParams": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.EvalRunDataSourceType" } }, "discriminator": { "propertyName": "type", "mapping": { "jsonl": "#/components/schemas/OpenAI.EvalJsonlRunDataSourceParams", "completions": "#/components/schemas/OpenAI.EvalCompletionsRunDataSourceParams", "responses": "#/components/schemas/OpenAI.EvalResponsesRunDataSourceParams" } } }, "OpenAI.EvalRunDataSourceResource": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.EvalRunDataSourceType" } } }, 
"OpenAI.EvalRunDataSourceResponsesResource": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "responses" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataSourceResource" } ] }, "OpenAI.EvalRunDataSourceType": { "type": "string", "enum": [ "jsonl", "completions", "responses" ] }, "OpenAI.EvalRunFileContentDataContentSource": { "type": "object", "required": [ "type", "content" ], "properties": { "type": { "type": "string", "enum": [ "file_content" ], "description": "The type of jsonl source. Always `file_content`." }, "content": { "type": "array", "items": { "type": "object", "properties": { "item": { "type": "object", "additionalProperties": {} }, "sample": { "type": "object", "additionalProperties": {} } }, "required": [ "item" ] }, "description": "The content of the jsonl file." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataContentSource" } ] }, "OpenAI.EvalRunFileIdDataContentSource": { "type": "object", "required": [ "type", "id" ], "properties": { "type": { "type": "string", "enum": [ "file_id" ], "description": "The type of jsonl source. Always `file_id`." }, "id": { "type": "string", "description": "The identifier of the file." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataContentSource" } ] }, "OpenAI.EvalRunList": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of this object. It is always set to \"list\".", "default": "list" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalRun" }, "description": "An array of eval run objects." }, "first_id": { "type": "string", "description": "The identifier of the first eval run in the data array." }, "last_id": { "type": "string", "description": "The identifier of the last eval run in the data array." 
}, "has_more": { "type": "boolean", "description": "Indicates whether there are more evals available." } }, "description": "An object representing a list of runs for an evaluation." }, "OpenAI.EvalRunOutputItem": { "type": "object", "required": [ "object", "id", "run_id", "eval_id", "created_at", "status", "datasource_item_id", "datasource_item", "results", "sample" ], "properties": { "object": { "type": "string", "enum": [ "eval.run.output_item" ], "description": "The type of the object. Always \"eval.run.output_item\".", "default": "eval.run.output_item" }, "id": { "type": "string", "description": "Unique identifier for the evaluation run output item." }, "run_id": { "type": "string", "description": "The identifier of the evaluation run associated with this output item." }, "eval_id": { "type": "string", "description": "The identifier of the evaluation group." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) when the evaluation run was created." }, "status": { "type": "string", "description": "The status of the evaluation run." }, "datasource_item_id": { "type": "integer", "format": "int32", "description": "The identifier for the data source item." }, "datasource_item": { "type": "object", "additionalProperties": {}, "description": "Details of the input data source item." }, "results": { "type": "array", "items": { "type": "object", "additionalProperties": {} }, "description": "A list of results from the evaluation run." }, "sample": { "type": "object", "properties": { "input": { "type": "array", "items": { "type": "object", "properties": { "role": { "type": "string", "description": "The role of the message sender (e.g., system, user, developer)." }, "content": { "type": "string", "description": "The content of the message." } }, "required": [ "role", "content" ] }, "description": "An array of input messages." 
}, "output": { "type": "array", "items": { "type": "object", "properties": { "role": { "type": "string", "description": "The role of the message (e.g. \"system\", \"assistant\", \"user\")." }, "content": { "type": "string", "description": "The content of the message." } } }, "description": "An array of output messages." }, "finish_reason": { "type": "string", "description": "The reason why the sample generation was finished." }, "model": { "type": "string", "description": "The model used for generating the sample." }, "usage": { "type": "object", "properties": { "total_tokens": { "type": "integer", "format": "int32", "description": "The total number of tokens used." }, "completion_tokens": { "type": "integer", "format": "int32", "description": "The number of completion tokens generated." }, "prompt_tokens": { "type": "integer", "format": "int32", "description": "The number of prompt tokens used." }, "cached_tokens": { "type": "integer", "format": "int32", "description": "The number of tokens retrieved from cache." } }, "required": [ "total_tokens", "completion_tokens", "prompt_tokens", "cached_tokens" ], "description": "Token usage details for the sample." }, "error": { "$ref": "#/components/schemas/OpenAI.EvalApiError" }, "temperature": { "type": "number", "format": "float", "description": "The sampling temperature used." }, "max_completion_tokens": { "type": "integer", "format": "int32", "description": "The maximum number of tokens allowed for completion." }, "top_p": { "type": "number", "format": "float", "description": "The top_p value used for sampling." }, "seed": { "type": "integer", "format": "int32", "description": "The seed used for generating the sample." } }, "required": [ "input", "output", "finish_reason", "model", "usage", "error", "temperature", "max_completion_tokens", "top_p", "seed" ], "description": "A sample containing the input and output of the evaluation run." } }, "description": "A schema representing an evaluation run output item." 
}, "OpenAI.EvalRunOutputItemList": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of this object. It is always set to \"list\".", "default": "list" }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalRunOutputItem" }, "description": "An array of eval run output item objects." }, "first_id": { "type": "string", "description": "The identifier of the first eval run output item in the data array." }, "last_id": { "type": "string", "description": "The identifier of the last eval run output item in the data array." }, "has_more": { "type": "boolean", "description": "Indicates whether there are more eval run output items available." } }, "description": "An object representing a list of output items for an evaluation run." }, "OpenAI.EvalRunResponsesDataContentSource": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "responses" ], "description": "The type of run data source. Always `responses`." }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "model": { "type": "string", "nullable": true, "description": "The name of the model to find responses for. This is a query parameter used to select responses." }, "instructions_search": { "type": "string", "nullable": true, "description": "Optional string to search the 'instructions' field. This is a query parameter used to select responses." 
}, "created_after": { "type": "integer", "format": "int32", "nullable": true, "minimum": 0, "description": "Only include items created after this timestamp (inclusive). This is a query parameter used to select responses." }, "created_before": { "type": "integer", "format": "int32", "nullable": true, "minimum": 0, "description": "Only include items created before this timestamp (inclusive). This is a query parameter used to select responses." }, "reasoning_effort": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ReasoningEffort" } ], "nullable": true, "description": "Optional reasoning effort parameter. This is a query parameter used to select responses.", "default": "medium" }, "temperature": { "type": "number", "format": "float", "nullable": true, "description": "Sampling temperature. This is a query parameter used to select responses." }, "top_p": { "type": "number", "format": "float", "nullable": true, "description": "Nucleus sampling parameter. This is a query parameter used to select responses." }, "users": { "type": "array", "items": { "type": "string" }, "nullable": true, "description": "List of user identifiers. This is a query parameter used to select responses." }, "tools": { "type": "array", "items": { "type": "string" }, "nullable": true, "description": "List of tool names. This is a query parameter used to select responses." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataContentSource" } ], "description": "A EvalResponsesSource object describing a run data source configuration." }, "OpenAI.EvalRunStoredCompletionsDataContentSource": { "type": "object", "required": [ "type", "metadata" ], "properties": { "type": { "type": "string", "enum": [ "stored_completions" ], "description": "The type of source. Always `stored_completions`." }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "nullable": true, "description": "Set of 16 key-value pairs that can be attached to an object. 
This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "model": { "type": "string", "nullable": true, "description": "An optional model to filter by (e.g., 'gpt-4o')." }, "created_after": { "type": "integer", "format": "int32", "nullable": true, "description": "An optional Unix timestamp to filter items created after this time." }, "created_before": { "type": "integer", "format": "int32", "nullable": true, "description": "An optional Unix timestamp to filter items created before this time." }, "limit": { "type": "integer", "format": "int32", "nullable": true, "description": "An optional maximum number of items to return." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalRunDataContentSource" } ], "description": "A StoredCompletionsRunDataSource configuration describing a set of filters" }, "OpenAI.EvalStoredCompletionsDataSourceConfigParams": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "stored_completions" ], "description": "The type of data source. Always `stored_completions`." }, "metadata": { "type": "object", "additionalProperties": {}, "description": "Metadata filters for the stored completions data source." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalDataSourceConfigParams" } ], "description": "Deprecated in favor of LogsDataSourceConfig.", "deprecated": true }, "OpenAI.EvalStoredCompletionsDataSourceConfigResource": { "type": "object", "required": [ "type", "metadata", "schema" ], "properties": { "type": { "type": "string", "enum": [ "stored_completions" ], "description": "The type of data source. Always `stored_completions`." 
}, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "nullable": true, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "schema": { "type": "object", "additionalProperties": {}, "description": "The json schema for the run data source items.\nLearn how to build JSON schemas [here](https://json-schema.org/)." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.EvalDataSourceConfigResource" } ], "description": "Deprecated in favor of LogsDataSourceConfig.", "deprecated": true }, "OpenAI.FileSearchTool": { "type": "object", "required": [ "type", "vector_store_ids" ], "properties": { "type": { "type": "string", "enum": [ "file_search" ], "description": "The type of the file search tool. Always `file_search`." }, "vector_store_ids": { "type": "array", "items": { "type": "string" }, "description": "The IDs of the vector stores to search." }, "max_num_results": { "type": "integer", "format": "int32", "description": "The maximum number of results to return. This number should be between 1 and 50 inclusive." }, "ranking_options": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.RankingOptions" } ], "description": "Ranking options for search." }, "filters": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Filters" } ], "nullable": true, "description": "A filter to apply." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A tool that searches for relevant content from uploaded files. Learn more about the [file search tool](https://platform.openai.com/docs/guides/tools-file-search)." 
}, "OpenAI.FileSearchToolCallItemParam": { "type": "object", "required": [ "type", "queries" ], "properties": { "type": { "type": "string", "enum": [ "file_search_call" ] }, "queries": { "type": "array", "items": { "type": "string" }, "description": "The queries used to search for files." }, "results": { "type": "array", "items": { "type": "object", "properties": { "file_id": { "type": "string", "description": "The unique ID of the file." }, "text": { "type": "string", "description": "The text that was retrieved from the file." }, "filename": { "type": "string", "description": "The name of the file." }, "attributes": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" }, "score": { "type": "number", "format": "float", "description": "The relevance score of the file - a value between 0 and 1." } } }, "nullable": true, "description": "The results of the file search tool call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "The results of a file search tool call. See the\n[file search guide](/docs/guides/tools-file-search) for more information.\n" }, "OpenAI.FileSearchToolCallItemResource": { "type": "object", "required": [ "type", "status", "queries" ], "properties": { "type": { "type": "string", "enum": [ "file_search_call" ] }, "status": { "type": "string", "enum": [ "in_progress", "searching", "completed", "incomplete", "failed" ], "description": "The status of the file search tool call. One of `in_progress`, \n`searching`, `incomplete` or `failed`," }, "queries": { "type": "array", "items": { "type": "string" }, "description": "The queries used to search for files." }, "results": { "type": "array", "items": { "type": "object", "properties": { "file_id": { "type": "string", "description": "The unique ID of the file." }, "text": { "type": "string", "description": "The text that was retrieved from the file." }, "filename": { "type": "string", "description": "The name of the file." 
}, "attributes": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" }, "score": { "type": "number", "format": "float", "description": "The relevance score of the file - a value between 0 and 1." } } }, "nullable": true, "description": "The results of the file search tool call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "The results of a file search tool call. See the\n[file search guide](/docs/guides/tools-file-search) for more information.\n" }, "OpenAI.Filters": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ComparisonFilter" }, { "$ref": "#/components/schemas/OpenAI.CompoundFilter" } ] }, "OpenAI.FineTuneDPOHyperparameters": { "type": "object", "properties": { "beta": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number", "format": "float" } ], "description": "The beta value for the DPO method. A higher beta value will increase the weight of the penalty between the policy and reference model.", "default": "auto" }, "batch_size": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "description": "Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.", "default": "auto" }, "learning_rate_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number", "format": "float" } ], "description": "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting.", "default": "auto" }, "n_epochs": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "description": "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.", "default": "auto" } }, "description": "The hyperparameters used for the DPO fine-tuning job." 
}, "OpenAI.FineTuneDPOMethod": { "type": "object", "properties": { "hyperparameters": { "$ref": "#/components/schemas/OpenAI.FineTuneDPOHyperparameters" } }, "description": "Configuration for the DPO fine-tuning method." }, "OpenAI.FineTuneMethod": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "supervised", "dpo", "reinforcement" ], "description": "The type of method. Is either `supervised`, `dpo`, or `reinforcement`." }, "supervised": { "$ref": "#/components/schemas/OpenAI.FineTuneSupervisedMethod" }, "dpo": { "$ref": "#/components/schemas/OpenAI.FineTuneDPOMethod" }, "reinforcement": { "$ref": "#/components/schemas/AzureFineTuneReinforcementMethod" } }, "description": "The method used for fine-tuning." }, "OpenAI.FineTuneReinforcementHyperparameters": { "type": "object", "properties": { "batch_size": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "description": "Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.", "default": "auto" }, "learning_rate_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number", "format": "float" } ], "description": "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting.", "default": "auto" }, "n_epochs": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "description": "The number of epochs to train the model for. 
An epoch refers to one full cycle through the training dataset.", "default": "auto" }, "reasoning_effort": { "type": "string", "enum": [ "default", "low", "medium", "high" ], "description": "Level of reasoning effort.", "default": "default" }, "compute_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number", "format": "float" } ], "description": "Multiplier on amount of compute used for exploring search space during training.", "default": "auto" }, "eval_interval": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "description": "The number of training steps between evaluation runs.", "default": "auto" }, "eval_samples": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "description": "Number of evaluation samples to generate per training step.", "default": "auto" } }, "description": "The hyperparameters used for the reinforcement fine-tuning job." }, "OpenAI.FineTuneSupervisedHyperparameters": { "type": "object", "properties": { "batch_size": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "description": "Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.", "default": "auto" }, "learning_rate_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number", "format": "float" } ], "description": "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting.", "default": "auto" }, "n_epochs": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "description": "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.", "default": "auto" } }, "description": "The hyperparameters used for the fine-tuning job." 
}, "OpenAI.FineTuneSupervisedMethod": { "type": "object", "properties": { "hyperparameters": { "$ref": "#/components/schemas/OpenAI.FineTuneSupervisedHyperparameters" } }, "description": "Configuration for the supervised fine-tuning method." }, "OpenAI.FineTuningIntegration": { "type": "object", "required": [ "type" ], "properties": { "type": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "wandb" ] } ] } }, "discriminator": { "propertyName": "type", "mapping": { "wandb": "#/components/schemas/OpenAI.FineTuningIntegrationWandb" } } }, "OpenAI.FineTuningIntegrationWandb": { "type": "object", "required": [ "type", "wandb" ], "properties": { "type": { "type": "string", "enum": [ "wandb" ], "description": "The type of the integration being enabled for the fine-tuning job" }, "wandb": { "type": "object", "properties": { "project": { "type": "string", "description": "The name of the project that the new run will be created under." }, "name": { "type": "string", "nullable": true, "description": "A display name to set for the run. If not set, we will use the Job ID as the name." }, "entity": { "type": "string", "nullable": true, "description": "The entity to use for the run. This allows you to set the team or username of the WandB user that you would\nlike associated with the run. If not set, the default entity for the registered WandB API key is used." }, "tags": { "type": "array", "items": { "type": "string" }, "description": "A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some\ndefault tags are generated by OpenAI: \"openai/finetune\", \"openai/{base-model}\", \"openai/{ftjob-abcdef}\"." } }, "required": [ "project" ], "description": "The settings for your integration with Weights and Biases. This payload specifies the project that\nmetrics will be sent to. 
Optionally, you can set an explicit display name for your run, add tags\nto your run, and set a default entity (team, username, etc) to be associated with your run." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.FineTuningIntegration" } ] }, "OpenAI.FineTuningJob": { "type": "object", "required": [ "id", "created_at", "error", "fine_tuned_model", "finished_at", "hyperparameters", "model", "object", "organization_id", "result_files", "status", "trained_tokens", "training_file", "validation_file", "seed", "metadata" ], "properties": { "user_provided_suffix": { "type": "string", "nullable": true, "description": "The descriptive suffix applied to the job, as specified in the job creation request." }, "id": { "type": "string", "description": "The object identifier, which can be referenced in the API endpoints." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the fine-tuning job was created." }, "error": { "type": "object", "properties": { "code": { "type": "string", "description": "A machine-readable error code." }, "message": { "type": "string", "description": "A human-readable error message." }, "param": { "type": "string", "nullable": true, "description": "The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific." } }, "required": [ "code", "message", "param" ], "nullable": true, "description": "For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure." }, "fine_tuned_model": { "type": "string", "nullable": true, "description": "The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running." }, "finished_at": { "type": "integer", "format": "unixtime", "nullable": true, "description": "The Unix timestamp (in seconds) for when the fine-tuning job was finished. 
The value will be null if the fine-tuning job is still running." }, "hyperparameters": { "type": "object", "properties": { "batch_size": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "nullable": true, "description": "Number of examples in each batch. A larger batch size means that model parameters\nare updated less frequently, but with lower variance.", "default": "auto" }, "learning_rate_multiplier": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "number", "format": "float" } ], "description": "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid\noverfitting.", "default": "auto" }, "n_epochs": { "anyOf": [ { "type": "string", "enum": [ "auto" ] }, { "type": "integer", "format": "int32" } ], "description": "The number of epochs to train the model for. An epoch refers to one full cycle\nthrough the training dataset.", "default": "auto" } }, "description": "The hyperparameters used for the fine-tuning job. This value will only be returned when running `supervised` jobs." }, "model": { "type": "string", "description": "The base model that is being fine-tuned." }, "object": { "type": "string", "enum": [ "fine_tuning.job" ], "description": "The object type, which is always \"fine_tuning.job\"." }, "organization_id": { "type": "string", "description": "The organization that owns the fine-tuning job." }, "result_files": { "type": "array", "items": { "type": "string" }, "description": "The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents)." }, "status": { "type": "string", "enum": [ "validating_files", "queued", "running", "succeeded", "failed", "cancelled" ], "description": "The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`." 
}, "trained_tokens": { "type": "integer", "format": "int32", "nullable": true, "description": "The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running." }, "training_file": { "type": "string", "description": "The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents)." }, "validation_file": { "type": "string", "nullable": true, "description": "The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents)." }, "integrations": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FineTuningIntegration" }, "nullable": true, "description": "A list of integrations to enable for this fine-tuning job." }, "seed": { "type": "integer", "format": "int32", "description": "The seed used for the fine-tuning job." }, "estimated_finish": { "type": "integer", "format": "unixtime", "nullable": true, "description": "The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running." }, "method": { "$ref": "#/components/schemas/OpenAI.FineTuneMethod" }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "nullable": true, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" } }, "description": "The `fine_tuning.job` object represents a fine-tuning job that has been created through the API." 
}, "OpenAI.FineTuningJobCheckpoint": { "type": "object", "required": [ "id", "created_at", "fine_tuned_model_checkpoint", "step_number", "metrics", "fine_tuning_job_id", "object" ], "properties": { "id": { "type": "string", "description": "The checkpoint identifier, which can be referenced in the API endpoints." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the checkpoint was created." }, "fine_tuned_model_checkpoint": { "type": "string", "description": "The name of the fine-tuned checkpoint model that is created." }, "step_number": { "type": "integer", "format": "int32", "description": "The step number that the checkpoint was created at." }, "metrics": { "type": "object", "properties": { "step": { "type": "number", "format": "float" }, "train_loss": { "type": "number", "format": "float" }, "train_mean_token_accuracy": { "type": "number", "format": "float" }, "valid_loss": { "type": "number", "format": "float" }, "valid_mean_token_accuracy": { "type": "number", "format": "float" }, "full_valid_loss": { "type": "number", "format": "float" }, "full_valid_mean_token_accuracy": { "type": "number", "format": "float" } }, "description": "Metrics at the step number during the fine-tuning job." }, "fine_tuning_job_id": { "type": "string", "description": "The name of the fine-tuning job that this checkpoint was created from." }, "object": { "type": "string", "enum": [ "fine_tuning.job.checkpoint" ], "description": "The object type, which is always \"fine_tuning.job.checkpoint\"." } }, "description": "The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use." }, "OpenAI.FineTuningJobEvent": { "type": "object", "required": [ "object", "id", "created_at", "level", "message" ], "properties": { "object": { "type": "string", "enum": [ "fine_tuning.job.event" ], "description": "The object type, which is always \"fine_tuning.job.event\"." 
}, "id": { "type": "string", "description": "The object identifier." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the fine-tuning job was created." }, "level": { "type": "string", "enum": [ "info", "warn", "error" ], "description": "The log level of the event." }, "message": { "type": "string", "description": "The message of the event." }, "type": { "type": "string", "enum": [ "message", "metrics" ], "description": "The type of event." }, "data": { "description": "The data associated with the event." } }, "description": "Fine-tuning job event object" }, "OpenAI.FunctionObject": { "type": "object", "required": [ "name" ], "properties": { "description": { "type": "string", "description": "A description of what the function does, used by the model to choose when and how to call the function." }, "name": { "type": "string", "description": "The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64." }, "parameters": { "description": "The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.\n\nOmitting `parameters` defines a function with an empty parameter list." }, "strict": { "type": "boolean", "nullable": true, "description": "Whether to enable strict schema adherence when generating the function call. If set to true, the model will follow the exact schema defined in the `parameters` field. Only a subset of JSON Schema is supported when `strict` is `true`. 
Learn more about Structured Outputs in the [function calling guide](/docs/guides/function-calling).", "default": false } } }, "OpenAI.FunctionTool": { "type": "object", "required": [ "type", "name", "parameters", "strict" ], "properties": { "type": { "type": "string", "enum": [ "function" ], "description": "The type of the function tool. Always `function`." }, "name": { "type": "string", "description": "The name of the function to call." }, "description": { "type": "string", "nullable": true, "description": "A description of the function. Used by the model to determine whether or not to call the function." }, "parameters": { "nullable": true, "description": "A JSON schema object describing the parameters of the function." }, "strict": { "type": "boolean", "nullable": true, "description": "Whether to enforce strict parameter validation. Default `true`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "Defines a function in your own code the model can choose to call. Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling)." }, "OpenAI.FunctionToolCallItemParam": { "type": "object", "required": [ "type", "call_id", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "function_call" ] }, "call_id": { "type": "string", "description": "The unique ID of the function tool call generated by the model." }, "name": { "type": "string", "description": "The name of the function to run." }, "arguments": { "type": "string", "description": "A JSON string of the arguments to pass to the function." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "A tool call to run a function. 
See the\n[function calling guide](/docs/guides/function-calling) for more information.\n" }, "OpenAI.FunctionToolCallItemResource": { "type": "object", "required": [ "type", "status", "call_id", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "function_call" ] }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n`incomplete`. Populated when items are returned via API." }, "call_id": { "type": "string", "description": "The unique ID of the function tool call generated by the model." }, "name": { "type": "string", "description": "The name of the function to run." }, "arguments": { "type": "string", "description": "A JSON string of the arguments to pass to the function." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A tool call to run a function. See the\n[function calling guide](/docs/guides/function-calling) for more information.\n" }, "OpenAI.FunctionToolCallOutputItemParam": { "type": "object", "required": [ "type", "call_id", "output" ], "properties": { "type": { "type": "string", "enum": [ "function_call_output" ] }, "call_id": { "type": "string", "description": "The unique ID of the function tool call generated by the model." }, "output": { "type": "string", "description": "A JSON string of the output of the function tool call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "The output of a function tool call.\n" }, "OpenAI.FunctionToolCallOutputItemResource": { "type": "object", "required": [ "type", "status", "call_id", "output" ], "properties": { "type": { "type": "string", "enum": [ "function_call_output" ] }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n`incomplete`. Populated when items are returned via API." 
}, "call_id": { "type": "string", "description": "The unique ID of the function tool call generated by the model." }, "output": { "type": "string", "description": "A JSON string of the output of the function tool call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "The output of a function tool call.\n" }, "OpenAI.Grader": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.GraderType" } }, "discriminator": { "propertyName": "type", "mapping": { "label_model": "#/components/schemas/OpenAI.GraderLabelModel", "text_similarity": "#/components/schemas/OpenAI.GraderTextSimilarity", "python": "#/components/schemas/OpenAI.GraderPython", "score_model": "#/components/schemas/OpenAI.GraderScoreModel", "multi": "#/components/schemas/OpenAI.GraderMulti" } } }, "OpenAI.GraderLabelModel": { "type": "object", "required": [ "type", "name", "model", "input", "labels", "passing_labels" ], "properties": { "type": { "type": "string", "enum": [ "label_model" ], "description": "The object type, which is always `label_model`." }, "name": { "type": "string", "description": "The name of the grader." }, "model": { "type": "string", "description": "The model to use for the evaluation. Must support structured outputs." }, "input": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalItem" } }, "labels": { "type": "array", "items": { "type": "string" }, "description": "The labels to assign to each item in the evaluation." }, "passing_labels": { "type": "array", "items": { "type": "string" }, "description": "The labels that indicate a passing result. Must be a subset of labels." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Grader" } ], "description": "A LabelModelGrader object which uses a model to assign labels to each item\nin the evaluation." 
}, "OpenAI.GraderMulti": { "type": "object", "required": [ "type", "name", "graders", "calculate_output" ], "properties": { "type": { "type": "string", "enum": [ "multi" ], "description": "The object type, which is always `multi`." }, "name": { "type": "string", "description": "The name of the grader." }, "graders": { "type": "object", "additionalProperties": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.GraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.GraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.GraderScoreModel" } ] } }, "calculate_output": { "type": "string", "description": "A formula to calculate the output based on grader results." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Grader" } ], "description": "A MultiGrader object combines the output of multiple graders to produce a single score." }, "OpenAI.GraderPython": { "type": "object", "required": [ "type", "name", "source" ], "properties": { "type": { "type": "string", "enum": [ "python" ], "description": "The object type, which is always `python`." }, "name": { "type": "string", "description": "The name of the grader." }, "source": { "type": "string", "description": "The source code of the python script." }, "image_tag": { "type": "string", "description": "The image tag to use for the python script." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Grader" } ], "description": "A PythonGrader object that runs a python script on the input." }, "OpenAI.GraderScoreModel": { "type": "object", "required": [ "type", "name", "model", "input" ], "properties": { "type": { "type": "string", "enum": [ "score_model" ], "description": "The object type, which is always `score_model`." }, "name": { "type": "string", "description": "The name of the grader." }, "model": { "type": "string", "description": "The model to use for the evaluation." }, "sampling_params": { "description": "The sampling parameters for the model." 
}, "input": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.EvalItem" }, "description": "The input text. This may include template strings." }, "range": { "type": "array", "items": { "type": "number", "format": "float" }, "description": "The range of the score. Defaults to `[0, 1]`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Grader" } ], "description": "A ScoreModelGrader object that uses a model to assign a score to the input." }, "OpenAI.GraderStringCheck": { "type": "object", "required": [ "type", "name", "input", "reference", "operation" ], "properties": { "type": { "type": "string", "enum": [ "string_check" ], "description": "The object type, which is always `string_check`." }, "name": { "type": "string", "description": "The name of the grader." }, "input": { "type": "string", "description": "The input text. This may include template strings." }, "reference": { "type": "string", "description": "The reference text. This may include template strings." }, "operation": { "type": "string", "enum": [ "eq", "ne", "like", "ilike" ], "description": "The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Grader" } ], "description": "A StringCheckGrader object that performs a string comparison between input and reference using a specified operation." }, "OpenAI.GraderTextSimilarity": { "type": "object", "required": [ "type", "name", "input", "reference", "evaluation_metric" ], "properties": { "type": { "type": "string", "enum": [ "text_similarity" ], "description": "The type of grader." }, "name": { "type": "string", "description": "The name of the grader." }, "input": { "type": "string", "description": "The text being graded." }, "reference": { "type": "string", "description": "The text being graded against." 
}, "evaluation_metric": { "type": "string", "enum": [ "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l" ], "description": "The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Grader" } ], "description": "A TextSimilarityGrader object which grades text based on similarity metrics." }, "OpenAI.GraderType": { "type": "string", "enum": [ "string_check", "text_similarity", "score_model", "label_model", "python", "multi" ] }, "OpenAI.ImageGenTool": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "image_generation" ], "description": "The type of the image generation tool. Always `image_generation`." }, "model": { "type": "string", "enum": [ "gpt-image-1" ], "description": "The image generation model to use. Default: `gpt-image-1`.", "default": "gpt-image-1" }, "quality": { "type": "string", "enum": [ "low", "medium", "high", "auto" ], "description": "The quality of the generated image. One of `low`, `medium`, `high`,\nor `auto`. Default: `auto`.", "default": "auto" }, "size": { "type": "string", "enum": [ "1024x1024", "1024x1536", "1536x1024", "auto" ], "description": "The size of the generated image. One of `1024x1024`, `1024x1536`,\n`1536x1024`, or `auto`. Default: `auto`.", "default": "auto" }, "output_format": { "type": "string", "enum": [ "png", "webp", "jpeg" ], "description": "The output format of the generated image. One of `png`, `webp`, or\n`jpeg`. Default: `png`.", "default": "png" }, "output_compression": { "type": "integer", "format": "int32", "minimum": 0, "maximum": 100, "description": "Compression level for the output image. Default: 100.", "default": 100 }, "moderation": { "type": "string", "enum": [ "auto", "low" ], "description": "Moderation level for the generated image. 
Default: `auto`.", "default": "auto" }, "background": { "type": "string", "enum": [ "transparent", "opaque", "auto" ], "description": "Background type for the generated image. One of `transparent`,\n`opaque`, or `auto`. Default: `auto`.", "default": "auto" }, "input_image_mask": { "type": "object", "properties": { "image_url": { "type": "string", "description": "Base64-encoded mask image." }, "file_id": { "type": "string", "description": "File ID for the mask image." } }, "description": "Optional mask for inpainting. Contains `image_url`\n(string, optional) and `file_id` (string, optional)." }, "partial_images": { "type": "integer", "format": "int32", "minimum": 0, "maximum": 3, "description": "Number of partial images to generate in streaming mode, from 0 (default value) to 3.", "default": 0 } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A tool that generates images using a model like `gpt-image-1`." }, "OpenAI.ImageGenToolCallItemParam": { "type": "object", "required": [ "type", "result" ], "properties": { "type": { "type": "string", "enum": [ "image_generation_call" ] }, "result": { "type": "string", "nullable": true, "description": "The generated image encoded in base64." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "An image generation request made by the model.\n" }, "OpenAI.ImageGenToolCallItemResource": { "type": "object", "required": [ "type", "status", "result" ], "properties": { "type": { "type": "string", "enum": [ "image_generation_call" ] }, "status": { "type": "string", "enum": [ "in_progress", "completed", "generating", "failed" ] }, "result": { "type": "string", "nullable": true, "description": "The generated image encoded in base64." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "An image generation request made by the model.\n" }, "OpenAI.ImplicitUserMessage": { "type": "object", "required": [ "content" ], "properties": { "content": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemContent" } } ] } } }, "OpenAI.Includable": { "type": "string", "enum": [ "code_interpreter_call.outputs", "computer_call_output.output.image_url", "file_search_call.results", "message.input_image.image_url", "message.output_text.logprobs", "reasoning.encrypted_content" ], "description": "Specify additional output data to include in the model response. Currently\nsupported values are:\n- `code_interpreter_call.outputs`: Includes the outputs of python code execution\n in code interpreter tool call items.\n- `computer_call_output.output.image_url`: Include image urls from the computer call output.\n- `file_search_call.results`: Include the search results of\n the file search tool call.\n- `message.input_image.image_url`: Include image urls from the input message.\n- `message.output_text.logprobs`: Include logprobs with assistant messages.\n- `reasoning.encrypted_content`: Includes an encrypted version of reasoning\n tokens in reasoning item outputs. This enables reasoning items to be used in\n multi-turn conversations when using the Responses API statelessly (like\n when the `store` parameter is set to `false`, or when an organization is\n enrolled in the zero data retention program)." 
}, "OpenAI.ItemContent": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ItemContentType" } }, "discriminator": { "propertyName": "type", "mapping": { "input_audio": "#/components/schemas/OpenAI.ItemContentInputAudio", "output_audio": "#/components/schemas/OpenAI.ItemContentOutputAudio", "refusal": "#/components/schemas/OpenAI.ItemContentRefusal", "input_text": "#/components/schemas/OpenAI.ItemContentInputText", "input_image": "#/components/schemas/OpenAI.ItemContentInputImage", "input_file": "#/components/schemas/OpenAI.ItemContentInputFile", "output_text": "#/components/schemas/OpenAI.ItemContentOutputText" } } }, "OpenAI.ItemContentInputAudio": { "type": "object", "required": [ "type", "data", "format" ], "properties": { "type": { "type": "string", "enum": [ "input_audio" ], "description": "The type of the input item. Always `input_audio`." }, "data": { "type": "string", "description": "Base64-encoded audio data." }, "format": { "type": "string", "enum": [ "mp3", "wav" ], "description": "The format of the audio data. Currently supported formats are `mp3` and\n`wav`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemContent" } ], "description": "An audio input to the model." }, "OpenAI.ItemContentInputFile": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "input_file" ], "description": "The type of the input item. Always `input_file`." }, "file_id": { "type": "string", "nullable": true, "description": "The ID of the file to be sent to the model." }, "filename": { "type": "string", "description": "The name of the file to be sent to the model." }, "file_data": { "type": "string", "description": "The content of the file to be sent to the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemContent" } ], "description": "A file input to the model." 
}, "OpenAI.ItemContentInputImage": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "input_image" ], "description": "The type of the input item. Always `input_image`." }, "image_url": { "type": "string", "nullable": true, "description": "The URL of the image to be sent to the model. A fully qualified URL or base64 encoded image in a data URL." }, "file_id": { "type": "string", "nullable": true, "description": "The ID of the file to be sent to the model." }, "detail": { "type": "string", "enum": [ "low", "high", "auto" ], "description": "The detail level of the image to be sent to the model. One of `high`, `low`, or `auto`. Defaults to `auto`.", "default": "auto" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemContent" } ], "description": "An image input to the model. Learn about [image inputs](/docs/guides/vision)." }, "OpenAI.ItemContentInputText": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "input_text" ], "description": "The type of the input item. Always `input_text`." }, "text": { "type": "string", "description": "The text input to the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemContent" } ], "description": "A text input to the model." }, "OpenAI.ItemContentOutputAudio": { "type": "object", "required": [ "type", "data", "transcript" ], "properties": { "type": { "type": "string", "enum": [ "output_audio" ], "description": "The type of the output audio. Always `output_audio`." }, "data": { "type": "string", "description": "Base64-encoded audio data from the model." }, "transcript": { "type": "string", "description": "The transcript of the audio data from the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemContent" } ], "description": "An audio output from the model." 
}, "OpenAI.ItemContentOutputText": { "type": "object", "required": [ "type", "text", "annotations" ], "properties": { "type": { "type": "string", "enum": [ "output_text" ], "description": "The type of the output text. Always `output_text`." }, "text": { "type": "string", "description": "The text output from the model." }, "annotations": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Annotation" }, "description": "The annotations of the text output." }, "logprobs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.LogProb" } } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemContent" } ], "description": "A text output from the model." }, "OpenAI.ItemContentRefusal": { "type": "object", "required": [ "type", "refusal" ], "properties": { "type": { "type": "string", "enum": [ "refusal" ], "description": "The type of the refusal. Always `refusal`." }, "refusal": { "type": "string", "description": "The refusal explanationfrom the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemContent" } ], "description": "A refusal from the model." }, "OpenAI.ItemContentType": { "type": "string", "enum": [ "input_text", "input_audio", "input_image", "input_file", "output_text", "output_audio", "refusal" ], "description": "Multi-modal input and output contents." 
}, "OpenAI.ItemParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ItemType" } }, "discriminator": { "propertyName": "type", "mapping": { "file_search_call": "#/components/schemas/OpenAI.FileSearchToolCallItemParam", "computer_call": "#/components/schemas/OpenAI.ComputerToolCallItemParam", "computer_call_output": "#/components/schemas/OpenAI.ComputerToolCallOutputItemParam", "web_search_call": "#/components/schemas/OpenAI.WebSearchToolCallItemParam", "function_call": "#/components/schemas/OpenAI.FunctionToolCallItemParam", "function_call_output": "#/components/schemas/OpenAI.FunctionToolCallOutputItemParam", "reasoning": "#/components/schemas/OpenAI.ReasoningItemParam", "item_reference": "#/components/schemas/OpenAI.ItemReferenceItemParam", "image_generation_call": "#/components/schemas/OpenAI.ImageGenToolCallItemParam", "code_interpreter_call": "#/components/schemas/OpenAI.CodeInterpreterToolCallItemParam", "local_shell_call": "#/components/schemas/OpenAI.LocalShellToolCallItemParam", "local_shell_call_output": "#/components/schemas/OpenAI.LocalShellToolCallOutputItemParam", "mcp_list_tools": "#/components/schemas/OpenAI.MCPListToolsItemParam", "mcp_approval_request": "#/components/schemas/OpenAI.MCPApprovalRequestItemParam", "mcp_approval_response": "#/components/schemas/OpenAI.MCPApprovalResponseItemParam", "mcp_call": "#/components/schemas/OpenAI.MCPCallItemParam", "message": "#/components/schemas/OpenAI.ResponsesMessageItemParam" } }, "description": "Content item used to generate a response." }, "OpenAI.ItemReferenceItemParam": { "type": "object", "required": [ "type", "id" ], "properties": { "type": { "type": "string", "enum": [ "item_reference" ] }, "id": { "type": "string", "description": "The service-originated ID of the previously generated response item being referenced." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "An internal identifier for an item to reference." }, "OpenAI.ItemResource": { "type": "object", "required": [ "type", "id" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ItemType" }, "id": { "type": "string" } }, "discriminator": { "propertyName": "type", "mapping": { "file_search_call": "#/components/schemas/OpenAI.FileSearchToolCallItemResource", "computer_call": "#/components/schemas/OpenAI.ComputerToolCallItemResource", "computer_call_output": "#/components/schemas/OpenAI.ComputerToolCallOutputItemResource", "web_search_call": "#/components/schemas/OpenAI.WebSearchToolCallItemResource", "function_call": "#/components/schemas/OpenAI.FunctionToolCallItemResource", "function_call_output": "#/components/schemas/OpenAI.FunctionToolCallOutputItemResource", "reasoning": "#/components/schemas/OpenAI.ReasoningItemResource", "image_generation_call": "#/components/schemas/OpenAI.ImageGenToolCallItemResource", "code_interpreter_call": "#/components/schemas/OpenAI.CodeInterpreterToolCallItemResource", "local_shell_call": "#/components/schemas/OpenAI.LocalShellToolCallItemResource", "local_shell_call_output": "#/components/schemas/OpenAI.LocalShellToolCallOutputItemResource", "mcp_list_tools": "#/components/schemas/OpenAI.MCPListToolsItemResource", "mcp_approval_request": "#/components/schemas/OpenAI.MCPApprovalRequestItemResource", "mcp_approval_response": "#/components/schemas/OpenAI.MCPApprovalResponseItemResource", "mcp_call": "#/components/schemas/OpenAI.MCPCallItemResource", "message": "#/components/schemas/OpenAI.ResponsesMessageItemResource" } }, "description": "Content item used to generate a response." 
}, "OpenAI.ItemType": { "type": "string", "enum": [ "message", "file_search_call", "function_call", "function_call_output", "computer_call", "computer_call_output", "web_search_call", "reasoning", "item_reference", "image_generation_call", "code_interpreter_call", "local_shell_call", "local_shell_call_output", "mcp_list_tools", "mcp_approval_request", "mcp_approval_response", "mcp_call" ] }, "OpenAI.ListFineTuningJobCheckpointsResponse": { "type": "object", "required": [ "data", "object", "has_more" ], "properties": { "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FineTuningJobCheckpoint" } }, "object": { "type": "string", "enum": [ "list" ] }, "first_id": { "type": "string", "nullable": true }, "last_id": { "type": "string", "nullable": true }, "has_more": { "type": "boolean" } } }, "OpenAI.ListFineTuningJobEventsResponse": { "type": "object", "required": [ "data", "object", "has_more" ], "properties": { "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FineTuningJobEvent" } }, "object": { "type": "string", "enum": [ "list" ] }, "has_more": { "type": "boolean" } } }, "OpenAI.ListModelsResponse": { "type": "object", "required": [ "object", "data" ], "properties": { "object": { "type": "string", "enum": [ "list" ] }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Model" } } } }, "OpenAI.ListPaginatedFineTuningJobsResponse": { "type": "object", "required": [ "data", "has_more", "object" ], "properties": { "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.FineTuningJob" } }, "has_more": { "type": "boolean" }, "object": { "type": "string", "enum": [ "list" ] } } }, "OpenAI.ListVectorStoreFilesFilter": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "in_progress", "completed", "failed", "cancelled" ] } ] }, "OpenAI.ListVectorStoreFilesResponse": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { 
"object": { "type": "string", "enum": [ "list" ] }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.VectorStoreFileObject" } }, "first_id": { "type": "string" }, "last_id": { "type": "string" }, "has_more": { "type": "boolean" } } }, "OpenAI.ListVectorStoresResponse": { "type": "object", "required": [ "object", "data", "first_id", "last_id", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ] }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.VectorStoreObject" } }, "first_id": { "type": "string" }, "last_id": { "type": "string" }, "has_more": { "type": "boolean" } } }, "OpenAI.LocalShellExecAction": { "type": "object", "required": [ "type", "command", "env" ], "properties": { "type": { "type": "string", "enum": [ "exec" ], "description": "The type of the local shell action. Always `exec`." }, "command": { "type": "array", "items": { "type": "string" }, "description": "The command to run." }, "timeout_ms": { "type": "integer", "format": "int32", "nullable": true, "description": "Optional timeout in milliseconds for the command." }, "working_directory": { "type": "string", "nullable": true, "description": "Optional working directory to run the command in." }, "env": { "type": "object", "additionalProperties": { "type": "string" }, "description": "Environment variables to set for the command." }, "user": { "type": "string", "nullable": true, "description": "Optional user to run the command as." } }, "description": "Execute a shell command on the server." }, "OpenAI.LocalShellTool": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "local_shell" ], "description": "The type of the local shell tool. Always `local_shell`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "A tool that allows the model to execute shell commands in a local environment." 
}, "OpenAI.LocalShellToolCallItemParam": { "type": "object", "required": [ "type", "call_id", "action" ], "properties": { "type": { "type": "string", "enum": [ "local_shell_call" ] }, "call_id": { "type": "string", "description": "The unique ID of the local shell tool call generated by the model." }, "action": { "$ref": "#/components/schemas/OpenAI.LocalShellExecAction" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "A tool call to run a command on the local shell.\n" }, "OpenAI.LocalShellToolCallItemResource": { "type": "object", "required": [ "type", "status", "call_id", "action" ], "properties": { "type": { "type": "string", "enum": [ "local_shell_call" ] }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ] }, "call_id": { "type": "string", "description": "The unique ID of the local shell tool call generated by the model." }, "action": { "$ref": "#/components/schemas/OpenAI.LocalShellExecAction" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A tool call to run a command on the local shell.\n" }, "OpenAI.LocalShellToolCallOutputItemParam": { "type": "object", "required": [ "type", "output" ], "properties": { "type": { "type": "string", "enum": [ "local_shell_call_output" ] }, "output": { "type": "string", "description": "A JSON string of the output of the local shell tool call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "The output of a local shell tool call.\n" }, "OpenAI.LocalShellToolCallOutputItemResource": { "type": "object", "required": [ "type", "status", "output" ], "properties": { "type": { "type": "string", "enum": [ "local_shell_call_output" ] }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ] }, "output": { "type": "string", "description": "A JSON string of the output of the local shell tool call." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "The output of a local shell tool call.\n" }, "OpenAI.Location": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.LocationType" } }, "discriminator": { "propertyName": "type", "mapping": { "approximate": "#/components/schemas/OpenAI.ApproximateLocation" } } }, "OpenAI.LocationType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "approximate" ] } ] }, "OpenAI.LogProb": { "type": "object", "required": [ "token", "logprob", "bytes", "top_logprobs" ], "properties": { "token": { "type": "string" }, "logprob": { "type": "number", "format": "float" }, "bytes": { "type": "array", "items": { "type": "integer", "format": "int32" } }, "top_logprobs": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.TopLogProb" } } }, "description": "The log probability of a token." }, "OpenAI.MCPApprovalRequestItemParam": { "type": "object", "required": [ "type", "server_label", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "mcp_approval_request" ] }, "server_label": { "type": "string", "description": "The label of the MCP server making the request." }, "name": { "type": "string", "description": "The name of the tool to run." }, "arguments": { "type": "string", "description": "A JSON string of arguments for the tool." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "A request for human approval of a tool invocation.\n" }, "OpenAI.MCPApprovalRequestItemResource": { "type": "object", "required": [ "type", "server_label", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "mcp_approval_request" ] }, "server_label": { "type": "string", "description": "The label of the MCP server making the request." }, "name": { "type": "string", "description": "The name of the tool to run." 
}, "arguments": { "type": "string", "description": "A JSON string of arguments for the tool." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A request for human approval of a tool invocation.\n" }, "OpenAI.MCPApprovalResponseItemParam": { "type": "object", "required": [ "type", "approval_request_id", "approve" ], "properties": { "type": { "type": "string", "enum": [ "mcp_approval_response" ] }, "approval_request_id": { "type": "string", "description": "The ID of the approval request being answered." }, "approve": { "type": "boolean", "description": "Whether the request was approved." }, "reason": { "type": "string", "nullable": true, "description": "Optional reason for the decision." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "A response to an MCP approval request.\n" }, "OpenAI.MCPApprovalResponseItemResource": { "type": "object", "required": [ "type", "approval_request_id", "approve" ], "properties": { "type": { "type": "string", "enum": [ "mcp_approval_response" ] }, "approval_request_id": { "type": "string", "description": "The ID of the approval request being answered." }, "approve": { "type": "boolean", "description": "Whether the request was approved." }, "reason": { "type": "string", "nullable": true, "description": "Optional reason for the decision." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A response to an MCP approval request.\n" }, "OpenAI.MCPCallItemParam": { "type": "object", "required": [ "type", "server_label", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "mcp_call" ] }, "server_label": { "type": "string", "description": "The label of the MCP server running the tool." }, "name": { "type": "string", "description": "The name of the tool that was run." }, "arguments": { "type": "string", "description": "A JSON string of the arguments passed to the tool." 
}, "output": { "type": "string", "nullable": true, "description": "The output from the tool call." }, "error": { "type": "string", "nullable": true, "description": "The error from the tool call, if any." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "An invocation of a tool on an MCP server.\n" }, "OpenAI.MCPCallItemResource": { "type": "object", "required": [ "type", "server_label", "name", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "mcp_call" ] }, "server_label": { "type": "string", "description": "The label of the MCP server running the tool." }, "name": { "type": "string", "description": "The name of the tool that was run." }, "arguments": { "type": "string", "description": "A JSON string of the arguments passed to the tool." }, "output": { "type": "string", "nullable": true, "description": "The output from the tool call." }, "error": { "type": "string", "nullable": true, "description": "The error from the tool call, if any." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "An invocation of a tool on an MCP server.\n" }, "OpenAI.MCPListToolsItemParam": { "type": "object", "required": [ "type", "server_label", "tools" ], "properties": { "type": { "type": "string", "enum": [ "mcp_list_tools" ] }, "server_label": { "type": "string", "description": "The label of the MCP server." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.MCPListToolsTool" }, "description": "The tools available on the server." }, "error": { "type": "string", "nullable": true, "description": "Error message if the server could not list tools." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "A list of tools available on an MCP server.\n" }, "OpenAI.MCPListToolsItemResource": { "type": "object", "required": [ "type", "server_label", "tools" ], "properties": { "type": { "type": "string", "enum": [ "mcp_list_tools" ] }, "server_label": { "type": "string", "description": "The label of the MCP server." }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.MCPListToolsTool" }, "description": "The tools available on the server." }, "error": { "type": "string", "nullable": true, "description": "Error message if the server could not list tools." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A list of tools available on an MCP server.\n" }, "OpenAI.MCPListToolsTool": { "type": "object", "required": [ "name", "input_schema" ], "properties": { "name": { "type": "string", "description": "The name of the tool." }, "description": { "type": "string", "nullable": true, "description": "The description of the tool." }, "input_schema": { "description": "The JSON schema describing the tool's input." }, "annotations": { "nullable": true, "description": "Additional annotations about the tool." } }, "description": "A tool available on an MCP server." }, "OpenAI.MCPTool": { "type": "object", "required": [ "type", "server_label", "server_url" ], "properties": { "type": { "type": "string", "enum": [ "mcp" ], "description": "The type of the MCP tool. Always `mcp`." }, "server_label": { "type": "string", "description": "A label for this MCP server, used to identify it in tool calls." }, "server_url": { "type": "string", "description": "The URL for the MCP server." }, "headers": { "type": "object", "additionalProperties": { "type": "string" }, "nullable": true, "description": "Optional HTTP headers to send to the MCP server. Use for authentication\nor other purposes." 
}, "allowed_tools": { "anyOf": [ { "type": "array", "items": { "type": "string" } }, { "type": "object", "properties": { "tool_names": { "type": "array", "items": { "type": "string" }, "description": "List of allowed tool names." } } } ], "nullable": true, "description": "List of allowed tool names or a filter object." }, "require_approval": { "anyOf": [ { "type": "object", "properties": { "always": { "type": "object", "properties": { "tool_names": { "type": "array", "items": { "type": "string" }, "description": "List of tools that require approval." } }, "description": "A list of tools that always require approval." }, "never": { "type": "object", "properties": { "tool_names": { "type": "array", "items": { "type": "string" }, "description": "List of tools that do not require approval." } }, "description": "A list of tools that never require approval." } } }, { "type": "string", "enum": [ "always", "never" ] } ], "nullable": true, "description": "Specify which of the MCP server's tools require approval.", "default": "always" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "Give the model access to additional tools via remote Model Context Protocol\n(MCP) servers. [Learn more about MCP](/docs/guides/tools-remote-mcp)." }, "OpenAI.MetadataPropertyForRequest": { "type": "object", "properties": { "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" } }, "description": "Set of 16 key-value pairs that can be attached to an object. 
This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters." }, "OpenAI.Model": { "type": "object", "required": [ "id", "created", "object", "owned_by" ], "properties": { "id": { "type": "string", "description": "The model identifier, which can be referenced in the API endpoints." }, "created": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) when the model was created." }, "object": { "type": "string", "enum": [ "model" ], "description": "The object type, which is always \"model\"." }, "owned_by": { "type": "string", "description": "The organization that owns the model." } }, "description": "Describes an OpenAI model offering that can be used with the API." }, "OpenAI.OtherChunkingStrategyResponseParam": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "other" ], "description": "Always `other`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyResponseParam" } ], "description": "This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API." }, "OpenAI.ParallelToolCalls": { "type": "boolean", "description": "Whether to enable [parallel function calling](/docs/guides/function-calling#configuring-parallel-function-calling) during tool use." }, "OpenAI.Prompt": { "type": "object", "required": [ "id" ], "properties": { "id": { "type": "string", "description": "The unique identifier of the prompt template to use." }, "version": { "type": "string", "nullable": true, "description": "Optional version of the prompt template." 
}, "variables": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsePromptVariables" } ], "nullable": true } }, "description": "Reference to a prompt template and its variables.\n[Learn more](/docs/guides/text?api-mode=responses#reusable-prompts)." }, "OpenAI.RankingOptions": { "type": "object", "properties": { "ranker": { "type": "string", "enum": [ "auto", "default-2024-11-15" ], "description": "The ranker to use for the file search." }, "score_threshold": { "type": "number", "format": "float", "description": "The score threshold for the file search, a number between 0 and 1. Numbers closer to 1 will attempt to return only the most relevant results, but may return fewer results." } } }, "OpenAI.Reasoning": { "type": "object", "properties": { "effort": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ReasoningEffort" } ], "nullable": true, "default": "medium" }, "summary": { "type": "string", "enum": [ "auto", "concise", "detailed" ], "nullable": true, "description": "A summary of the reasoning performed by the model. This can be\nuseful for debugging and understanding the model's reasoning process.\nOne of `auto`, `concise`, or `detailed`." }, "generate_summary": { "type": "string", "enum": [ "auto", "concise", "detailed" ], "nullable": true, "description": "**Deprecated:** use `summary` instead.\n\nA summary of the reasoning performed by the model. This can be\nuseful for debugging and understanding the model's reasoning process.\nOne of `auto`, `concise`, or `detailed`.", "deprecated": true, "default": null } }, "description": "**o-series models only**\n\nConfiguration options for\n[reasoning models](https://platform.openai.com/docs/guides/reasoning)." 
}, "OpenAI.ReasoningEffort": { "type": "string", "enum": [ "low", "medium", "high" ], "description": "**o-series models only**\n\nConstrains effort on reasoning for\n[reasoning models](https://platform.openai.com/docs/guides/reasoning).\nCurrently supported values are `low`, `medium`, and `high`. Reducing\nreasoning effort can result in faster responses and fewer tokens used\non reasoning in a response." }, "OpenAI.ReasoningItemParam": { "type": "object", "required": [ "type", "summary" ], "properties": { "type": { "type": "string", "enum": [ "reasoning" ] }, "encrypted_content": { "type": "string", "nullable": true, "description": "The encrypted content of the reasoning item - populated when a response is\ngenerated with `reasoning.encrypted_content` in the `include` parameter." }, "summary": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ReasoningItemSummaryPart" }, "description": "Reasoning text contents." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "A description of the chain of thought used by a reasoning model while generating\na response. Be sure to include these items in your `input` to the Responses API\nfor subsequent turns of a conversation if you are manually\n[managing context](/docs/guides/conversation-state).\n" }, "OpenAI.ReasoningItemResource": { "type": "object", "required": [ "type", "summary" ], "properties": { "type": { "type": "string", "enum": [ "reasoning" ] }, "encrypted_content": { "type": "string", "nullable": true, "description": "The encrypted content of the reasoning item - populated when a response is\ngenerated with `reasoning.encrypted_content` in the `include` parameter." }, "summary": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ReasoningItemSummaryPart" }, "description": "Reasoning text contents." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A description of the chain of thought used by a reasoning model while generating\na response. Be sure to include these items in your `input` to the Responses API\nfor subsequent turns of a conversation if you are manually\n[managing context](/docs/guides/conversation-state).\n" }, "OpenAI.ReasoningItemSummaryPart": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ReasoningItemSummaryPartType" } }, "discriminator": { "propertyName": "type", "mapping": { "summary_text": "#/components/schemas/OpenAI.ReasoningItemSummaryTextPart" } } }, "OpenAI.ReasoningItemSummaryPartType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "summary_text" ] } ] }, "OpenAI.ReasoningItemSummaryTextPart": { "type": "object", "required": [ "type", "text" ], "properties": { "type": { "type": "string", "enum": [ "summary_text" ] }, "text": { "type": "string" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ReasoningItemSummaryPart" } ] }, "OpenAI.Response": { "type": "object", "required": [ "metadata", "temperature", "top_p", "user", "id", "object", "created_at", "error", "incomplete_details", "output", "instructions", "parallel_tool_calls" ], "properties": { "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "nullable": true, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" }, "temperature": { "type": "number", "format": "float", "nullable": true, "minimum": 0, "maximum": 2, "description": "What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.\nWe generally recommend altering this or `top_p` but not both." }, "top_p": { "type": "number", "format": "float", "nullable": true, "minimum": 0, "maximum": 1, "description": "An alternative to sampling with temperature, called nucleus sampling,\nwhere the model considers the results of the tokens with top_p probability\nmass. So 0.1 means only the tokens comprising the top 10% probability mass\nare considered.\n\nWe generally recommend altering this or `temperature` but not both." }, "user": { "type": "string", "nullable": true, "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices#end-user-ids)." }, "top_logprobs": { "type": "integer", "format": "int32", "nullable": true, "description": "An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability." }, "previous_response_id": { "type": "string", "nullable": true, "description": "The unique ID of the previous response to the model. Use this to\ncreate multi-turn conversations. Learn more about\n[conversation state](/docs/guides/conversation-state)." }, "reasoning": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.Reasoning" } ], "nullable": true }, "background": { "type": "boolean", "nullable": true, "description": "Whether to run the model response in the background.\n[Learn more](/docs/guides/background).", "default": false }, "max_output_tokens": { "type": "integer", "format": "int32", "nullable": true, "description": "An upper bound for the number of tokens that can be generated for a response, including visible output tokens and [reasoning tokens](/docs/guides/reasoning)." 
}, "max_tool_calls": { "type": "integer", "format": "int32", "nullable": true, "description": "The maximum number of total calls to built-in tools that can be processed in a response. This maximum number applies across all built-in tool calls, not per individual tool. Any further attempts to call a tool by the model will be ignored." }, "text": { "type": "object", "properties": { "format": { "$ref": "#/components/schemas/OpenAI.ResponseTextFormatConfiguration" } }, "description": "Configuration options for a text response from the model. Can be plain\ntext or structured JSON data. Learn more:\n- [Text inputs and outputs](/docs/guides/text)\n- [Structured Outputs](/docs/guides/structured-outputs)" }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.Tool" }, "description": "An array of tools the model may call while generating a response. You\ncan specify which tool to use by setting the `tool_choice` parameter.\n\nThe two categories of tools you can provide the model are:\n\n- **Built-in tools**: Tools that are provided by OpenAI that extend the\n model's capabilities, like [web search](/docs/guides/tools-web-search)\n or [file search](/docs/guides/tools-file-search). Learn more about\n [built-in tools](/docs/guides/tools).\n- **Function calls (custom tools)**: Functions that are defined by you,\n enabling the model to call your own code. Learn more about\n [function calling](/docs/guides/function-calling)." }, "tool_choice": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceOptions" }, { "$ref": "#/components/schemas/OpenAI.ToolChoiceObject" } ], "description": "How the model should select which tool (or tools) to use when generating\na response. See the `tools` parameter to see how to specify which tools\nthe model can call." 
}, "prompt": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.Prompt" } ], "nullable": true }, "truncation": { "type": "string", "enum": [ "auto", "disabled" ], "nullable": true, "description": "The truncation strategy to use for the model response.\n- `auto`: If the context of this response and previous ones exceeds\n the model's context window size, the model will truncate the\n response to fit the context window by dropping input items in the\n middle of the conversation.\n- `disabled` (default): If a model response will exceed the context window\n size for a model, the request will fail with a 400 error.", "default": "disabled" }, "id": { "type": "string", "description": "Unique identifier for this Response." }, "object": { "type": "string", "enum": [ "response" ], "description": "The object type of this resource - always set to `response`." }, "status": { "type": "string", "enum": [ "completed", "failed", "in_progress", "cancelled", "queued", "incomplete" ], "description": "The status of the response generation. One of `completed`, `failed`,\n`in_progress`, `cancelled`, `queued`, or `incomplete`." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (in seconds) of when this Response was created." }, "error": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseError" } ], "nullable": true }, "incomplete_details": { "type": "object", "properties": { "reason": { "type": "string", "enum": [ "max_output_tokens", "content_filter" ], "description": "The reason why the response is incomplete." } }, "nullable": true, "description": "Details about why the response is incomplete." 
}, "output": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemResource" }, "description": "An array of content items generated by the model.\n\n- The length and order of items in the `output` array is dependent\n on the model's response.\n- Rather than accessing the first item in the `output` array and\n assuming it's an `assistant` message with the content generated by\n the model, you might consider using the `output_text` property where\n supported in SDKs." }, "instructions": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemParam" } } ], "nullable": true, "description": "A system (or developer) message inserted into the model's context.\n\nWhen using along with `previous_response_id`, the instructions from a previous\nresponse will not be carried over to the next response. This makes it simple\nto swap out system (or developer) messages in new responses." }, "output_text": { "type": "string", "nullable": true, "description": "SDK-only convenience property that contains the aggregated text output\nfrom all `output_text` items in the `output` array, if any are present.\nSupported in the Python and JavaScript SDKs." }, "usage": { "$ref": "#/components/schemas/OpenAI.ResponseUsage" }, "parallel_tool_calls": { "type": "boolean", "description": "Whether to allow the model to run tool calls in parallel.", "default": true } } }, "OpenAI.ResponseCodeInterpreterCallCodeDeltaEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "delta", "obfuscation" ], "properties": { "type": { "type": "string", "enum": [ "response.code_interpreter_call_code.delta" ], "description": "The type of the event. Always `response.code_interpreter_call_code.delta`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response for which the code is being streamed." 
}, "item_id": { "type": "string", "description": "The unique identifier of the code interpreter tool call item." }, "delta": { "type": "string", "description": "The partial code snippet being streamed by the code interpreter." }, "obfuscation": { "type": "string", "description": "A field of random characters introduced by stream obfuscation. Stream obfuscation is a mechanism that mitigates certain side-channel attacks." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a partial code snippet is streamed by the code interpreter." }, "OpenAI.ResponseCodeInterpreterCallCodeDoneEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "code" ], "properties": { "type": { "type": "string", "enum": [ "response.code_interpreter_call_code.done" ], "description": "The type of the event. Always `response.code_interpreter_call_code.done`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response for which the code is finalized." }, "item_id": { "type": "string", "description": "The unique identifier of the code interpreter tool call item." }, "code": { "type": "string", "description": "The final code snippet output by the code interpreter." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when the code snippet is finalized by the code interpreter." }, "OpenAI.ResponseCodeInterpreterCallCompletedEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.code_interpreter_call.completed" ], "description": "The type of the event. Always `response.code_interpreter_call.completed`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response for which the code interpreter call is completed." 
}, "item_id": { "type": "string", "description": "The unique identifier of the code interpreter tool call item." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when the code interpreter call is completed." }, "OpenAI.ResponseCodeInterpreterCallInProgressEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.code_interpreter_call.in_progress" ], "description": "The type of the event. Always `response.code_interpreter_call.in_progress`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response for which the code interpreter call is in progress." }, "item_id": { "type": "string", "description": "The unique identifier of the code interpreter tool call item." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a code interpreter call is in progress." }, "OpenAI.ResponseCodeInterpreterCallInterpretingEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.code_interpreter_call.interpreting" ], "description": "The type of the event. Always `response.code_interpreter_call.interpreting`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response for which the code interpreter is interpreting code." }, "item_id": { "type": "string", "description": "The unique identifier of the code interpreter tool call item." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when the code interpreter is actively interpreting the code snippet." 
}, "OpenAI.ResponseCompletedEvent": { "type": "object", "required": [ "type", "response" ], "properties": { "type": { "type": "string", "enum": [ "response.completed" ], "description": "The type of the event. Always `response.completed`." }, "response": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Response" } ], "description": "Properties of the completed response." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when the model response is complete." }, "OpenAI.ResponseContentPartAddedEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "part" ], "properties": { "type": { "type": "string", "enum": [ "response.content_part.added" ], "description": "The type of the event. Always `response.content_part.added`." }, "item_id": { "type": "string", "description": "The ID of the output item that the content part was added to." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that the content part was added to." }, "content_index": { "type": "integer", "format": "int32", "description": "The index of the content part that was added." }, "part": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemContent" } ], "description": "The content part that was added." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a new content part is added." }, "OpenAI.ResponseContentPartDoneEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "part" ], "properties": { "type": { "type": "string", "enum": [ "response.content_part.done" ], "description": "The type of the event. Always `response.content_part.done`." }, "item_id": { "type": "string", "description": "The ID of the output item that the content part was added to." 
}, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that the content part was added to." }, "content_index": { "type": "integer", "format": "int32", "description": "The index of the content part that is done." }, "part": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemContent" } ], "description": "The content part that is done." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a content part is done." }, "OpenAI.ResponseCreatedEvent": { "type": "object", "required": [ "type", "response" ], "properties": { "type": { "type": "string", "enum": [ "response.created" ], "description": "The type of the event. Always `response.created`." }, "response": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Response" } ], "description": "The response that was created." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "An event that is emitted when a response is created." }, "OpenAI.ResponseError": { "type": "object", "required": [ "code", "message" ], "properties": { "code": { "$ref": "#/components/schemas/OpenAI.ResponseErrorCode" }, "message": { "type": "string", "description": "A human-readable description of the error." } }, "description": "An error object returned when the model fails to generate a Response." }, "OpenAI.ResponseErrorCode": { "type": "string", "enum": [ "server_error", "rate_limit_exceeded", "invalid_prompt", "vector_store_timeout", "invalid_image", "invalid_image_format", "invalid_base64_image", "invalid_image_url", "image_too_large", "image_too_small", "image_parse_error", "image_content_policy_violation", "invalid_image_mode", "image_file_too_large", "unsupported_image_media_type", "empty_image_file", "failed_to_download_image", "image_file_not_found" ], "description": "The error code for the response." 
}, "OpenAI.ResponseErrorEvent": { "type": "object", "required": [ "type", "code", "message", "param" ], "properties": { "type": { "type": "string", "enum": [ "error" ], "description": "The type of the event. Always `error`." }, "code": { "type": "string", "nullable": true, "description": "The error code." }, "message": { "type": "string", "description": "The error message." }, "param": { "type": "string", "nullable": true, "description": "The error parameter." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when an error occurs." }, "OpenAI.ResponseFailedEvent": { "type": "object", "required": [ "type", "response" ], "properties": { "type": { "type": "string", "enum": [ "response.failed" ], "description": "The type of the event. Always `response.failed`." }, "response": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Response" } ], "description": "The response that failed." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "An event that is emitted when a response fails." }, "OpenAI.ResponseFileSearchCallCompletedEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.file_search_call.completed" ], "description": "The type of the event. Always `response.file_search_call.completed`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in which the file search call was initiated." }, "item_id": { "type": "string", "description": "The ID of the output item in which the file search call was initiated." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a file search call is completed (results found)." 
}, "OpenAI.ResponseFileSearchCallInProgressEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.file_search_call.in_progress" ], "description": "The type of the event. Always `response.file_search_call.in_progress`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in which the file search call was initiated." }, "item_id": { "type": "string", "description": "The ID of the output item in which the file search call was initiated." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a file search call is initiated." }, "OpenAI.ResponseFileSearchCallSearchingEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.file_search_call.searching" ], "description": "The type of the event. Always `response.file_search_call.searching`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in which the file search call is searching." }, "item_id": { "type": "string", "description": "The ID of the output item in which the file search call was initiated." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a file search is currently searching." 
}, "OpenAI.ResponseFormat": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "text", "json_object", "json_schema" ] } }, "discriminator": { "propertyName": "type", "mapping": { "text": "#/components/schemas/OpenAI.ResponseFormatText", "json_object": "#/components/schemas/OpenAI.ResponseFormatJsonObject", "json_schema": "#/components/schemas/OpenAI.ResponseFormatJsonSchema" } } }, "OpenAI.ResponseFormatJsonObject": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "json_object" ], "description": "The type of response format being defined. Always `json_object`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseFormat" } ], "description": "JSON object response format. An older method of generating JSON responses.\nUsing `json_schema` is recommended for models that support it. Note that the\nmodel will not generate JSON without a system or user message instructing it\nto do so." }, "OpenAI.ResponseFormatJsonSchema": { "type": "object", "required": [ "type", "json_schema" ], "properties": { "type": { "type": "string", "enum": [ "json_schema" ], "description": "The type of response format being defined. Always `json_schema`." }, "json_schema": { "type": "object", "properties": { "description": { "type": "string", "description": "A description of what the response format is for, used by the model to\ndetermine how to respond in the format." }, "name": { "type": "string", "description": "The name of the response format. Must be a-z, A-Z, 0-9, or contain\nunderscores and dashes, with a maximum length of 64." }, "schema": { "$ref": "#/components/schemas/OpenAI.ResponseFormatJsonSchemaSchema" }, "strict": { "type": "boolean", "nullable": true, "description": "Whether to enable strict schema adherence when generating the output.\nIf set to true, the model will always follow the exact schema defined\nin the `schema` field. 
Only a subset of JSON Schema is supported when\n`strict` is `true`. To learn more, read the [Structured Outputs\nguide](/docs/guides/structured-outputs).", "default": false } }, "required": [ "name" ], "description": "Structured Outputs configuration options, including a JSON Schema." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseFormat" } ], "description": "JSON Schema response format. Used to generate structured JSON responses.\nLearn more about [Structured Outputs](/docs/guides/structured-outputs)." }, "OpenAI.ResponseFormatJsonSchemaSchema": { "type": "object", "additionalProperties": {}, "description": "The schema for the response format, described as a JSON Schema object.\nLearn how to build JSON schemas [here](https://json-schema.org/)." }, "OpenAI.ResponseFormatText": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "text" ], "description": "The type of response format being defined. Always `text`." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseFormat" } ], "description": "Default response format. Used to generate text responses." }, "OpenAI.ResponseFunctionCallArgumentsDeltaEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "delta", "obfuscation" ], "properties": { "type": { "type": "string", "enum": [ "response.function_call_arguments.delta" ], "description": "The type of the event. Always `response.function_call_arguments.delta`." }, "item_id": { "type": "string", "description": "The ID of the output item that the function-call arguments delta is added to." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that the function-call arguments delta is added to." }, "delta": { "type": "string", "description": "The function-call arguments delta that is added." }, "obfuscation": { "type": "string", "description": "A field of random characters introduced by stream obfuscation. 
Stream obfuscation is a mechanism that mitigates certain side-channel attacks." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when there is a partial function-call arguments delta." }, "OpenAI.ResponseFunctionCallArgumentsDoneEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "response.function_call_arguments.done" ] }, "item_id": { "type": "string", "description": "The ID of the item." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item." }, "arguments": { "type": "string", "description": "The function-call arguments." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when function-call arguments are finalized." }, "OpenAI.ResponseImageGenCallCompletedEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.image_generation_call.completed" ], "description": "The type of the event. Always 'response.image_generation_call.completed'." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the image generation item being processed." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when an image generation tool call has completed and the final image is available." }, "OpenAI.ResponseImageGenCallGeneratingEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.image_generation_call.generating" ], "description": "The type of the event. Always 'response.image_generation_call.generating'." 
}, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the image generation item being processed." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when an image generation tool call is actively generating an image (intermediate state)." }, "OpenAI.ResponseImageGenCallInProgressEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.image_generation_call.in_progress" ], "description": "The type of the event. Always 'response.image_generation_call.in_progress'." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the image generation item being processed." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when an image generation tool call is in progress." }, "OpenAI.ResponseImageGenCallPartialImageEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "partial_image_index", "partial_image_b64" ], "properties": { "type": { "type": "string", "enum": [ "response.image_generation_call.partial_image" ], "description": "The type of the event. Always 'response.image_generation_call.partial_image'." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the image generation item being processed." }, "partial_image_index": { "type": "integer", "format": "int32", "description": "0-based index for the partial image (backend is 1-based, but this is 0-based for the user)." 
}, "partial_image_b64": { "type": "string", "description": "Base64-encoded partial image data, suitable for rendering as an image." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a partial image is available during image generation streaming." }, "OpenAI.ResponseInProgressEvent": { "type": "object", "required": [ "type", "response" ], "properties": { "type": { "type": "string", "enum": [ "response.in_progress" ], "description": "The type of the event. Always `response.in_progress`." }, "response": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Response" } ], "description": "The response that is in progress." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when the response is in progress." }, "OpenAI.ResponseIncompleteEvent": { "type": "object", "required": [ "type", "response" ], "properties": { "type": { "type": "string", "enum": [ "response.incomplete" ], "description": "The type of the event. Always `response.incomplete`." }, "response": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Response" } ], "description": "The response that was incomplete." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "An event that is emitted when a response finishes as incomplete." }, "OpenAI.ResponseItemList": { "type": "object", "required": [ "object", "data", "has_more", "first_id", "last_id" ], "properties": { "object": { "type": "string", "enum": [ "list" ], "description": "The type of object returned, must be `list`." }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemResource" }, "description": "A list of items used to generate this response." }, "has_more": { "type": "boolean", "description": "Whether there are more items available." }, "first_id": { "type": "string", "description": "The ID of the first item in the list." 
}, "last_id": { "type": "string", "description": "The ID of the last item in the list." } }, "description": "A list of Response items." }, "OpenAI.ResponseMCPCallArgumentsDeltaEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "delta", "obfuscation" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_call.arguments_delta" ], "description": "The type of the event. Always 'response.mcp_call.arguments_delta'." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the MCP tool call item being processed." }, "delta": { "description": "The partial update to the arguments for the MCP tool call." }, "obfuscation": { "type": "string", "description": "A field of random characters introduced by stream obfuscation. Stream obfuscation is a mechanism that mitigates certain side-channel attacks." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when there is a delta (partial update) to the arguments of an MCP tool call." }, "OpenAI.ResponseMCPCallArgumentsDoneEvent": { "type": "object", "required": [ "type", "output_index", "item_id", "arguments" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_call.arguments_done" ], "description": "The type of the event. Always 'response.mcp_call.arguments_done'." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the MCP tool call item being processed." }, "arguments": { "description": "The finalized arguments for the MCP tool call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when the arguments for an MCP tool call are finalized." 
}, "OpenAI.ResponseMCPCallCompletedEvent": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_call.completed" ], "description": "The type of the event. Always 'response.mcp_call.completed'." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when an MCP tool call has completed successfully." }, "OpenAI.ResponseMCPCallFailedEvent": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_call.failed" ], "description": "The type of the event. Always 'response.mcp_call.failed'." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when an MCP tool call has failed." }, "OpenAI.ResponseMCPCallInProgressEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_call.in_progress" ], "description": "The type of the event. Always 'response.mcp_call.in_progress'." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response's output array." }, "item_id": { "type": "string", "description": "The unique identifier of the MCP tool call item being processed." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when an MCP tool call is in progress." }, "OpenAI.ResponseMCPListToolsCompletedEvent": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_list_tools.completed" ], "description": "The type of the event. Always 'response.mcp_list_tools.completed'." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when the list of available MCP tools has been successfully retrieved." 
}, "OpenAI.ResponseMCPListToolsFailedEvent": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_list_tools.failed" ], "description": "The type of the event. Always 'response.mcp_list_tools.failed'." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when the attempt to list available MCP tools has failed." }, "OpenAI.ResponseMCPListToolsInProgressEvent": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "response.mcp_list_tools.in_progress" ], "description": "The type of the event. Always 'response.mcp_list_tools.in_progress'." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when the system is in the process of retrieving the list of available MCP tools." }, "OpenAI.ResponseOutputItemAddedEvent": { "type": "object", "required": [ "type", "output_index", "item" ], "properties": { "type": { "type": "string", "enum": [ "response.output_item.added" ], "description": "The type of the event. Always `response.output_item.added`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that was added." }, "item": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "The output item that was added." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a new output item is added." }, "OpenAI.ResponseOutputItemDoneEvent": { "type": "object", "required": [ "type", "output_index", "item" ], "properties": { "type": { "type": "string", "enum": [ "response.output_item.done" ], "description": "The type of the event. Always `response.output_item.done`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that was marked done." 
}, "item": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "The output item that was marked done." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when an output item is marked done." }, "OpenAI.ResponsePromptVariables": { "type": "object", "additionalProperties": { "$ref": "#/components/schemas/OpenAI.ItemParam" }, "description": "Optional map of values to substitute in for variables in your\nprompt. The substitution values can either be strings, or other\nResponse input types like images or files.", "x-oaiExpandable": true, "x-oaiTypeLabel": "map" }, "OpenAI.ResponseQueuedEvent": { "type": "object", "required": [ "type", "response" ], "properties": { "type": { "type": "string", "enum": [ "response.queued" ], "description": "The type of the event. Always 'response.queued'." }, "response": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.Response" } ], "description": "The full response object that is queued." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a response is queued and waiting to be processed." }, "OpenAI.ResponseReasoningDeltaEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "delta", "obfuscation" ], "properties": { "type": { "type": "string", "enum": [ "response.reasoning.delta" ], "description": "The type of the event. Always 'response.reasoning.delta'." }, "item_id": { "type": "string", "description": "The unique identifier of the item for which reasoning is being updated." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response's output array." }, "content_index": { "type": "integer", "format": "int32", "description": "The index of the reasoning content part within the output item." }, "delta": { "description": "The partial update to the reasoning content." 
}, "obfuscation": { "type": "string", "description": "A field of random characters introduced by stream obfuscation. Stream obfuscation is a mechanism that mitigates certain side-channel attacks." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when there is a delta (partial update) to the reasoning content." }, "OpenAI.ResponseReasoningDoneEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "text" ], "properties": { "type": { "type": "string", "enum": [ "response.reasoning.done" ], "description": "The type of the event. Always 'response.reasoning.done'." }, "item_id": { "type": "string", "description": "The unique identifier of the item for which reasoning is finalized." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response's output array." }, "content_index": { "type": "integer", "format": "int32", "description": "The index of the reasoning content part within the output item." }, "text": { "type": "string", "description": "The finalized reasoning text." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when the reasoning content is finalized for an item." }, "OpenAI.ResponseReasoningSummaryDeltaEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "summary_index", "delta", "obfuscation" ], "properties": { "type": { "type": "string", "enum": [ "response.reasoning_summary.delta" ], "description": "The type of the event. Always 'response.reasoning_summary.delta'." }, "item_id": { "type": "string", "description": "The unique identifier of the item for which the reasoning summary is being updated." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response's output array." 
}, "summary_index": { "type": "integer", "format": "int32", "description": "The index of the summary part within the output item." }, "delta": { "description": "The partial update to the reasoning summary content." }, "obfuscation": { "type": "string", "description": "A field of random characters introduced by stream obfuscation. Stream obfuscation is a mechanism that mitigates certain side-channel attacks." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when there is a delta (partial update) to the reasoning summary content." }, "OpenAI.ResponseReasoningSummaryDoneEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "summary_index", "text" ], "properties": { "type": { "type": "string", "enum": [ "response.reasoning_summary.done" ], "description": "The type of the event. Always 'response.reasoning_summary.done'." }, "item_id": { "type": "string", "description": "The unique identifier of the item for which the reasoning summary is finalized." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item in the response's output array." }, "summary_index": { "type": "integer", "format": "int32", "description": "The index of the summary part within the output item." }, "text": { "type": "string", "description": "The finalized reasoning summary text." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when the reasoning summary content is finalized for an item." }, "OpenAI.ResponseReasoningSummaryPartAddedEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "summary_index", "part" ], "properties": { "type": { "type": "string", "enum": [ "response.reasoning_summary_part.added" ], "description": "The type of the event. Always `response.reasoning_summary_part.added`." }, "item_id": { "type": "string", "description": "The ID of the item this summary part is associated with." 
}, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item this summary part is associated with." }, "summary_index": { "type": "integer", "format": "int32", "description": "The index of the summary part within the reasoning summary." }, "part": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ReasoningItemSummaryPart" } ], "description": "The summary part that was added." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a new reasoning summary part is added." }, "OpenAI.ResponseReasoningSummaryPartDoneEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "summary_index", "part" ], "properties": { "type": { "type": "string", "enum": [ "response.reasoning_summary_part.done" ], "description": "The type of the event. Always `response.reasoning_summary_part.done`." }, "item_id": { "type": "string", "description": "The ID of the item this summary part is associated with." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item this summary part is associated with." }, "summary_index": { "type": "integer", "format": "int32", "description": "The index of the summary part within the reasoning summary." }, "part": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ReasoningItemSummaryPart" } ], "description": "The completed summary part." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a reasoning summary part is completed." }, "OpenAI.ResponseReasoningSummaryTextDeltaEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "summary_index", "delta", "obfuscation" ], "properties": { "type": { "type": "string", "enum": [ "response.reasoning_summary_text.delta" ], "description": "The type of the event. Always `response.reasoning_summary_text.delta`." 
}, "item_id": { "type": "string", "description": "The ID of the item this summary text delta is associated with." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item this summary text delta is associated with." }, "summary_index": { "type": "integer", "format": "int32", "description": "The index of the summary part within the reasoning summary." }, "delta": { "type": "string", "description": "The text delta that was added to the summary." }, "obfuscation": { "type": "string", "description": "A field of random characters introduced by stream obfuscation. Stream obfuscation is a mechanism that mitigates certain side-channel attacks." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a delta is added to a reasoning summary text." }, "OpenAI.ResponseReasoningSummaryTextDoneEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "summary_index", "text" ], "properties": { "type": { "type": "string", "enum": [ "response.reasoning_summary_text.done" ], "description": "The type of the event. Always `response.reasoning_summary_text.done`." }, "item_id": { "type": "string", "description": "The ID of the item this summary text is associated with." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item this summary text is associated with." }, "summary_index": { "type": "integer", "format": "int32", "description": "The index of the summary part within the reasoning summary." }, "text": { "type": "string", "description": "The full text of the completed reasoning summary." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when a reasoning summary text is completed." 
}, "OpenAI.ResponseRefusalDeltaEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "delta", "obfuscation" ], "properties": { "type": { "type": "string", "enum": [ "response.refusal.delta" ], "description": "The type of the event. Always `response.refusal.delta`." }, "item_id": { "type": "string", "description": "The ID of the output item that the refusal text is added to." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that the refusal text is added to." }, "content_index": { "type": "integer", "format": "int32", "description": "The index of the content part that the refusal text is added to." }, "delta": { "type": "string", "description": "The refusal text that is added." }, "obfuscation": { "type": "string", "description": "A field of random characters introduced by stream obfuscation. Stream obfuscation is a mechanism that mitigates certain side-channel attacks." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when there is a partial refusal text." }, "OpenAI.ResponseRefusalDoneEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "refusal" ], "properties": { "type": { "type": "string", "enum": [ "response.refusal.done" ], "description": "The type of the event. Always `response.refusal.done`." }, "item_id": { "type": "string", "description": "The ID of the output item that the refusal text is finalized." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that the refusal text is finalized." }, "content_index": { "type": "integer", "format": "int32", "description": "The index of the content part that the refusal text is finalized." }, "refusal": { "type": "string", "description": "The refusal text that is finalized." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when refusal text is finalized." }, "OpenAI.ResponseStreamEvent": { "type": "object", "required": [ "type", "sequence_number" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ResponseStreamEventType" }, "sequence_number": { "type": "integer", "format": "int32", "description": "The sequence number for this event." } }, "discriminator": { "propertyName": "type", "mapping": { "response.completed": "#/components/schemas/OpenAI.ResponseCompletedEvent", "response.content_part.added": "#/components/schemas/OpenAI.ResponseContentPartAddedEvent", "response.content_part.done": "#/components/schemas/OpenAI.ResponseContentPartDoneEvent", "response.created": "#/components/schemas/OpenAI.ResponseCreatedEvent", "error": "#/components/schemas/OpenAI.ResponseErrorEvent", "response.file_search_call.completed": "#/components/schemas/OpenAI.ResponseFileSearchCallCompletedEvent", "response.file_search_call.in_progress": "#/components/schemas/OpenAI.ResponseFileSearchCallInProgressEvent", "response.file_search_call.searching": "#/components/schemas/OpenAI.ResponseFileSearchCallSearchingEvent", "response.function_call_arguments.delta": "#/components/schemas/OpenAI.ResponseFunctionCallArgumentsDeltaEvent", "response.function_call_arguments.done": "#/components/schemas/OpenAI.ResponseFunctionCallArgumentsDoneEvent", "response.in_progress": "#/components/schemas/OpenAI.ResponseInProgressEvent", "response.failed": "#/components/schemas/OpenAI.ResponseFailedEvent", "response.incomplete": "#/components/schemas/OpenAI.ResponseIncompleteEvent", "response.output_item.added": "#/components/schemas/OpenAI.ResponseOutputItemAddedEvent", "response.output_item.done": "#/components/schemas/OpenAI.ResponseOutputItemDoneEvent", "response.refusal.delta": "#/components/schemas/OpenAI.ResponseRefusalDeltaEvent", "response.refusal.done": 
"#/components/schemas/OpenAI.ResponseRefusalDoneEvent", "response.output_text.delta": "#/components/schemas/OpenAI.ResponseTextDeltaEvent", "response.output_text.done": "#/components/schemas/OpenAI.ResponseTextDoneEvent", "response.reasoning_summary_part.added": "#/components/schemas/OpenAI.ResponseReasoningSummaryPartAddedEvent", "response.reasoning_summary_part.done": "#/components/schemas/OpenAI.ResponseReasoningSummaryPartDoneEvent", "response.reasoning_summary_text.delta": "#/components/schemas/OpenAI.ResponseReasoningSummaryTextDeltaEvent", "response.reasoning_summary_text.done": "#/components/schemas/OpenAI.ResponseReasoningSummaryTextDoneEvent", "response.web_search_call.completed": "#/components/schemas/OpenAI.ResponseWebSearchCallCompletedEvent", "response.web_search_call.in_progress": "#/components/schemas/OpenAI.ResponseWebSearchCallInProgressEvent", "response.web_search_call.searching": "#/components/schemas/OpenAI.ResponseWebSearchCallSearchingEvent", "response.image_generation_call.completed": "#/components/schemas/OpenAI.ResponseImageGenCallCompletedEvent", "response.image_generation_call.generating": "#/components/schemas/OpenAI.ResponseImageGenCallGeneratingEvent", "response.image_generation_call.in_progress": "#/components/schemas/OpenAI.ResponseImageGenCallInProgressEvent", "response.image_generation_call.partial_image": "#/components/schemas/OpenAI.ResponseImageGenCallPartialImageEvent", "response.mcp_call.arguments_delta": "#/components/schemas/OpenAI.ResponseMCPCallArgumentsDeltaEvent", "response.mcp_call.arguments_done": "#/components/schemas/OpenAI.ResponseMCPCallArgumentsDoneEvent", "response.mcp_call.completed": "#/components/schemas/OpenAI.ResponseMCPCallCompletedEvent", "response.mcp_call.failed": "#/components/schemas/OpenAI.ResponseMCPCallFailedEvent", "response.mcp_call.in_progress": "#/components/schemas/OpenAI.ResponseMCPCallInProgressEvent", "response.mcp_list_tools.completed": 
"#/components/schemas/OpenAI.ResponseMCPListToolsCompletedEvent", "response.mcp_list_tools.failed": "#/components/schemas/OpenAI.ResponseMCPListToolsFailedEvent", "response.mcp_list_tools.in_progress": "#/components/schemas/OpenAI.ResponseMCPListToolsInProgressEvent", "response.queued": "#/components/schemas/OpenAI.ResponseQueuedEvent", "response.reasoning.delta": "#/components/schemas/OpenAI.ResponseReasoningDeltaEvent", "response.reasoning.done": "#/components/schemas/OpenAI.ResponseReasoningDoneEvent", "response.reasoning_summary.delta": "#/components/schemas/OpenAI.ResponseReasoningSummaryDeltaEvent", "response.reasoning_summary.done": "#/components/schemas/OpenAI.ResponseReasoningSummaryDoneEvent", "response.code_interpreter_call_code.delta": "#/components/schemas/OpenAI.ResponseCodeInterpreterCallCodeDeltaEvent", "response.code_interpreter_call_code.done": "#/components/schemas/OpenAI.ResponseCodeInterpreterCallCodeDoneEvent", "response.code_interpreter_call.completed": "#/components/schemas/OpenAI.ResponseCodeInterpreterCallCompletedEvent", "response.code_interpreter_call.in_progress": "#/components/schemas/OpenAI.ResponseCodeInterpreterCallInProgressEvent", "response.code_interpreter_call.interpreting": "#/components/schemas/OpenAI.ResponseCodeInterpreterCallInterpretingEvent" } } }, "OpenAI.ResponseStreamEventType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "response.audio.delta", "response.audio.done", "response.audio_transcript.delta", "response.audio_transcript.done", "response.code_interpreter_call_code.delta", "response.code_interpreter_call_code.done", "response.code_interpreter_call.completed", "response.code_interpreter_call.in_progress", "response.code_interpreter_call.interpreting", "response.completed", "response.content_part.added", "response.content_part.done", "response.created", "error", "response.file_search_call.completed", "response.file_search_call.in_progress", "response.file_search_call.searching", 
"response.function_call_arguments.delta", "response.function_call_arguments.done", "response.in_progress", "response.failed", "response.incomplete", "response.output_item.added", "response.output_item.done", "response.refusal.delta", "response.refusal.done", "response.output_text.annotation.added", "response.output_text.delta", "response.output_text.done", "response.reasoning_summary_part.added", "response.reasoning_summary_part.done", "response.reasoning_summary_text.delta", "response.reasoning_summary_text.done", "response.web_search_call.completed", "response.web_search_call.in_progress", "response.web_search_call.searching", "response.image_generation_call.completed", "response.image_generation_call.generating", "response.image_generation_call.in_progress", "response.image_generation_call.partial_image", "response.mcp_call.arguments_delta", "response.mcp_call.arguments_done", "response.mcp_call.completed", "response.mcp_call.failed", "response.mcp_call.in_progress", "response.mcp_list_tools.completed", "response.mcp_list_tools.failed", "response.mcp_list_tools.in_progress", "response.queued", "response.reasoning.delta", "response.reasoning.done", "response.reasoning_summary.delta", "response.reasoning_summary.done" ] } ] }, "OpenAI.ResponseTextDeltaEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "delta", "obfuscation" ], "properties": { "type": { "type": "string", "enum": [ "response.output_text.delta" ], "description": "The type of the event. Always `response.output_text.delta`." }, "item_id": { "type": "string", "description": "The ID of the output item that the text delta was added to." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that the text delta was added to." }, "content_index": { "type": "integer", "format": "int32", "description": "The index of the content part that the text delta was added to." 
}, "delta": { "type": "string", "description": "The text delta that was added." }, "obfuscation": { "type": "string", "description": "A field of random characters introduced by stream obfuscation. Stream obfuscation is a mechanism that mitigates certain side-channel attacks." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when there is an additional text delta." }, "OpenAI.ResponseTextDoneEvent": { "type": "object", "required": [ "type", "item_id", "output_index", "content_index", "text" ], "properties": { "type": { "type": "string", "enum": [ "response.output_text.done" ], "description": "The type of the event. Always `response.output_text.done`." }, "item_id": { "type": "string", "description": "The ID of the output item that the text content is finalized." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that the text content is finalized." }, "content_index": { "type": "integer", "format": "int32", "description": "The index of the content part that the text content is finalized." }, "text": { "type": "string", "description": "The text content that is finalized." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Emitted when text content is finalized." 
}, "OpenAI.ResponseTextFormatConfiguration": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ResponseTextFormatConfigurationType" } }, "discriminator": { "propertyName": "type", "mapping": { "text": "#/components/schemas/OpenAI.ResponseTextFormatConfigurationText", "json_object": "#/components/schemas/OpenAI.ResponseTextFormatConfigurationJsonObject", "json_schema": "#/components/schemas/OpenAI.ResponseTextFormatConfigurationJsonSchema" } } }, "OpenAI.ResponseTextFormatConfigurationJsonObject": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "json_object" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseTextFormatConfiguration" } ] }, "OpenAI.ResponseTextFormatConfigurationJsonSchema": { "type": "object", "required": [ "type", "name", "schema" ], "properties": { "type": { "type": "string", "enum": [ "json_schema" ], "description": "The type of response format being defined. Always `json_schema`." }, "description": { "type": "string", "description": "A description of what the response format is for, used by the model to\ndetermine how to respond in the format." }, "name": { "type": "string", "description": "The name of the response format. Must be a-z, A-Z, 0-9, or contain\nunderscores and dashes, with a maximum length of 64." }, "schema": { "$ref": "#/components/schemas/OpenAI.ResponseFormatJsonSchemaSchema" }, "strict": { "type": "boolean", "nullable": true, "description": "Whether to enable strict schema adherence when generating the output.\nIf set to true, the model will always follow the exact schema defined\nin the `schema` field. Only a subset of JSON Schema is supported when\n`strict` is `true`. To learn more, read the [Structured Outputs\nguide](/docs/guides/structured-outputs).", "default": false } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseTextFormatConfiguration" } ], "description": "JSON Schema response format. 
Used to generate structured JSON responses.\nLearn more about [Structured Outputs](/docs/guides/structured-outputs)." }, "OpenAI.ResponseTextFormatConfigurationText": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "text" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseTextFormatConfiguration" } ] }, "OpenAI.ResponseTextFormatConfigurationType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "text", "json_schema", "json_object" ] } ], "description": "An object specifying the format that the model must output.\n\nConfiguring `{ \"type\": \"json_schema\" }` enables Structured Outputs,\nwhich ensures the model will match your supplied JSON schema. Learn more in the\n[Structured Outputs guide](/docs/guides/structured-outputs).\n\nThe default format is `{ \"type\": \"text\" }` with no additional options.\n\n**Not recommended for gpt-4o and newer models:**\n\nSetting to `{ \"type\": \"json_object\" }` enables the older JSON mode, which\nensures the message the model generates is valid JSON. Using `json_schema`\nis preferred for models that support it." }, "OpenAI.ResponseUsage": { "type": "object", "required": [ "input_tokens", "input_tokens_details", "output_tokens", "output_tokens_details", "total_tokens" ], "properties": { "input_tokens": { "type": "integer", "format": "int32", "description": "The number of input tokens." }, "input_tokens_details": { "type": "object", "properties": { "cached_tokens": { "type": "integer", "format": "int32", "description": "The number of tokens that were retrieved from the cache.\n[More on prompt caching](/docs/guides/prompt-caching)." } }, "required": [ "cached_tokens" ], "description": "A detailed breakdown of the input tokens." }, "output_tokens": { "type": "integer", "format": "int32", "description": "The number of output tokens." 
}, "output_tokens_details": { "type": "object", "properties": { "reasoning_tokens": { "type": "integer", "format": "int32", "description": "The number of reasoning tokens." } }, "required": [ "reasoning_tokens" ], "description": "A detailed breakdown of the output tokens." }, "total_tokens": { "type": "integer", "format": "int32", "description": "The total number of tokens used." } }, "description": "Represents token usage details including input tokens, output tokens,\na breakdown of output tokens, and the total tokens used." }, "OpenAI.ResponseWebSearchCallCompletedEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.web_search_call.completed" ], "description": "The type of the event. Always `response.web_search_call.completed`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that the web search call is associated with." }, "item_id": { "type": "string", "description": "Unique ID for the output item associated with the web search call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Note: web_search is not yet available via Azure OpenAI." }, "OpenAI.ResponseWebSearchCallInProgressEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.web_search_call.in_progress" ], "description": "The type of the event. Always `response.web_search_call.in_progress`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that the web search call is associated with." }, "item_id": { "type": "string", "description": "Unique ID for the output item associated with the web search call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Note: web_search is not yet available via Azure OpenAI." 
}, "OpenAI.ResponseWebSearchCallSearchingEvent": { "type": "object", "required": [ "type", "output_index", "item_id" ], "properties": { "type": { "type": "string", "enum": [ "response.web_search_call.searching" ], "description": "The type of the event. Always `response.web_search_call.searching`." }, "output_index": { "type": "integer", "format": "int32", "description": "The index of the output item that the web search call is associated with." }, "item_id": { "type": "string", "description": "Unique ID for the output item associated with the web search call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponseStreamEvent" } ], "description": "Note: web_search is not yet available via Azure OpenAI." }, "OpenAI.ResponsesAssistantMessageItemParam": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "enum": [ "assistant" ], "description": "The role of the message, which is always `assistant`." }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemContent" }, "description": "The content associated with the message." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsesMessageItemParam" } ], "description": "A message parameter item with the `assistant` role." }, "OpenAI.ResponsesAssistantMessageItemResource": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "enum": [ "assistant" ], "description": "The role of the message, which is always `assistant`." }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemContent" }, "description": "The content associated with the message." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsesMessageItemResource" } ], "description": "A message resource item with the `assistant` role." 
}, "OpenAI.ResponsesDeveloperMessageItemParam": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "enum": [ "developer" ], "description": "The role of the message, which is always `developer`." }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemContent" }, "description": "The content associated with the message." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsesMessageItemParam" } ], "description": "A message parameter item with the `developer` role." }, "OpenAI.ResponsesDeveloperMessageItemResource": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "enum": [ "developer" ], "description": "The role of the message, which is always `developer`." }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemContent" }, "description": "The content associated with the message." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsesMessageItemResource" } ], "description": "A message resource item with the `developer` role." }, "OpenAI.ResponsesMessageItemParam": { "type": "object", "required": [ "type", "role" ], "properties": { "type": { "type": "string", "enum": [ "message" ], "description": "The type of the responses item, which is always 'message'." }, "role": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsesMessageRole" } ], "description": "The role associated with the message." 
} }, "discriminator": { "propertyName": "role", "mapping": { "user": "#/components/schemas/OpenAI.ResponsesUserMessageItemParam", "system": "#/components/schemas/OpenAI.ResponsesSystemMessageItemParam", "developer": "#/components/schemas/OpenAI.ResponsesDeveloperMessageItemParam", "assistant": "#/components/schemas/OpenAI.ResponsesAssistantMessageItemParam" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "A response message item, representing a role and content, as provided as client request parameters." }, "OpenAI.ResponsesMessageItemResource": { "type": "object", "required": [ "type", "status", "role" ], "properties": { "type": { "type": "string", "enum": [ "message" ], "description": "The type of the responses item, which is always 'message'." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "incomplete" ], "description": "The status of the item. One of `in_progress`, `completed`, or\n`incomplete`. Populated when items are returned via API." }, "role": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsesMessageRole" } ], "description": "The role associated with the message." } }, "discriminator": { "propertyName": "role", "mapping": { "user": "#/components/schemas/OpenAI.ResponsesUserMessageItemResource", "system": "#/components/schemas/OpenAI.ResponsesSystemMessageItemResource", "developer": "#/components/schemas/OpenAI.ResponsesDeveloperMessageItemResource", "assistant": "#/components/schemas/OpenAI.ResponsesAssistantMessageItemResource" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "A response message resource item, representing a role and content, as provided on service responses." }, "OpenAI.ResponsesMessageRole": { "type": "string", "enum": [ "system", "developer", "user", "assistant" ], "description": "The collection of valid roles for responses message items." 
}, "OpenAI.ResponsesSystemMessageItemParam": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "enum": [ "system" ], "description": "The role of the message, which is always `system`." }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemContent" }, "description": "The content associated with the message." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsesMessageItemParam" } ], "description": "A message parameter item with the `system` role." }, "OpenAI.ResponsesSystemMessageItemResource": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "enum": [ "system" ], "description": "The role of the message, which is always `system`." }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemContent" }, "description": "The content associated with the message." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsesMessageItemResource" } ], "description": "A message resource item with the `system` role." }, "OpenAI.ResponsesUserMessageItemParam": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "enum": [ "user" ], "description": "The role of the message, which is always `user`." }, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemContent" }, "description": "The content associated with the message." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsesMessageItemParam" } ], "description": "A message parameter item with the `user` role." }, "OpenAI.ResponsesUserMessageItemResource": { "type": "object", "required": [ "role", "content" ], "properties": { "role": { "type": "string", "enum": [ "user" ], "description": "The role of the message, which is always `user`." 
}, "content": { "type": "array", "items": { "$ref": "#/components/schemas/OpenAI.ItemContent" }, "description": "The content associated with the message." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ResponsesMessageItemResource" } ], "description": "A message resource item with the `user` role." }, "OpenAI.RunGraderRequest": { "type": "object", "required": [ "grader", "model_sample" ], "properties": { "grader": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.GraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.GraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.GraderPython" }, { "$ref": "#/components/schemas/OpenAI.GraderScoreModel" }, { "$ref": "#/components/schemas/OpenAI.GraderMulti" } ], "description": "The grader used for the fine-tuning job." }, "item": { "description": "The dataset item provided to the grader. This will be used to populate\nthe `item` namespace. See [the guide](/docs/guides/graders) for more details." }, "model_sample": { "type": "string", "description": "The model sample to be evaluated. This value will be used to populate\nthe `sample` namespace. See [the guide](/docs/guides/graders) for more details.\nThe `output_json` variable will be populated if the model sample is a\nvalid JSON string." 
} } }, "OpenAI.RunGraderResponse": { "type": "object", "required": [ "reward", "metadata", "sub_rewards", "model_grader_token_usage_per_model" ], "properties": { "reward": { "type": "number", "format": "float" }, "metadata": { "type": "object", "properties": { "name": { "type": "string" }, "type": { "type": "string" }, "errors": { "type": "object", "properties": { "formula_parse_error": { "type": "boolean" }, "sample_parse_error": { "type": "boolean" }, "truncated_observation_error": { "type": "boolean" }, "unresponsive_reward_error": { "type": "boolean" }, "invalid_variable_error": { "type": "boolean" }, "other_error": { "type": "boolean" }, "python_grader_server_error": { "type": "boolean" }, "python_grader_server_error_type": { "type": "string", "nullable": true }, "python_grader_runtime_error": { "type": "boolean" }, "python_grader_runtime_error_details": { "type": "string", "nullable": true }, "model_grader_server_error": { "type": "boolean" }, "model_grader_refusal_error": { "type": "boolean" }, "model_grader_parse_error": { "type": "boolean" }, "model_grader_server_error_details": { "type": "string", "nullable": true } }, "required": [ "formula_parse_error", "sample_parse_error", "truncated_observation_error", "unresponsive_reward_error", "invalid_variable_error", "other_error", "python_grader_server_error", "python_grader_server_error_type", "python_grader_runtime_error", "python_grader_runtime_error_details", "model_grader_server_error", "model_grader_refusal_error", "model_grader_parse_error", "model_grader_server_error_details" ] }, "execution_time": { "type": "number", "format": "float" }, "scores": {}, "token_usage": { "type": "integer", "format": "int32", "nullable": true }, "sampled_model_name": { "type": "string", "nullable": true } }, "required": [ "name", "type", "errors", "execution_time", "scores", "token_usage", "sampled_model_name" ] }, "sub_rewards": {}, "model_grader_token_usage_per_model": {} } }, "OpenAI.StaticChunkingStrategy": { "type": 
"object", "required": [ "max_chunk_size_tokens", "chunk_overlap_tokens" ], "properties": { "max_chunk_size_tokens": { "type": "integer", "format": "int32", "minimum": 100, "maximum": 4096, "description": "The maximum number of tokens in each chunk. The default value is `800`. The minimum value is `100` and the maximum value is `4096`." }, "chunk_overlap_tokens": { "type": "integer", "format": "int32", "description": "The number of tokens that overlap between chunks. The default value is `400`.\n\nNote that the overlap must not exceed half of `max_chunk_size_tokens`." } } }, "OpenAI.StaticChunkingStrategyRequestParam": { "type": "object", "required": [ "type", "static" ], "properties": { "type": { "type": "string", "enum": [ "static" ], "description": "Always `static`." }, "static": { "$ref": "#/components/schemas/OpenAI.StaticChunkingStrategy" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyRequestParam" } ], "description": "Customize your own chunking strategy by setting chunk size and chunk overlap." }, "OpenAI.StaticChunkingStrategyResponseParam": { "type": "object", "required": [ "type", "static" ], "properties": { "type": { "type": "string", "enum": [ "static" ], "description": "Always `static`." }, "static": { "$ref": "#/components/schemas/OpenAI.StaticChunkingStrategy" } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyResponseParam" } ] }, "OpenAI.StopConfiguration": { "anyOf": [ { "type": "string" }, { "type": "array", "items": { "type": "string" } } ], "description": "Not supported with latest reasoning models `o3` and `o4-mini`.\n\nUp to 4 sequences where the API will stop generating further tokens. The\nreturned text will not contain the stop sequence." 
}, "OpenAI.Tool": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ToolType" } }, "discriminator": { "propertyName": "type", "mapping": { "function": "#/components/schemas/OpenAI.FunctionTool", "file_search": "#/components/schemas/OpenAI.FileSearchTool", "computer_use_preview": "#/components/schemas/OpenAI.ComputerUsePreviewTool", "web_search_preview": "#/components/schemas/OpenAI.WebSearchPreviewTool", "code_interpreter": "#/components/schemas/OpenAI.CodeInterpreterTool", "image_generation": "#/components/schemas/OpenAI.ImageGenTool", "local_shell": "#/components/schemas/OpenAI.LocalShellTool", "mcp": "#/components/schemas/OpenAI.MCPTool" } } }, "OpenAI.ToolChoiceObject": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.ToolChoiceObjectType" } }, "discriminator": { "propertyName": "type", "mapping": { "file_search": "#/components/schemas/OpenAI.ToolChoiceObjectFileSearch", "computer_use_preview": "#/components/schemas/OpenAI.ToolChoiceObjectComputer", "web_search_preview": "#/components/schemas/OpenAI.ToolChoiceObjectWebSearch", "image_generation": "#/components/schemas/OpenAI.ToolChoiceObjectImageGen", "code_interpreter": "#/components/schemas/OpenAI.ToolChoiceObjectCodeInterpreter", "function": "#/components/schemas/OpenAI.ToolChoiceObjectFunction", "mcp": "#/components/schemas/OpenAI.ToolChoiceObjectMCP" } } }, "OpenAI.ToolChoiceObjectCodeInterpreter": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "code_interpreter" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceObject" } ] }, "OpenAI.ToolChoiceObjectComputer": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "computer_use_preview" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceObject" } ] }, "OpenAI.ToolChoiceObjectFileSearch": { "type": "object", 
"required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "file_search" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceObject" } ] }, "OpenAI.ToolChoiceObjectFunction": { "type": "object", "required": [ "type", "name" ], "properties": { "type": { "type": "string", "enum": [ "function" ], "description": "For function calling, the type is always `function`." }, "name": { "type": "string", "description": "The name of the function to call." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceObject" } ], "description": "Use this option to force the model to call a specific function." }, "OpenAI.ToolChoiceObjectImageGen": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "image_generation" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceObject" } ] }, "OpenAI.ToolChoiceObjectMCP": { "type": "object", "required": [ "type", "server_label" ], "properties": { "type": { "type": "string", "enum": [ "mcp" ], "description": "For MCP tools, the type is always `mcp`." }, "server_label": { "type": "string", "description": "The label of the MCP server to use." }, "name": { "type": "string", "nullable": true, "description": "The name of the tool to call on the server." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceObject" } ], "description": "Use this option to force the model to call a specific tool on a remote MCP server." }, "OpenAI.ToolChoiceObjectType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "file_search", "function", "computer_use_preview", "web_search_preview", "image_generation", "code_interpreter", "mcp" ] } ], "description": "Indicates that the model should use a built-in tool to generate a response.\n[Learn more about built-in tools](/docs/guides/tools)." 
}, "OpenAI.ToolChoiceObjectWebSearch": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "web_search_preview" ] } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ToolChoiceObject" } ], "description": "Note: web_search is not yet available via Azure OpenAI." }, "OpenAI.ToolChoiceOptions": { "type": "string", "enum": [ "none", "auto", "required" ], "description": "Controls which (if any) tool is called by the model.\n\n`none` means the model will not call any tool and instead generates a message.\n\n`auto` means the model can pick between generating a message or calling one or\nmore tools.\n\n`required` means the model must call one or more tools." }, "OpenAI.ToolType": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "file_search", "function", "computer_use_preview", "web_search_preview", "mcp", "code_interpreter", "image_generation", "local_shell" ] } ], "description": "A tool that can be used to generate a response." }, "OpenAI.TopLogProb": { "type": "object", "required": [ "token", "logprob", "bytes" ], "properties": { "token": { "type": "string" }, "logprob": { "type": "number", "format": "float" }, "bytes": { "type": "array", "items": { "type": "integer", "format": "int32" } } }, "description": "The top log probability of a token." }, "OpenAI.UpdateVectorStoreFileAttributesRequest": { "type": "object", "required": [ "attributes" ], "properties": { "attributes": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" } ], "nullable": true } } }, "OpenAI.UpdateVectorStoreRequest": { "type": "object", "properties": { "name": { "type": "string", "nullable": true, "description": "The name of the vector store." 
}, "expires_after": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreExpirationAfter" } ], "nullable": true }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" } } }, "OpenAI.ValidateGraderRequest": { "type": "object", "required": [ "grader" ], "properties": { "grader": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.GraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.GraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.GraderPython" }, { "$ref": "#/components/schemas/OpenAI.GraderScoreModel" }, { "$ref": "#/components/schemas/OpenAI.GraderMulti" } ], "description": "The grader used for the fine-tuning job." } } }, "OpenAI.ValidateGraderResponse": { "type": "object", "properties": { "grader": { "anyOf": [ { "$ref": "#/components/schemas/OpenAI.GraderStringCheck" }, { "$ref": "#/components/schemas/OpenAI.GraderTextSimilarity" }, { "$ref": "#/components/schemas/OpenAI.GraderPython" }, { "$ref": "#/components/schemas/OpenAI.GraderScoreModel" }, { "$ref": "#/components/schemas/OpenAI.GraderMulti" } ], "description": "The grader used for the fine-tuning job." } } }, "OpenAI.VectorStoreExpirationAfter": { "type": "object", "required": [ "anchor", "days" ], "properties": { "anchor": { "type": "string", "enum": [ "last_active_at" ], "description": "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`." }, "days": { "type": "integer", "format": "int32", "minimum": 1, "maximum": 365, "description": "The number of days after the anchor time that the vector store will expire." 
} }, "description": "The expiration policy for a vector store." }, "OpenAI.VectorStoreFileAttributes": { "type": "object", "additionalProperties": { "anyOf": [ { "type": "string" }, { "type": "boolean" }, { "type": "integer", "format": "int32" }, { "type": "number", "format": "float" } ] }, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard. Keys are strings\nwith a maximum length of 64 characters. Values are strings with a maximum\nlength of 512 characters, booleans, or numbers.", "x-oaiTypeLabel": "map" }, "OpenAI.VectorStoreFileBatchObject": { "type": "object", "required": [ "id", "object", "created_at", "vector_store_id", "status", "file_counts" ], "properties": { "id": { "type": "string", "description": "The identifier, which can be referenced in API endpoints." }, "object": { "type": "string", "enum": [ "vector_store.files_batch" ], "description": "The object type, which is always `vector_store.file_batch`." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the vector store files batch was created." }, "vector_store_id": { "type": "string", "description": "The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "cancelled", "failed" ], "description": "The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled` or `failed`." }, "file_counts": { "type": "object", "properties": { "in_progress": { "type": "integer", "format": "int32", "description": "The number of files that are currently being processed." }, "completed": { "type": "integer", "format": "int32", "description": "The number of files that have been processed." 
}, "failed": { "type": "integer", "format": "int32", "description": "The number of files that have failed to process." }, "cancelled": { "type": "integer", "format": "int32", "description": "The number of files that where cancelled." }, "total": { "type": "integer", "format": "int32", "description": "The total number of files." } }, "required": [ "in_progress", "completed", "failed", "cancelled", "total" ] } }, "description": "A batch of files attached to a vector store." }, "OpenAI.VectorStoreFileObject": { "type": "object", "required": [ "id", "object", "usage_bytes", "created_at", "vector_store_id", "status", "last_error" ], "properties": { "id": { "type": "string", "description": "The identifier, which can be referenced in API endpoints." }, "object": { "type": "string", "enum": [ "vector_store.file" ], "description": "The object type, which is always `vector_store.file`." }, "usage_bytes": { "type": "integer", "format": "int32", "description": "The total vector store usage in bytes. Note that this may be different from the original file size." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the vector store file was created." }, "vector_store_id": { "type": "string", "description": "The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to." }, "status": { "type": "string", "enum": [ "in_progress", "completed", "cancelled", "failed" ], "description": "The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use." }, "last_error": { "type": "object", "properties": { "code": { "type": "string", "enum": [ "server_error", "unsupported_file", "invalid_file" ], "description": "One of `server_error` or `rate_limit_exceeded`." 
}, "message": { "type": "string", "description": "A human-readable description of the error." } }, "required": [ "code", "message" ], "nullable": true, "description": "The last error associated with this vector store file. Will be `null` if there are no errors." }, "chunking_strategy": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.ChunkingStrategyResponseParam" } ], "description": "The strategy used to chunk the file." }, "attributes": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.VectorStoreFileAttributes" } ], "nullable": true } }, "description": "A list of files attached to a vector store." }, "OpenAI.VectorStoreObject": { "type": "object", "required": [ "id", "object", "created_at", "name", "usage_bytes", "file_counts", "status", "last_active_at", "metadata" ], "properties": { "id": { "type": "string", "description": "The identifier, which can be referenced in API endpoints." }, "object": { "type": "string", "enum": [ "vector_store" ], "description": "The object type, which is always `vector_store`." }, "created_at": { "type": "integer", "format": "unixtime", "description": "The Unix timestamp (in seconds) for when the vector store was created." }, "name": { "type": "string", "description": "The name of the vector store." }, "usage_bytes": { "type": "integer", "format": "int32", "description": "The total number of bytes used by the files in the vector store." }, "file_counts": { "type": "object", "properties": { "in_progress": { "type": "integer", "format": "int32", "description": "The number of files that are currently being processed." }, "completed": { "type": "integer", "format": "int32", "description": "The number of files that have been successfully processed." }, "failed": { "type": "integer", "format": "int32", "description": "The number of files that have failed to process." }, "cancelled": { "type": "integer", "format": "int32", "description": "The number of files that were cancelled." 
}, "total": { "type": "integer", "format": "int32", "description": "The total number of files." } }, "required": [ "in_progress", "completed", "failed", "cancelled", "total" ] }, "status": { "type": "string", "enum": [ "expired", "in_progress", "completed" ], "description": "The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use." }, "expires_after": { "$ref": "#/components/schemas/OpenAI.VectorStoreExpirationAfter" }, "expires_at": { "type": "integer", "format": "unixtime", "nullable": true, "description": "The Unix timestamp (in seconds) for when the vector store will expire." }, "last_active_at": { "type": "integer", "format": "unixtime", "nullable": true, "description": "The Unix timestamp (in seconds) for when the vector store was last active." }, "metadata": { "type": "object", "additionalProperties": { "type": "string" }, "nullable": true, "description": "Set of 16 key-value pairs that can be attached to an object. This can be\nuseful for storing additional information about the object in a structured\nformat, and querying for objects via API or the dashboard.\n\nKeys are strings with a maximum length of 64 characters. Values are strings\nwith a maximum length of 512 characters.", "x-oaiTypeLabel": "map" } }, "description": "A vector store is a collection of processed files can be used by the `file_search` tool." 
}, "OpenAI.VoiceIdsShared": { "anyOf": [ { "type": "string" }, { "type": "string", "enum": [ "alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse" ] } ] }, "OpenAI.WebSearchAction": { "type": "object", "required": [ "type" ], "properties": { "type": { "$ref": "#/components/schemas/OpenAI.WebSearchActionType" } }, "discriminator": { "propertyName": "type", "mapping": { "find": "#/components/schemas/OpenAI.WebSearchActionFind", "open_page": "#/components/schemas/OpenAI.WebSearchActionOpenPage", "search": "#/components/schemas/OpenAI.WebSearchActionSearch" } } }, "OpenAI.WebSearchActionFind": { "type": "object", "required": [ "type", "url", "pattern" ], "properties": { "type": { "type": "string", "enum": [ "find" ], "description": "The action type." }, "url": { "type": "string", "format": "uri", "description": "The URL of the page searched for the pattern." }, "pattern": { "type": "string", "description": "The pattern or text to search for within the page." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.WebSearchAction" } ], "description": "Action type \"find\": Searches for a pattern within a loaded page." }, "OpenAI.WebSearchActionOpenPage": { "type": "object", "required": [ "type", "url" ], "properties": { "type": { "type": "string", "enum": [ "open_page" ], "description": "The action type." }, "url": { "type": "string", "format": "uri", "description": "The URL opened by the model." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.WebSearchAction" } ], "description": "Action type \"open_page\" - Opens a specific URL from search results." }, "OpenAI.WebSearchActionSearch": { "type": "object", "required": [ "type", "query" ], "properties": { "type": { "type": "string", "enum": [ "search" ], "description": "The action type." }, "query": { "type": "string", "description": "The search query." 
} }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.WebSearchAction" } ], "description": "Action type \"search\" - Performs a web search query." }, "OpenAI.WebSearchActionType": { "type": "string", "enum": [ "search", "open_page", "find" ] }, "OpenAI.WebSearchPreviewTool": { "type": "object", "required": [ "type" ], "properties": { "type": { "type": "string", "enum": [ "web_search_preview" ], "description": "The type of the web search tool. One of `web_search_preview` or `web_search_preview_2025_03_11`." }, "user_location": { "type": "object", "allOf": [ { "$ref": "#/components/schemas/OpenAI.Location" } ], "nullable": true, "description": "The user's location." }, "search_context_size": { "type": "string", "enum": [ "low", "medium", "high" ], "description": "High level guidance for the amount of context window space to use for the search. One of `low`, `medium`, or `high`. `medium` is the default." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.Tool" } ], "description": "Note: web_search is not yet available via Azure OpenAI." }, "OpenAI.WebSearchToolCallItemParam": { "type": "object", "required": [ "type", "action" ], "properties": { "type": { "type": "string", "enum": [ "web_search_call" ] }, "action": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.WebSearchAction" } ], "description": "An object describing the specific action taken in this web search call.\nIncludes details on how the model used the web (search, open_page, find)." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemParam" } ], "description": "Note: web_search is not yet available via Azure OpenAI." }, "OpenAI.WebSearchToolCallItemResource": { "type": "object", "required": [ "type", "status", "action" ], "properties": { "type": { "type": "string", "enum": [ "web_search_call" ] }, "status": { "type": "string", "enum": [ "in_progress", "searching", "completed", "failed" ], "description": "The status of the web search tool call." 
}, "action": { "allOf": [ { "$ref": "#/components/schemas/OpenAI.WebSearchAction" } ], "description": "An object describing the specific action taken in this web search call.\nIncludes details on how the model used the web (search, open_page, find)." } }, "allOf": [ { "$ref": "#/components/schemas/OpenAI.ItemResource" } ], "description": "Note: web_search is not yet available via Azure OpenAI." }, "PineconeChatDataSource": { "type": "object", "required": [ "type", "parameters" ], "properties": { "type": { "type": "string", "enum": [ "pinecone" ], "description": "The discriminated type identifier, which is always 'pinecone'." }, "parameters": { "type": "object", "properties": { "top_n_documents": { "type": "integer", "format": "int32", "description": "The configured number of documents to feature in the query." }, "in_scope": { "type": "boolean", "description": "Whether queries should be restricted to use of the indexed data." }, "strictness": { "type": "integer", "format": "int32", "minimum": 1, "maximum": 5, "description": "The configured strictness of the search relevance filtering.\nHigher strictness will increase precision but lower recall of the answer." }, "max_search_queries": { "type": "integer", "format": "int32", "description": "The maximum number of rewritten queries that should be sent to the search provider for a single user message.\nBy default, the system will make an automatic determination." }, "allow_partial_result": { "type": "boolean", "description": "If set to true, the system will allow partial search results to be used and the request will fail if all\npartial queries fail. 
If not specified or specified as false, the request will fail if any search query fails.", "default": false }, "include_contexts": { "type": "array", "items": { "type": "string", "enum": [ "citations", "intent", "all_retrieved_documents" ] }, "maxItems": 3, "description": "The output context properties to include on the response.\nBy default, citations and intent will be requested.", "default": [ "citations", "intent" ] }, "environment": { "type": "string", "description": "The environment name to use with Pinecone." }, "index_name": { "type": "string", "description": "The name of the Pinecone database index to use." }, "authentication": { "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceApiKeyAuthenticationOptions" } ], "description": "The authentication mechanism to use with Pinecone.\nSupported authentication mechanisms for Pinecone include: API key." }, "embedding_dependency": { "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSourceVectorizationSource" } ], "description": "The vectorization source to use as an embedding dependency for the Pinecone data source.\nSupported vectorization sources for Pinecone include: deployment name." }, "fields_mapping": { "type": "object", "properties": { "content_fields": { "type": "array", "items": { "type": "string" } }, "title_field": { "type": "string" }, "url_field": { "type": "string" }, "filepath_field": { "type": "string" }, "content_fields_separator": { "type": "string" } }, "required": [ "content_fields" ], "description": "Field mappings to apply to data used by the Pinecone data source.\nNote that content field mappings are required for Pinecone." } }, "required": [ "environment", "index_name", "authentication", "embedding_dependency", "fields_mapping" ], "description": "The parameter information to control the use of the Pinecone data source." 
} }, "allOf": [ { "$ref": "#/components/schemas/AzureChatDataSource" } ] }, "ResponseFormatJSONSchemaRequest": { "type": "object", "required": [ "type", "json_schema" ], "properties": { "type": { "type": "string", "enum": [ "json_schema" ], "description": "Type of response format" }, "json_schema": { "type": "object", "additionalProperties": {}, "description": "JSON Schema for the response format" } } }, "ResponseModalities": { "type": "array", "items": { "type": "string", "enum": [ "text", "audio" ] }, "description": "Output types that you would like the model to generate.\nMost models are capable of generating text, which is the default:\n\n`[\"text\"]`\n\nThe `gpt-4o-audio-preview` model can also be used to\n[generate audio](/docs/guides/audio). To request that this model generate\nboth text and audio responses, you can use:\n\n`[\"text\", \"audio\"]`" }, "CreateVideoBody": { "type": "object", "required": [ "model", "prompt" ], "properties": { "model": { "type": "string", "description": "The name of the deployment to use for this request." }, "prompt": { "type": "string", "minLength": 1, "description": "Text prompt that describes the video to generate." }, "seconds": { "allOf": [ { "$ref": "#/components/schemas/VideoSeconds" } ], "description": "Clip duration in seconds. Defaults to 4 seconds.", "default": "4" }, "size": { "allOf": [ { "$ref": "#/components/schemas/VideoSize" } ], "description": "Output resolution formatted as width x height. Defaults to 720x1280.", "default": "720x1280" } } }, "CreateVideoBodyWithInputReference": { "type": "object", "properties": { "model": { "type": "string", "description": "The name of the deployment to use for this request." }, "prompt": { "type": "string", "description": "Text prompt that describes the video to generate." }, "seconds": { "allOf": [ { "$ref": "#/components/schemas/VideoSeconds" } ], "description": "Clip duration in seconds. Defaults to 4 seconds." 
}, "size": { "allOf": [ { "$ref": "#/components/schemas/VideoSize" } ], "description": "Output resolution formatted as width x height. Defaults to 720x1280." }, "input_reference": { "type": "string", "format": "binary", "description": "Optional image reference that guides generation.", "x-oaiTypeLabel": "file" } }, "required": [ "model", "prompt", "input_reference" ] }, "DeletedVideoResource": { "type": "object", "required": [ "object", "deleted", "id" ], "properties": { "object": { "type": "string", "description": "The object type that signals the deletion response.", "default": "video.deleted" }, "deleted": { "type": "boolean", "description": "Indicates that the video resource was deleted.", "default": true }, "id": { "type": "string", "description": "Identifier of the deleted video." } }, "description": "Confirmation payload returned after deleting a video." }, "Error": { "type": "object", "required": [ "code", "message" ], "properties": { "code": { "type": "string" }, "message": { "type": "string" } } }, "Order": { "type": "string", "enum": [ "asc", "desc" ] }, "VideoContentVariant": { "anyOf": [ { "type": "string", "enum": [ "video", "thumbnail", "spritesheet" ] }, { "type": "string" } ], "description": "Selectable asset variants for downloaded content." }, "VideoList": { "type": "object", "required": [ "object", "data", "has_more" ], "properties": { "object": { "type": "string", "enum": [ "list" ] }, "data": { "type": "array", "items": { "$ref": "#/components/schemas/VideoResource" }, "description": "The list of video generation jobs." }, "has_more": { "type": "boolean", "description": "A flag indicating whether there are more jobs available after the list." }, "first_id": { "type": "string", "description": "The ID of the first video in the current page, if available." }, "last_id": { "type": "string", "description": "The ID of the last video in the current page, if available." } }, "description": "A list of video generation jobs." 
}, "VideoResource": { "type": "object", "required": [ "id", "object", "model", "status", "progress", "created_at", "size", "seconds" ], "properties": { "id": { "type": "string", "description": "Unique identifier for the video job." }, "object": { "type": "string", "description": "The object type, which is always `video`." }, "model": { "type": "string", "description": "The video generation model deployment that produced the job." }, "status": { "allOf": [ { "$ref": "#/components/schemas/VideoStatus" } ], "description": "Current lifecycle status of the video job." }, "progress": { "type": "integer", "format": "int32", "description": "Approximate completion percentage for the generation task." }, "created_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (seconds) for when the job was created." }, "completed_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (seconds) for when the job completed, if finished." }, "expires_at": { "type": "integer", "format": "unixtime", "description": "Unix timestamp (seconds) for when the video generation expires (and will be deleted)." }, "size": { "allOf": [ { "$ref": "#/components/schemas/VideoSize" } ], "description": "The resolution of the generated video." }, "seconds": { "allOf": [ { "$ref": "#/components/schemas/VideoSeconds" } ], "description": "Duration of the generated clip in seconds." }, "remixed_from_video_id": { "type": "string", "description": "Identifier of the source video if this video is a remix." }, "error": { "allOf": [ { "$ref": "#/components/schemas/Error" } ], "description": "Error payload that explains why generation failed, if applicable." } }, "description": "Structured information describing a generated video job." }, "VideoSeconds": { "anyOf": [ { "type": "string", "enum": [ "4", "8", "12" ] }, { "type": "string" } ], "description": "Supported clip durations, measured in seconds." 
}, "VideoSize": { "anyOf": [ { "type": "string", "enum": [ "720x1280", "1280x720", "1024x1792", "1792x1024" ] }, { "type": "string" } ], "description": "Output dimensions formatted as `{width}x{height}`." }, "VideoStatus": { "anyOf": [ { "type": "string", "enum": [ "queued", "in_progress", "completed", "failed" ] }, { "type": "string" } ], "description": "Lifecycle state of a generated video." } }, "securitySchemes": { "ApiKeyAuth": { "type": "apiKey", "in": "header", "name": "api-key" }, "ApiKeyAuth_": { "type": "apiKey", "in": "header", "name": "authorization" }, "OAuth2Auth": { "type": "oauth2", "flows": { "implicit": { "authorizationUrl": "https://login.microsoftonline.com/common/oauth2/v2.0/authorize", "scopes": { "https://cognitiveservices.azure.com/.default": "" } } } } } }, "servers": [ { "url": "{endpoint}/openai/v1", "description": "Azure AI Foundry Models APIs", "variables": { "endpoint": { "default": "", "description": "A supported Azure AI Foundry Models APIs endpoint, including protocol and hostname.\nFor example:\nhttps://westus.api.cognitive.microsoft.com)." } } } ] }